diff --git a/aimodel/slurm-TEST-deeplabv3p-rainfall.job b/aimodel/slurm-TEST-deeplabv3p-rainfall.job
index 4d5fc22..6e0e792 100755
--- a/aimodel/slurm-TEST-deeplabv3p-rainfall.job
+++ b/aimodel/slurm-TEST-deeplabv3p-rainfall.job
@@ -32,6 +32,8 @@ show_help() {
 	echo -e "    PATH_COLOURMAP	The path to the colourmap for predictive purposes." >&2;
 	echo -e "    PATH_CHECKPOINT	The path to a checkcpoint to load. If specified, a model will be loaded instead of being trained." >&2;
 	echo -e "    STEPS_PER_EPOCH	The number of steps to consider an epoch. Defaults to None, which means use the entire dataset." >&2;
+	echo -e "    EPOCHS	The number of epochs to train for." >&2;
+	echo -e "    PREDICT_COUNT	The number of items from the (SCRAMBLED) dataset to make a prediction for." >&2;
 	echo -e "    POSTFIX	Postfix to append to the output dir (auto calculated)." >&2;
 	echo -e "    ARGS	Optional. Any additional arguments to pass to the python program." >&2;
 	echo -e "" >&2;
@@ -57,7 +59,7 @@ DIR_OUTPUT="output/$(date -u --rfc-3339=date)_${CODE}";
 echo -e ">>> Additional args: ${ARGS}";
 
 export PATH=$HOME/software/bin:$PATH;
-export IMAGE_SIZE BATCH_SIZE DIR_RAINFALLWATER PATH_HEIGHTMAP PATH_COLOURMAP STEPS_PER_EPOCH DIR_OUTPUT PATH_CHECKPOINT;
+export IMAGE_SIZE BATCH_SIZE DIR_RAINFALLWATER PATH_HEIGHTMAP PATH_COLOURMAP STEPS_PER_EPOCH DIR_OUTPUT PATH_CHECKPOINT EPOCHS;
 
 echo ">>> Installing requirements";
 conda run -n py38 pip install -q -r requirements.txt;
diff --git a/aimodel/src/deeplabv3_plus_test_rainfall.py b/aimodel/src/deeplabv3_plus_test_rainfall.py
index 1180ee3..336c802 100755
--- a/aimodel/src/deeplabv3_plus_test_rainfall.py
+++ b/aimodel/src/deeplabv3_plus_test_rainfall.py
@@ -25,6 +25,9 @@ DIR_RAINFALLWATER = os.environ["DIR_RAINFALLWATER"]
 PATH_HEIGHTMAP = os.environ["PATH_HEIGHTMAP"]
 PATH_COLOURMAP = os.environ["PATH_COLOURMAP"]
 STEPS_PER_EPOCH = int(os.environ["STEPS_PER_EPOCH"]) if "STEPS_PER_EPOCH" in os.environ else None
+EPOCHS = int(os.environ["EPOCHS"]) if "EPOCHS" in os.environ else 25
+PREDICT_COUNT = int(os.environ["PREDICT_COUNT"]) if "PREDICT_COUNT" in os.environ else 4
+
 
 DIR_OUTPUT=os.environ["DIR_OUTPUT"] if "DIR_OUTPUT" in os.environ else f"output/{datetime.utcnow().date().isoformat()}_deeplabv3plus_rainfall_TEST"
 
@@ -39,8 +42,10 @@ logger.info(f"> DIR_RAINFALLWATER {DIR_RAINFALLWATER}")
 logger.info(f"> PATH_HEIGHTMAP {PATH_HEIGHTMAP}")
 logger.info(f"> PATH_COLOURMAP {PATH_COLOURMAP}")
 logger.info(f"> STEPS_PER_EPOCH {STEPS_PER_EPOCH}")
+logger.info(f"> EPOCHS {EPOCHS}")
 logger.info(f"> DIR_OUTPUT {DIR_OUTPUT}")
 logger.info(f"> PATH_CHECKPOINT {PATH_CHECKPOINT}")
+logger.info(f"> PREDICT_COUNT {PREDICT_COUNT}")
 
 
 dataset_train, dataset_validate = dataset_mono(
@@ -153,7 +158,7 @@ if PATH_CHECKPOINT is None:
 	logger.info(">>> Beginning training")
 	history = model.fit(dataset_train,
 		validation_data=dataset_validate,
-		epochs=25,
+		epochs=EPOCHS,
 		callbacks=[
 			tf.keras.callbacks.CSVLogger(
 				filename=os.path.join(DIR_OUTPUT, "metrics.tsv"),
@@ -287,13 +292,13 @@ def get_from_batched(dataset, count):
 
 plot_predictions(
 	os.path.join(DIR_OUTPUT, "predict_train_$$.png"),
-	get_from_batched(dataset_train, 4),
+	get_from_batched(dataset_train, PREDICT_COUNT),
 	colormap,
 	model=model
 )
 plot_predictions(
 	os.path.join(DIR_OUTPUT, "predict_validate_$$.png"),
-	get_from_batched(dataset_validate, 4),
+	get_from_batched(dataset_validate, PREDICT_COUNT),
 	colormap,
 	model=model
 )
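
For reference, a minimal sketch of how the two new settings might be supplied at submission time. It assumes the variables are exported in the submitting shell and that sbatch propagates the caller's environment to the job (its default --export=ALL behaviour); the values shown are placeholders, not project defaults. Note that the updated export line in the job script forwards EPOCHS but not PREDICT_COUNT, so PREDICT_COUNT is assumed here to reach the Python process via that same environment propagation.

    # Hypothetical submission example; values are placeholders.
    # When unset, the Python script falls back to EPOCHS=25 and PREDICT_COUNT=4.
    export EPOCHS=50
    export PREDICT_COUNT=8
    sbatch aimodel/slurm-TEST-deeplabv3p-rainfall.job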