dlr: add PREDICT_AS_ONE

Starbeamrainbowlabs 2023-06-16 18:23:40 +01:00
parent 4bbc4c29c4
commit 7869505cfb
Signed by: sbrl
GPG key ID: 1BE5172E637709C2
2 changed files with 37 additions and 22 deletions

View file

@@ -46,6 +46,7 @@ show_help() {
 echo -e " RANDSEED The random seed to use when shuffling filepaths. Default: unset, which means use a random value." >&2;
 echo -e " JIT_COMPILE Set to any value to compile the model with XLA." >&2;
 echo -e " PREDICT_COUNT The number of items from the (SCRAMBLED) dataset to make a prediction for." >&2;
+echo -e " PREDICT_AS_ONE [prediction only] Set to any value to avoid splitting the input dataset into training/validation and instead treat it as a single dataset. Default: False (treat it as training/validation)" >&2;
 echo -e " POSTFIX Postfix to append to the output dir (auto calculated)." >&2;
 echo -e " ARGS Optional. Any additional arguments to pass to the python program." >&2;
 echo -e "" >&2;
@@ -75,7 +76,7 @@ echo -e ">>> DIR_OUTPUT: ${DIR_OUTPUT}";
 echo -e ">>> Additional args: ${ARGS}";
 export PATH=$HOME/software/bin:$PATH;
-export IMAGE_SIZE BATCH_SIZE DIR_RAINFALLWATER PATH_HEIGHTMAP PATH_COLOURMAP STEPS_PER_EPOCH DIR_OUTPUT PATH_CHECKPOINT EPOCHS PREDICT_COUNT NO_REMOVE_ISOLATED_PIXELS LOSS LEARNING_RATE DICE_LOG_COSH WATER_THRESHOLD UPSAMPLE STEPS_PER_EXECUTION JIT_COMPILE RANDSEED;
+export IMAGE_SIZE BATCH_SIZE DIR_RAINFALLWATER PATH_HEIGHTMAP PATH_COLOURMAP STEPS_PER_EPOCH DIR_OUTPUT PATH_CHECKPOINT EPOCHS PREDICT_COUNT NO_REMOVE_ISOLATED_PIXELS LOSS LEARNING_RATE DICE_LOG_COSH WATER_THRESHOLD UPSAMPLE STEPS_PER_EXECUTION JIT_COMPILE RANDSEED PREDICT_AS_ONE;
 echo ">>> Installing requirements";
 conda run -n py38 pip install -q -r requirements.txt;

View file

@@ -20,7 +20,7 @@ import matplotlib.pyplot as plt
 import tensorflow as tf
-from lib.dataset.dataset_mono import dataset_mono
+from lib.dataset.dataset_mono import dataset_mono, dataset_mono_predict
 from lib.ai.components.LossCrossEntropyDice import LossCrossEntropyDice
 from lib.ai.components.MetricDice import metric_dice_coefficient as dice_coefficient
 from lib.ai.components.MetricSensitivity import make_sensitivity as sensitivity
@@ -59,6 +59,7 @@ DIR_OUTPUT=os.environ["DIR_OUTPUT"] if "DIR_OUTPUT" in os.environ else f"output/
 PATH_CHECKPOINT = os.environ["PATH_CHECKPOINT"] if "PATH_CHECKPOINT" in os.environ else None
 PREDICT_COUNT = int(os.environ["PREDICT_COUNT"]) if "PREDICT_COUNT" in os.environ else 25
+PREDICT_AS_ONE = True if "PREDICT_AS_ONE" in os.environ else False
 # ~~~
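
A note on the semantics of the new line: the check is for the variable's presence only, its value is never inspected, so exporting PREDICT_AS_ONE with any value at all (even an empty string) enables the mode, and only leaving it unset keeps the default of False. A standalone illustration of that behaviour, separate from the script itself:

import os

# Presence check only: even an empty value counts as "set" and enables the mode;
# only leaving the variable out of the environment keeps the default of False.
os.environ["PREDICT_AS_ONE"] = ""  # pretend the job script exported an empty value
PREDICT_AS_ONE = True if "PREDICT_AS_ONE" in os.environ else False
print(PREDICT_AS_ONE)  # True, despite the empty value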
@@ -68,7 +69,7 @@ if not os.path.exists(DIR_OUTPUT):
 # ~~~
 logger.info("DeepLabV3+ rainfall radar TEST")
-for env_name in [ "BATCH_SIZE","NUM_CLASSES", "DIR_RAINFALLWATER", "PATH_HEIGHTMAP", "PATH_COLOURMAP", "STEPS_PER_EPOCH", "REMOVE_ISOLATED_PIXELS", "EPOCHS", "LOSS", "LEARNING_RATE", "DIR_OUTPUT", "PATH_CHECKPOINT", "PREDICT_COUNT", "DICE_LOG_COSH", "WATER_THRESHOLD", "UPSAMPLE", "STEPS_PER_EXECUTION", "JIT_COMPILE" ]:
+for env_name in [ "BATCH_SIZE","NUM_CLASSES", "DIR_RAINFALLWATER", "PATH_HEIGHTMAP", "PATH_COLOURMAP", "STEPS_PER_EPOCH", "REMOVE_ISOLATED_PIXELS", "EPOCHS", "LOSS", "LEARNING_RATE", "DIR_OUTPUT", "PATH_CHECKPOINT", "PREDICT_COUNT", "DICE_LOG_COSH", "WATER_THRESHOLD", "UPSAMPLE", "STEPS_PER_EXECUTION", "JIT_COMPILE", "PREDICT_AS_ONE" ]:
 	logger.info(f"> {env_name} {str(globals()[env_name])}")
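
The new name also has to be appended to the env_name list above because the logging loop resolves each listed name through globals(); a name with no matching module-level variable would raise a KeyError. A small standalone sketch of that pattern (the variable values are made up for illustration):

# Standalone sketch of the logging loop: every listed name must exist as a
# module-level variable, since globals() is just the module's namespace dict.
BATCH_SIZE = 64          # assumed value for illustration
PREDICT_AS_ONE = False   # assumed value for illustration

for env_name in ["BATCH_SIZE", "PREDICT_AS_ONE"]:
	print(f"> {env_name} {globals()[env_name]}")

# globals()["SOME_MISSING_NAME"] would raise KeyError, hence the list edit here.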
@@ -78,20 +79,32 @@ for env_name in [ "BATCH_SIZE","NUM_CLASSES", "DIR_RAINFALLWATER", "PATH_HEIGHTM
 # ██ ██ ██ ██ ██ ██ ██ ██ ██ ██
 # ██████ ██ ██ ██ ██ ██ ███████ ███████ ██
-dataset_train, dataset_validate = dataset_mono(
-	dirpath_input=DIR_RAINFALLWATER,
-	batch_size=BATCH_SIZE,
-	water_threshold=WATER_THRESHOLD,
-	rainfall_scale_up=2, # done BEFORE cropping to the below size
-	output_size=IMAGE_SIZE,
-	input_size="same",
-	filepath_heightmap=PATH_HEIGHTMAP,
-	do_remove_isolated_pixels=REMOVE_ISOLATED_PIXELS
-)
-
-logger.info("Train Dataset:", dataset_train)
-logger.info("Validation Dataset:", dataset_validate)
+if not PREDICT_AS_ONE:
+	dataset_train, dataset_validate = dataset_mono(
+		dirpath_input=DIR_RAINFALLWATER,
+		batch_size=BATCH_SIZE,
+		water_threshold=WATER_THRESHOLD,
+		rainfall_scale_up=2, # done BEFORE cropping to the below size
+		output_size=IMAGE_SIZE,
+		input_size="same",
+		filepath_heightmap=PATH_HEIGHTMAP,
+		do_remove_isolated_pixels=REMOVE_ISOLATED_PIXELS
+	)
+	
+	logger.info("Train Dataset:", dataset_train)
+	logger.info("Validation Dataset:", dataset_validate)
+else:
+	dataset_train = dataset_mono_predict(
+		dirpath_input=DIR_RAINFALLWATER,
+		batch_size=BATCH_SIZE,
+		water_threshold=WATER_THRESHOLD,
+		rainfall_scale_up=2, # done BEFORE cropping to the below size
+		output_size=IMAGE_SIZE,
+		input_size="same",
+		filepath_heightmap=PATH_HEIGHTMAP,
+		do_remove_isolated_pixels=REMOVE_ISOLATED_PIXELS
+	)
+	logger.info("Dataset AS_ONE:", dataset_train)
 # ███ ███ ██████ ██████ ███████ ██
 # ████ ████ ██ ██ ██ ██ ██ ██
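
For context, dataset_mono_predict is assumed here to build the same tf.data pipeline as dataset_mono, just without carving the shuffled filepath list into train/validation subsets; the real implementations live in lib/dataset/dataset_mono.py and may differ in detail. A rough, self-contained sketch of that split-vs-no-split idea (function names prefixed sketch_, the file extension, and the helper are all made up for illustration):

import glob, os, random
import tensorflow as tf

def _pipeline(filepaths, batch_size):
	# Stand-in for the real record parsing/augmentation pipeline.
	return tf.data.TFRecordDataset(filepaths).batch(batch_size)

def sketch_dataset_mono(dirpath_input, batch_size=64, percentage_validate=0.2):
	filepaths = sorted(glob.glob(os.path.join(dirpath_input, "*.tfrecord")))  # extension assumed
	random.shuffle(filepaths)
	split = int(len(filepaths) * (1 - percentage_validate))
	return _pipeline(filepaths[:split], batch_size), _pipeline(filepaths[split:], batch_size)

def sketch_dataset_mono_predict(dirpath_input, batch_size=64):
	filepaths = sorted(glob.glob(os.path.join(dirpath_input, "*.tfrecord")))  # extension assumed
	random.shuffle(filepaths)
	# Every file goes into ONE dataset: there is no dataset_validate to
	# reference later, hence the extra guard further down in this commit.
	return _pipeline(filepaths, batch_size)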
@@ -372,11 +385,12 @@ plot_predictions(
 	colormap,
 	model=model
 )
-plot_predictions(
-	os.path.join(DIR_OUTPUT, "predict_validate_$$.png"),
-	get_from_batched(dataset_validate, PREDICT_COUNT),
-	colormap,
-	model=model
-)
+if not PREDICT_AS_ONE:
+	plot_predictions(
+		os.path.join(DIR_OUTPUT, "predict_validate_$$.png"),
+		get_from_batched(dataset_validate, PREDICT_COUNT),
+		colormap,
+		model=model
+	)
 logger.info(f"Complete at {str(datetime.now().isoformat())}, elapsed {str((datetime.now() - time_start).total_seconds())} seconds")