From afc1cdcf02f8d0a3a93af2e856ad5b8243e8f377 Mon Sep 17 00:00:00 2001
From: Starbeamrainbowlabs
Date: Thu, 24 Nov 2022 19:02:58 +0000
Subject: [PATCH] fixup

---
 aimodel/src/subcommands/rainfall_stats.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/aimodel/src/subcommands/rainfall_stats.py b/aimodel/src/subcommands/rainfall_stats.py
index 56c4af5..ffa5fe5 100755
--- a/aimodel/src/subcommands/rainfall_stats.py
+++ b/aimodel/src/subcommands/rainfall_stats.py
@@ -19,8 +19,8 @@ def parse_args():
 	parser = argparse.ArgumentParser(description="Output water depth image segmentation maps using a given pretrained mono model.")
 	# parser.add_argument("--config", "-c", help="Filepath to the TOML config file to load.", required=True)
 	parser.add_argument("--input", "-i", help="Path to input directory containing the .tfrecord(.gz) files to predict for. If a single file is passed instead, then only that file will be converted.", required=True)
-	parser.add_argument("--reads-multiplier", help="Optional. The multiplier for the number of files we should read from at once. Defaults to 0. When using this start with 1.5, which means read ceil(NUMBER_OF_CORES * 1.5). Set to a higher number of systems with high read latency to avoid starving the GPU of data. SETTING THIS WILL SCRAMBLE THE ORDER OF THE DATASET.")
-	parser.add_argument("--batch-size", help="Optional. The batch size to calculate statistics with. Can be larger than normal since we don't have a model loaded. Default: 1024")
+	parser.add_argument("--reads-multiplier", help="Optional. The multiplier for the number of files we should read from at once. Defaults to 0. When using this start with 1.5, which means read ceil(NUMBER_OF_CORES * 1.5). Set to a higher number of systems with high read latency to avoid starving the GPU of data. SETTING THIS WILL SCRAMBLE THE ORDER OF THE DATASET.", type=int)
+	parser.add_argument("--batch-size", help="Optional. The batch size to calculate statistics with. Can be larger than normal since we don't have a model loaded. Default: 1024", type=int)
 	return parser
 
 def run(args):
@@ -30,7 +30,6 @@ def run(args):
 	if (not hasattr(args, "batch_size")) or args.batch_size == None:
 		args.batch_size = 1024
 	
-	sys.stderr.write(f"\n\n>>> This is TensorFlow {tf.__version__}\n\n\n")
 	
 	
 	# Note that if using a directory of input files, the output order is NOT GUARANTEED TO BE THE SAME. In fact, it probably won't be (see dataset_mono for more details).