From 65a2e16a4cd321066b711dd305e0cab0911b07dc Mon Sep 17 00:00:00 2001
From: Starbeamrainbowlabs
Date: Fri, 20 Jan 2023 18:55:52 +0000
Subject: [PATCH] ds_eo: lower memory usage

---
 aimodel/src/lib/dataset/dataset_encoderonly.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/aimodel/src/lib/dataset/dataset_encoderonly.py b/aimodel/src/lib/dataset/dataset_encoderonly.py
index a529648..16946bf 100644
--- a/aimodel/src/lib/dataset/dataset_encoderonly.py
+++ b/aimodel/src/lib/dataset/dataset_encoderonly.py
@@ -116,9 +116,7 @@ def make_dataset(filepaths, compression_type="GZIP", parallel_reads_multiplier=3
 		compression_type=compression_type,
 		num_parallel_reads=math.ceil(os.cpu_count() * parallel_reads_multiplier) if parallel_reads_multiplier > 0 else None
 	)
-	if shuffle:
-		dataset = dataset.shuffle(128) # additional shuffle buffer to mix things up
-	
+	# If we want another shuffle buffer here, we'd need to split the map function which we don't really want to do
 	dataset = dataset.map(parse_item(heightmap=heightmap, **kwargs), num_parallel_calls=tf.data.AUTOTUNE) \
 		.unbatch()
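
Editor's note on why this lowers memory usage: tf.data.Dataset.shuffle(buffer_size) materialises a buffer of buffer_size elements, and because this shuffle sat before the .map()/.unbatch() stages, each buffered element was an entire pre-batched record rather than a single sample, so the buffer held 128 full batches at once. Below is a minimal standalone sketch of the before/after pipeline shape; the make_pipeline() function, the range()-based stand-in dataset, and the batch dimensions are all hypothetical illustrations, not the project's actual TFRecord reader or parse_item().

import tensorflow as tf

def make_pipeline(shuffle=False):
	# Stand-in for the TFRecord reader: each element is a whole batch of
	# 64 samples, mirroring the pre-batched records in the real dataset.
	dataset = tf.data.Dataset.range(1000).map(
		lambda i: tf.fill([64, 128], tf.cast(i, tf.float32)))
	if shuffle:
		# Before the patch: shuffle(128) keeps 128 elements resident in
		# memory. Each element is a full batch, so this buffer alone holds
		# 128 * 64 * 128 floats - the memory the patch reclaims.
		dataset = dataset.shuffle(128)
	# After the patch: batches stream straight through map + unbatch with
	# no extra element-level buffer; any shuffling is done upstream
	# (e.g. over the shard filepaths before the reader is constructed).
	return dataset.map(lambda x: x * 2.0,
		num_parallel_calls=tf.data.AUTOTUNE).unbatch()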