Mirror of https://github.com/sbrl/research-rainfallradar, synced 2024-12-22 14:15:01 +00:00
Fix lots of ruff linter warnings
commit f8a1e1b594 (parent 0260e626db)
19 changed files with 31 additions and 56 deletions
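The commit message names no specific rules, so the following is my reading of the diff: the changes below correspond to ruff's E711/E712 (comparisons to None/True written with ==), an E721-style type comparison (type_class == bool), F401 (unused imports, here sometimes commented out rather than deleted), F541 (f-strings with no placeholders), and one genuine behaviour bug surfaced by the unused-variable check (the prefix in print_all). In miniature, each class of warning looks like this (names here are illustrative, not from the repo):

import math                      # F401: imported but unused

def check(value, flag):
	if value == None:            # E711: use `is None`
		return f"empty"          # F541: f-string without placeholders
	if flag == True:             # E712: use `is True` (or simply `if flag:`)
		return "set"
	return str(value)

The fixed forms appear hunk by hunk below.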
@@ -6,8 +6,8 @@ import tensorflow as tf
 
 from ..dataset.batched_iterator import batched_iterator
 
-from ..io.find_paramsjson import find_paramsjson
-from ..io.readfile import readfile
+# from ..io.find_paramsjson import find_paramsjson
+# from ..io.readfile import readfile
 from ..io.writefile import writefile
 
 from .model_rainfallwater_contrastive import model_rainfallwater_contrastive
@@ -16,7 +16,7 @@ from .helpers import summarywriter
 from .components.LayerContrastiveEncoder import LayerContrastiveEncoder
 from .components.LayerConvNeXtGamma import LayerConvNeXtGamma
 from .components.LayerCheeseMultipleOut import LayerCheeseMultipleOut
-from .helpers.summarywriter import summarywriter
+
 
 class RainfallWaterContraster(object):
 	def __init__(self, dir_output=None, filepath_checkpoint=None, epochs=50, batch_size=64, **kwargs):
@@ -28,8 +28,8 @@ class RainfallWaterContraster(object):
 		self.batch_size = batch_size
 
 
-		if filepath_checkpoint == None:
-			if self.dir_output == None:
+		if filepath_checkpoint is None:
+			if self.dir_output is None:
 				raise Exception("Error: dir_output was not specified, and since no checkpoint was loaded training mode is activated.")
 			if not os.path.exists(self.dir_output):
 				os.mkdir(self.dir_output)
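Why `is None` matters beyond style: `==` dispatches to `__eq__`, which a class can overload (NumPy arrays and TensorFlow tensors both do), so `x == None` may not even return a plain bool; `is` always performs a plain identity check. A small illustration:

import numpy as np

x = np.array([1, 2, 3])
print(x == None)   # element-wise comparison: array([False, False, False])
print(x is None)   # a single, unambiguous bool: False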
@@ -4,10 +4,10 @@ import json
 from loguru import logger
 import tensorflow as tf
 
-from ..dataset.batched_iterator import batched_iterator
+# from ..dataset.batched_iterator import batched_iterator
 
-from ..io.find_paramsjson import find_paramsjson
-from ..io.readfile import readfile
+# from ..io.find_paramsjson import find_paramsjson
+# from ..io.readfile import readfile
 from ..io.writefile import writefile
 
 from .model_rainfallwater_mono import model_rainfallwater_mono
@@ -16,7 +16,6 @@ from .helpers import summarywriter
 from .components.LayerConvNeXtGamma import LayerConvNeXtGamma
 from .components.LayerStack2Image import LayerStack2Image
 from .components.LossCrossentropy import LossCrossentropy
-from .helpers.summarywriter import summarywriter
 
 class RainfallWaterMono(object):
 	def __init__(self, dir_output=None, filepath_checkpoint=None, epochs=50, batch_size=64, **kwargs):
@@ -28,8 +27,8 @@ class RainfallWaterMono(object):
 		self.batch_size = batch_size
 
 
-		if filepath_checkpoint == None:
-			if self.dir_output == None:
+		if filepath_checkpoint is None:
+			if self.dir_output is None:
 				raise Exception("Error: dir_output was not specified, and since no checkpoint was loaded training mode is activated.")
 			if not os.path.exists(self.dir_output):
 				os.mkdir(self.dir_output)
@@ -4,10 +4,10 @@ import json
 from loguru import logger
 import tensorflow as tf
 
-from ..dataset.batched_iterator import batched_iterator
+# from ..dataset.batched_iterator import batched_iterator
 
-from ..io.find_paramsjson import find_paramsjson
-from ..io.readfile import readfile
+# from ..io.find_paramsjson import find_paramsjson
+# from ..io.readfile import readfile
 from ..io.writefile import writefile
 
 from .model_rainfallwater_segmentation import model_rainfallwater_segmentation
@@ -15,7 +15,6 @@ from .helpers import make_callbacks
 from .helpers import summarywriter
 from .components.LayerConvNeXtGamma import LayerConvNeXtGamma
 from .components.LayerStack2Image import LayerStack2Image
-from .helpers.summarywriter import summarywriter
 
 class RainfallWaterSegmenter(object):
 	def __init__(self, dir_output=None, filepath_checkpoint=None, epochs=50, batch_size=64, **kwargs):
@@ -27,8 +26,8 @@ class RainfallWaterSegmenter(object):
 		self.batch_size = batch_size
 
 
-		if filepath_checkpoint == None:
-			if self.dir_output == None:
+		if filepath_checkpoint is None:
+			if self.dir_output is None:
 				raise Exception("Error: dir_output was not specified, and since no checkpoint was loaded training mode is activated.")
 			if not os.path.exists(self.dir_output):
 				os.mkdir(self.dir_output)
@@ -27,7 +27,7 @@ class CallbackExtraValidation(tf.keras.callbacks.Callback):
 		self.verbose = verbose
 
 	def on_epoch_end(self, epoch, logs=None):
-		if logs == None:
+		if logs is None:
 			logger.warning(
 				"[CallbackExtraValidation] logs is None! Can't do anything here.")
 			return False
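For context, Keras passes `on_epoch_end` a dict of the epoch's metrics via `logs`, and the guard above covers the edge case where nothing is supplied. A minimal callback in the same shape (a simplified sketch; the real CallbackExtraValidation does more than this):

import tensorflow as tf
from loguru import logger

class CallbackSketch(tf.keras.callbacks.Callback):
	def on_epoch_end(self, epoch, logs=None):
		if logs is None:
			logger.warning("logs is None! Can't do anything here.")
			return
		# With logs present, extra entries show up in the training History
		logs["epoch_parity"] = epoch % 2  # hypothetical extra metric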
@@ -1,4 +1,3 @@
-import math
 
 import tensorflow as tf
 
@@ -1,5 +1,3 @@
-import math
-
 import tensorflow as tf
 
 def dice_coef(y_true, y_pred, smooth=100):
@@ -27,7 +25,7 @@ def dice_coef_loss(y_true, y_pred, **kwargs):
 		y_true (Tensor): The ground truth
 		y_pred (Tensor): The predicted output.
 	Returns:
-		Tensor: The Dice coefficient, but as a loss value that decreases instead fo increases as the model learns.
+		Tensor: The Dice coefficient, but as a loss value that decreases instead of increases as the model learns.
 	"""
 	return 1 - dice_coef(y_true, y_pred, **kwargs)
 
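The body of dice_coef is not part of this diff. For reference, a common formulation consistent with the `smooth=100` signature above — an assumption, not necessarily the repo's exact code — is:

import tensorflow as tf

def dice_coef(y_true, y_pred, smooth=100):
	# Dice = (2·|X ∩ Y| + smooth) / (|X| + |Y| + smooth), on flattened masks.
	# The smoothing term keeps the ratio defined when both masks are empty.
	y_true_f = tf.reshape(y_true, [-1])
	y_pred_f = tf.reshape(y_pred, [-1])
	intersection = tf.reduce_sum(y_true_f * y_pred_f)
	return (2.0 * intersection + smooth) / (tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred, **kwargs):
	# Inverted so the value decreases as overlap improves, as the docstring says
	return 1 - dice_coef(y_true, y_pred, **kwargs)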
@@ -1,5 +1,3 @@
-import math
-
 import tensorflow as tf
 
 
@@ -1,5 +1,3 @@
-import math
-
 import tensorflow as tf
 
 
@@ -1,5 +1,3 @@
-import math
-
 import tensorflow as tf
 
 
@@ -1,5 +1,3 @@
-import math
-
 import tensorflow as tf
 
 
@@ -1,6 +1,3 @@
-import math
-
-from curses import meta
 from loguru import logger
 import tensorflow as tf
 
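These module-head hunks all drop imports that nothing references, the `from curses import meta` line being the clearest case — it reads like a stray editor auto-import, since curses has no business in a TensorFlow layer. Ruff reports these as F401 (rule number assumed); a minimal reproduction:

import math              # F401: `math` is never referenced below

import tensorflow as tf

print(tf.constant(1.0))  # only `tf` is used, so only `tf` survives the lint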
@@ -1,5 +1,3 @@
-import math
-
 from loguru import logger
 import tensorflow as tf
 
@@ -1,5 +1,3 @@
-import math
-
 from loguru import logger
 import tensorflow as tf
 
@@ -1,6 +1,5 @@
 import os
 import math
-import json
 
 from loguru import logger
 
@@ -8,7 +7,6 @@ import tensorflow as tf
 
 from lib.dataset.read_metadata import read_metadata
 
-from ..io.readfile import readfile
 from .primitives.shuffle import shuffle
 
 
@@ -63,7 +61,7 @@ def make_dataset(filepaths, metadata, shape_water_desired=[100,100], compression
 	dataset = dataset.shuffle(shuffle_buffer_size)
 	dataset = dataset.map(parse_item(metadata, shape_water_desired=shape_water_desired, dummy_label=dummy_label), num_parallel_calls=tf.data.AUTOTUNE)
 
-	if batch_size != None:
+	if batch_size is not None:
 		dataset = dataset.batch(batch_size, drop_remainder=True)
 	if prefetch:
 		dataset = dataset.prefetch(0 if "NO_PREFETCH" in os.environ else tf.data.AUTOTUNE)
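Every make_dataset variant touched in this commit follows the same tf.data recipe: shuffle, parse in parallel, optionally batch, then prefetch. A self-contained sketch of that shape, with a stand-in parse function — make_pipeline and its arguments are illustrative names, not the repo's API:

import os
import tensorflow as tf

def make_pipeline(dataset, parse_fn, batch_size=64, prefetch=True, shuffle_buffer_size=128):
	dataset = dataset.shuffle(shuffle_buffer_size)
	dataset = dataset.map(parse_fn, num_parallel_calls=tf.data.AUTOTUNE)
	if batch_size is not None:  # batch_size=None leaves items unbatched
		dataset = dataset.batch(batch_size, drop_remainder=True)
	if prefetch:
		# Same escape hatch as the diff above: NO_PREFETCH in the environment disables it
		dataset = dataset.prefetch(0 if "NO_PREFETCH" in os.environ else tf.data.AUTOTUNE)
	return dataset

ds = make_pipeline(tf.data.Dataset.range(1000), parse_fn=lambda x: x * 2)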
@@ -1,6 +1,5 @@
 import os
 import math
-import json
 
 from loguru import logger
 
@@ -8,9 +7,9 @@ import tensorflow as tf
 
 from lib.dataset.read_metadata import read_metadata
 
-from ..io.readfile import readfile
 from .primitives.shuffle import shuffle
 from .parse_heightmap import parse_heightmap
+from .dataset_mono import dataset_mono
 
 
 
@@ -125,7 +124,7 @@ def make_dataset(filepaths, compression_type="GZIP", parallel_reads_multiplier=3
 	# defaults = (33*33 + 1) * 2**16 * 8 = about 2.219GiB
 	dataset = dataset.shuffle(shuffle_buffer_size)
 
-	if batch_size != None:
+	if batch_size is not None:
 		dataset = dataset.batch(batch_size, drop_remainder=True)
 	if prefetch:
 		dataset = dataset.prefetch(0 if "NO_PREFETCH" in os.environ else tf.data.AUTOTUNE)
@@ -1,12 +1,10 @@
 import os
 import math
-import json
 
 from loguru import logger
 import tensorflow as tf
 
 from lib.dataset.read_metadata import read_metadata
-from ..io.readfile import readfile
 from .primitives.shuffle import shuffle
 
 
@@ -62,7 +60,7 @@ def make_dataset(filepaths, metadata, shape_water_desired=[100,100], water_thres
 	dataset = dataset.shuffle(shuffle_buffer_size)
 	dataset = dataset.map(parse_item(metadata, shape_water_desired=shape_water_desired, water_threshold=water_threshold), num_parallel_calls=tf.data.AUTOTUNE)
 
-	if batch_size != None:
+	if batch_size is not None:
 		dataset = dataset.batch(batch_size, drop_remainder=True)
 	if prefetch:
 		dataset = dataset.prefetch(0 if "NO_PREFETCH" in os.environ else tf.data.AUTOTUNE)
@@ -51,7 +51,7 @@ def read(name, type_class, default=SYM_RAISE_EXCEPTION):
 	"""
 
 	if name not in os.environ:
-		if type_class == bool and default == SYM_RAISE_EXCEPTION:
+		if type_class is bool and default == SYM_RAISE_EXCEPTION:
 			default = False
 		if default == SYM_RAISE_EXCEPTION:
 			raise Exception(f"Error: Environment variable {name} does not exist")
@@ -59,8 +59,8 @@ def read(name, type_class, default=SYM_RAISE_EXCEPTION):
 		return default
 
 	result = os.environ[name]
-	if type_class == bool:
-		result = False if default == True else True
+	if type_class is bool:
+		result = False if default is True else True
 	else:
 		result = type_class(result)
 
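Two rules meet in this hunk (rule numbers again inferred): an E721-style type comparison (`type_class == bool` becomes `is bool`) and E712 (`default == True` becomes `is True`). The boolean branch has unusual semantics worth spelling out: for a bool-typed setting, merely being *present* in the environment flips the default, and the variable's string value is ignored. Condensed into a runnable sketch:

import os

def read_bool_env(name, default=False):
	# Mirrors the logic above: presence flips the default; the value is ignored
	if name not in os.environ:
		return default
	return False if default is True else True

os.environ["SOME_FLAG"] = "0"      # hypothetical variable; note the value...
print(read_bool_env("SOME_FLAG"))  # ...prints True anyway: presence alone flips it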
@@ -99,7 +99,7 @@ def print_all(table=True):
 	for env in envs_read:
 		key, value, is_default = env
 		prefix = "* " if is_default else ""
-		print(f"> {key.ljust(width_name)} {value}")
+		print(f"> {prefix}{key.ljust(width_name)} {value}")
 	print(f"Total {len(envs_read)} values")
 	print("===================================")
 
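This one is a behaviour fix the linter surfaced rather than a style fix: prefix was computed but never interpolated, which ruff flags as an unused local variable (F841, my inference), and the remedy was to use it rather than delete it, so default values now get their marker in the listing:

prefix = "* "  # set when the value is still at its default
print(f"> {prefix}{'EXAMPLE_VAR'.ljust(20)} 42")  # the marker now actually prints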
@@ -1,8 +1,8 @@
-import os
+# import os
 
 import umap
 import umap.plot
-import numpy as np
+# import numpy as np
 import matplotlib.pylab as plt
 import pandas
 
@@ -24,7 +24,7 @@ def vis_embeddings(filepath_output, features):
 	umap.plot.points(dimreducer,
 		ax=axes["A"]
 	)
-	axes["A"].set_title(f"UMAP Dimensionality Reduction", fontsize=20)
+	axes["A"].set_title("UMAP Dimensionality Reduction", fontsize=20)
 
 	# 2: Parallel coordinates
 	dataframe = pandas.DataFrame(features)
@@ -39,7 +39,7 @@ def vis_embeddings(filepath_output, features):
 		sort_labels=True
 	)
 
-	axes["B"].set_title(f"Parallel coordinates plot", fontsize=20)
+	axes["B"].set_title("Parallel coordinates plot", fontsize=20)
 
 	plt.suptitle(f"RainfallContrastive embeddings | rainfall | E2 ConvNeXt | {len(features)} items", fontsize=28, weight="bold")
 	plt.savefig(filepath_output)
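The set_title edits in these plotting hunks look like ruff's F541 (f-string without any placeholders, rule number inferred): an `f` prefix with nothing to interpolate is inert, so it is dropped, while the plt.suptitle calls keep theirs because they embed real values:

model_code = "mono-e2"  # hypothetical stand-in for the real argument
bad_title = f"Predicted"                                         # F541: no placeholder
good_title = "Predicted"                                         # fixed
suptitle = f"Rainfall → Water depth prediction | {model_code}"   # placeholder present: keep the f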
@@ -17,11 +17,11 @@ def segmentation_plot(water_actual, water_predict, model_code, filepath_output):
 	figure, axes = plt.subplot_mosaic("AB", figsize=(width*px, height*px))
 
 	axes["A"].imshow(water_actual)
-	axes["A"].set_title(f"Actual", fontsize=20)
+	axes["A"].set_title("Actual", fontsize=20)
 
 
 	axes["B"].imshow(water_predict)
-	axes["B"].set_title(f"Predicted", fontsize=20)
+	axes["B"].set_title("Predicted", fontsize=20)
 
 
 	plt.suptitle(f"Rainfall → Water depth prediction | {model_code}", fontsize=28, weight="bold")