2022-09-07 16:45:38 +00:00
|
|
|
import math
|
|
|
|
|
|
|
|
from loguru import logger
|
|
|
|
import tensorflow as tf
|
|
|
|
|
|
|
|
from .components.convnext_inverse import do_convnext_inverse
|
2022-10-25 20:25:15 +00:00
|
|
|
from .components.LayerStack2Image import LayerStack2Image
|
2022-10-13 16:37:16 +00:00
|
|
|
|
2022-10-13 16:50:16 +00:00
|
|
|
def model_rainfallwater_segmentation(metadata, shape_water_out, model_arch="convnext_i_xtiny", batch_size=64, water_bins=2):
|
2022-10-13 16:37:16 +00:00
|
|
|
"""Makes a new rainfall / waterdepth segmentation head model.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
metadata (dict): A dictionary of metadata about the dataset to use to build the model with.
|
2022-10-13 16:50:16 +00:00
|
|
|
shape_water_out (int[]): The width and height (in that order) that should dictate the output shape of the segmentation head. CURRENTLY NOT USED.
|
|
|
|
model_arch (str, optional): The architecture code for the underlying (inverted) ConvNeXt model. Defaults to "convnext_i_xtiny".
|
|
|
|
batch_size (int, optional): The batch size. Reduce to save memory. Defaults to 64.
|
|
|
|
water_bins (int, optional): The number of classes that the water depth output oft he segmentation head should be binned into. Defaults to 2.
|
2022-10-13 16:37:16 +00:00
|
|
|
|
|
|
|
Returns:
|
2022-10-13 16:50:16 +00:00
|
|
|
tf.keras.Model: The new model, freshly compiled for your convenience! :D
|
2022-10-13 16:37:16 +00:00
|
|
|
"""
|
2022-09-28 17:14:09 +00:00
|
|
|
out_water_width, out_water_height = shape_water_out
|
2022-10-13 16:37:16 +00:00
|
|
|
feature_dim_in = metadata["rainfallradar"][0]
|
2022-09-07 16:45:38 +00:00
|
|
|
|
|
|
|
layer_input = tf.keras.layers.Input(
|
|
|
|
shape=(feature_dim_in)
|
|
|
|
)
|
|
|
|
|
|
|
|
# BEGIN
|
2022-10-03 16:27:37 +00:00
|
|
|
layer_next = tf.keras.layers.Dense(name="cns.stage.begin.dense1", units=feature_dim_in)(layer_input)
|
|
|
|
layer_next = tf.keras.layers.ReLU(name="cns.stage_begin.relu1")(layer_next)
|
2022-10-03 16:33:06 +00:00
|
|
|
layer_next = tf.keras.layers.LayerNormalization(name="cns.stage_begin.norm1", epsilon=1e-6)(layer_next)
|
2022-09-28 17:22:48 +00:00
|
|
|
|
2022-10-25 20:25:15 +00:00
|
|
|
layer_next = LayerStack2Image(target_width=4, target_height=4)(layer_next)
|
|
|
|
# layer_next = tf.keras.layers.Reshape((4, 4, math.floor(feature_dim_in/(4*4))), name="cns.stable_begin.reshape")(layer_next)
|
2022-10-03 16:27:37 +00:00
|
|
|
layer_next = tf.keras.layers.Dense(name="cns.stage.begin.dense2", units=feature_dim_in)(layer_next)
|
|
|
|
layer_next = tf.keras.layers.ReLU(name="cns.stage_begin.relu2")(layer_next)
|
2022-10-03 16:33:06 +00:00
|
|
|
layer_next = tf.keras.layers.LayerNormalization(name="cns.stage_begin.norm2", epsilon=1e-6)(layer_next)
|
2022-10-03 16:27:37 +00:00
|
|
|
|
|
|
|
|
|
|
|
# layer_next = tf.keras.layers.Reshape((1, 1, feature_dim_in), name="cns.stable_begin.reshape")(layer_next)
|
2022-09-07 16:45:38 +00:00
|
|
|
|
2022-10-12 15:50:06 +00:00
|
|
|
layer_next = do_convnext_inverse(layer_next, arch_name=model_arch)
|
2022-09-07 16:45:38 +00:00
|
|
|
|
2022-09-16 14:36:01 +00:00
|
|
|
# TODO: An attention layer here instead of a dense layer, with a skip connection perhaps?
|
2022-10-06 18:17:03 +00:00
|
|
|
logger.warning("Warning: TODO implement attention from https://ieeexplore.ieee.org/document/9076883")
|
2022-11-10 20:53:37 +00:00
|
|
|
layer_next = tf.keras.layers.Dense(32, activation="relu")(layer_next)
|
|
|
|
layer_next = tf.keras.layers.Conv2D(water_bins, activation="relu", kernel_size=1, padding="same")(layer_next)
|
2022-10-13 20:02:57 +00:00
|
|
|
layer_next = tf.keras.layers.Softmax(axis=-1)(layer_next)
|
2022-09-07 16:45:38 +00:00
|
|
|
|
|
|
|
model = tf.keras.Model(
|
|
|
|
inputs = layer_input,
|
|
|
|
outputs = layer_next
|
|
|
|
)
|
|
|
|
|
|
|
|
model.compile(
|
|
|
|
optimizer="Adam",
|
2022-11-10 20:50:56 +00:00
|
|
|
loss=tf.keras.losses.CategoricalCrossentropy(),
|
2022-10-21 15:51:20 +00:00
|
|
|
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()]
|
2022-09-07 16:45:38 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
return model
|