diff --git a/aimodel/src/lib/ai/components/convnext_inverse.py b/aimodel/src/lib/ai/components/convnext_inverse.py
index b866fb9..94e23c1 100644
--- a/aimodel/src/lib/ai/components/convnext_inverse.py
+++ b/aimodel/src/lib/ai/components/convnext_inverse.py
@@ -36,6 +36,7 @@ def convnext_inverse(layer_in, depths, dims):
 
 def block_upscale(layer_in, block_number, depth, dim):
 	layer_next = layer_in
+	# Ref https://machinelearningmastery.com/upsampling-and-transpose-convolution-layers-for-generative-adversarial-networks/ to understand Conv2DTranspose
 	layer_next = tf.keras.layers.Conv2DTranspose(
 		name=f"cns.stage{block_number}.end.convtp",
 		filters=dim,
diff --git a/aimodel/src/lib/ai/model_rainfallwater_segmentation.py b/aimodel/src/lib/ai/model_rainfallwater_segmentation.py
index 7d1b2b8..8f7e16a 100644
--- a/aimodel/src/lib/ai/model_rainfallwater_segmentation.py
+++ b/aimodel/src/lib/ai/model_rainfallwater_segmentation.py
@@ -29,6 +29,7 @@ def model_rainfallwater_segmentation(metadata, feature_dim_in, shape_water_out,
 	layer_next = do_convnext_inverse(layer_next, arch_name="convnext_i_tiny")
 	
 	# TODO: An attention layer here instead of a dense layer, with a skip connection perhaps?
+	raise Exception("Error: read and implement attention from https://ieeexplore.ieee.org/document/9076883")
 	layer_next = tf.keras.layers.Dense(32)(layer_next)
 	layer_next = tf.keras.layers.Conv2D(1, kernel_size=1, activation="softmax", padding="same")(layer_next)
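
For context on the first hunk: the new comment points at the linked article to explain what Conv2DTranspose does. Below is a minimal, self-contained sketch of a transpose-convolution upscaling step in the spirit of block_upscale. The kernel_size and strides values are assumptions for illustration only; they are not visible in this diff and may differ from the real layer configuration.

```python
import tensorflow as tf

def block_upscale_sketch(layer_in, block_number, dim):
	"""Illustrative transpose-convolution upscaling step.

	kernel_size/strides below are placeholder choices, not taken from the
	repository; with strides=2 the layer doubles the spatial resolution
	while projecting to `dim` output channels.
	"""
	layer_next = tf.keras.layers.Conv2DTranspose(
		name=f"cns.stage{block_number}.end.convtp",
		filters=dim,
		kernel_size=2,  # assumption: not shown in the diff
		strides=2,      # assumption: upsamples height and width by 2
		padding="same",
	)(layer_in)
	return layer_next

# Usage: a 32x32 feature map with 4 channels becomes 64x64 with `dim` channels.
inp = tf.keras.Input(shape=(32, 32, 4))
out = block_upscale_sketch(inp, block_number=0, dim=96)
print(out.shape)  # (None, 64, 64, 96)
```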
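
The second hunk adds a deliberate `raise` under the TODO about replacing the Dense layer with an attention layer plus a skip connection. The sketch below is only one possible shape of that idea: it flattens the spatial grid to a sequence, applies Keras MultiHeadAttention, and adds a residual connection before reshaping back. It is not the mechanism from the cited IEEE paper, and `num_heads`/`key_dim` are arbitrary placeholder values.

```python
import tensorflow as tf

def attention_with_skip_sketch(layer_in, num_heads=4, key_dim=32):
	"""Illustrative self-attention block with a skip connection.

	Not the attention scheme from https://ieeexplore.ieee.org/document/9076883;
	this just shows where such a layer could sit in place of the Dense layer.
	Assumes the input has a statically known (height, width, channels) shape.
	"""
	height, width, channels = layer_in.shape[1], layer_in.shape[2], layer_in.shape[3]
	# Flatten the spatial grid into a sequence of per-pixel feature vectors.
	seq = tf.keras.layers.Reshape((height * width, channels))(layer_in)
	# Self-attention over all spatial positions.
	attended = tf.keras.layers.MultiHeadAttention(
		num_heads=num_heads, key_dim=key_dim
	)(seq, seq)
	# Skip connection, as suggested by the TODO.
	seq = tf.keras.layers.Add()([seq, attended])
	return tf.keras.layers.Reshape((height, width, channels))(seq)

# Drop-in position: where the TODO currently sits, before the final 1x1 Conv2D.
inp = tf.keras.Input(shape=(16, 16, 64))
out = attention_with_skip_sketch(inp)
print(out.shape)  # (None, 16, 16, 64)
```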