Mirror of https://github.com/sbrl/research-rainfallradar
Synced 2024-11-22 09:13:01 +00:00
start implementing core image segmentation model
parent 22620a1854
commit 7130c4fdf8
2 changed files with 73 additions and 0 deletions
aimodel/src/lib/ai/components/convnext_inverse.py (new file, +40)
@@ -0,0 +1,40 @@
import tensorflow as tf

from .convnext import add_convnext_block

depths_dims = dict(
	# architectures from: https://github.com/facebookresearch/ConvNeXt
	# A ConvNet for the 2020s: https://arxiv.org/abs/2201.03545
	convnext_i_xtiny = dict(depths=[3, 6, 3, 3], dims=[528, 264, 132, 66]),
	convnext_i_tiny = dict(depths=[3, 9, 3, 3], dims=[768, 384, 192, 96]),
	convnext_i_small = dict(depths=[3, 27, 3, 3], dims=[768, 384, 192, 96]),
	convnext_i_base = dict(depths=[3, 27, 3, 3], dims=[1024, 512, 256, 128]),
	convnext_i_large = dict(depths=[3, 27, 3, 3], dims=[1536, 768, 384, 192]),
	convnext_i_xlarge = dict(depths=[3, 27, 3, 3], dims=[2048, 1024, 512, 256]),
)


def do_convnext_inverse(layer_in, arch_name="convnext_i_tiny"): # NOTE: default corrected from "convnext_tiny", which is not a key of depths_dims
	return convnext_inverse(layer_in,
		depths=depths_dims[arch_name]["depths"],
		dims=depths_dims[arch_name]["dims"]
	)


def convnext_inverse(layer_in, depths, dims):
	"""Stacks one upscaling (decoder) stage per (depth, dim) pair."""
	layer_next = layer_in
	
	for i, (depth, dim) in enumerate(zip(depths, dims)):
		layer_next = block_upscale(layer_next, i, depth=depth, dim=dim)
	
	return layer_next # NOTE: added; the original did not return the built layer


def block_upscale(layer_in, block_number, depth, dim):
	"""One decoder stage: `depth` ConvNeXt blocks, then LayerNorm + transpose convolution."""
	layer_next = layer_in
	for i in range(depth):
		layer_next = add_convnext_block(layer_next, dim=dim, prefix=f"cns.stage{block_number}.block.{i}")
	
	layer_next = tf.keras.layers.LayerNormalization(name=f"cns.stage{block_number}.end.norm", epsilon=1e-6)(layer_next)
	# NOTE: with the Keras default strides=1 this transpose convolution does not change the spatial resolution
	layer_next = tf.keras.layers.Conv2DTranspose(name=f"cns.stage{block_number}.end.convtp", filters=dim, kernel_size=4, padding="same")(layer_next)
	
	return layer_next # NOTE: added; the original did not return the built layer
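For reference, here is a minimal standalone sketch (not part of the commit) of the stage-end pattern used above: LayerNormalization followed by Conv2DTranspose. The 4x4x528 input, the 264 output filters, and strides=2 are illustrative assumptions; as noted in the comment above, the committed code leaves strides at its Keras default of 1, which keeps the spatial resolution unchanged.

import tensorflow as tf

# Illustrative sketch only: with padding="same", a stride of 2 doubles the
# spatial resolution; the committed code uses the default strides=1.
layer_in = tf.keras.layers.Input(shape=(4, 4, 528))
x = tf.keras.layers.LayerNormalization(epsilon=1e-6)(layer_in)
x = tf.keras.layers.Conv2DTranspose(filters=264, kernel_size=4, strides=2, padding="same")(x)
print(tf.keras.Model(layer_in, x).output_shape)  # (None, 8, 8, 264)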
aimodel/src/lib/ai/model_rainfallwater_segmentation.py (new file, +33)
@@ -0,0 +1,33 @@
import math

from loguru import logger
import tensorflow as tf

from .components.convnext_inverse import do_convnext_inverse


def model_rainfallwater_segmentation(metadata, feature_dim_in, shape_water_out, batch_size=64, summary_file=None):
	layer_input = tf.keras.layers.Input(
		shape=(feature_dim_in,) # NOTE: trailing comma added so that shape is a 1-element tuple
	)
	
	# BEGIN
	# NOTE: Dense requires a unit count; feature_dim_in here is an assumption, the original omitted the argument
	layer_next = tf.keras.layers.Dense(feature_dim_in, name="cns.stage.begin.dense")(layer_input)
	layer_next = tf.keras.layers.LayerNormalization(name="stage_begin.norm", epsilon=1e-6)(layer_next) # NOTE: spelling corrected from LayerNormalisation
	layer_next = tf.keras.layers.ReLU(name="stage_begin.relu")(layer_next)
	
	# NOTE: the flat feature vector still needs reshaping into a (height, width, channels)
	# grid before it can be fed to the convolutional decoder below
	layer_next = do_convnext_inverse(layer_next, arch_name="convnext_i_tiny")
	
	# TODO: Implement projection head here
	
	model = tf.keras.Model(
		inputs = layer_input,
		outputs = layer_next
	)
	
	model.compile(
		optimizer="Adam",
		loss="binary_crossentropy" # was "" with a TODO to set binary cross-entropy loss
	)
	
	return model
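As flagged in the comment before the do_convnext_inverse call, the flat feature vector is not yet reshaped into an image-like grid for the convolutional decoder. The following standalone sketch (not part of the commit; the 512-dimensional input and the 4x4x528 target grid are assumptions) shows one common Dense-plus-Reshape bridge for that step.

import tensorflow as tf

# Illustrative sketch only: project a flat feature vector up to 4*4*528 values,
# then reshape it into a 4x4 grid with 528 channels for a convolutional decoder.
feature_dim_in = 512  # assumed input dimensionality
layer_input = tf.keras.layers.Input(shape=(feature_dim_in,))
layer_next = tf.keras.layers.Dense(4 * 4 * 528)(layer_input)
layer_next = tf.keras.layers.Reshape((4, 4, 528))(layer_next)
print(tf.keras.Model(layer_input, layer_next).output_shape)  # (None, 4, 4, 528)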