Mirror of https://github.com/sbrl/research-rainfallradar, synced 2024-11-22 09:13:01 +00:00

Commit c384d55dff (parent 8e23e9d341): add arg to adjust learning rate

2 changed files with 18 additions and 8 deletions
@@ -8,15 +8,18 @@ from .components.convnext_inverse import do_convnext_inverse
 from .components.LayerStack2Image import LayerStack2Image
 from .components.LossCrossentropy import LossCrossentropy
 
-def model_rainfallwater_mono(metadata, shape_water_out, model_arch_enc="convnext_xtiny", model_arch_dec="convnext_i_xtiny", feature_dim=512, batch_size=64, water_bins=2):
+def model_rainfallwater_mono(metadata, shape_water_out, model_arch_enc="convnext_xtiny", model_arch_dec="convnext_i_xtiny", feature_dim=512, batch_size=64, water_bins=2, learning_rate=None):
 	"""Makes a new rainfall / waterdepth mono model.
 	
 	Args:
 		metadata (dict): A dictionary of metadata about the dataset to use to build the model with.
 		shape_water_out (int[]): The width and height (in that order) that should dictate the output shape of the segmentation head. CURRENTLY NOT USED.
 		model_arch (str, optional): The architecture code for the underlying (inverted) ConvNeXt model. Defaults to "convnext_i_xtiny".
 		feature_dim (int, optional): The size of the bottleneck. Defaults to 512.
 		model_arch_enc (str, optional): The architecture code for the underlying ConvNeXt model for the encoder. Defaults to "convnext_xtiny".
 		model_arch_dec (str, optional): The architecture code for the underlying (inverted) ConvNeXt model for the decoder. Defaults to "convnext_i_xtiny".
 		batch_size (int, optional): The batch size. Reduce to save memory. Defaults to 64.
 		water_bins (int, optional): The number of classes that the water depth output of the segmentation head should be binned into. Defaults to 2.
+		learning_rate (float, optional): The (initial) learning rate. YOU DO NOT USUALLY NEED TO CHANGE THIS. For experimental purposes only. Defaults to None, which means it will be determined automatically.
 	
 	Returns:
 		tf.keras.Model: The new model, freshly compiled for your convenience! :D
@@ -70,8 +73,11 @@ def model_rainfallwater_mono(metadata, shape_water_out, model_arch_enc="convnext
 		outputs = layer_next
 	)
 	
+	optimizer = "Adam"
+	if learning_rate is not None:
+		optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
 	model.compile(
-		optimizer="Adam",
+		optimizer=optimizer,
 		loss=LossCrossentropy(batch_size=batch_size),
 		# loss=tf.keras.losses.CategoricalCrossentropy(),
 		metrics=[tf.keras.metrics.CategoricalAccuracy()]
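The hunk above keeps Keras's stock Adam (the "Adam" string resolves to Adam with its default learning rate of 0.001) unless an explicit rate is supplied, in which case the optimizer is constructed by hand. Below is a minimal, self-contained sketch of that same pattern; the compile_with_optional_lr() helper and the toy Sequential model are stand-ins of my own, not part of this repository, and the loss here is plain CategoricalCrossentropy rather than the project's LossCrossentropy.

import tensorflow as tf

def compile_with_optional_lr(model, learning_rate=None):
	# Same selection logic as the diff: fall back to Keras's default Adam when
	# no learning rate is given, otherwise build Adam with the requested rate.
	optimizer = "Adam"
	if learning_rate is not None:
		optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
	model.compile(
		optimizer=optimizer,
		loss=tf.keras.losses.CategoricalCrossentropy(),
		metrics=[tf.keras.metrics.CategoricalAccuracy()]
	)
	return model

# Toy model purely for demonstration purposes.
toy = tf.keras.Sequential([tf.keras.layers.Dense(4, activation="softmax")])
compile_with_optional_lr(toy, learning_rate=1e-4)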
@@ -20,6 +20,7 @@ def parse_args():
 	parser.add_argument("--bottleneck", help="The size of the bottleneck [default: 512].", type=int)
 	parser.add_argument("--arch-enc", help="Name of the underlying encoder convnext model to use [default: convnext_xtiny].")
 	parser.add_argument("--arch-dec", help="Name of the underlying decoder convnext model to use [default: convnext_i_xtiny].")
+	parser.add_argument("--learning-rate", help="The initial learning rate. YOU DO NOT USUALLY NEED TO CHANGE THIS. For experimental use only [default: determined automatically].", type=float)
 	
 	
 	return parser
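Because argparse converts "--learning-rate" into the attribute learning_rate and leaves it as None when the flag is omitted, the value slots straight into the model builder's "None means determined automatically" convention. The stand-alone parser below is an illustration only (the real parse_args() defines many more options); it shows the float conversion and the None default.

import argparse

parser = argparse.ArgumentParser(description="Stand-in for the real parse_args().")
parser.add_argument("--learning-rate", help="The initial learning rate [default: determined automatically].", type=float)

args = parser.parse_args(["--learning-rate", "0.0001"])
print(args.learning_rate)  # 0.0001: the command-line string is converted to a float

args = parser.parse_args([])
print(args.learning_rate)  # None: flag omitted, so the model builder picks the default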
@@ -43,6 +44,8 @@ def run(args):
 		args.arch_enc = "convnext_xtiny"
 	if (not hasattr(args, "arch_dec")) or args.arch_dec == None:
 		args.arch_dec = "convnext_i_xtiny"
+	if (not hasattr(args, "learning_rate")) or args.learning_rate == None:
+		args.learning_rate = None
 	
 	
 	# TODO: Validate args here.
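Setting a missing or None learning_rate back to None looks redundant, but it follows the surrounding pattern and guarantees the attribute exists before it is forwarded later in run(). Below is a tiny sketch of how the guard behaves; the Namespace is a minimal stand-in for the real parsed arguments.

import argparse

# Stand-in Namespace; learning_rate is deliberately absent.
args = argparse.Namespace(arch_enc=None, arch_dec=None)

# hasattr() short-circuits the `or`, so the attribute is never read before it
# is known to exist; afterwards args.learning_rate can be passed along safely.
if (not hasattr(args, "learning_rate")) or args.learning_rate == None:
	args.learning_rate = None

print(args.learning_rate)  # None, i.e. "determined automatically" downstream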
@@ -66,12 +69,13 @@ def run(args):
 	
 	
 	ai = RainfallWaterMono(
-		dir_output=args.output,
-		batch_size=args.batch_size,
+		dir_output = args.output,
+		batch_size = args.batch_size,
 		
-		feature_dim=args.bottleneck,
-		model_arch_enc=args.arch_enc,
-		model_arch_dec=args.arch_dec,
+		feature_dim = args.bottleneck,
+		model_arch_enc = args.arch_enc,
+		model_arch_dec = args.arch_dec,
+		learning_rate = args.learning_rate,
 		
 		metadata = read_metadata(args.input),
 		shape_water_out=[ args.water_size, args.water_size ], # The DESIRED output shape. The actual data will be cropped to match this.
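The diff does not show how RainfallWaterMono uses the new keyword internally, so the following is only a guess at the shape of that plumbing: a made-up ToyMono wrapper and model_builder() stand-in that forward a constructor keyword down to the compile step, in the same way the real class presumably hands learning_rate to model_rainfallwater_mono().

import tensorflow as tf

def model_builder(feature_dim=512, learning_rate=None):
	# Stand-in for model_rainfallwater_mono(); only the optimizer choice matters here.
	optimizer = "Adam" if learning_rate is None else tf.keras.optimizers.Adam(learning_rate=learning_rate)
	model = tf.keras.Sequential([tf.keras.layers.Dense(feature_dim)])
	model.compile(optimizer=optimizer, loss="mse")
	return model

class ToyMono:
	"""Made-up stand-in for RainfallWaterMono: constructor keywords are simply
	forwarded to the model-building function."""
	def __init__(self, dir_output=None, batch_size=64, feature_dim=512, learning_rate=None, **kwargs):
		self.dir_output = dir_output
		self.batch_size = batch_size
		self.model = model_builder(feature_dim=feature_dim, learning_rate=learning_rate)

ai = ToyMono(
	dir_output = "output/toy",
	batch_size = 64,
	feature_dim = 512,
	learning_rate = 1e-4,
)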