dlr: add dice loss as metric

more metrics to go, though
Starbeamrainbowlabs 2023-03-03 19:34:55 +00:00
parent f70083bea4
commit d464c9f57d
Signed by: sbrl
GPG key ID: 1BE5172E637709C2
2 changed files with 54 additions and 2 deletions


@@ -18,6 +18,7 @@ import tensorflow as tf
 
 from lib.dataset.dataset_mono import dataset_mono
 from lib.ai.components.LossCrossEntropyDice import LossCrossEntropyDice
+from lib.ai.components.MetricDice import MetricDice
 
 time_start = datetime.now()
 logger.info(f"Starting at {str(datetime.now().isoformat())}")
@@ -158,7 +159,8 @@ if PATH_CHECKPOINT is None:
 else:
	model = tf.keras.models.load_model(PATH_CHECKPOINT, custom_objects={
		# Tell Tensorflow about our custom layers so that it can deserialise models that use them
-		"LossCrossEntropyDice": LossCrossEntropyDice
+		"LossCrossEntropyDice": LossCrossEntropyDice,
+		"MetricDice": MetricDice
	})
@@ -181,7 +183,12 @@ if PATH_CHECKPOINT is None:
 model.compile(
	optimizer=tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE),
	loss=loss_fn,
-	metrics=["accuracy"],
+	metrics=[
+		"accuracy",
+		MetricDice(),
+		tf.keras.metrics.MeanIoU(num_classes=2)
+		# TODO: Add IoU, F1, Precision, Recall here.
+	],
 )
 logger.info(">>> Beginning training")
 history = model.fit(dataset_train,
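
For context, a minimal sketch (not part of this commit) of round-tripping a checkpoint through Keras with the custom loss and metric registered, mirroring the custom_objects mapping added above; the checkpoint path here is a hypothetical placeholder.

# Hypothetical sketch: save and re-load a checkpoint that uses the custom objects.
model.save("path/to/example_checkpoint")  # placeholder path
model_reloaded = tf.keras.models.load_model("path/to/example_checkpoint", custom_objects={
	"LossCrossEntropyDice": LossCrossEntropyDice,
	"MetricDice": MetricDice
})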


@@ -0,0 +1,45 @@
import math

import tensorflow as tf


def dice_coefficient(y_true, y_pred):
	"""Compute the dice coefficient.
	A measure of how similar 2 things are [images], or how much they overlap [image segmentation].
	@source https://lars76.github.io/2018/09/27/loss-functions-for-segmentation.html#9
	Args:
		y_true (tf.Tensor): The ground truth label.
		y_pred (tf.Tensor): The output predicted by the model.
	Returns:
		tf.Tensor: The computed Dice coefficient.
	"""
	y_pred = tf.math.sigmoid(y_pred)
	numerator = 2 * tf.reduce_sum(y_true * y_pred)
	denominator = tf.reduce_sum(y_true + y_pred)
	return numerator / denominator
class MetricDice(tf.keras.metrics.Metric):
	"""The dice coefficient, tracked as a Keras metric and averaged across batches.
	Args:
		smooth (float): The smoothing factor (currently unused).
	"""
	
	def __init__(self, name="dice_coefficient", smooth=100, **kwargs):
		super(MetricDice, self).__init__(name=name, **kwargs)
		self.param_smooth = smooth
		# Running totals used to average the dice coefficient across batches
		self.total = self.add_weight(name="total", initializer="zeros")
		self.count = self.add_weight(name="count", initializer="zeros")
	
	def update_state(self, y_true, y_pred, sample_weight=None):
		ground_truth = tf.cast(y_true, dtype=tf.float32)
		prediction = tf.cast(y_pred, dtype=tf.float32)
		self.total.assign_add(dice_coefficient(ground_truth, prediction))
		self.count.assign_add(1.0)
	
	def result(self):
		return self.total / self.count
	
	def reset_state(self):
		self.total.assign(0.0)
		self.count.assign(0.0)
	
	def get_config(self):
		config = super(MetricDice, self).get_config()
		config.update({
			"smooth": self.param_smooth,
		})
		return config
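
A quick sketch (not part of this commit) of exercising the metric standalone, assuming the MetricDice class above is in scope; the tensor values are arbitrary, and y_pred holds raw logits because dice_coefficient() applies a sigmoid itself.

import tensorflow as tf

metric = MetricDice()
y_true = tf.constant([[1.0, 0.0, 1.0, 1.0]])
y_pred = tf.constant([[5.0, -4.0, 3.0, -2.0]])  # raw logits, arbitrary values

metric.update_state(y_true, y_pred)
print(float(metric.result()))  # dice coefficient for this batch, in the range [0, 1]

metric.reset_state()  # clear the running average, e.g. between evaluations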