From 94a32e71440d8748101fdd3b1ed7a0c01da2d9fe Mon Sep 17 00:00:00 2001
From: Starbeamrainbowlabs
Date: Fri, 3 Mar 2023 20:37:22 +0000
Subject: [PATCH] dlr: fix metrics

---
 aimodel/src/deeplabv3_plus_test_rainfall.py  | 12 ++++++------
 .../lib/ai/components/MetricSensitivity.py   |  9 ++++++++-
 .../lib/ai/components/MetricSpecificity.py   | 34 +++++++++---------------------------
 3 files changed, 21 insertions(+), 34 deletions(-)

diff --git a/aimodel/src/deeplabv3_plus_test_rainfall.py b/aimodel/src/deeplabv3_plus_test_rainfall.py
index 5408d38..5cca40b 100755
--- a/aimodel/src/deeplabv3_plus_test_rainfall.py
+++ b/aimodel/src/deeplabv3_plus_test_rainfall.py
@@ -18,9 +18,9 @@ import tensorflow as tf
 
 from lib.dataset.dataset_mono import dataset_mono
 from lib.ai.components.LossCrossEntropyDice import LossCrossEntropyDice
-from lib.ai.components.MetricDice import MetricDice
-from lib.ai.components.MetricSensitivity import MetricSensitivity
-from lib.ai.components.MetricSpecificity import MetricSpecificity
+from lib.ai.components.MetricDice import dice_coefficient
+from lib.ai.components.MetricSensitivity import sensitivity
+from lib.ai.components.MetricSpecificity import specificity
 
 time_start = datetime.now()
 logger.info(f"Starting at {str(datetime.now().isoformat())}")
@@ -189,10 +189,10 @@ if PATH_CHECKPOINT is None:
 		loss=loss_fn,
 		metrics=[
 			"accuracy",
-			MetricDice(),
+			dice_coefficient,
 			tf.keras.metrics.MeanIoU(num_classes=2),
-			MetricSensitivity(), # How many true positives were accurately predicted
-			MetricSpecificity() # How many true negatives were accurately predicted?
+			sensitivity, # How many true positives were accurately predicted?
+			specificity # How many true negatives were accurately predicted?
 			# TODO: Add IoU, F1, Precision, Recall here.
 		],
 	)
diff --git a/aimodel/src/lib/ai/components/MetricSensitivity.py b/aimodel/src/lib/ai/components/MetricSensitivity.py
index 641e0c8..b04292f 100644
--- a/aimodel/src/lib/ai/components/MetricSensitivity.py
+++ b/aimodel/src/lib/ai/components/MetricSensitivity.py
@@ -2,7 +2,14 @@
 import math
 
 import tensorflow as tf
-
+def sensitivity(y_true, y_pred):
+	"""Calculates the sensitivity (recall): how many of the true positives were accurately predicted?"""
+	y_true = tf.cast(y_true, dtype=tf.float32)
+	y_pred = tf.cast(y_pred, dtype=tf.float32)
+	
+	recall = tf.keras.metrics.Recall()
+	recall.update_state(y_true, y_pred)
+	return recall.result()
 
 class MetricSensitivity(tf.keras.metrics.Metric):
 	"""An implementation of the sensitivity.
diff --git a/aimodel/src/lib/ai/components/MetricSpecificity.py b/aimodel/src/lib/ai/components/MetricSpecificity.py
index e3a2247..51598c4 100644
--- a/aimodel/src/lib/ai/components/MetricSpecificity.py
+++ b/aimodel/src/lib/ai/components/MetricSpecificity.py
@@ -4,7 +4,8 @@ import tensorflow as tf
 
-def specificity(y_pred, y_true):
-	"""
+def specificity(y_true, y_pred):
+	"""An implementation of specificity.
+	
 	In other words, a measure of how many of the true negatives were accurately predicted
 	@source https://datascience.stackexchange.com/a/40746/86851
 	param: y_pred - Predicted labels
@@ -12,34 +13,13 @@ def specificity(y_pred, y_true):
 	Returns:
 	Specificity score
 	"""
+	
+	y_true = tf.cast(y_true, dtype=tf.float32)
+	y_pred = tf.cast(y_pred, dtype=tf.float32)
+	
 	neg_y_true = 1 - y_true
 	neg_y_pred = 1 - y_pred
 	fp = K.sum(neg_y_true * y_pred)
 	tn = K.sum(neg_y_true * neg_y_pred)
 	specificity = tn / (tn + fp + K.epsilon())
 	return specificity
-
-
-class MetricSpecificity(tf.keras.metrics.Metric):
-	"""An implementation of the specificity.
-	In other words, a measure of how many of the true negatives were accurately predicted
-	@source
-	Args:
-		smooth (float): The batch size (currently unused).
-	"""
-	
-	def __init__(self, name="specificity", **kwargs):
-		super(MetricSpecificity, self).__init__(name=name, **kwargs)
-	
-	def call(self, y_true, y_pred):
-		ground_truth = tf.cast(y_true, dtype=tf.float32)
-		prediction = tf.cast(y_pred, dtype=tf.float32)
-		
-		return specificity(ground_truth, prediction)
-	
-	def get_config(self):
-		config = super(MetricSpecificity, self).get_config()
-		config.update({
-			
-		})
-		return config
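
Note on the new sensitivity(): it constructs a fresh tf.keras.metrics.Recall object on every call, so each batch yields an independent recall value that Keras then averages across batches. Constructing a metric object inside a compiled train/test step also allocates new variables per call, which some TensorFlow versions reject inside a tf.function. A stateless, tensor-only alternative in the same style as specificity() could look like the sketch below; this is an illustrative sketch assuming binary 0/1 masks (raw probabilities behave as soft counts), not part of the patch:

	import tensorflow as tf
	
	def sensitivity(y_true, y_pred):
		"""Stateless sensitivity (recall): what fraction of actual positives were predicted positive?"""
		y_true = tf.cast(y_true, dtype=tf.float32)
		y_pred = tf.cast(y_pred, dtype=tf.float32)
		
		# With hard 0/1 masks these are exact true-positive / false-negative counts;
		# with probabilities they act as soft counts.
		true_positives = tf.reduce_sum(y_true * y_pred)
		false_negatives = tf.reduce_sum(y_true * (1.0 - y_pred))
		# epsilon() guards against division by zero when a batch has no positive pixels.
		return true_positives / (true_positives + false_negatives + tf.keras.backend.epsilon())

Because this is a plain function of (y_true, y_pred), it slots into the metrics=[...] list in deeplabv3_plus_test_rainfall.py exactly as specificity does.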