Mirror of https://github.com/sbrl/research-rainfallradar, synced 2024-11-22 09:13:01 +00:00
dlr: fix metrics

parent c7f96ab6ab
commit 94a32e7144

3 changed files with 19 additions and 33 deletions
@@ -18,9 +18,9 @@ import tensorflow as tf
 
 from lib.dataset.dataset_mono import dataset_mono
 from lib.ai.components.LossCrossEntropyDice import LossCrossEntropyDice
-from lib.ai.components.MetricDice import MetricDice
-from lib.ai.components.MetricSensitivity import MetricSensitivity
-from lib.ai.components.MetricSpecificity import MetricSpecificity
+from lib.ai.components.MetricDice import dice_coefficient
+from lib.ai.components.MetricSensitivity import sensitivity
+from lib.ai.components.MetricSpecificity import specificity
 
 time_start = datetime.now()
 logger.info(f"Starting at {str(datetime.now().isoformat())}")
@@ -189,10 +189,10 @@ if PATH_CHECKPOINT is None:
 	loss=loss_fn,
 	metrics=[
 		"accuracy",
-		MetricDice(),
+		dice_coefficient,
 		tf.keras.metrics.MeanIoU(num_classes=2),
-		MetricSensitivity(), # How many true positives were accurately predicted
-		MetricSpecificity() # How many true negatives were accurately predicted?
+		sensitivity, # How many true positives were accurately predicted
+		specificity # How many true negatives were accurately predicted?
 		# TODO: Add IoU, F1, Precision, Recall, here.
 	],
 )
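Note on the hunk above: tf.keras accepts a plain (y_true, y_pred) -> scalar callable anywhere a metric is expected and wraps it in a mean-tracking metric internally, which is why the class instances MetricDice(), MetricSensitivity() and MetricSpecificity() can be swapped for the bare functions dice_coefficient, sensitivity and specificity. A minimal sketch of that behaviour with a toy model and a placeholder metric function (dummy_metric and the toy model are invented for illustration, not part of this repository):

import tensorflow as tf

def dummy_metric(y_true, y_pred):
	# Placeholder standing in for dice_coefficient / sensitivity / specificity.
	y_true = tf.cast(y_true, tf.float32)
	return tf.reduce_mean(tf.cast(tf.equal(tf.round(y_pred), y_true), tf.float32))

model = tf.keras.Sequential([
	tf.keras.Input(shape=(4,)),
	tf.keras.layers.Dense(1, activation="sigmoid"),
])
model.compile(
	optimizer="adam",
	loss="binary_crossentropy",
	metrics=["accuracy", dummy_metric],  # bare callables are wrapped by Keras automatically
)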
@@ -2,7 +2,13 @@ import math
 
 import tensorflow as tf
 
+def sensitivity(y_true, y_pred):
+	ground_truth = tf.cast(y_true, dtype=tf.float32)
+	prediction = tf.cast(y_pred, dtype=tf.float32)
+	
+	recall = tf.keras.metrics.Recall()
+	recall.update_state(y_true, y_pred)
+	return recall.result()
 
 class MetricSensitivity(tf.keras.metrics.Metric):
 	"""An implementation of the sensitivity.
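For reference, sensitivity is the same quantity as recall, TP / (TP + FN), so the new sensitivity function above simply delegates to tf.keras.metrics.Recall (the float32 casts it computes are left unused). A standalone worked example with made-up values:

import tensorflow as tf

# Illustrative values only; predictions above the default 0.5 threshold count as positive.
y_true = tf.constant([1.0, 1.0, 0.0, 1.0])
y_pred = tf.constant([0.9, 0.2, 0.1, 0.8])

recall = tf.keras.metrics.Recall()
recall.update_state(y_true, y_pred)
print(float(recall.result()))  # 2 true positives out of 3 actual positives, roughly 0.667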
@@ -4,7 +4,8 @@ import tensorflow as tf
 
 
 def specificity(y_pred, y_true):
-	"""
+	"""An implementation of the specificity.
+	In other words, a measure of how many of the true negatives were accurately predicted
 	@source https://datascience.stackexchange.com/a/40746/86851
 	param:
 		y_pred - Predicted labels
@@ -12,34 +13,13 @@ def specificity(y_pred, y_true):
 	Returns:
 		Specificity score
 	"""
+	y_true = tf.cast(y_true, dtype=tf.float32)
+	y_pred = tf.cast(y_pred, dtype=tf.float32)
 	
 	neg_y_true = 1 - y_true
 	neg_y_pred = 1 - y_pred
 	fp = K.sum(neg_y_true * y_pred)
 	tn = K.sum(neg_y_true * neg_y_pred)
 	specificity = tn / (tn + fp + K.epsilon())
 	return specificity
-
-
-class MetricSpecificity(tf.keras.metrics.Metric):
-	"""An implementation of the specificity.
-	In other words, a measure of how many of the true negatives were accurately predicted
-	@source
-	Args:
-		smooth (float): The batch size (currently unused).
-	"""
-	
-	def __init__(self, name="specificity", **kwargs):
-		super(MetricSpecificity, self).__init__(name=name, **kwargs)
-	
-	def call(self, y_true, y_pred):
-		ground_truth = tf.cast(y_true, dtype=tf.float32)
-		prediction = tf.cast(y_pred, dtype=tf.float32)
-		
-		return specificity(ground_truth, prediction)
-	
-	def get_config(self):
-		config = super(MetricSpecificity, self).get_config()
-		config.update({
-		})
-		return config
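The surviving specificity function above computes TN / (TN + FP) using K.sum and K.epsilon; the corresponding import sits outside the hunks shown, so K is assumed here to be tensorflow.keras.backend. A self-contained sketch of the same computation with invented values:

import tensorflow as tf
from tensorflow.keras import backend as K  # assumption: this is what K refers to in the file above

def specificity(y_pred, y_true):
	# Specificity = TN / (TN + FP); argument order mirrors the function above.
	y_true = tf.cast(y_true, dtype=tf.float32)
	y_pred = tf.cast(y_pred, dtype=tf.float32)
	neg_y_true = 1 - y_true
	neg_y_pred = 1 - y_pred
	fp = K.sum(neg_y_true * y_pred)
	tn = K.sum(neg_y_true * neg_y_pred)
	return tn / (tn + fp + K.epsilon())

y_true = tf.constant([0.0, 0.0, 1.0, 0.0])
y_pred = tf.constant([0.0, 1.0, 1.0, 0.0])
print(float(specificity(y_pred, y_true)))  # 2 TN / (2 TN + 1 FP), roughly 0.667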