# -*- coding: utf-8 -*-
"""
Created on Tue Jun 21 10:53:51 2022
metrics
@author: fangjiasheng
"""
from keras import backend as K


def mcor(y_true, y_pred):
    """Matthews correlation coefficient (MCC), computed batch-wise
    from the four confusion-matrix counts."""
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos
    y_pos = K.round(K.clip(y_true, 0, 1))
    y_neg = 1 - y_pos
    tp = K.sum(y_pos * y_pred_pos)
    tn = K.sum(y_neg * y_pred_neg)
    fp = K.sum(y_neg * y_pred_pos)
    fn = K.sum(y_pos * y_pred_neg)
    numerator = tp * tn - fp * fn
    denominator = K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    # K.epsilon() guards against division by zero when any margin is empty.
    return numerator / (denominator + K.epsilon())
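

# Toy sanity check for mcor (a sketch, assuming the TensorFlow backend with
# eager execution; the labels below are made up purely for illustration):
#   y_true = tf.constant([1., 1., 0., 0.])
#   y_pred = tf.constant([0.9, 0.4, 0.2, 0.1])   # rounds to [1, 0, 0, 0]
#   tp=1, tn=2, fp=0, fn=1  ->  MCC = 2 / sqrt(1*2*2*3) = 2 / sqrt(12) ~= 0.577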


def precision(y_true, y_pred):
    """Precision metric.

    Only computes a batch-wise average of precision, a metric for
    multi-label classification of how many selected items are relevant.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_positives / (predicted_positives + K.epsilon())


def recall(y_true, y_pred):
    """Recall metric.

    Only computes a batch-wise average of recall, a metric for
    multi-label classification of how many relevant items are selected.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_positives / (possible_positives + K.epsilon())
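

# Continuing the made-up batch from the mcor note above (an assumption, not
# part of the original file): with y_true = [1, 1, 0, 0] and rounded
# y_pred = [1, 0, 0, 0],
#   precision = tp / (tp + fp) = 1 / (1 + 0) = 1.0
#   recall    = tp / (tp + fn) = 1 / (1 + 1) = 0.5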


def f1(y_true, y_pred):
    """F1 score: the harmonic mean of precision and recall.

    Reuses the module-level precision() and recall() instead of
    re-defining identical copies locally.
    """
    _precision = precision(y_true, y_pred)
    _recall = recall(y_true, y_pred)
    return 2 * ((_precision * _recall) / (_precision + _recall + K.epsilon()))
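

# On the same toy batch: F1 = 2 * (1.0 * 0.5) / (1.0 + 0.5) ~= 0.667
# (the K.epsilon() term only matters when precision + recall is zero).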


# You can use these functions like this:
# model.compile(loss='binary_crossentropy',
#               optimizer='adam',
#               metrics=[mcor, recall, f1])
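

if __name__ == "__main__":
    # Minimal end-to-end sketch, assuming Keras 2.x with the TensorFlow
    # backend; the tiny model and random data below are made up purely to
    # show the custom metrics compiling and reporting during fit().
    import numpy as np
    from keras.models import Sequential
    from keras.layers import Dense

    model = Sequential([Dense(1, activation="sigmoid", input_shape=(10,))])
    model.compile(loss="binary_crossentropy",
                  optimizer="adam",
                  metrics=[mcor, precision, recall, f1])

    x = np.random.rand(64, 10).astype("float32")
    y = np.random.randint(0, 2, size=(64, 1)).astype("float32")
    model.fit(x, y, epochs=1, batch_size=16, verbose=1)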