# -*- coding: utf-8 -*-
"""
Created on Tue Jun 21 10:53:51 2022
metrics
@author: fangjiasheng
"""
from keras import backend as K
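
# NOTE: the import above assumes standalone Keras 2.x. With TensorFlow 2.x you
# can use the bundled backend instead (an environment assumption, not part of
# the original file):
#   from tensorflow.keras import backend as K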


def mcor(y_true, y_pred):
    """Matthews correlation coefficient (batch-wise).

    MCC = (tp*tn - fp*fn) / sqrt((tp+fp)(tp+fn)(tn+fp)(tn+fn))
    """
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos
    y_pos = K.round(K.clip(y_true, 0, 1))
    y_neg = 1 - y_pos
    tp = K.sum(y_pos * y_pred_pos)
    tn = K.sum(y_neg * y_pred_neg)
    fp = K.sum(y_neg * y_pred_pos)
    fn = K.sum(y_pos * y_pred_neg)
    numerator = tp * tn - fp * fn
    denominator = K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return numerator / (denominator + K.epsilon())


def precision(y_true, y_pred):
    """Precision metric.

    Only computes a batch-wise average of precision.
    Computes the precision, a metric for multi-label classification of
    how many selected items are relevant.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_positives / (predicted_positives + K.epsilon())


def recall(y_true, y_pred):
    """Recall metric.

    Only computes a batch-wise average of recall.
    Computes the recall, a metric for multi-label classification of
    how many relevant items are selected.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_positives / (possible_positives + K.epsilon())
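
# NOTE: every metric in this file is computed per batch and then averaged
# across batches by Keras, which can differ from the exact epoch-level value.
# A sketch of a stateful alternative for exact epoch-level precision/recall,
# assuming TensorFlow 2.x (keras.metrics.Precision/Recall are tf.keras APIs):
#
#   from tensorflow import keras
#   model.compile(loss="binary_crossentropy", optimizer="adam",
#                 metrics=[keras.metrics.Precision(), keras.metrics.Recall()])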


def f1(y_true, y_pred):
    """F1 score: the harmonic mean of batch-wise precision and recall."""
    # The original nested copies of precision/recall duplicated the
    # module-level functions verbatim, so we call those directly.
    _precision = precision(y_true, y_pred)
    _recall = recall(y_true, y_pred)
    return 2 * ((_precision * _recall) / (_precision + _recall + K.epsilon()))


# Example usage when compiling a model:
# model.compile(loss='binary_crossentropy',
#               optimizer='adam',
#               metrics=[mcor, recall, f1])
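

# A minimal smoke test (a sketch; assumes TensorFlow 2.x is available as the
# Keras backend, with the expected values worked out by hand in the comments):
if __name__ == "__main__":
    y_true = K.constant([1.0, 1.0, 0.0, 0.0])
    y_pred = K.constant([0.9, 0.4, 0.2, 0.1])  # rounds to [1, 0, 0, 0]
    # tp=1, tn=2, fp=0, fn=1  ->  MCC = 2 / sqrt(12) ~= 0.577
    print("mcor:     ", K.eval(mcor(y_true, y_pred)))
    # precision = 1/1 = 1.0; recall = 1/2 = 0.5; f1 ~= 0.667
    print("precision:", K.eval(precision(y_true, y_pred)))
    print("recall:   ", K.eval(recall(y_true, y_pred)))
    print("f1:       ", K.eval(f1(y_true, y_pred)))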