metrics.py

from keras import backend as K


def mcor(y_true, y_pred):
    """Matthews correlation coefficient (batch-wise)."""
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos
    y_pos = K.round(K.clip(y_true, 0, 1))
    y_neg = 1 - y_pos

    tp = K.sum(y_pos * y_pred_pos)
    tn = K.sum(y_neg * y_pred_neg)
    fp = K.sum(y_neg * y_pred_pos)
    fn = K.sum(y_pos * y_pred_neg)

    numerator = tp * tn - fp * fn
    denominator = K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return numerator / (denominator + K.epsilon())
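
# Worked example (hypothetical toy batch, for intuition only):
#   y_true = [1, 1, 0, 0], y_pred = [1, 0, 0, 0]
#   tp = 1, tn = 2, fp = 0, fn = 1
#   mcor = (1*2 - 0*1) / sqrt((1+0)*(1+1)*(2+0)*(2+1)) = 2/sqrt(12) ≈ 0.577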


def precision(y_true, y_pred):
    """Precision metric.

    Only computes a batch-wise average of precision.

    Computes the precision, a metric for multi-label classification of
    how many selected items are relevant.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_positives / (predicted_positives + K.epsilon())
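
# Worked example (same hypothetical toy batch as above):
#   y_true = [1, 1, 0, 0], y_pred = [1, 0, 0, 0]
#   true_positives = 1, predicted_positives = 1  ->  precision = 1/1 = 1.0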


def recall(y_true, y_pred):
    """Recall metric.

    Only computes a batch-wise average of recall.

    Computes the recall, a metric for multi-label classification of
    how many relevant items are selected.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_positives / (possible_positives + K.epsilon())
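
# Worked example (same hypothetical toy batch as above):
#   true_positives = 1, possible_positives = 2  ->  recall = 1/2 = 0.5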


def f1(y_true, y_pred):
    """F1 score: harmonic mean of precision and recall (batch-wise)."""
    _precision = precision(y_true, y_pred)
    _recall = recall(y_true, y_pred)
    return 2 * ((_precision * _recall) / (_precision + _recall + K.epsilon()))
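
# Worked example (same hypothetical toy batch as above):
#   precision = 1.0, recall = 0.5
#   f1 = 2 * (1.0 * 0.5) / (1.0 + 0.5) ≈ 0.667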


# Usage:
# model.compile(loss='binary_crossentropy',
#               optimizer='adam',
#               metrics=[mcor, recall, f1])
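
# A minimal, runnable sketch of the usage above. The tiny model and the
# input shape are hypothetical, purely for illustration; any model with a
# sigmoid output trained on binary labels works the same way.
if __name__ == '__main__':
    from keras.models import Sequential
    from keras.layers import Dense

    model = Sequential([
        Dense(16, activation='relu', input_shape=(10,)),
        Dense(1, activation='sigmoid'),
    ])
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=[mcor, precision, recall, f1])
    model.summary()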