import tensorflow as tf
import tensorflow.keras.backend as K
import numpy as np
import pandas as pd
import pickle
from BiddingKG.dl_dev.industry.data_process import get_array_data, df_to_array
id2lb = {0: '专业施工', 1: '专用仪器仪表', 2: '专用设备修理', 3: '互联网信息服务', 4: '互联网安全服务', 5: '互联网平台', 6: '互联网接入及相关服务', 7: '人力资源服务',
         8: '人造原油', 9: '仓储业', 10: '仪器仪表', 11: '仪器仪表修理', 12: '会计、审计及税务服务', 13: '会议、展览及相关服务', 14: '住宅、商业用房',
         15: '体育场地设施管理', 16: '体育组织', 17: '体育设备', 18: '保险服务', 19: '信息处理和存储支持服务', 20: '信息技术咨询服务', 21: '信息系统集成和物联网技术服务',
         22: '修缮工程', 23: '健康咨询', 24: '公路旅客运输', 25: '其他专业咨询与调查', 26: '其他专业技术服务', 27: '其他交通运输设备', 28: '其他公共设施管理',
         29: '其他土木工程建筑', 30: '其他工程服务', 31: '其他建筑建材', 32: '其他运输业', 33: '农业和林业机械', 34: '农业服务', 35: '农产品',
         36: '农副食品,动、植物油制品', 37: '出版业', 38: '办公消耗用品及类似物品', 39: '办公设备', 40: '化学原料及化学制品', 41: '化学纤维', 42: '化学药品和中药专用设备',
         43: '医疗设备', 44: '医药品', 45: '卫星传输服务', 46: '卫生', 47: '印刷服务', 48: '图书和档案', 49: '图书档案设备', 50: '图书馆与档案馆',
         51: '土地管理业', 52: '地质勘查', 53: '地震服务', 54: '场馆、站港用房', 55: '城市公共交通运输', 56: '塑料制品、半成品及辅料', 57: '天然石料', 58: '娱乐设备',
         59: '婚姻服务', 60: '安全保护服务', 61: '安全生产设备', 62: '家具用具', 63: '家用电器修理', 64: '工业、生产用房', 65: '工业与专业设计及其他专业技术服务',
         66: '工矿工程建筑', 67: '工程技术与设计服务', 68: '工程机械', 69: '工程监理服务', 70: '工程评价服务', 71: '工程造价服务', 72: '市场调查', 73: '广告业',
         74: '广播', 75: '广播、电视、电影设备', 76: '广播电视传输服务', 77: '废弃资源综合利用业', 78: '建筑涂料', 79: '建筑物、构筑物附属结构', 80: '建筑物拆除和场地准备活动',
         81: '建筑装饰和装修业', 82: '录音制作', 83: '影视节目制作', 84: '房地产中介服务', 85: '房地产开发经营', 86: '房地产租赁经营', 87: '房屋租赁', 88: '招标代理',
         89: '探矿、采矿、选矿和造块设备', 90: '政法、检测专用设备', 91: '教育服务', 92: '教育设备', 93: '文物及非物质文化遗产保护', 94: '文物和陈列品', 95: '文艺创作与表演',
         96: '文艺设备', 97: '新闻业', 98: '旅行社及相关服务', 99: '日杂用品', 100: '有色金属冶炼及压延产品', 101: '有色金属矿', 102: '木材、板材等',
         103: '木材采集和加工设备', 104: '机械设备', 105: '机械设备经营租赁', 106: '林业产品', 107: '林业服务', 108: '架线和管道工程建筑', 109: '核工业专用设备',
         110: '橡胶制品', 111: '殡葬服务', 112: '殡葬设备及用品', 113: '气象服务', 114: '水上交通运输设备', 115: '水上运输业', 116: '水利和水运工程建筑',
         117: '水工机械', 118: '水文服务', 119: '水资源管理', 120: '污水处理及其再生利用', 121: '汽车、摩托车修理与维护', 122: '法律服务', 123: '洗染服务',
         124: '测绘地理信息服务', 125: '海洋仪器设备', 126: '海洋工程建筑', 127: '海洋服务', 128: '消防设备', 129: '清洁服务', 130: '渔业产品', 131: '渔业服务',
         132: '炼焦和金属冶炼轧制设备', 133: '烟草加工设备', 134: '热力生产和供应', 135: '焦炭及其副产品', 136: '煤炭采选产品', 137: '燃气生产和供应业', 138: '物业管理',
         139: '特种用途动、植物', 140: '环保咨询', 141: '环境与生态监测检测服务', 142: '环境污染防治设备', 143: '环境治理业', 144: '玻璃及其制品', 145: '理发及美容服务',
         146: '生态保护', 147: '电信', 148: '电力、城市燃气、蒸汽和热水、水', 149: '电力供应', 150: '电力工业专用设备', 151: '电力工程施工', 152: '电力生产',
         153: '电子和通信测量仪器', 154: '电工、电子专用生产设备', 155: '电影放映', 156: '电气安装', 157: '电气设备', 158: '电气设备修理', 159: '畜牧业服务',
         160: '监控设备', 161: '石油制品', 162: '石油和化学工业专用设备', 163: '石油和天然气开采产品', 164: '石油天然气开采专用设备', 165: '研究和试验发展',
         166: '社会工作', 167: '社会经济咨询', 168: '科技推广和应用服务业', 169: '科研、医疗、教育用房', 170: '管道和设备安装', 171: '粮油作物和饲料加工设备',
         172: '纸、纸制品及印刷品', 173: '纺织原料、毛皮、被服装具', 174: '纺织设备', 175: '绿化管理', 176: '缝纫、服饰、制革和毛皮加工设备', 177: '航空器及其配套设备',
         178: '航空客货运输', 179: '航空航天工业专用设备', 180: '节能环保工程施工', 181: '装卸搬运', 182: '计算机和办公设备维修', 183: '计算机设备',
         184: '计量标准器具及量具、衡器', 185: '货币处理专用设备', 186: '货币金融服务', 187: '质检技术服务', 188: '资本市场服务', 189: '车辆',
         190: '边界勘界和联检专用设备', 191: '运行维护服务', 192: '通信设备', 193: '通用设备修理', 194: '道路货物运输', 195: '邮政专用设备', 196: '邮政业',
         197: '采矿业和制造业服务', 198: '铁路、船舶、航空航天等运输设备修理', 199: '铁路、道路、隧道和桥梁工程建筑', 200: '铁路运输设备', 201: '防洪除涝设施管理',
         202: '陶瓷制品', 203: '雷达、无线电和卫星导航设备', 204: '非金属矿', 205: '非金属矿物制品工业专用设备', 206: '非金属矿物材料', 207: '食品加工专用设备',
         208: '食品及加工盐', 209: '餐饮业', 210: '饮料、酒精及精制茶', 211: '饮料加工设备', 212: '饲养动物及其产品', 213: '黑色金属冶炼及压延产品', 214: '黑色金属矿'}
# Inverse mapping: label name -> class id.
lb2id = {label: idx for idx, label in id2lb.items()}
seq_len = 20
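
# Added sanity check: the label inventory must match the softmax width used
# in rnn_model below (Dense(units=215)).
assert len(id2lb) == 215 and len(lb2id) == 215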

def recall(y_true, y_pred):
    '''
    Compute recall.
    @Args:
        y_true: ground-truth labels
        y_pred: predicted labels
    @Return:
        recall
    '''
    c1 = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    c3 = K.sum(K.round(K.clip(y_true, 0, 1)))
    # c3 is a symbolic tensor, so the original `if c3 == 0` check could never
    # fire under TF1 graph mode; smooth the denominator with epsilon instead.
    return c1 / (c3 + K.epsilon())

def f1_score(y_true, y_pred):
    '''
    Compute F1.
    @Args:
        y_true: ground-truth labels
        y_pred: predicted labels
    @Return:
        F1 score
    '''
    c1 = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    c2 = K.sum(K.round(K.clip(y_pred, 0, 1)))
    c3 = K.sum(K.round(K.clip(y_true, 0, 1)))
    # Epsilon-smoothed precision/recall avoid NaN when a batch has no predicted
    # or no actual positives (a Python `if c3 == 0` on a tensor never triggers
    # in graph mode).
    precision = c1 / (c2 + K.epsilon())
    recall = c1 / (c3 + K.epsilon())
    return 2 * (precision * recall) / (precision + recall + K.epsilon())

def precision(y_true, y_pred):
    '''
    Compute precision.
    @Args:
        y_true: ground-truth labels
        y_pred: predicted labels
    @Return:
        precision
    '''
    c1 = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    c2 = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return c1 / (c2 + K.epsilon())
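
# Added sketch, not part of the original pipeline: a minimal sanity check of the
# smoothed metrics on a hypothetical toy batch. K.eval works under TF1 graph mode.
def _metric_sanity_check():
    y_true = tf.constant([[0., 1., 0.], [1., 0., 0.]])
    y_pred = tf.constant([[0.1, 0.8, 0.1], [0.2, 0.7, 0.1]])
    # round(clip(...)) binarizes predictions: one true positive out of two
    # predicted positives and two actual positives -> P = R = 0.5.
    print(K.eval(precision(y_true, y_pred)), K.eval(recall(y_true, y_pred)))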

# Leftover note from earlier experiments: SELU must be paired with lecun_normal
# initialization, with AlphaDropout as its dropout variant.
def rnn_model(seq_len=seq_len, rnn_unit=128):  # rnn_unit was previously 64
    input1 = tf.keras.Input(shape=(seq_len, 128), name='title')
    input2 = tf.keras.Input(shape=(seq_len, 128), name='project')
    input3 = tf.keras.Input(shape=(seq_len, 128), name='product')
    lstm1 = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(units=rnn_unit, recurrent_dropout=0.5))(input1)
    lstm2 = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(units=rnn_unit, recurrent_dropout=0.5))(input2)
    lstm3 = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(units=rnn_unit, recurrent_dropout=0.5))(input3)
    concat = tf.keras.layers.Concatenate()([lstm1, lstm2, lstm3])
    drop = tf.keras.layers.Dropout(rate=0.5)(concat)
    out = tf.keras.layers.Dense(units=215, activation='softmax')(drop)  # 183 classes under the old standard
    model = tf.keras.Model(inputs=(input1, input2, input3), outputs=out)
    model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.001), loss=tf.keras.losses.categorical_crossentropy,
                  metrics=[recall, precision, f1_score, tf.keras.metrics.categorical_accuracy])
    model.summary()  # summary() prints itself; wrapping it in print() just adds a stray None
    return model
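
# Added smoke test (a sketch assuming df_to_array emits one (seq_len, 128)
# embedding matrix per field, as the Input shapes above suggest): random arrays
# stand in for real features.
def _model_smoke_test():
    m = rnn_model(seq_len=seq_len)
    fake = [np.random.rand(2, seq_len, 128).astype('float32') for _ in range(3)]
    probs = m.predict(fake)
    assert probs.shape == (2, 215)  # one softmax row per sample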

def predict():
    import copy
    # NOTE: relies on the global `model` built in __main__; loading a full
    # model here was an earlier alternative:
    # model = tf.keras.models.load_model('model.18-1.3477-0.7020.h5',  # accuracy: 0.6912, total samples: 32735
    #                                    custom_objects={'recall': recall, 'f1_score': f1_score, 'precision': precision})  # accuracy: 0.8383, total samples: 32735
    df_test = pd.read_excel('data/新标准215类test.xlsx')
    # df_test = pd.read_excel('E:\行业分类/标记数据.xlsx')
    # df_test = pd.read_excel('data/新标准215类train.xlsx')
    # df_test = pd.read_excel('data/新标准215类test_predict.xlsx')
    # df_test = pd.read_excel('data/新标准215类筛选数据test_predict.xlsx')
    # df_test = pd.read_excel('data/新标准所有标准规则标注数据_relabel.xlsx')
    # df_test = pd.read_excel('data/新标准所有标准规则标注数据_relabel_predict_标注与模型不一致数据50592条.xlsx')
    # df_test = pd.read_excel('data/新标准所有标准规则标注数据_relabel_predict_标注与模型不一致数据50592条预测结果_2为最新预测.xlsx')
    df_test.reset_index(drop=True, inplace=True)
    pred_list = []
    prob_list = []
    x1, x2, x3, y, all_data, _ = df_to_array(df_test, seq_len)
    # assert len(lb2id) == 215
    # id2lb = {k: v for v, k in lb2id.items()}
    n = 0
    # model.load_weights('model.50-1.5302-0.5805-0.7621-0.6589.h5')  # accuracy: 0.6515, total samples: 32735
    # model.load_weights('model.97-1.3751-0.6604-0.7787-0.7147.h5')  # accuracy: 0.6515, total samples: 32735
    # model.load_weights('model.25-1.4056-0.6231-0.7799-0.6926.h5')
    # model.load_weights('model.19-1.3846-0.6070-0.7887-0.6859.h5')  # trained on the test set
    # model.load_weights('model.28-0.4495-0.8564-0.8921-0.8739.h5')  # retrained on filtered data
    model.load_weights('model.19-0.9958-0.7568.h5')  # retrained on filtered data
    # model.load_weights('model.21-0.9929-0.7576.h5')
    # model.load_weights('newmodel_rnn_50_256.h5')  # accuracy: 0.8290, total samples: 32735
    pred = model.predict(x=[x1[n:], x2[n:], x3[n:]])
    label = copy.deepcopy(y[n:])
    data = all_data[n:]
    for i in range(len(pred)):
        pred_list.append(id2lb[np.argmax(pred[i])])
        prob_list.append(pred[i][np.argmax(pred[i])])
        # Debug helper: print samples where prediction and label disagree.
        # print(pred[i])
        # print(label[i])
        # if np.argmax(pred[i]) != np.argmax(label[i]):
        #     print(data[i])
        #     print(id2lb[np.argmax(pred[i])], id2lb[np.argmax(y[n:][i])])
        # else:
        #     print('equal')
    assert len(pred_list) == len(df_test)
    df_test['预测结果2'] = pd.Series(pred_list)
    df_test['预测概率2'] = pd.Series(prob_list)
    df_test['pred=label2'] = df_test.apply(lambda x: 1 if x['预测结果2'] == x['label'] else 0, axis=1)
    # df_test['pred=old_label'] = df_test.apply(lambda x: 1 if x['预测结果'] == x['old_label'] else 0, axis=1)
    print('accuracy: %.4f, total samples: %d' % (sum(df_test['pred=label2']) / len(df_test), len(df_test)))
    # print('accuracy: %.4f, total samples: %d' % (sum(df_test['pred=old_label']) / len(df_test), len(df_test)))
    # df_test[['doctitle', 'project_name', 'product', 'win_tenderer']] = df_test["segwords"].str.split('#split#',
    #                                                                                                  expand=True).loc[:, :]
    # df_test.to_excel('data/新标准215类筛选数据test_predict.xlsx', index=False)
    df_test.to_excel('data/新标准215类test_predict.xlsx', index=False)
    # df_test.to_excel('E:\行业分类/标记数据.xlsx', index=False)
    # df_test.to_excel('data/新标准所有标准规则标注数据_relabel_predict_标注与模型不一致数据50592条预测结果_3为最新预测.xlsx', index=False)
    # df_test.to_excel('data/新标准所有标准规则标注数据_relabel_predict.xlsx', index=False)
    # df_test.to_excel('data/新标准215类train_predict.xlsx')
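
# Intended call order (an added note, inferred from __main__ below): build the
# architecture first so predict() can load weights into the global `model`, e.g.
#   model = rnn_model()
#   predict()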

def data_enhance(df, n=1500):
    '''Oversample every label with fewer than n rows (sampling with replacement)
    so each class reaches roughly n examples, then shuffle.'''
    extra = []
    for lb in set(df['label']):
        df_lb = df[df['label'] == lb]
        num = len(df_lb)
        if num < n:
            extra.append(df_lb.sample(n=n - num, replace=True))
    # pd.concat replaces the deprecated DataFrame.append (removed in pandas 2.x).
    df = pd.concat([df] + extra, ignore_index=True)
    return df.sample(frac=1)
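
# Toy illustration of data_enhance on a hypothetical frame: label 'B' has one
# row and gains a duplicated row at n=2, while 'A' already meets the quota.
# df_toy = pd.DataFrame({'label': ['A', 'A', 'B'], 'text': ['a1', 'a2', 'b1']})
# balanced = data_enhance(df_toy, n=2)  # len(balanced) == 4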

def train():
    # from new_model.data_process import get_array_data, df_to_array
    # NOTE: like predict(), relies on the global `model` built in __main__.
    df_train = pd.read_excel('data/新标准215类train.xlsx')  # E:\行业分类/新标准183类train.xlsx
    df_test = pd.read_excel('data/新标准215类test.xlsx')
    df_train = data_enhance(df_train, n=50)
    # df_test = pd.read_excel('data/新标准215类train.xlsx')
    # df_train = pd.read_excel('data/新标准215类test.xlsx')
    df_train = df_train.sample(frac=1)
    df_test = df_test.sample(frac=1)
    x1, x2, x3, y, all_data, lb2id = df_to_array(df_train, seq_len)
    x1_t, x2_t, x3_t, y_t, all_data_t, lb2id_t = df_to_array(df_test, seq_len)
    # model.load_weights(filepath='model.50-1.4338-0.6360-0.7683-0.6959.h5')
    model.fit(x=[x1, x2, x3], y=y, batch_size=512, epochs=100, verbose=1,
              # callbacks=[tf.keras.callbacks.ModelCheckpoint(filepath='model.{epoch:02d}-{val_loss:.4f}-{val_recall:.4f}-{val_precision:.4f}-{val_f1_score:.4f}.h5',
              callbacks=[tf.keras.callbacks.ModelCheckpoint(filepath='model.{epoch:02d}-{val_loss:.4f}-{val_categorical_accuracy:.4f}.h5',  # previously 'newmodel_rnn_50_256.h5'
                                                            monitor='val_loss',
                                                            save_best_only=True,
                                                            save_weights_only=False)],
              validation_data=([x1_t, x2_t, x3_t], y_t), shuffle=True)
    # Defined but currently unused; it was wired into the commented-out fit below.
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir='logs/',
                                                          histogram_freq=1, write_images=True, write_grads=True)
    # model.fit(x=[x1, x2, x3], y=y, batch_size=512, epochs=50, verbose=1,
    #           callbacks=[tf.keras.callbacks.ModelCheckpoint(filepath='newmodel_rnn.h5',
    #                                                         monitor='val_loss',
    #                                                         save_best_only=True,
    #                                                         save_weights_only=False),
    #                      tensorboard_callback],
    #           validation_split=0.2, shuffle=False)
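
# Optional refinement (an assumption, not part of the original setup): early
# stopping alongside the checkpoint callback to cut training short once
# val_loss stalls.
# early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5)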

def save_model():
    '''Export the trained Keras model as a SavedModel for serving
    (tf.Session and tf.saved_model.simple_save are TF 1.x APIs).'''
    filepath = 'model.21-0.9929-0.7576.h5'
    # filepath = 'newmodel.h5'
    with tf.Graph().as_default():
        with tf.Session() as sess:
            model = tf.keras.models.load_model(filepath, custom_objects={'precision': precision, 'recall': recall,
                                                                         'f1_score': f1_score})
            model.summary()
            print(model.weights)
            tf.saved_model.simple_save(sess,
                                       'industry_model/',
                                       inputs={
                                           "title": model.input[0],
                                           "project": model.input[1],
                                           "product": model.input[2],
                                       },
                                       outputs={"outputs": model.output})
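
# Added sketch of reading the export back (assumes TF 1.x; simple_save tags the
# graph with the default 'serve' serving tag):
def _load_exported_model():
    with tf.Graph().as_default():
        with tf.Session() as sess:
            tf.saved_model.loader.load(sess, ['serve'], 'industry_model/')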

if __name__ == "__main__":
    # RNN with batch normalization and dropout:
    # loss: 0.5266 - recall: 0.8121 - precision: 0.9099 - f1_score: 0.8582 - val_loss: 0.8428 - val_recall: 0.7788 - val_precision: 0.8683 - val_f1_score: 0.8211
    # loss: 0.6649 - recall: 0.7623 - precision: 0.8891 - f1_score: 0.8208 - val_loss: 0.9595 - val_recall: 0.7373 - val_precision: 0.8468 - val_f1_score: 0.7878
    model = rnn_model()  # loss: 0.5595 - categorical_crossentropy: 0.5595 - recall: 0.7968 - precision: 0.9241 - f1_score: 0.8557 - val_loss: 0.9927 - val_categorical_crossentropy: 0.9927 - val_recall: 0.7195 - val_precision: 0.8498 - val_f1_score: 0.7791
    # model = cnn_model()  # epoch 15 - loss: 1.1907 - recall: 0.6311 - precision: 0.8064 - f1_score: 0.7080 - val_loss: 1.4360 - val_recall: 0.6322 - val_precision: 0.7886 - val_f1_score: 0.7017
    # from new_model.data_process import get_array_data, df_to_array
    # x1, x2, x3, y, all_data, lb2id = get_array_data()
    train()
    # predict()
    # save_model()