znj 4 years ago
parent
commit
69c49fd34e

+ 32 - 0
didi_RL/RL_learning.py

@@ -0,0 +1,32 @@
+import numpy as np
+import pandas as pd
+from entity import *
+
+
+class RL():
+    def __init__(self,
+                 reward_decay = 0.9,
+                 time_step = 144):
+        self.time_step = time_step
+        self.gamma = reward_decay
+        self.q_label = self._build_q_label(time_step)
+
+    def _build_q_label(self,time_step):
+        q_label = [dict() for _ in range(time_step)]
+        return q_label
+
+    def learn(self, s, s_, t, t_, r, delta_t):
+        # Incremental-mean TD update of the value stored for state s in time slot t.
+        if not self.q_label[t-1].get(s):
+            self.q_label[t-1][s] = [0, 0]
+        n = self.q_label[t-1][s][0] + 1
+        self.q_label[t-1][s][0] = n
+        try:
+            q_next = self.q_label[t_-1][s_][1]
+        except (KeyError, IndexError):
+            q_next = 0
+        self.q_label[t-1][s][1] += 1/n * (self.gamma ** delta_t * q_next + r - self.q_label[t-1][s][1])
+
+    def save_label(self):
+        save(self.q_label,"RL_q_label.pkl")
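
For reference, a minimal sketch of how this tabular learner could be driven; the grid cells, time slots, fare and trip length below are made-up values, and save/load are assumed to come from entity as in the diff.

rl = RL(reward_decay=0.9, time_step=144)
# one transition: pick-up cell (10, 12) at slot 5, drop-off cell (20, 8) at slot 9,
# fare 35, trip spanning delta_t = 4 slots
rl.learn(s=(10, 12), s_=(20, 8), t=5, t_=9, r=35, delta_t=4)
print(rl.q_label[4][(10, 12)])   # [visit count, running value estimate] -> [1, 35.0]
rl.save_label()                  # writes RL_q_label.pkl via entity.save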
+

BIN
didi_RL/RL_q_label.pkl


+ 45 - 0
didi_RL/train.py

@@ -0,0 +1,45 @@
+import numpy as np
+import pandas as pd
+from entity import *
+from didi_RL.RL_learning import RL
+
+n_actions = 2
+n_features = 3
+train_step = 10
+
+max_x = 50
+max_y = 50
+max_time = 144
+gamma = 0.9
+
+def train():
+    data = load('../train_data/train_data02.pkl')
+    print("数据量:",sum(len(i) for i in data))
+    step = 0
+    learn_num = 0
+    rl = RL()
+    # Iterate over time slots in reverse so that later-slot values are already
+    # estimated when earlier slots are updated.
+    for i in range(len(data)-1,-1,-1):
+        for match in data[i]:
+            travel_time = match.order.travel_time
+            rl.learn(s = (match.driver.x,match.driver.y),
+                     s_ = (match.order.to_x,match.order.to_y),
+                     t = match.order.order_time,
+                     t_ = match.order.arrive_time,
+                     # fare spread over the trip and discounted per slot (Didi-paper-style reward)
+                     r = (match.money / travel_time) * sum([gamma ** k for k in range(travel_time)]),
+                     delta_t = travel_time
+                     )
+    print("ok")
+    rl.save_label()
+
+def predict(s,s_,t,t_,r,delta_t):
+    q_label = load('RL_q_label.pkl')
+    q_now = q_label[t-1][s][1]
+    # index with t_-1 to match the 0-based slot indexing used in RL.learn
+    q_next = q_label[t_-1][s_][1]
+    V = gamma ** delta_t * q_next - q_now + r
+    return V
+
+
+if __name__ == '__main__':
+    # train()
+    a = load('RL_q_label.pkl')
+    print(a[0])
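
The reshaped reward r above spreads the fare evenly over the trip and discounts each slot; a quick illustration of the equivalent geometric-series closed form, with made-up numbers:

gamma, money, travel_time = 0.9, 30.0, 5
shaped = (money / travel_time) * sum(gamma ** k for k in range(travel_time))
closed = (money / travel_time) * (1 - gamma ** travel_time) / (1 - gamma)
print(shaped, closed)   # both are approximately 24.57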

+ 18 - 3
entity.py

@@ -20,19 +20,34 @@ class Order():
         # time range: 1 ~ 144
         self.order_time = order_time
 
-        self.travel_distance = (abs(self.to_x - self.x) + abs(self.to_y - self.y) ) * random.uniform(0.96,1.04)
-        self.order_money = (12 + (max(self.travel_distance,8)-8) * 1.8 ) * random.uniform(0.92,1.08)
+        self.travel_distance = (abs(self.to_x - self.x) + abs(self.to_y - self.y) ) * random.uniform(0.93,1.08)
         self.travel_time = int(self.travel_distance / 5 + random.randint(0,1))
+        if self.travel_time==0:
+            self.travel_time = 1
         self.arrive_time = self.order_time + self.travel_time
         self.arrive_time = self.arrive_time if self.arrive_time<=144 else self.arrive_time-144
+        if 0<order_time<34 or 84<order_time<110 or order_time>137:
+            self.order_money = (15 + (max(self.travel_distance,8)-8) * 2.3 ) * random.uniform(0.96,1.15)
+        else:
+            self.order_money = (12 + (max(self.travel_distance,8)-8) * 1.8 ) * random.uniform(0.90,1.10)
+
 
 # order-driver pairing
+cancel_prob = 0.20
 class Match():
-    def __init__(self,order,driver):
+    def __init__(self,order,driver,is_cancel = False):
         self.order = order
         self.driver = driver
         self.distance = (abs(order.x - driver.x) + abs(order.y - driver.y) ) * random.uniform(0.96,1.03)
+        # with probability cancel_prob the passenger cancels: the fare drops to zero
+        # and the driver effectively stays in place for a single time slot
+        if random.random() < cancel_prob:
+            is_cancel = True
+            self.order.order_money = 0
+            self.order.to_x = self.driver.x
+            self.order.to_y = self.driver.y
+            self.order.arrive_time = self.order.order_time + 1 if self.order.order_time<144 else 1
+            self.order.travel_time = 1
         self.money = order.order_money
+        self.is_cancel = is_cancel
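
A hedged check of the cancellation semantics introduced above, assuming m is a Match produced elsewhere in the repo (the variable name is illustrative):

if m.is_cancel:
    assert m.money == 0                                               # cancelled trips pay nothing
    assert (m.order.to_x, m.order.to_y) == (m.driver.x, m.driver.y)   # the driver stays put
    assert m.order.travel_time == 1                                   # and only one slot elapses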
 
 
 

+ 2 - 0
model/dqn/checkpoint

@@ -0,0 +1,2 @@
+model_checkpoint_path: "model.ckpt"
+all_model_checkpoint_paths: "model.ckpt"

BIN
model/dqn/model.ckpt.data-00000-of-00001


BIN
model/dqn/model.ckpt.data-00000-of-00001.tempstate55707850274336993


BIN
model/dqn/model.ckpt.index


BIN
model/dqn/model.ckpt.meta


BIN
model/dqn/model.ckpt.meta.tmp5cfc6fbc5a0f45e7b56f64c9d9ac58ec


+ 36 - 9
myDQN/DQN.py

@@ -12,9 +12,9 @@ class DQN():
             n_features,
             learning_rate=0.001,
             reward_decay=0.9,
-            e_greedy=1,
+            e_greedy=1.0,
             replace_target_iter=300,
-            memory_size=600,
+            memory_size=500,
             batch_size=64,
             e_greedy_increment=None,
             output_graph=False
@@ -33,7 +33,7 @@ class DQN():
         # total learning step
         self.learn_step_counter = 0
 
-        # initialize zero memory [s, a, r, s_]
+        # initialize zero memory [s, a, r, time, s_]
         self.memory = np.zeros((self.memory_size, n_features * 2 + 3))
 
         # consist of [target_net, evaluate_net]
@@ -48,10 +48,11 @@ class DQN():
             # $ tensorboard --logdir=logs
             # tf.train.SummaryWriter soon be deprecated, use following
             tf.summary.FileWriter("logs/", self.sess.graph)
-
+        self.saver = tf.train.Saver(max_to_keep=10)
         self.sess.run(tf.global_variables_initializer())
         self.cost_his = []
 
+
     def _build_net(self):
         # ------------------ build evaluate_net ------------------
         self.s = tf.placeholder(tf.float32, [None, self.n_features], name='s')  # input
@@ -60,7 +61,7 @@ class DQN():
         with tf.variable_scope('eval_net'):
             # c_names(collections_names) are the collections to store variables
             c_names, n_l1, w_initializer, b_initializer = \
-                ['eval_net_params', tf.GraphKeys.GLOBAL_VARIABLES], 10, \
+                ['eval_net_params', tf.GraphKeys.GLOBAL_VARIABLES], 24, \
                 tf.random_normal_initializer(0., 0.3), tf.constant_initializer(0.1)  # config of layers
 
             # first layer. collections is used later when assign to target net
@@ -154,13 +155,15 @@ class DQN():
         # gamma = gamma.reshape((self.batch_size,1))
 
         # q_target[batch_index, eval_act_index] = reward + self.gamma * np.max(q_next, axis=1)
-        q_target[batch_index, eval_act_index] = reward + gamma * np.max(q_next, axis=1)
+        # q_target[batch_index, eval_act_index] = reward + gamma * np.max(q_next, axis=1)
+        q_target[batch_index, eval_act_index] = reward + gamma * q_next[batch_index, eval_act_index]
 
         # train eval network
         _, self.cost = self.sess.run([self._train_op, self.loss],
                                      feed_dict={self.s: batch_memory[:, :self.n_features],
                                                 self.q_target: q_target})
-
+        if self.learn_step_counter > 4000 and self.learn_step_counter % 100 == 0:
+            self.saver.save(self.sess,"../model/dqn/model.ckpt")
         self.cost_his.append(self.cost)
 
         # increasing epsilon
@@ -175,6 +178,30 @@ class DQN():
         plt.xlabel('training steps')
         plt.show()
 
-    def predict(self,model_path=None):
+    def predict(self,s,s_,r,delta_time,model_path=None):
         if model_path:
-            model = load(model_path)
+            self.saver.restore(self.sess, model_path)
+
+        q_now = self.sess.run(self.q_eval,{self.s:s})
+        q_next = self.sess.run(self.q_eval,{self.s:s_})
+        # advantage: discounted next-state value minus current value plus the shaped reward
+        a_pai = self.gamma ** delta_time * q_next - q_now + (r / delta_time) * sum([self.gamma ** i for i in range(delta_time)])
+        return q_now,q_next,a_pai
+
+    def test(self):
+        test_num = 10
+        test_index = np.random.choice(self.memory_size, size=test_num)
+        test_memory = self.memory[test_index, :]
+
+        q_now = self.sess.run(self.q_eval, {self.s: test_memory[:, :self.n_features]})
+
+        q_next = self.sess.run(self.q_eval, {self.s:  test_memory[:, -self.n_features:]})
+
+        reward = test_memory[:, self.n_features + 1]
+        # elapsed time slots between s and s_ (trip length)
+        travel_time = test_memory[:, self.n_features + 2]
+        gamma = np.array([self.gamma ** t for t in travel_time])
+        batch_index = np.arange(test_num, dtype=np.int32)
+        # a_pai = gamma * q_next[:, 1] - q_now[:, 1] + reward
+        a_pai = reward + gamma * q_next[:, 1]
+        return q_now,q_next,a_pai
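
The rewritten target above evaluates the action that was actually stored rather than taking a max over actions, and discounts with gamma ** travel_time per sample; a minimal NumPy illustration of the difference, with made-up values:

import numpy as np

q_next = np.array([[1.0, 3.0],
                   [0.5, 2.0]])          # target-net outputs for s_
reward = np.array([10.0, 4.0])
gamma = np.array([0.9 ** 2, 0.9 ** 5])   # per-sample discount, gamma ** travel_time
eval_act_index = np.array([0, 0])        # actions stored in memory
batch_index = np.arange(2)

old_target = reward + gamma * q_next.max(axis=1)                   # previous max-based target
new_target = reward + gamma * q_next[batch_index, eval_act_index]  # target used in this commit
print(old_target, new_target)            # e.g. [12.43 5.18] vs [10.81 4.3]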
+

+ 24 - 7
myDQN/train.py

@@ -6,15 +6,18 @@ from entity import *
 
 n_actions = 2
 n_features = 3
+train_step = 10
 
 max_x = 50
 max_y = 50
 max_time = 144
+gamma = 0.9
 
 def train():
     data = load('../train_data/train_data.pkl')
     print("数据量:",sum(len(i) for i in data))
     step = 0
+    learn_num = 0
     RL = DQN(n_actions,n_features)
     for d in data:
         for match in d:
@@ -24,18 +27,32 @@ def train():
             _s_x = match.order.to_x / max_x
             _s_y = match.order.to_y / max_y
             _s_time = match.order.arrive_time / max_time
+
             travel_time = match.order.travel_time
             reward = match.money
-            RL.store_transition((s_x,s_y,s_time),0,reward,travel_time,(_s_x,_s_y,_s_time))
-
-            if (step > 200) and (step % 10 == 0):
+            # Didi-paper-style reward: fare spread over the trip and discounted per slot
+            reward = (reward/travel_time) * sum([gamma ** i for i in range(travel_time)])
+            if match.is_cancel:
+                action = 0
+            else:
+                action = 1
+            RL.store_transition([s_x,s_y,s_time],action,reward,travel_time,[_s_x,_s_y,_s_time])
+
+            if (step > 200) and (step % train_step == 0):
                 RL.learn()
+                learn_num += 1
             step += 1
-
+        if learn_num>20000:
+            break
     RL.plot_cost()
-
-
-
+    print(RL.test())
+    # test_data = np.array([[1/50,2/50,10/144],
+    #                       [25/50,46/50,141/144],
+    #                       [45/50,2/50,65/144]])
+    # print(RL.predict([[16/50,30/50,120/144]],[[25/50,46/50,141/144]],200,21))
+    # for test in test_data:
+    #     q = RL.predict(test)
+    #     print(q)
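
For orientation, each transition stored above ends up as one row of the DQN memory, laid out as [s, a, r, travel_time, s_] across n_features * 2 + 3 = 9 floats; a hedged sketch of that packing, assuming s_x, s_y, s_time are normalised the same way as _s_x, _s_y, _s_time:

s  = [s_x, s_y, s_time]          # driver position and order time, scaled to [0, 1]
s_ = [_s_x, _s_y, _s_time]       # drop-off position and arrival time, scaled to [0, 1]
row = s + [action, reward, travel_time] + s_   # 3 + 3 + 3 = 9 columns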
 
 
 

BIN
train_data/train_data.pkl


+ 6 - 2
train_data/train_data.py

@@ -5,7 +5,7 @@ import numpy as np
 
 def get_data():
     all_data = []
-    for id in range(20000):
+    for id in range(200000):
         day_data = []
         for i in range(random.randint(5,25)):
             driver = Driver(id,random.randint(0,49),random.randint(0,49))
@@ -17,8 +17,12 @@ def get_data():
             day_data.append(match)
 
         all_data.append(day_data)
-    save(all_data,"train_data.pkl")
+    save(all_data,"train_data_02.pkl")
 
 
 if __name__ == '__main__':
     get_data()
+ #    q = [[6.3724101e-02 ,1.0513991e+02],
+ # [1.9977689e-03 ,9.5939758e+01],
+ # [1.5849888e-02, 1.0017528e+02]]
+ #    print(np.max(q, axis=1))

+ 18 - 0
train_data/train_data02.py

@@ -0,0 +1,18 @@
+from entity import *
+import random
+import numpy as np
+
+def get_data():
+    data = load("train_data_02.pkl")
+    new_data = [[] for _ in range(144)]
+    for d in data:
+        for match in d:
+            # keep matches whose trip does not wrap past the last slot, plus very late orders
+            if match.order.arrive_time >= match.order.order_time or match.order.order_time >= 142:
+                new_data[match.order.order_time-1].append(match)
+
+    for i in new_data:
+        print(len(i))
+    save(new_data,"train_data02.pkl")
+
+if __name__ == '__main__':
+    get_data()
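
After bucketing, new_data[t-1] holds every match whose order was placed in time slot t, which is the shape didi_RL/train.py walks through in reverse; a small illustrative check, assuming load from entity as imported above:

new_data = load("train_data02.pkl")
print(len(new_data))       # 144 per-slot buckets
print(len(new_data[143]))  # matches whose order was placed in the last slot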