DQN.py

import numpy as np
import pandas as pd
import tensorflow as tf
from entity import *

# np.random.seed(1)
# tf.set_random_seed(1)
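
# NOTE: this module is written against the TensorFlow 1.x graph-mode API
# (tf.placeholder, tf.Session, tf.get_variable, tf.train.Saver). It will not
# run unmodified on TensorFlow 2.x without the tf.compat.v1 compatibility shim.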

class DQN:
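    """Deep Q-Network agent (TensorFlow 1.x graph mode).

    Two networks are built: an evaluation network (trained) and a target
    network that is periodically synchronized from the evaluation network.
    Transitions are stored in a fixed-size replay memory as rows of
    [s, a, r, travel_time, s_], and the bootstrapped value is discounted
    per transition by gamma ** travel_time.
    """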
    def __init__(self,
                 n_actions,
                 n_features,
                 learning_rate=0.001,
                 reward_decay=0.9,
                 e_greedy=1.0,
                 replace_target_iter=300,
                 memory_size=500,
                 batch_size=64,
                 e_greedy_increment=None,
                 output_graph=False):
        self.n_actions = n_actions
        self.n_features = n_features
        self.lr = learning_rate
        self.gamma = reward_decay
        self.epsilon_max = e_greedy
        self.replace_target_iter = replace_target_iter
        self.memory_size = memory_size
        self.batch_size = batch_size
        self.epsilon_increment = e_greedy_increment
        self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max

        # total learning steps taken so far
        self.learn_step_counter = 0

        # initialize zero-filled replay memory; each row is [s, a, r, travel_time, s_]
        self.memory = np.zeros((self.memory_size, n_features * 2 + 3))

        # build the two networks [target_net, evaluate_net]
        self._build_net()
        t_params = tf.get_collection('target_net_params')
        e_params = tf.get_collection('eval_net_params')
        self.replace_target_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]

        self.sess = tf.Session()

        if output_graph:
            # $ tensorboard --logdir=logs
            # tf.train.SummaryWriter is deprecated; use tf.summary.FileWriter instead
            tf.summary.FileWriter("logs/", self.sess.graph)

        self.saver = tf.train.Saver(max_to_keep=10)
        self.sess.run(tf.global_variables_initializer())
        self.cost_his = []

    def _build_net(self):
        # ------------------ build evaluate_net ------------------
        self.s = tf.placeholder(tf.float32, [None, self.n_features], name='s')  # input state
        self.q_target = tf.placeholder(tf.float32, [None, self.n_actions], name='Q_target')  # for calculating loss
        with tf.variable_scope('eval_net'):
            # c_names (collection names) are the collections used to store variables
            c_names, n_l1, w_initializer, b_initializer = \
                ['eval_net_params', tf.GraphKeys.GLOBAL_VARIABLES], 24, \
                tf.random_normal_initializer(0., 0.3), tf.constant_initializer(0.1)  # config of layers

            # first layer. collections are used later when assigning to the target net
            with tf.variable_scope('l1'):
                w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names)
                b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)
                l1 = tf.nn.relu(tf.matmul(self.s, w1) + b1)

            # second layer. collections are used later when assigning to the target net
            with tf.variable_scope('l2'):
                w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)
                b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)
                self.q_eval = tf.matmul(l1, w2) + b2

        with tf.variable_scope('loss'):
            self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.q_eval))
        with tf.variable_scope('train'):
            self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)

        # ------------------ build target_net ------------------
        self.s_ = tf.placeholder(tf.float32, [None, self.n_features], name='s_')  # input next state
        with tf.variable_scope('target_net'):
            # c_names (collection names) are the collections used to store variables
            c_names = ['target_net_params', tf.GraphKeys.GLOBAL_VARIABLES]

            # first layer. collections are used later when assigning to the target net
            with tf.variable_scope('l1'):
                w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names)
                b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)
                l1 = tf.nn.relu(tf.matmul(self.s_, w1) + b1)

            # second layer. collections are used later when assigning to the target net
            with tf.variable_scope('l2'):
                w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)
                b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)
                self.q_next = tf.matmul(l1, w2) + b2

    def store_transition(self, s, a, r, travel_time, s_):
        if not hasattr(self, 'memory_counter'):
            self.memory_counter = 0
        transition = np.hstack((s, [a, r, travel_time], s_))
        # overwrite the oldest memory with the new transition (circular buffer)
        index = self.memory_counter % self.memory_size
        self.memory[index, :] = transition
        self.memory_counter += 1

    def choose_action(self, observation):
        # add a batch dimension before feeding into the tf placeholder
        observation = observation[np.newaxis, :]
        if np.random.uniform() < self.epsilon:
            # forward-feed the observation and get the Q value of every action
            actions_value = self.sess.run(self.q_eval, feed_dict={self.s: observation})
            action = np.argmax(actions_value)
        else:
            action = np.random.randint(0, self.n_actions)
        return action
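
    # Note on learn(): unlike the textbook DQN update, the elapsed time between
    # s and s_ (travel_time) varies per transition, so the bootstrapped value is
    # discounted by gamma ** travel_time rather than a fixed gamma, and the
    # target bootstraps from the stored action's value in the target network
    # (q_next[batch_index, eval_act_index]) instead of the max over actions
    # (the max-based variants are kept below as commented-out alternatives).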

    def learn(self):
        # check whether to replace the target network parameters
        if self.learn_step_counter % self.replace_target_iter == 0:
            self.sess.run(self.replace_target_op)
            print('target_params_replaced\n')

        # sample a batch of transitions from memory
        if self.memory_counter > self.memory_size:
            sample_index = np.random.choice(self.memory_size, size=self.batch_size)
        else:
            sample_index = np.random.choice(self.memory_counter, size=self.batch_size)
        batch_memory = self.memory[sample_index, :]

        q_next, q_eval = self.sess.run(
            [self.q_next, self.q_eval],
            feed_dict={
                self.s_: batch_memory[:, -self.n_features:],  # fixed params
                self.s: batch_memory[:, :self.n_features],    # newest params
            })

        # change q_target w.r.t. q_eval's action
        q_target = q_eval.copy()
        batch_index = np.arange(self.batch_size, dtype=np.int32)
        eval_act_index = batch_memory[:, self.n_features].astype(int)
        reward = batch_memory[:, self.n_features + 1]
        # elapsed time interval between s and s_
        travel_time = batch_memory[:, self.n_features + 2]
        gamma = np.array([self.gamma ** t for t in travel_time])
        # gamma = gamma.reshape((self.batch_size, 1))
        # q_target[batch_index, eval_act_index] = reward + self.gamma * np.max(q_next, axis=1)
        # q_target[batch_index, eval_act_index] = reward + gamma * np.max(q_next, axis=1)
        q_target[batch_index, eval_act_index] = reward + gamma * q_next[batch_index, eval_act_index]

        # train the eval network
        _, self.cost = self.sess.run([self._train_op, self.loss],
                                     feed_dict={self.s: batch_memory[:, :self.n_features],
                                                self.q_target: q_target})
        if self.learn_step_counter > 4000 and self.learn_step_counter % 100 == 0:
            self.saver.save(self.sess, "../model/dqn/model.ckpt")
        self.cost_his.append(self.cost)

        # increase epsilon towards epsilon_max
        self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max
        self.learn_step_counter += 1

    def plot_cost(self):
        import matplotlib.pyplot as plt
        print("min_loss:", min(self.cost_his))
        plt.plot(np.arange(len(self.cost_his)), self.cost_his)
        plt.ylabel('Cost')
        plt.xlabel('training steps')
        plt.show()
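
    # Note on predict() and test(): both return an advantage-like quantity
    # `a_pai` computed from the eval network. In predict() the reward r is
    # spread uniformly over the `detal_time` interval (r / detal_time per step)
    # and accumulated with the geometric sum of gamma over that interval; the
    # intended interpretation of these outputs depends on the calling code in
    # the surrounding project.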

    def predict(self, s, s_, r, detal_time, model_path=None):
        if model_path:
            self.saver.restore(self.sess, model_path)
        q_now = self.sess.run(self.q_eval, {self.s: s})
        q_next = self.sess.run(self.q_eval, {self.s: s_})
        a_pai = self.gamma ** detal_time * q_next - q_now \
            + r / detal_time * sum([self.gamma ** i for i in range(detal_time)])
        return q_now, q_next, a_pai

    def test(self):
        test_num = 10
        test_index = np.random.choice(self.memory_size, size=test_num)
        test_memory = self.memory[test_index, :]
        q_now = self.sess.run(self.q_eval, {self.s: test_memory[:, :self.n_features]})
        q_next = self.sess.run(self.q_eval, {self.s: test_memory[:, -self.n_features:]})
        reward = test_memory[:, self.n_features + 1]
        # elapsed time interval between s and s_
        travel_time = test_memory[:, self.n_features + 2]
        gamma = np.array([self.gamma ** t for t in travel_time])
        batch_index = np.arange(test_num, dtype=np.int32)
        # a_pai = gamma * q_next[:, 1] - q_now[:, 1] + reward
        a_pai = reward + gamma * q_next[:, 1]
        return q_now, q_next, a_pai
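

# --- Minimal usage sketch (illustrative only) --------------------------------
# The real training loop lives outside this file and drives the agent through
# the objects imported from `entity`. The loop below only shows the expected
# call order (choose_action -> store_transition -> learn); the state size,
# action count and random transitions are made-up placeholders, not values
# taken from the surrounding project.
if __name__ == '__main__':
    agent = DQN(n_actions=4, n_features=6, e_greedy_increment=0.001)
    for step in range(1000):
        s = np.random.rand(6).astype(np.float32)    # placeholder state
        a = agent.choose_action(s)
        r = np.random.rand()                        # placeholder reward
        travel_time = np.random.randint(1, 5)       # placeholder time interval
        s_ = np.random.rand(6).astype(np.float32)   # placeholder next state
        agent.store_transition(s, a, r, travel_time, s_)
        if step > 200:                              # start learning once memory has samples
            agent.learn()
    agent.plot_cost()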