cartpole_q.py
import gym  # gym >= 0.26 (or gymnasium) is needed for the 5-tuple step() API used below
import numpy as np
import matplotlib.pyplot as plt
import pickle
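
# Tabular Q-learning on CartPole-v1: the continuous 4-dimensional observation
# (cart position, cart velocity, pole angle, pole angular velocity) is
# discretized into bins so the action values fit in a lookup table. Training
# saves the learned Q-table to cartpole.pkl; testing reloads it and replays
# the greedy policy.
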
def run(is_training=True, render=False):
    env = gym.make('CartPole-v1', render_mode='human' if render else None)

    # Divide position, velocity, pole angle, and pole angular velocity into bins.
    # Position and angle limits match CartPole's termination thresholds
    # (|position| > 2.4, |angle| > 0.2095 rad); velocities are unbounded, so
    # +/-4 just brackets the typical range and np.digitize maps outliers to
    # the edge bins.
    pos_space = np.linspace(-2.4, 2.4, 10)
    vel_space = np.linspace(-4, 4, 10)
    ang_space = np.linspace(-.2095, .2095, 10)
    ang_vel_space = np.linspace(-4, 4, 10)
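
    # np.digitize returns bin indices from 0 to len(bins) inclusive, so each
    # state dimension needs len(space)+1 slots in the Q-table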
    if is_training:
        # 11x11x11x11x2 table: one action-value entry per discretized
        # state-action pair
        q = np.zeros((len(pos_space)+1, len(vel_space)+1, len(ang_space)+1,
                      len(ang_vel_space)+1, env.action_space.n))
    else:
        with open('cartpole.pkl', 'rb') as f:
            q = pickle.load(f)
    learning_rate_a = 0.2         # alpha, the learning rate
    discount_factor_g = 0.99      # gamma, the discount factor
    epsilon = 1                   # 1 = 100% random actions
    epsilon_decay_rate = 0.00001  # subtracted from epsilon each episode
    rng = np.random.default_rng() # random number generator

    rewards_per_episode = []
    i = 0

    # Run episodes until the mean reward over the last 100 episodes clears 1000
    while True:
        state = env.reset()[0]  # all four state variables start near zero
        state_p = np.digitize(state[0], pos_space)
        state_v = np.digitize(state[1], vel_space)
        state_a = np.digitize(state[2], ang_space)
        state_av = np.digitize(state[3], ang_vel_space)

        terminated = False  # True when the pole falls or the cart leaves the track
        rewards = 0
        # Cap episodes at 10000 steps so a successful policy cannot run forever
        while not terminated and rewards < 10000:

            if is_training and rng.random() < epsilon:
                # Explore: choose a random action (0 = push left, 1 = push right)
                action = env.action_space.sample()
            else:
                # Exploit: choose the greedy action from the Q-table
                action = np.argmax(q[state_p, state_v, state_a, state_av, :])

            # The truncation flag is ignored; the reward cap above bounds the episode
            new_state, reward, terminated, _, _ = env.step(action)
            new_state_p = np.digitize(new_state[0], pos_space)
            new_state_v = np.digitize(new_state[1], vel_space)
            new_state_a = np.digitize(new_state[2], ang_space)
            new_state_av = np.digitize(new_state[3], ang_vel_space)
            if is_training:
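                # Q-learning TD update: move Q(s, a) toward the target
                # r + gamma * max_a' Q(s', a')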
                q[state_p, state_v, state_a, state_av, action] += learning_rate_a * (
                    reward
                    + discount_factor_g * np.max(q[new_state_p, new_state_v, new_state_a, new_state_av, :])
                    - q[state_p, state_v, state_a, state_av, action]
                )
            state = new_state
            state_p = new_state_p
            state_v = new_state_v
            state_a = new_state_a
            state_av = new_state_av

            rewards += reward

            if not is_training and rewards % 100 == 0:
                print(f'Episode: {i}  Rewards: {rewards}')

        rewards_per_episode.append(rewards)
        mean_rewards = np.mean(rewards_per_episode[-100:])

        if is_training and i % 100 == 0:
            print(f'Episode: {i}  Rewards: {rewards}  Epsilon: {epsilon:0.2f}  Mean Rewards: {mean_rewards:0.1f}')

        if mean_rewards > 1000:
            break
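        # Linear decay: at 1e-5 per episode, epsilon would take 100,000
        # episodes to reach zero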
        epsilon = max(epsilon - epsilon_decay_rate, 0)
        i += 1

    env.close()
    # Save the Q-table to file
    if is_training:
        with open('cartpole.pkl', 'wb') as f:
            pickle.dump(q, f)
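    # Plot the rolling 100-episode mean reward as a smoothed learning curve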
    mean_rewards = []
    for t in range(i):
        mean_rewards.append(np.mean(rewards_per_episode[max(0, t-100):(t+1)]))
    plt.plot(mean_rewards)
    plt.savefig('cartpole.png')

if __name__ == '__main__':
    # run(is_training=True, render=False)  # TRAIN: build and save the Q-table first
    run(is_training=False, render=True)    # TEST: replay the saved greedy policy