-
Notifications
You must be signed in to change notification settings - Fork 18
Expand file tree
/
Copy pathutils.py
More file actions
166 lines (145 loc) · 4.77 KB
/
utils.py
File metadata and controls
166 lines (145 loc) · 4.77 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
import numpy as np
import torch
import logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s: %(message)s')
logger = logging.getLogger('MAIN')
logger.setLevel(logging.DEBUG)
class Normalizer:
    """Online running-mean/variance normalizer for observations.

    Each call to ``__call__`` folds the new observation into incremental
    mean/variance estimates and returns the standardized value.
    """

    def __init__(self, filter_mean=True):
        # Running mean, running variance, and observation count.
        self.m = 0
        self.v = 0
        self.n = 0.
        # When False, skip mean subtraction and only divide by the std.
        self.filter_mean = filter_mean

    def state_dict(self):
        """Snapshot the running statistics for checkpointing."""
        return {'m': self.m, 'v': self.v, 'n': self.n}

    def load_state_dict(self, saved):
        """Restore running statistics produced by ``state_dict``."""
        self.m, self.v, self.n = saved['m'], saved['v'], saved['n']

    def __call__(self, o):
        """Update the running stats with ``o`` and return it normalized."""
        w_old = self.n / (self.n + 1)
        w_new = 1 / (1 + self.n)
        self.m = self.m * w_old + o * w_new
        # NOTE: deviation is measured against the freshly-updated mean,
        # matching the original incremental formulation used here.
        self.v = self.v * w_old + (o - self.m) ** 2 * w_new
        self.std = (self.v + 1e-6) ** .5  # epsilon avoids divide-by-zero
        self.n += 1
        if self.filter_mean:
            return (o - self.m) / self.std
        return o / self.std
class StaticNormalizer:
    """Normalize observations using frozen offline statistics.

    Every observation is fed into ``online_stats`` so fresh statistics keep
    accumulating, but the normalization itself uses ``offline_stats``, which
    the caller updates explicitly (e.g. by copying online stats periodically).
    """

    def __init__(self, o_size):
        # Stats actually used for normalization (updated by the caller).
        self.offline_stats = SharedStats(o_size)
        # Stats continuously updated with every observation seen.
        self.online_stats = SharedStats(o_size)

    def __call__(self, o_):
        """Feed ``o_`` into the online stats and return it normalized.

        Accepts a scalar or array-like and returns the same kind of value.
        Until the offline stats have seen at least one sample, the input is
        returned unchanged.
        """
        if np.isscalar(o_):
            o = torch.FloatTensor([o_])
        else:
            o = torch.FloatTensor(o_)
        self.online_stats.feed(o)
        if self.offline_stats.n[0] == 0:
            return o_
        std = (self.offline_stats.v + 1e-6) ** .5  # epsilon avoids div-by-zero
        o = (o - self.offline_stats.m) / std
        o = o.numpy()
        if np.isscalar(o_):
            # Bug fix: np.asscalar was deprecated in NumPy 1.16 and removed
            # in 1.23; ndarray.item() is the supported equivalent and returns
            # a plain Python float exactly as before.
            o = o.item()
        else:
            # assumes array inputs expose .shape (ndarray) — TODO confirm callers
            o = o.reshape(o_.shape)
        return o
class SharedStats:
    """Running mean/variance statistics held in shared-memory tensors.

    The tensors are moved to shared memory in ``__init__`` so that multiple
    processes (torch.multiprocessing workers) can read and update the same
    statistics.
    """

    def __init__(self, o_size):
        self.m = torch.zeros(o_size)  # running mean
        self.v = torch.zeros(o_size)  # running (population) variance
        self.n = torch.zeros(1)       # observation count
        self.m.share_memory_()
        self.v.share_memory_()
        self.n.share_memory_()

    def feed(self, o):
        """Incorporate one observation via Welford's incremental update."""
        n = self.n[0]
        new_m = self.m * (n / (n + 1)) + o / (n + 1)
        # Welford: variance update uses deviations from both old and new mean.
        self.v.copy_(self.v * (n / (n + 1)) + (o - self.m) * (o - new_m) / (n + 1))
        self.m.copy_(new_m)
        self.n.add_(1)

    def zero(self):
        """Reset all statistics to zero in place."""
        self.m.zero_()
        self.v.zero_()
        self.n.zero_()

    def load(self, stats):
        """Copy statistics from another SharedStats instance in place."""
        self.m.copy_(stats.m)
        self.v.copy_(stats.v)
        self.n.copy_(stats.n)

    def merge(self, B):
        """Merge another instance's statistics into this one in place.

        Uses the parallel-variance combination formula (Chan et al.).
        """
        A = self
        n_A = self.n[0]
        n_B = B.n[0]
        n = n_A + n_B
        delta = B.m - A.m
        m = A.m + delta * n_B / n
        v = A.v * n_A + B.v * n_B + delta * delta * n_A * n_B / n
        v /= n
        self.m.copy_(m)
        self.v.copy_(v)
        self.n.add_(B.n)

    def state_dict(self):
        """Export statistics as numpy arrays for serialization."""
        return {'m': self.m.numpy(),
                'v': self.v.numpy(),
                'n': self.n.numpy()}

    def load_state_dict(self, saved):
        """Restore statistics from ``state_dict`` output.

        Bug fix: copy into the existing shared-memory tensors instead of
        rebinding the attributes to fresh (non-shared) tensors, which would
        silently break cross-process sharing after a checkpoint load.
        """
        self.m.copy_(torch.FloatTensor(saved['m']))
        self.v.copy_(torch.FloatTensor(saved['v']))
        self.n.copy_(torch.FloatTensor(saved['n']))
class Evaluator:
    """Roll out a candidate parameter vector in the environment and score it.

    Built from a config object supplying the model factory, environment
    factory, number of rollout repetitions, action noise scale, and action
    clipping function.
    """

    def __init__(self, config, state_normalizer):
        self.model = config.model_fn()
        self.repetitions = config.repetitions
        self.env = config.env_fn()
        self.state_normalizer = state_normalizer
        self.config = config

    def eval(self, solution):
        """Evaluate ``solution`` over ``repetitions`` episodes.

        Returns (negated mean reward, total steps) — negated because the
        caller minimizes fitness.
        """
        self.model.set_weight(solution)
        results = [self.single_run() for _ in range(self.repetitions)]
        rewards = [r for r, _ in results]
        steps = [s for _, s in results]
        return -np.mean(rewards), np.sum(steps)

    def single_run(self):
        """Run one full episode; returns (total_reward, episode_steps)."""
        state = self.env.reset()
        total_reward = 0
        steps = 0
        done = False
        while not done:
            state = self.state_normalizer(state)
            action = self.model(np.stack([state])).data.numpy().flatten()
            # Exploration noise, then clip to the valid action range.
            action += np.random.randn(len(action)) * self.config.action_noise_std
            action = self.config.action_clip(action)
            state, reward, done, info = self.env.step(action)
            steps += 1
            total_reward += reward
        return total_reward, steps
def fitness_shift(x):
    """Map raw fitness values to centered rank scores in [-0.5, 0.5].

    Rank-based shaping makes evolution strategies invariant to the scale of
    raw fitness: the largest value maps to 0.5, the smallest to -0.5.

    Args:
        x: array-like of fitness values; flattened internally.

    Returns:
        np.ndarray of centered, normalized ranks, same length as the input.
    """
    x = np.asarray(x).flatten()
    n = len(x)
    if n < 2:
        # Bug fix: with 0 or 1 elements the original divided by n - 1 == 0,
        # yielding NaN. A lone element carries no ranking information, so
        # return zeros (the centered midpoint).
        return np.zeros(n)
    ranks = np.empty(n)
    ranks[x.argsort()] = np.arange(n)
    ranks /= (n - 1)
    ranks -= .5
    return ranks
class Adam:
    """Standalone Adam gradient preconditioner.

    Keeps its own first/second moment estimates and bias-correction factors;
    ``update`` returns the preconditioned step for a raw gradient (no
    learning rate applied — the caller scales the result).
    """

    def __init__(self, beta1=0.9, beta2=0.999, epsilon=1e-08):
        self.beta1 = beta1
        self.beta2 = beta2
        # Running products beta^t used for bias correction.
        self.beta1_t = self.beta2_t = 1
        self.epsilon = epsilon
        self.m = 0  # first-moment (mean) estimate
        self.v = 0  # second-moment (uncentered variance) estimate

    def update(self, g):
        """Return the bias-corrected Adam step for gradient ``g``."""
        b1, b2 = self.beta1, self.beta2
        self.beta1_t *= b1
        self.beta2_t *= b2
        self.m = b1 * self.m + (1 - b1) * g
        self.v = b2 * self.v + (1 - b2) * np.power(g, 2)
        m_hat = self.m / (1 - self.beta1_t)
        v_hat = self.v / (1 - self.beta2_t)
        return m_hat / (np.sqrt(v_hat) + self.epsilon)