actor_critic.py
import tensorflow as tf
from util import store_args, nn


class ActorCritic:
    @store_args
    def __init__(self, inputs_tf, dimo, dimg, dimu, max_u, o_stats, g_stats, hidden, layers,
                 **kwargs):
        """The actor-critic network and related training code.

        Args:
            inputs_tf (dict of tensors): all necessary inputs for the network: the
                observation (o), the goal (g), and the action (u)
            dimo (int): the dimension of the observations
            dimg (int): the dimension of the goals
            dimu (int): the dimension of the actions
            max_u (float): the maximum magnitude of actions; action outputs will be scaled
                accordingly
            o_stats (baselines.her.Normalizer): normalizer for observations
            g_stats (baselines.her.Normalizer): normalizer for goals
            hidden (int): number of hidden units that should be used in hidden layers
            layers (int): number of hidden layers
        """
        self.o_tf = inputs_tf['o']
        self.g_tf = inputs_tf['g']
        self.u_tf = inputs_tf['u']

        # Prepare inputs for actor and critic.
        o = self.o_stats.normalize(self.o_tf)
        g = self.g_stats.normalize(self.g_tf)
        input_pi = tf.concat(axis=1, values=[o, g])  # for actor

        # Networks.
        with tf.variable_scope('pi'):
            self.pi_tf = self.max_u * tf.tanh(
                nn(input_pi, [self.hidden] * self.layers + [self.dimu]))
        with tf.variable_scope('Q'):
            # for policy training
            input_Q = tf.concat(axis=1, values=[o, g, self.pi_tf / self.max_u])  # actions from the policy
            self.Q_pi_tf = nn(input_Q, [self.hidden] * self.layers + [1])
            # for critic training
            input_Q = tf.concat(axis=1, values=[o, g, self.u_tf / self.max_u])  # actions from the buffer
            self._input_Q = input_Q  # exposed for tests
            self.Q_tf = nn(input_Q, [self.hidden] * self.layers + [1], reuse=True)
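A minimal usage sketch (not part of the original file), assuming TF 1.x graph mode. The IdentityStats stub and the dimensions below are illustrative only; in the actual training code o_stats and g_stats are baselines.her Normalizer instances, and inputs_tf comes from the DDPG agent's staging area.

import tensorflow as tf

class IdentityStats:
    """Stand-in for baselines.her.Normalizer: normalize() is a no-op here."""
    def normalize(self, v):
        return v

dimo, dimg, dimu = 10, 3, 4  # illustrative dimensions
inputs_tf = {
    'o': tf.placeholder(tf.float32, shape=(None, dimo)),
    'g': tf.placeholder(tf.float32, shape=(None, dimg)),
    'u': tf.placeholder(tf.float32, shape=(None, dimu)),
}

ac = ActorCritic(inputs_tf, dimo=dimo, dimg=dimg, dimu=dimu, max_u=1.0,
                 o_stats=IdentityStats(), g_stats=IdentityStats(),
                 hidden=256, layers=3)

# ac.pi_tf: deterministic policy action, squashed by tanh into [-max_u, max_u]
# ac.Q_pi_tf: critic value of the policy's own action (used for the actor loss)
# ac.Q_tf: critic value of the action fed via inputs_tf['u'] (used for the critic loss);
#          it reuses the same Q-network weights via reuse=True.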