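# Collected CausalWorld test cases and tutorial snippets (plus a few
# sentence-similarity utilities at the end). Assumed common prelude for the
# CausalWorld snippets; the exact module paths follow the CausalWorld
# tutorials and may differ across versions:
import unittest
import numpy as np
from causal_world.envs import CausalWorld
from causal_world.task_generators import generate_task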
class TestPicking(unittest.TestCase):
def setUp(self):
self.task = generate_task(task_generator_id='picking')
self.env = CausalWorld(task=self.task, enable_visualization=False, skip_frame=1, action_mode='end_effector_positions', normalize_actions=False, normalize_observations=False)
return
def tearDown(self):
self.env.close()
return
def test_determinism(self):
self.env.set_action_mode('joint_positions')
observations_1 = []
rewards_1 = []
horizon = 2000
actions = [self.env.action_space.sample() for _ in range(horizon)]
actions = np.array(actions)
obs = self.env.reset()
observations_1.append(obs)
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_1.append(obs)
rewards_1.append(reward)
for _ in range(10):
observations_2 = []
rewards_2 = []
obs = self.env.reset()
observations_2.append(obs)
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_2.append(obs)
rewards_2.append(reward)
if (not np.array_equal(observations_1[i], observations_2[i])):
print((observations_1[i] - observations_2[i]))
assert np.array_equal(observations_1[i], observations_2[i])
assert (rewards_1 == rewards_2)
def lift_last_finger_first(self, current_obs):
        desired_action = current_obs[19:19 + 9]
        desired_action[6:] = [0, -0.08, 0.4]
for _ in range(250):
(obs, reward, done, info) = self.env.step(desired_action)
return desired_action
def move_first_two_fingers(self, current_obs):
        desired_action = current_obs[19:19 + 9]
        desired_action[:6] = [0.0, 0.15313708, 0.05586292, 0.13262061, -0.07656854, 0.05586292]
for _ in range(250):
(obs, reward, done, info) = self.env.step(desired_action)
return obs
def grip_block(self):
        tool_block = self.env._task._stage.get_object('tool_block')
        grip_locations = get_suggested_grip_locations(tool_block.get_size(), tool_block.world_to_cube_r_matrix())
        desired_action = np.zeros(9)
        desired_action[6:] = [0, -0.08, 0.4]
desired_action[:3] = grip_locations[0]
desired_action[3:6] = grip_locations[1]
for _ in range(250):
(obs, reward, done, info) = self.env.step(desired_action)
return desired_action
def lift_block(self, desired_grip):
desired_action = desired_grip
for _ in range(40):
desired_action[2] += 0.005
desired_action[5] += 0.005
for _ in range(10):
(obs, reward, done, info) = self.env.step(desired_action)
return obs
def test_02_mass(self):
self.env.set_action_mode('end_effector_positions')
intervention = {'tool_block': {'mass': 0.02}}
self.env.do_intervention(interventions_dict=intervention)
for _ in range(1):
obs = self.env.reset()
obs = self.move_first_two_fingers(obs)
self.lift_last_finger_first(obs)
desired_grip = self.grip_block()
self.assertEqual(self.env.get_robot().get_tip_contact_states(), [1, 1, 0], 'contact states are not closed')
final_obs = self.lift_block(desired_grip)
            self.assertGreater(final_obs[-22], 0.2, "the block didn't get lifted")
def test_08_mass(self):
self.env.set_action_mode('end_effector_positions')
intervention = {'tool_block': {'mass': 0.08}}
self.env.do_intervention(interventions_dict=intervention)
for _ in range(1):
obs = self.env.reset()
obs = self.move_first_two_fingers(obs)
self.lift_last_finger_first(obs)
desired_grip = self.grip_block()
self.assertEqual(self.env.get_robot().get_tip_contact_states(), [1, 1, 0], 'contact states are not closed')
final_obs = self.lift_block(desired_grip)
            self.assertGreater(final_obs[-22], 0.2, "the block didn't get lifted")
def test_1_mass(self):
self.env.set_action_mode('end_effector_positions')
intervention = {'tool_block': {'mass': 0.1}}
self.env.do_intervention(interventions_dict=intervention)
for _ in range(1):
obs = self.env.reset()
obs = self.move_first_two_fingers(obs)
self.lift_last_finger_first(obs)
desired_grip = self.grip_block()
self.assertEqual(self.env.get_robot().get_tip_contact_states(), [1, 1, 0], 'contact states are not closed')
final_obs = self.lift_block(desired_grip)
            self.assertGreater(final_obs[-22], 0.2, "the block didn't get lifted")
def test_determinism_w_interventions(self):
self.env.set_action_mode('joint_positions')
observations_1 = []
rewards_1 = []
horizon = 100
actions = [self.env.action_space.sample() for _ in range(horizon)]
actions = np.array(actions)
new_goal = self.env.sample_new_goal()
self.env.set_starting_state(interventions_dict=new_goal)
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_1.append(obs)
rewards_1.append(reward)
for _ in range(10):
observations_2 = []
rewards_2 = []
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_2.append(obs)
rewards_2.append(reward)
assert np.array_equal(observations_1[i], observations_2[i])
assert (rewards_1 == rewards_2)
def test_determinism_w_in_episode_interventions(self):
self.env.set_action_mode('joint_positions')
observations_1 = []
rewards_1 = []
horizon = 100
actions = [self.env.action_space.sample() for _ in range(horizon)]
actions = np.array(actions)
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_1.append(obs)
rewards_1.append(reward)
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
if (i == 50):
                success_signal = self.env.do_intervention({'tool_block': {'cylindrical_position': [0.1, np.pi / 2, 0.0325]}})
observations_2 = []
rewards_2 = []
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_2.append(obs)
rewards_2.append(reward)
if (not np.array_equal(observations_1[i], observations_2[i])):
print((observations_1[i] - observations_2[i]))
assert np.array_equal(observations_1[i], observations_2[i])
assert (rewards_1 == rewards_2)
def test_goal_intervention(self):
task = generate_task(task_generator_id='picking')
env = CausalWorld(task=task, enable_visualization=False, normalize_observations=False)
for _ in range(10):
invalid_interventions_before = env.get_tracker().invalid_intervention_steps
new_goal = env.sample_new_goal()
env.set_starting_state(interventions_dict=new_goal)
invalid_interventions_after = env.get_tracker().invalid_intervention_steps
for _ in range(2):
for _ in range(100):
(obs, reward, done, info) = env.step(env.action_space.low)
            if invalid_interventions_before == invalid_interventions_after:
                assert np.array_equal(cyl2cart(new_goal['goal_block']['cylindrical_position']), obs[-7:-4])
env.reset()
env.close()
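# Determinism and goal-intervention tests for the 'pushing' task generator.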
class TestPushing(unittest.TestCase):
def setUp(self):
self.task = generate_task(task_generator_id='pushing')
self.env = CausalWorld(task=self.task, enable_visualization=False)
return
def tearDown(self):
self.env.close()
return
def test_determinism(self):
observations_1 = []
rewards_1 = []
horizon = 100
actions = [self.env.action_space.sample() for _ in range(horizon)]
actions = np.array(actions)
obs = self.env.reset()
observations_1.append(obs)
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_1.append(obs)
rewards_1.append(reward)
for _ in range(10):
observations_2 = []
rewards_2 = []
obs = self.env.reset()
observations_2.append(obs)
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_2.append(obs)
rewards_2.append(reward)
if (not np.array_equal(observations_1[i], observations_2[i])):
print((np.array(observations_1[i]) - np.array(observations_2[i])))
assert np.array_equal(observations_1[i], observations_2[i])
assert (rewards_1 == rewards_2)
def test_determinism_w_interventions_1(self):
observations_1 = []
rewards_1 = []
horizon = 100
actions = [self.env.action_space.sample() for _ in range(horizon)]
actions = np.array(actions)
new_goal = self.env.sample_new_goal()
self.env.set_starting_state(interventions_dict=new_goal)
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_1.append(obs)
rewards_1.append(reward)
for _ in range(10):
observations_2 = []
rewards_2 = []
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_2.append(obs)
rewards_2.append(reward)
if (not np.array_equal(observations_1[i], observations_2[i])):
print(i)
print((np.array(observations_1[i]) - np.array(observations_2[i])))
assert np.array_equal(observations_1[i], observations_2[i])
assert (rewards_1 == rewards_2)
def test_determinism_w_interventions_2(self):
observations_1 = []
rewards_1 = []
horizon = 100
actions = [self.env.action_space.sample() for _ in range(horizon)]
actions = np.array(actions)
intervention = {'tool_block': {'cylindrical_position': [0, 0.3, 0.0325]}}
self.env.set_starting_state(interventions_dict=intervention)
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_1.append(obs)
rewards_1.append(reward)
for _ in range(10):
observations_2 = []
rewards_2 = []
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_2.append(obs)
rewards_2.append(reward)
if (not np.array_equal(observations_1[i], observations_2[i])):
print(i)
print((np.array(observations_1[i]) - np.array(observations_2[i])))
assert np.array_equal(observations_1[i], observations_2[i])
assert (rewards_1 == rewards_2)
def test_determinism_w_in_episode_interventions(self):
observations_1 = []
rewards_1 = []
horizon = 100
actions = [self.env.action_space.sample() for _ in range(horizon)]
actions = np.array(actions)
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_1.append(obs)
rewards_1.append(reward)
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
if (i == 50):
            success_signal = self.env.do_intervention({'tool_block': {'cylindrical_position': [0.1, np.pi / 2, 0.0325]}})
observations_2 = []
rewards_2 = []
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_2.append(obs)
rewards_2.append(reward)
assert np.array_equal(observations_1[i], observations_2[i])
assert (rewards_1 == rewards_2)
def test_goal_intervention(self):
task = generate_task(task_generator_id='pushing')
env = CausalWorld(task=task, enable_visualization=False, normalize_observations=False)
for _ in range(10):
invalid_interventions_before = env.get_tracker().invalid_intervention_steps
new_goal = env.sample_new_goal()
env.set_starting_state(interventions_dict=new_goal)
invalid_interventions_after = env.get_tracker().invalid_intervention_steps
for _ in range(2):
for _ in range(100):
(obs, reward, done, info) = env.step(env.action_space.low)
            if invalid_interventions_before == invalid_interventions_after:
                assert np.array_equal(cyl2cart(new_goal['goal_block']['cylindrical_position']), obs[-7:-4])
env.reset()
env.close()
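# Determinism tests for the 'reaching' task, run with joint-position actions
# and unnormalized action/observation spaces.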
class TestReaching(unittest.TestCase):
def setUp(self):
self.task = generate_task(task_generator_id='reaching')
self.env = CausalWorld(task=self.task, enable_visualization=False, action_mode='joint_positions', normalize_observations=False, normalize_actions=False)
return
def tearDown(self):
self.env.close()
return
def test_determinism(self):
observations_1 = []
rewards_1 = []
horizon = 100
actions = [self.env.action_space.sample() for _ in range(horizon)]
actions = np.array(actions)
obs = self.env.reset()
observations_1.append(obs)
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_1.append(obs)
rewards_1.append(reward)
for _ in range(10):
observations_2 = []
rewards_2 = []
obs = self.env.reset()
observations_2.append(obs)
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_2.append(obs)
rewards_2.append(reward)
if (not np.array_equal(observations_1[i], observations_2[i])):
print((np.array(observations_1[i]) - np.array(observations_2[i])))
assert np.array_equal(observations_1[i], observations_2[i])
assert (rewards_1 == rewards_2)
def test_determinism_w_interventions(self):
observations_1 = []
rewards_1 = []
horizon = 100
actions = [self.env.action_space.sample() for _ in range(horizon)]
actions = np.array(actions)
new_goal = self.env.sample_new_goal()
self.env.set_starting_state(interventions_dict=new_goal)
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_1.append(obs)
rewards_1.append(reward)
for _ in range(10):
observations_2 = []
rewards_2 = []
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_2.append(obs)
rewards_2.append(reward)
assert np.array_equal(observations_1[i], observations_2[i])
assert (rewards_1 == rewards_2)
def test_determinism_w_in_episode_interventions(self):
observations_1 = []
rewards_1 = []
horizon = 100
actions = [self.env.action_space.sample() for _ in range(horizon)]
actions = np.array(actions)
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_1.append(obs)
rewards_1.append(reward)
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
if (i == 50):
new_goal = self.env.sample_new_goal()
success_signal = self.env.do_intervention(new_goal)
observations_2 = []
rewards_2 = []
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_2.append(obs)
rewards_2.append(reward)
assert np.array_equal(observations_1[i], observations_2[i])
assert (rewards_1 == rewards_2)
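# Determinism tests for the 'stacked_blocks' task generator.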
class TestStackedBlocks(unittest.TestCase):
def setUp(self):
self.task = generate_task(task_generator_id='stacked_blocks')
self.env = CausalWorld(task=self.task, enable_visualization=False)
return
def tearDown(self):
self.env.close()
return
def test_determinism(self):
observations_1 = []
rewards_1 = []
horizon = 100
actions = [self.env.action_space.sample() for _ in range(horizon)]
actions = np.array(actions)
obs = self.env.reset()
observations_1.append(obs)
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_1.append(obs)
rewards_1.append(reward)
for _ in range(10):
observations_2 = []
rewards_2 = []
obs = self.env.reset()
observations_2.append(obs)
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_2.append(obs)
rewards_2.append(reward)
if (not np.array_equal(observations_1[i], observations_2[i])):
print((np.array(observations_1[i]) - np.array(observations_2[i])))
assert np.array_equal(observations_1[i], observations_2[i])
assert (rewards_1 == rewards_2)
def test_determinism_w_interventions(self):
observations_1 = []
rewards_1 = []
horizon = 100
actions = [self.env.action_space.sample() for _ in range(horizon)]
actions = np.array(actions)
new_goal = self.env.sample_new_goal()
self.env.set_starting_state(interventions_dict=new_goal)
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_1.append(obs)
rewards_1.append(reward)
for _ in range(10):
observations_2 = []
rewards_2 = []
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_2.append(obs)
rewards_2.append(reward)
assert np.array_equal(observations_1[i], observations_2[i])
assert (rewards_1 == rewards_2)
def test_determinism_w_in_episode_interventions(self):
observations_1 = []
rewards_1 = []
horizon = 100
actions = [self.env.action_space.sample() for _ in range(horizon)]
actions = np.array(actions)
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_1.append(obs)
rewards_1.append(reward)
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
if (i == 50):
success_signal = self.env.do_intervention({'tool_level_0_num_1': {'cylindrical_position': [0, 0, 0.2]}})
observations_2 = []
rewards_2 = []
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_2.append(obs)
rewards_2.append(reward)
assert np.array_equal(observations_1[i], observations_2[i])
assert (rewards_1 == rewards_2)
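# Determinism tests for the 'stacking2' task generator.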
class TestStacking2(unittest.TestCase):
def setUp(self):
self.task = generate_task(task_generator_id='stacking2')
self.env = CausalWorld(task=self.task, enable_visualization=False)
return
def tearDown(self):
self.env.close()
return
def test_determinism(self):
observations_1 = []
rewards_1 = []
horizon = 100
actions = [self.env.action_space.sample() for _ in range(horizon)]
actions = np.array(actions)
obs = self.env.reset()
observations_1.append(obs)
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_1.append(obs)
rewards_1.append(reward)
for _ in range(10):
observations_2 = []
rewards_2 = []
obs = self.env.reset()
observations_2.append(obs)
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_2.append(obs)
rewards_2.append(reward)
if (not np.array_equal(observations_1[i], observations_2[i])):
print(i)
print((np.array(observations_1[i]) - np.array(observations_2[i])))
assert np.array_equal(observations_1[i], observations_2[i])
assert (rewards_1 == rewards_2)
def test_determinism_w_interventions_1(self):
observations_1 = []
rewards_1 = []
horizon = 100
actions = [self.env.action_space.sample() for _ in range(horizon)]
actions = np.array(actions)
new_goal = self.env.sample_new_goal()
self.env.set_starting_state(interventions_dict=new_goal)
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_1.append(obs)
rewards_1.append(reward)
for _ in range(10):
observations_2 = []
rewards_2 = []
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_2.append(obs)
rewards_2.append(reward)
if (not np.array_equal(observations_1[i], observations_2[i])):
print((observations_1[i] - observations_2[i]))
assert np.array_equal(observations_1[i], observations_2[i])
assert (rewards_1 == rewards_2)
def test_determinism_w_interventions_2(self):
observations_1 = []
rewards_1 = []
horizon = 100
actions = [self.env.action_space.sample() for _ in range(horizon)]
actions = np.array(actions)
intervention = {'joint_positions': [0, 0, 0, 0, 0, 0, 0, 0, 0]}
self.env.set_starting_state(interventions_dict=intervention)
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_1.append(obs)
rewards_1.append(reward)
for _ in range(10):
observations_2 = []
rewards_2 = []
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_2.append(obs)
rewards_2.append(reward)
assert np.array_equal(observations_1[i], observations_2[i])
assert (rewards_1 == rewards_2)
def test_determinism_w_in_episode_interventions(self):
observations_1 = []
rewards_1 = []
horizon = 100
actions = [self.env.action_space.sample() for _ in range(horizon)]
actions = np.array(actions)
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_1.append(obs)
rewards_1.append(reward)
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
if (i == 50):
success_signal = self.env.do_intervention({'tool_block_1': {'cylindrical_position': [0, 0, 0.2]}})
observations_2 = []
rewards_2 = []
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_2.append(obs)
rewards_2.append(reward)
assert np.array_equal(observations_1[i], observations_2[i])
assert (rewards_1 == rewards_2)
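# Determinism tests for the 'towers' task generator.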
class TestTowers(unittest.TestCase):
def setUp(self):
self.task = generate_task(task_generator_id='towers')
self.env = CausalWorld(task=self.task, enable_visualization=False)
return
def tearDown(self):
self.env.close()
return
def test_determinism(self):
observations_1 = []
rewards_1 = []
horizon = 100
actions = [self.env.action_space.sample() for _ in range(horizon)]
actions = np.array(actions)
obs = self.env.reset()
observations_1.append(obs)
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_1.append(obs)
rewards_1.append(reward)
for _ in range(10):
observations_2 = []
rewards_2 = []
obs = self.env.reset()
observations_2.append(obs)
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_2.append(obs)
rewards_2.append(reward)
if (not np.array_equal(observations_1[i], observations_2[i])):
print(i)
print((np.array(observations_1[i]) - np.array(observations_2[i])))
assert np.array_equal(observations_1[i], observations_2[i])
assert (rewards_1 == rewards_2)
def test_determinism_w_interventions_1(self):
observations_1 = []
rewards_1 = []
horizon = 100
actions = [self.env.action_space.sample() for _ in range(horizon)]
actions = np.array(actions)
new_goal = self.env.sample_new_goal()
self.env.set_starting_state(interventions_dict=new_goal)
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_1.append(obs)
rewards_1.append(reward)
for _ in range(10):
observations_2 = []
rewards_2 = []
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_2.append(obs)
rewards_2.append(reward)
if (not np.array_equal(observations_1[i], observations_2[i])):
print((observations_1[i] - observations_2[i]))
assert np.array_equal(observations_1[i], observations_2[i])
assert (rewards_1 == rewards_2)
def test_determinism_w_interventions_2(self):
observations_1 = []
rewards_1 = []
horizon = 100
actions = [self.env.action_space.sample() for _ in range(horizon)]
actions = np.array(actions)
intervention = {'joint_positions': [0, 0, 0, 0, 0, 0, 0, 0, 0]}
self.env.set_starting_state(interventions_dict=intervention)
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_1.append(obs)
rewards_1.append(reward)
for _ in range(10):
observations_2 = []
rewards_2 = []
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_2.append(obs)
rewards_2.append(reward)
assert np.array_equal(observations_1[i], observations_2[i])
assert (rewards_1 == rewards_2)
def test_determinism_w_in_episode_interventions(self):
observations_1 = []
rewards_1 = []
horizon = 100
actions = [self.env.action_space.sample() for _ in range(horizon)]
actions = np.array(actions)
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_1.append(obs)
rewards_1.append(reward)
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
if (i == 50):
success_signal = self.env.do_intervention({'tool_level_0_col_0_row_0': {'cylindrical_position': [0, 0, 0.2]}})
observations_2 = []
rewards_2 = []
self.env.reset()
for i in range(horizon):
(obs, reward, done, info) = self.env.step(actions[i])
observations_2.append(obs)
rewards_2.append(reward)
assert np.array_equal(observations_1[i], observations_2[i])
assert (rewards_1 == rewards_2)
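# Wrapper demos: DeltaActionEnvWrapper interprets actions as deltas,
# MovingAverageActionEnvWrapper smooths them, ObjectSelectorWrapper exposes a
# discrete object-manipulation interface, and CurriculumWrapper schedules
# intervention actors; the `actives` tuples appear to encode when and how
# often each actor intervenes.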
def apply_delta_action():
task = generate_task(task_generator_id='reaching')
env = CausalWorld(task=task, enable_visualization=True, action_mode='joint_positions', normalize_actions=True, normalize_observations=True, skip_frame=1)
env = DeltaActionEnvWrapper(env)
for _ in range(50):
obs = env.reset()
for _ in range(1000):
desired_action = np.zeros([9])
(obs, reward, done, info) = env.step(desired_action)
env.close()
def smooth_action():
task = generate_task(task_generator_id='reaching')
env = CausalWorld(task=task, enable_visualization=True, action_mode='joint_positions', normalize_actions=True, normalize_observations=True, skip_frame=1)
env = MovingAverageActionEnvWrapper(env)
for _ in range(50):
obs = env.reset()
for _ in range(1000):
desired_action = np.zeros([9])
(obs, reward, done, info) = env.step(desired_action)
env.close()
def example():
task = generate_task(task_generator_id='picking')
env = CausalWorld(task=task, enable_visualization=True)
env = ObjectSelectorWrapper(env)
for _ in range(50):
obs = env.reset()
for i in range(70):
(obs, reward, done, info) = env.step([0, 1, 0])
for i in range(20):
(obs, reward, done, info) = env.step([0, 0, 1])
for i in range(50):
(obs, reward, done, info) = env.step([0, 5, 0])
for i in range(20):
(obs, reward, done, info) = env.step([0, 0, 1])
for i in range(50):
(obs, reward, done, info) = env.step([0, 2, 0])
env.close()
def example():
task = generate_task(task_generator_id='reaching')
env = CausalWorld(task, skip_frame=10, enable_visualization=True)
env = CurriculumWrapper(env, intervention_actors=[GoalInterventionActorPolicy()], actives=[(0, 1000000000, 1, 0)])
for reset_idx in range(30):
obs = env.reset()
for time in range(100):
desired_action = env.action_space.sample()
(obs, reward, done, info) = env.step(action=desired_action)
env.close()
def example():
task = generate_task(task_generator_id='pick_and_place')
env = CausalWorld(task, skip_frame=10, enable_visualization=True)
env = CurriculumWrapper(env, intervention_actors=[GoalInterventionActorPolicy()], actives=[(0, 1000000000, 1, 0)])
for reset_idx in range(30):
obs = env.reset()
for time in range(300):
(obs, reward, done, info) = env.step(env.action_space.low)
env.close()
def example():
task_gen = generate_task(task_generator_id='pushing')
env = CausalWorld(task_gen, skip_frame=10, enable_visualization=True)
env = CurriculumWrapper(env, intervention_actors=[GoalInterventionActorPolicy(), VisualInterventionActorPolicy(), RandomInterventionActorPolicy(), GoalInterventionActorPolicy()], actives=[(5, 10, 1, 0), (10, 20, 2, 0), (20, 25, 1, 0), (25, 30, 1, 50)])
for reset_idx in range(30):
obs = env.reset()
for time in range(100):
desired_action = env.action_space.sample()
(obs, reward, done, info) = env.step(action=desired_action)
env.close()
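# Defining a custom task: subclass BaseTask, build the scene in
# _set_up_stage_arena (a rigid mesh plus a goal silhouette here), and list
# the observation keys the task should expose.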
class MyOwnTask(BaseTask):
def __init__(self, **kwargs):
super().__init__(task_name='new_task', variables_space='space_a_b', fractional_reward_weight=1, dense_reward_weights=np.array([]))
self._task_robot_observation_keys = ['time_left_for_task', 'joint_positions', 'joint_velocities', 'end_effector_positions']
def _set_up_stage_arena(self):
creation_dict = {'name': 'tool_block', 'filename': './assets/719.obj', 'initial_position': [0, 0, 0.1]}
self._stage.add_rigid_mesh_object(**creation_dict)
creation_dict = {'name': 'goal_block', 'filename': './assets/719.obj', 'position': [0, 0, 0.1]}
self._stage.add_silhoutte_mesh_object(**creation_dict)
self._task_stage_observation_keys = ['tool_block_type', 'tool_block_size', 'tool_block_cartesian_position', 'tool_block_orientation', 'tool_block_linear_velocity', 'tool_block_angular_velocity', 'goal_block_type', 'goal_block_size', 'goal_block_cylindrical_position', 'goal_block_orientation']
return
def example():
task = MyOwnTask()
env = CausalWorld(task=task, enable_visualization=True)
env.reset()
for _ in range(2000):
for _ in range(10):
(obs, reward, done, info) = env.step(env.action_space.sample())
random_intervention_dict = env.do_single_random_intervention()
env.close()
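# Evaluation-pipeline examples: EvaluationPipeline rolls a policy out under
# intervention protocols and reports scores. `log_relative_path` and the
# actor-policy classes are assumed module-level definitions from the
# original scripts.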
def evaluate_controller():
task_params = dict()
task_params['task_generator_id'] = 'pushing'
world_params = dict()
world_params['skip_frame'] = 3
evaluator = EvaluationPipeline(evaluation_protocols=[protocols.FullyRandomProtocol(name='P10', variable_space='space_a')], task_params=task_params, world_params=world_params, visualize_evaluation=True)
policy = PushingActorPolicy()
    scores = evaluator.evaluate_policy(policy.act, fraction=0.1)
    experiments = {'pushing_policy': scores}  # 'pushing_policy' is an assumed label
    vis.generate_visual_analysis(log_relative_path, experiments=experiments)
    print(scores)
def control_policy(env):
def _control_policy(obs):
        return env.get_robot().get_joint_positions_from_tip_positions(obs[-9:], obs[1:10])
return _control_policy
def evaluate_controller():
task_params = dict()
task_params['task_generator_id'] = 'reaching'
world_params = dict()
world_params['normalize_observations'] = False
world_params['normalize_actions'] = False
evaluator = EvaluationPipeline(evaluation_protocols=[protocols.ProtocolGenerator(name='goal_poses_space_a', first_level_regex='goal_.*', second_level_regex='cylindrical_position', variable_space='space_a'), protocols.ProtocolGenerator(name='goal_poses_space_b', first_level_regex='goal_.*', second_level_regex='cylindrical_position', variable_space='space_b')], task_params=task_params, world_params=world_params, visualize_evaluation=True)
controller_fn = control_policy(evaluator.evaluation_env)
scores = evaluator.evaluate_policy(controller_fn, fraction=0.02)
evaluator.save_scores(log_relative_path)
experiments = {'reacher_model': scores}
vis.generate_visual_analysis(log_relative_path, experiments=experiments)
print(scores)
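# PPO2 (stable-baselines) training/evaluation template on a SubprocVecEnv;
# `log_relative_path`, `tf`, `os`, and the stable-baselines imports are
# assumed module-level definitions from the original scripts.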
def _make_env(rank):
def _init():
task = generate_task(task_generator_id='pushing')
env = CausalWorld(task=task, enable_visualization=False, seed=rank, skip_frame=3)
return env
set_global_seeds(0)
return _init
def train_policy():
ppo_config = {'gamma': 0.9988, 'n_steps': 200, 'ent_coef': 0, 'learning_rate': 0.001, 'vf_coef': 0.99, 'max_grad_norm': 0.1, 'lam': 0.95, 'nminibatches': 5, 'noptepochs': 100, 'cliprange': 0.2, 'tensorboard_log': log_relative_path}
os.makedirs(log_relative_path)
policy_kwargs = dict(act_fun=tf.nn.tanh, net_arch=[256, 128])
env = SubprocVecEnv([_make_env(rank=i) for i in range(5)])
model = PPO2(MlpPolicy, env, _init_setup_model=True, policy_kwargs=policy_kwargs, verbose=1, **ppo_config)
model.learn(total_timesteps=1000, tb_log_name='ppo2', reset_num_timesteps=False)
model.save(os.path.join(log_relative_path, 'model'))
env.env_method('save_world', log_relative_path)
env.close()
return
def evaluate_trained_policy():
model = PPO2.load(os.path.join(log_relative_path, 'model.zip'))
def policy_fn(obs):
return model.predict(obs)[0]
evaluator = EvaluationPipeline(evaluation_protocols=[protocols.FullyRandomProtocol(name='P11', variable_space='space_b')], visualize_evaluation=True, tracker_path=log_relative_path, initial_seed=0)
scores = evaluator.evaluate_policy(policy_fn, fraction=0.05)
evaluator.save_scores(log_relative_path)
print(scores)
def compare_controllers():
task_params = dict()
task_params['task_generator_id'] = 'pushing'
world_params = dict()
world_params['skip_frame'] = 3
evaluation_protocols = PUSHING_BENCHMARK['evaluation_protocols']
evaluator_1 = EvaluationPipeline(evaluation_protocols=evaluation_protocols, task_params=task_params, world_params=world_params, visualize_evaluation=False)
evaluator_2 = EvaluationPipeline(evaluation_protocols=evaluation_protocols, task_params=task_params, world_params=world_params, visualize_evaluation=False)
stable_baselines_policy_path_1 = './model_pushing_curr0.zip'
stable_baselines_policy_path_2 = './model_pushing_curr1.zip'
model_1 = PPO2.load(stable_baselines_policy_path_1)
model_2 = PPO2.load(stable_baselines_policy_path_2)
def policy_fn_1(obs):
return model_1.predict(obs, deterministic=True)[0]
def policy_fn_2(obs):
return model_2.predict(obs, deterministic=True)[0]
scores_model_1 = evaluator_1.evaluate_policy(policy_fn_1, fraction=0.005)
scores_model_2 = evaluator_2.evaluate_policy(policy_fn_2, fraction=0.005)
experiments = dict()
experiments['PPO(0)'] = scores_model_1
experiments['PPO(1)'] = scores_model_2
vis.generate_visual_analysis('./', experiments=experiments)
def _make_env(rank):
def _init():
task = generate_task(task_generator_id='pushing')
env = CausalWorld(task=task, enable_visualization=False, seed=rank)
return env
set_global_seeds(0)
return _init
def train_policy():
ppo_config = {'gamma': 0.9988, 'n_steps': 200, 'ent_coef': 0, 'learning_rate': 0.001, 'vf_coef': 0.99, 'max_grad_norm': 0.1, 'lam': 0.95, 'nminibatches': 5, 'noptepochs': 100, 'cliprange': 0.2, 'tensorboard_log': log_relative_path}
os.makedirs(log_relative_path)
policy_kwargs = dict(act_fun=tf.nn.tanh, net_arch=[256, 128])
env = SubprocVecEnv([_make_env(rank=i) for i in range(5)])
model = PPO2(MlpPolicy, env, _init_setup_model=True, policy_kwargs=policy_kwargs, verbose=1, **ppo_config)
model.learn(total_timesteps=1000, tb_log_name='ppo2', reset_num_timesteps=False)
model.save(os.path.join(log_relative_path, 'model'))
env.env_method('save_world', log_relative_path)
env.close()
return
def evaluate_model():
model = PPO2.load(os.path.join(log_relative_path, 'model.zip'))
def policy_fn(obs):
return model.predict(obs)[0]
evaluation_protocols = PUSHING_BENCHMARK['evaluation_protocols']
evaluator = EvaluationPipeline(evaluation_protocols=evaluation_protocols, tracker_path=log_relative_path, initial_seed=0)
scores = evaluator.evaluate_policy(policy_fn, fraction=0.02)
evaluator.save_scores(log_relative_path)
experiments = {'pushing_model': scores}
vis.generate_visual_analysis(log_relative_path, experiments=experiments)
def _make_env(rank):
def _init():
task = generate_task(task_generator_id='reaching')
env = CausalWorld(task=task, enable_visualization=False, seed=rank)
return env
set_global_seeds(0)
return _init
def train_policy():
ppo_config = {'gamma': 0.9988, 'n_steps': 200, 'ent_coef': 0, 'learning_rate': 0.001, 'vf_coef': 0.99, 'max_grad_norm': 0.1, 'lam': 0.95, 'nminibatches': 5, 'noptepochs': 100, 'cliprange': 0.2, 'tensorboard_log': log_relative_path}
os.makedirs(log_relative_path)
policy_kwargs = dict(act_fun=tf.nn.tanh, net_arch=[256, 128])
env = SubprocVecEnv([_make_env(rank=i) for i in range(5)])
model = PPO2(MlpPolicy, env, _init_setup_model=True, policy_kwargs=policy_kwargs, verbose=1, **ppo_config)
model.learn(total_timesteps=1000, tb_log_name='ppo2', reset_num_timesteps=False)
model.save(os.path.join(log_relative_path, 'model'))
env.env_method('save_world', log_relative_path)
env.close()
return
def evaluate_model():
model = PPO2.load(os.path.join(log_relative_path, 'model.zip'))
def policy_fn(obs):
return model.predict(obs)[0]
evaluation_protocols = REACHING_BENCHMARK['evaluation_protocols']
evaluator = EvaluationPipeline(evaluation_protocols=evaluation_protocols, tracker_path=log_relative_path, initial_seed=0)
scores = evaluator.evaluate_policy(policy_fn, fraction=0.1)
evaluator.save_scores(log_relative_path)
experiments = {'reaching_model': scores}
vis.generate_visual_analysis(log_relative_path, experiments=experiments)
def example():
task = generate_task(task_generator_id='creative_stacked_blocks')
env = CausalWorld(task=task, enable_visualization=True)
for _ in range(1):
env.reset()
for _ in range(10):
(obs, reward, done, info) = env.step(env.action_space.sample())
print(env.get_current_state_variables())
env.close()
def example():
task = generate_task(task_generator_id='pushing')
env = CausalWorld(task=task, enable_visualization=True)
env.reset()
counter = 0
for _ in range(1):
for i in range(210):
(obs, reward, done, info) = env.step(env.action_space.low)
            if i % 50 == 0 and i > 0:
                print(i)
                intervention = {'goal_block': {'cartesian_position': [0, -0.08 + 0.04 * counter, 0.0325], 'color': [0, 0, 1]}}
env.do_intervention(intervention, check_bounds=False)
counter += 1
print('intervention')
if (i == 201):
intervention = {'goal_block': {'cartesian_position': [0, 0.08, 0.0325], 'color': [0, 1, 0]}}
env.do_intervention(intervention, check_bounds=False)
env.close()
def example():
task = generate_task(task_generator_id='picking')
env = CausalWorld(task=task, enable_visualization=True)
env.set_starting_state({'goal_block': {'cartesian_position': [0.1, 0.1, 0.1]}})
for _ in range(500):
(obs, reward, done, info) = env.step(env.action_space.sample())
env.reset_default_state()
for _ in range(500):
(obs, reward, done, info) = env.step(env.action_space.sample())
env.reset()
for _ in range(500):
(obs, reward, done, info) = env.step(env.action_space.sample())
env.close()
def privileged_information():
task = generate_task(task_generator_id='general')
env = CausalWorld(task=task, enable_visualization=True)
env.expose_potential_partial_solution()
env.reset()
for _ in range(10):
goal_intervention_dict = env.sample_new_goal()
(success_signal, obs) = env.do_intervention(goal_intervention_dict)
print('Goal Intervention success signal', success_signal)
for i in range(100):
(obs, reward, done, info) = env.step(env.action_space.low)
print('now we solve it with privileged info')
(success_signal, obs) = env.do_intervention(info['possible_solution_intervention'], check_bounds=False)
        print('Partial Solution Setting Intervention Success Signal', success_signal)
for i in range(100):
(obs, reward, done, info) = env.step(env.action_space.low)
print('fractional_success is:', info['fractional_success'])
env.close()
def example():
task = generate_task(task_generator_id='picking')
env = CausalWorld(task=task, enable_visualization=True)
env.reset()
for _ in range(50):
(random_intervention_dict, success_signal, obs) = env.do_single_random_intervention()
print('The random intervention performed is ', random_intervention_dict)
for i in range(100):
(obs, reward, done, info) = env.step(env.action_space.sample())
env.close()
def example():
task = generate_task(task_generator_id='pick_and_place')
env = CausalWorld(task=task, enable_visualization=True)
env.reset()
intervention_space = env.get_variable_space_used()
for _ in range(100):
for i in range(200):
(obs, reward, done, info) = env.step(env.action_space.low)
intervention = {'tool_block': {'size': np.random.uniform(intervention_space['tool_block']['size'][0], intervention_space['tool_block']['size'][1])}}
env.do_intervention(intervention)
env.close()
def goal_interventions():
task = generate_task(task_generator_id='stacked_blocks')
env = CausalWorld(task=task, enable_visualization=True)
env.reset()
for _ in range(10):
for i in range(200):
(obs, reward, done, info) = env.step(env.action_space.sample())
goal_intervention_dict = env.sample_new_goal()
print('new goal chosen: ', goal_intervention_dict)
(success_signal, obs) = env.do_intervention(goal_intervention_dict)
print('Goal Intervention success signal', success_signal)
env.close()
def without_intervention_split():
task = generate_task(task_generator_id='pushing')
env = CausalWorld(task=task, enable_visualization=True)
env.reset()
for _ in range(2):
for i in range(200):
(obs, reward, done, info) = env.step(env.action_space.sample())
(success_signal, obs) = env.do_intervention({'stage_color': np.random.uniform(0, 1, [3])})
print('Intervention success signal', success_signal)
env.close()
def with_intervention_split_1():
task = generate_task(task_generator_id='pushing', variables_space='space_a')
env = CausalWorld(task=task, enable_visualization=False)
env.reset()
for _ in range(2):
for i in range(200):
(obs, reward, done, info) = env.step(env.action_space.sample())
(success_signal, obs) = env.do_intervention({'stage_color': np.random.uniform(0, 1, [3])})
print('Intervention success signal', success_signal)
env.close()
def with_intervention_split_2():
task = generate_task(task_generator_id='pushing', variables_space='space_b')
env = CausalWorld(task=task, enable_visualization=False)
interventions_space = task.get_intervention_space_a()
env.reset()
for _ in range(2):
for i in range(200):
(obs, reward, done, info) = env.step(env.action_space.sample())
(success_signal, obs) = env.do_intervention({'stage_color': np.random.uniform(interventions_space['stage_color'][0], interventions_space['stage_color'][1])})
print('Intervention success signal', success_signal)
env.close()
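# Recording episodes with DataRecorder and replaying them via DataLoader;
# `rec_dumb_frequency` is the library's own parameter spelling.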
def example():
data_recorder = DataRecorder(output_directory='pushing_episodes', rec_dumb_frequency=11)
task = generate_task(task_generator_id='pushing')
env = CausalWorld(task=task, enable_visualization=True, data_recorder=data_recorder)
for _ in range(23):
env.reset()
for _ in range(50):
env.step(env.action_space.sample())
env.close()
data = DataLoader(episode_directory='pushing_episodes')
episode = data.get_episode(14)
task = generate_task(episode.task_name, **episode.task_params)
env = CausalWorld(task, **episode.world_params, enable_visualization=True)
env.set_starting_state(episode.initial_full_state, check_bounds=False)
for action in episode.robot_actions:
env.step(action)
env.close()
viewer.view_episode(episode)
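# Model-predictive control with a cross-entropy optimizer over the true
# simulator; `skip_frame`, `seed`, `horizon_length`, `max_iterations`,
# `num_of_particles`, `num_elite`, and `parallel_agents` are assumed
# module-level constants in the original script.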
def _make_env():
def _init():
        task = generate_task(task_generator_id='picking', joint_positions=[-0.21737874, 0.55613149, -1.09308519, -0.12868997, 0.52551013, -1.08006493, -0.00221536, 0.46163487, -1.00948735], tool_block_position=[0.0, 0, 0.035], fractional_reward_weight=1, dense_reward_weights=np.array([0, 10, 0, 1, 1, 0, 0, 0]))
env = CausalWorld(task=task, skip_frame=skip_frame, enable_visualization=False, seed=seed)
return env
set_global_seeds(seed)
return _init
def run_mpc():
    task = generate_task(task_generator_id='picking', joint_positions=[-0.21737874, 0.55613149, -1.09308519, -0.12868997, 0.52551013, -1.08006493, -0.00221536, 0.46163487, -1.00948735], tool_block_position=[0.0, 0, 0.035], fractional_reward_weight=1, dense_reward_weights=np.array([0, 10, 0, 1, 1, 0, 0, 0]))
env = CausalWorld(task=task, skip_frame=1, enable_visualization=False, seed=seed)
true_model = SimulatorModel(_make_env, parallel_agents=parallel_agents)
optimizer = CrossEntropyMethod(planning_horizon=horizon_length, max_iterations=max_iterations, population_size=num_of_particles, num_elite=num_elite, action_upper_bound=np.array(env.action_space.high), action_lower_bound=np.array(env.action_space.low), model=true_model)
env.reset()
actions = optimizer.get_actions()
true_model.end_sim()
recorder = VideoRecorder(env, 'picking.mp4')
for i in range(horizon_length):
for _ in range(skip_frame):
recorder.capture_frame()
(obs, reward, done, info) = env.step(actions[i])
recorder.capture_frame()
recorder.close()
env.close()
def privileged_information():
task = generate_task(task_generator_id='reaching')
env = CausalWorld(task=task, enable_visualization=True, normalize_actions=False)
env.expose_potential_partial_solution()
env.reset()
for _ in range(10):
goal_intervention_dict = env.sample_new_goal()
(success_signal, obs) = env.do_intervention(goal_intervention_dict)
print('Goal Intervention success signal', success_signal)
(obs, reward, done, info) = env.step(env.action_space.low)
for i in range(1000):
(obs, reward, done, info) = env.step(info['possible_solution_intervention']['joint_positions'])
print('now we solve it with privileged info')
print(info['possible_solution_intervention'])
        print('Partial Solution Setting Intervention Success Signal', success_signal)
env.close()
def privileged_information():
task = generate_task(task_generator_id='pushing')
env = CausalWorld(task=task, enable_visualization=True)
env.expose_potential_partial_solution()
env.reset()
for _ in range(10):
goal_intervention_dict = env.sample_new_goal()
(success_signal, obs) = env.do_intervention(goal_intervention_dict)
print('Goal Intervention success signal', success_signal)
for i in range(1000):
(obs, reward, done, info) = env.step(env.action_space.low)
print('now we solve it with privileged info')
(success_signal, obs) = env.do_intervention(info['possible_solution_intervention'], check_bounds=False)
        print('Partial Solution Setting Intervention Success Signal', success_signal)
for i in range(500):
(obs, reward, done, info) = env.step(env.action_space.low)
env.close()
def example():
task = generate_task(task_generator_id='creative_stacked_blocks')
env = CausalWorld(task=task, enable_visualization=True)
for _ in range(20):
env.reset()
for _ in range(200):
(obs, reward, done, info) = env.step(env.action_space.sample())
env.close()
def control_policy(env, obs):
    return env.get_robot().get_joint_positions_from_tip_positions(obs[-9:], obs[1:10])
def end_effector_pos():
task = generate_task(task_generator_id='reaching')
env = CausalWorld(task=task, enable_visualization=True, action_mode='joint_positions', normalize_actions=False, normalize_observations=False)
obs = env.reset()
for _ in range(100):
goal_dict = env.sample_new_goal()
(success_signal, obs) = env.do_intervention(goal_dict)
(obs, reward, done, info) = env.step(control_policy(env, obs))
for _ in range(250):
(obs, reward, done, info) = env.step(control_policy(env, obs))
env.close()
def example():
task = generate_task(task_generator_id='stacked_blocks')
env = CausalWorld(task=task, skip_frame=10, enable_visualization=True, seed=0, action_mode='joint_positions', observation_mode='pixel', camera_indicies=[0, 1, 2])
env.reset()
for _ in range(5):
(obs, reward, done, info) = env.step(env.action_space.sample())
for i in range(6):
plt.imshow(obs[i])
plt.show()
env.close()
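# rlkit-style SAC setup: twin Q-networks with targets, a tanh-Gaussian
# policy, separate exploration/evaluation path collectors, and a batch RL
# algorithm.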
def experiment(variant):
task = generate_task(task_generator_id='picking', dense_reward_weights=np.array([250, 0, 125, 0, 750, 0, 0, 0.005]), fractional_reward_weight=1, goal_height=0.15, tool_block_mass=0.02)
eval_env = CausalWorld(task=task, skip_frame=3, enable_visualization=False, seed=0, max_episode_length=600)
task = generate_task(task_generator_id='picking', dense_reward_weights=np.array([250, 0, 125, 0, 750, 0, 0, 0.005]), fractional_reward_weight=1, goal_height=0.15, tool_block_mass=0.02)
expl_env = CausalWorld(task=task, skip_frame=3, enable_visualization=False, seed=0, max_episode_length=600)
obs_dim = expl_env.observation_space.low.size
action_dim = eval_env.action_space.low.size
qf1 = FlattenMlp(input_size=(obs_dim + action_dim), output_size=1, **variant['qf_kwargs'])
qf2 = FlattenMlp(input_size=(obs_dim + action_dim), output_size=1, **variant['qf_kwargs'])
target_qf1 = FlattenMlp(input_size=(obs_dim + action_dim), output_size=1, **variant['qf_kwargs'])
target_qf2 = FlattenMlp(input_size=(obs_dim + action_dim), output_size=1, **variant['qf_kwargs'])
policy = TanhGaussianPolicy(obs_dim=obs_dim, action_dim=action_dim, **variant['policy_kwargs'])
eval_policy = MakeDeterministic(policy)
eval_path_collector = MdpPathCollector(eval_env, eval_policy)
expl_path_collector = MdpPathCollector(expl_env, policy)
replay_buffer = EnvReplayBuffer(variant['replay_buffer_size'], expl_env)
trainer = SACTrainer(env=eval_env, policy=policy, qf1=qf1, qf2=qf2, target_qf1=target_qf1, target_qf2=target_qf2, **variant['sac_trainer_kwargs'])
algorithm = TorchBatchRLAlgorithm(trainer=trainer, exploration_env=expl_env, evaluation_env=eval_env, exploration_data_collector=expl_path_collector, evaluation_data_collector=eval_path_collector, replay_buffer=replay_buffer, **variant['algo_kwargs'])
algorithm.to(ptu.device)
algorithm.train()
def simulate_policy():
file = './her-sac-fetch-experiment/her-sac-fetch-experiment_2020_07_07_11_11_14_0000--s-0/params.pkl'
data = torch.load(file)
policy = data['evaluation/policy']
policy.reset()
def policy_func(obs):
(a, agent_info) = policy.get_action(obs)
return a
task = generate_task(task_generator_id='reaching')
env = CausalWorld(task=task, enable_visualization=True, skip_frame=1, seed=0, max_episode_length=2500)
env = CurriculumWrapper(env, intervention_actors=[GoalInterventionActorPolicy()], actives=[(0, 1000000000, 1, 0)])
for _ in range(100):
total_reward = 0
o = env.reset()
for _ in range(2500):
(o, reward, done, info) = env.step(policy_func(o))
total_reward += reward
print('total reward is :', total_reward)
env.close()
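# rlpyt examples: GymEnvWrapper adapts CausalWorld for rlpyt's CPU samplers,
# used here with synchronous and asynchronous SAC runners.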
def _make_env(rank):
task = generate_task(task_generator_id='reaching')
env = CausalWorld(task=task, skip_frame=10, enable_visualization=False, seed=(0 + rank), max_episode_length=600)
env = GymEnvWrapper(env)
return env
def build_and_train():
affinity = dict(cuda_idx=None, workers_cpus=list(range(15)))
sampler = CpuSampler(EnvCls=_make_env, env_kwargs=dict(rank=0), batch_T=6000, batch_B=20)
algo = SAC(bootstrap_timelimit=False)
agent = SacAgent()
runner = MinibatchRl(algo=algo, agent=agent, sampler=sampler, n_steps=50000000.0, log_interval_steps=600, affinity=affinity)
config = dict(env_id='reaching')
name = 'sac_reaching'
log_dir = os.path.join(os.path.dirname(__file__), 'example')
with logger_context(log_dir, 0, name, config, use_summary_writer=True):
runner.train()
def _make_env(rank):
task = generate_task(task_generator_id='picking', dense_reward_weights=np.array([250, 0, 125, 0, 750, 0, 0, 0.005]), fractional_reward_weight=1, goal_height=0.15, tool_block_mass=0.02)
env = CausalWorld(task=task, skip_frame=3, enable_visualization=False, seed=0, max_episode_length=600)
env = GymEnvWrapper(env)
return env
def build_and_train():
opt_affinities = list()
opt_affinity = dict(cpus=[0], cuda_idx=None, torch_threads=1, set_affinity=True)
opt_affinities.append(opt_affinity)
smp_affinity = AttrDict(all_cpus=[0, 1], master_cpus=[0], workers_cpus=[1], master_torch_threads=1, worker_torch_threads=1, cuda_idx=None, alternating=False, set_affinity=True)
affinity = AttrDict(all_cpus=[0, 1], optimizer=opt_affinities, sampler=smp_affinity, set_affinity=True)
sampler = AsyncCpuSampler(EnvCls=_make_env, env_kwargs=dict(rank=0), batch_T=600, batch_B=3, max_decorrelation_steps=0, CollectorCls=DbCpuResetCollector)
    algo = SAC(batch_size=256, min_steps_learn=10000, replay_size=1000000, replay_ratio=1, target_update_interval=1, target_entropy=-9, target_update_tau=0.01, learning_rate=0.00025, action_prior='uniform', reward_scale=1, reparameterize=True, clip_grad_norm=1000000000.0, n_step_return=1, updates_per_sync=1, bootstrap_timelimit=False)
agent = SacAgent(model_kwargs={'hidden_sizes': [256, 256]})
runner = AsyncRl(algo=algo, agent=agent, sampler=sampler, n_steps=50000000.0, log_interval_steps=10000, affinity=affinity)
config = dict(env_id='picking')
name = 'sac_rlpyt_picking'
log_dir = os.path.join(os.path.dirname(__file__), 'sac_rlpyt_picking')
with logger_context(log_dir, 0, name, config, use_summary_writer=False, snapshot_mode='all'):
runner.train()
def simulate_policy():
task = generate_task(task_generator_id='picking')
env = CausalWorld(task=task, enable_visualization=True, skip_frame=3, seed=0, max_episode_length=600)
env = GymEnvWrapper(env)
file = './itr_1097499.pkl'
data = torch.load(file)
agent_state_dict = data['agent_state_dict']
agent = SacAgent(initial_model_state_dict=agent_state_dict)
agent.initialize(env_spaces=env.spaces)
agent.eval_mode(itr=data['itr'])
def policy_func(obs):
agent_info = agent.step(torchify_buffer(obs), prev_action=None, prev_reward=None)
return agent_info.action.numpy()
for _ in range(100):
total_reward = 0
o = env.reset()
for _ in range(600):
(o, reward, done, info) = env.step(policy_func(o))
total_reward += reward
print('total reward is :', total_reward)
env.close()
def example():
task = generate_task(task_generator_id='creative_stacked_blocks')
env = CausalWorld(task=task, enable_visualization=False, seed=0)
actions = [env.action_space.sample() for _ in range(200)]
env.reset()
observations_1 = []
rewards_1 = []
for i in range(200):
(observations, rewards, _, _) = env.step(actions[i])
if (i == 100):
state = env.get_state()
observations_1.append(observations)
rewards_1.append(rewards)
env.set_state(state)
for i in range(101, 200):
(observations, rewards, _, _) = env.step(actions[i])
assert np.array_equal(observations_1[i], observations)
env.close()
def example():
task_gen = generate_task(task_generator_id='pushing')
env = CausalWorld(task_gen, skip_frame=1, enable_visualization=True)
env = DeltaActionEnvWrapper(env)
env = CurriculumWrapper(env, intervention_actors=[VisualInterventionActorPolicy()], actives=[(0, 20, 1, 0)])
for reset_idx in range(10):
obs = env.reset()
for time in range(15):
(obs, reward, done, info) = env.step(action=np.zeros(9))
env.save_world('./')
env.close()
env = load_world(tracker_relative_path='./', enable_visualization=True)
for reset_idx in range(10):
obs = env.reset()
for time in range(15):
(obs, reward, done, info) = env.step(action=np.zeros(9))
def train_policy(num_of_envs, log_relative_path, maximum_episode_length, skip_frame, seed_num, ppo_config, total_time_steps, validate_every_timesteps, task_name):
def _make_env(rank):
def _init():
task = generate_task(task_generator_id=task_name)
env = CausalWorld(task=task, skip_frame=skip_frame, enable_visualization=False, seed=(seed_num + rank), max_episode_length=maximum_episode_length)
return env
set_global_seeds(seed_num)
return _init
os.makedirs(log_relative_path)
policy_kwargs = dict(act_fun=tf.nn.tanh, net_arch=[256, 128])
env = SubprocVecEnv([_make_env(rank=i) for i in range(num_of_envs)])
model = PPO2(MlpPolicy, env, _init_setup_model=True, policy_kwargs=policy_kwargs, verbose=1, **ppo_config)
save_config_file(ppo_config, _make_env(0)(), os.path.join(log_relative_path, 'config.json'))
for i in range(int((total_time_steps / validate_every_timesteps))):
model.learn(total_timesteps=validate_every_timesteps, tb_log_name='ppo2', reset_num_timesteps=False)
model.save(os.path.join(log_relative_path, 'saved_model'))
return
def save_config_file(ppo_config, env, file_path):
task_config = env._task.get_task_params()
for task_param in task_config:
if (not isinstance(task_config[task_param], str)):
task_config[task_param] = str(task_config[task_param])
env_config = env.get_world_params()
env.close()
configs_to_save = [task_config, env_config, ppo_config]
with open(file_path, 'w') as fout:
json.dump(configs_to_save, fout)
def simulate_policy():
task = generate_task(task_generator_id='picking')
env = CausalWorld(task=task, enable_visualization=True, skip_frame=3, seed=0, max_episode_length=600)
file = './model_600000_steps.zip'
model = SAC.load(file)
def policy_func(obs):
return model.predict(obs, deterministic=True)[0]
for _ in range(100):
total_reward = 0
o = env.reset()
for _ in range(600):
(o, reward, done, info) = env.step(policy_func(o))
total_reward += reward
print('total reward is :', total_reward)
env.close()
def train_policy(num_of_envs, log_relative_path, maximum_episode_length, skip_frame, seed_num, sac_config, total_time_steps, validate_every_timesteps, task_name):
def _make_env(rank):
def _init():
task = generate_task(task_generator_id=task_name)
env = CausalWorld(task=task, skip_frame=skip_frame, enable_visualization=False, seed=(seed_num + rank), max_episode_length=maximum_episode_length)
env = HERGoalEnvWrapper(env)
return env
set_global_seeds(seed_num)
return _init
os.makedirs(log_relative_path)
env = SubprocVecEnv([_make_env(rank=i) for i in range(num_of_envs)])
model = HER('MlpPolicy', env, SAC, verbose=1, policy_kwargs=dict(layers=[256, 256, 256]), **sac_config)
save_config_file(sac_config, _make_env(0)(), os.path.join(log_relative_path, 'config.json'))
for i in range(int((total_time_steps / validate_every_timesteps))):
model.learn(total_timesteps=validate_every_timesteps, tb_log_name='sac', reset_num_timesteps=False)
model.save(os.path.join(log_relative_path, 'saved_model'))
return
def save_config_file(sac_config, env, file_path):
task_config = env.get_task().get_task_params()
for task_param in task_config:
if (not isinstance(task_config[task_param], str)):
task_config[task_param] = str(task_config[task_param])
env_config = env.get_world_params()
env.close()
configs_to_save = [task_config, env_config, sac_config]
with open(file_path, 'w') as fout:
json.dump(configs_to_save, fout)
def example():
task = generate_task(task_generator_id='pick_and_place')
env = CausalWorld(task=task, skip_frame=3, enable_visualization=True)
policy = PickAndPlaceActorPolicy()
env.close()
def example():
task = generate_task(task_generator_id='stacking2', tool_block_mass=0.02)
env = CausalWorld(task=task, enable_visualization=True, action_mode='end_effector_positions')
policy = GraspingPolicy(tool_blocks_order=[0, 1])
for _ in range(20):
policy.reset()
obs = env.reset()
for _ in range(6000):
(obs, reward, done, info) = env.step(policy.act(obs))
env.close()
def example():
task = generate_task(task_generator_id='pick_and_place')
world_params = dict()
world_params['skip_frame'] = 3
world_params['seed'] = 0
stable_baselines_policy_path = './model_100000000_steps.zip'
model = PPO2.load(stable_baselines_policy_path)
def policy_fn(obs):
return model.predict(obs, deterministic=True)[0]
viewer.view_policy(task=task, world_params=world_params, policy_fn=policy_fn, max_time_steps=(40 * 600), number_of_resets=40, env_wrappers=[CurriculumWrapper], env_wrappers_args=[{'intervention_actors': [GoalInterventionActorPolicy()], 'actives': [(0, 1000000000, 1, 0)]}])
def example():
task = generate_task(task_generator_id='picking')
world_params = dict()
world_params['skip_frame'] = 3
world_params['seed'] = 0
stable_baselines_policy_path = './model_2000000_steps.zip'
model = SAC.load(stable_baselines_policy_path)
def policy_fn(obs):
return model.predict(obs, deterministic=True)[0]
viewer.record_video_of_policy(task=task, world_params=world_params, policy_fn=policy_fn, file_name='picking_video', number_of_resets=10, max_time_steps=(10 * 100))
viewer.view_policy(task=task, world_params=world_params, policy_fn=policy_fn, max_time_steps=(40 * 600), number_of_resets=40)
|
def example():
task = generate_task(task_generator_id='reaching')
world_params = dict()
world_params['skip_frame'] = 1
world_params['seed'] = 0
agent = ReacherActorPolicy()
def policy_fn(obs):
return agent.act(obs)
viewer.view_policy(task=task, world_params=world_params, policy_fn=policy_fn, max_time_steps=(40 * 960), number_of_resets=40)
|
def example():
task = generate_task(task_generator_id='picking')
world_params = dict()
world_params['skip_frame'] = 3
world_params['seed'] = 200
viewer.record_video_of_random_policy(task=task, world_params=world_params, file_name='picking_video', number_of_resets=1, max_time_steps=300)
|
def example():
data = DataLoader(episode_directory='pushing_episodes')
episode = data.get_episode(6)
viewer.record_video_of_episode(episode=episode, file_name='pushing_video')
viewer.view_episode(episode)
|
def get_wordmap(textfile):
words = {}
We = []
with io.open(textfile, 'r', encoding='utf-8') as f:
lines = f.readlines()
# word2vec-style files start with a "<vocab size> <dimension>" header line
if (len(lines[0].split()) == 2):
lines.pop(0)
for (n, line) in enumerate(lines):
(word, vec) = line.split(' ', 1)
v = [float(x) for x in vec.split(' ')]
words[word] = n
We.append(v)
return (words, np.array(We))
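# Minimal sketch: write a tiny two-word embedding file and load it back;
# 'words' maps each word to its row index in the matrix We.
def _demo_get_wordmap():
    import os
    import tempfile
    path = os.path.join(tempfile.mkdtemp(), 'toy_vectors.txt')
    with io.open(path, 'w', encoding='utf-8') as f:
        f.write(u'dog 0.1 0.2 0.3\ncat 0.4 0.5 0.6\n')
    (words, We) = get_wordmap(path)
    assert We.shape == (2, 3)
    return We[words['dog']]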
|
def get_minibatches_idx(n, minibatch_size, shuffle=False):
idx_list = np.arange(n, dtype='int32')
if shuffle:
np.random.shuffle(idx_list)
minibatches = []
minibatch_start = 0
for i in range((n // minibatch_size)):
minibatches.append(idx_list[minibatch_start:(minibatch_start + minibatch_size)])
minibatch_start += minibatch_size
if (minibatch_start != n):
minibatches.append(idx_list[minibatch_start:])
return zip(range(len(minibatches)), minibatches)
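# Minimal sketch: 10 samples with minibatch_size=4 yields index blocks of
# sizes 4, 4 and 2 (the remainder forms a final smaller batch).
def _demo_get_minibatches_idx():
    for (batch_num, batch_ids) in get_minibatches_idx(10, 4, shuffle=False):
        print(batch_num, batch_ids)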
|
def max_pool(x, lengths, gpu):
out = torch.FloatTensor(x.size(0), x.size(2)).zero_()
if (gpu >= 0):
out = out.cuda()
for i in range(len(lengths)):
out[i] = torch.max(x[i][0:lengths[i]], 0)[0]
return out
|
def mean_pool(x, lengths, gpu):
out = torch.FloatTensor(x.size(0), x.size(2)).zero_()
if (gpu >= 0):
out = out.cuda()
for i in range(len(lengths)):
out[i] = torch.mean(x[i][0:lengths[i]], 0)
return out
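# Minimal sketch (assumes torch is imported, as above): pool a batch of 2
# padded sequences of hidden size 3; 'lengths' masks out the padding rows
# and gpu=-1 keeps everything on CPU.
def _demo_pooling():
    x = torch.randn(2, 5, 3)
    lengths = [5, 2]
    return (max_pool(x, lengths, -1), mean_pool(x, lengths, -1))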
|
def lookup(words, w):
w = w.lower()
if (w in words):
return words[w]
|
class Example(object):
def __init__(self, sentence):
self.sentence = sentence.strip().lower()
self.embeddings = []
self.representation = None
def populate_embeddings(self, words):
sentence = self.sentence.lower()
arr = sentence.split()
for i in arr:
emb = lookup(words, i)
if (emb is not None):
self.embeddings.append(emb)
if (len(self.embeddings) == 0):
self.embeddings.append(words['UUUNKKK'])
|
class SimilarityEvaluator():
def __init__(self, model_path='models/sim/sim.pt', tokenizer_path='models/sim/sim.sp.30k.model', gpu=False):
self.model_path = model_path
self.tokenizer_path = tokenizer_path
self.tok = TreebankWordTokenizer()
kw = {}
if (not torch.cuda.is_available()):
kw['map_location'] = torch.device('cpu')
model = torch.load(self.model_path, **kw)
state_dict = model['state_dict']
vocab_words = model['vocab_words']
args = model['args']
if (gpu is False):
args.gpu = (- 1)
self.model = WordAveraging(args, vocab_words)
self.model.load_state_dict(state_dict, strict=True)
self.sp = spm.SentencePieceProcessor()
self.sp.Load(self.tokenizer_path)
self.model.eval()
def make_example(self, sentence):
sentence = sentence.lower()
sentence = ' '.join(self.tok.tokenize(sentence))
sentence = self.sp.EncodeAsPieces(sentence)
wp1 = Example(' '.join(sentence))
wp1.populate_embeddings(self.model.vocab)
return wp1
def find_similarity(self, s1, s2):
with torch.no_grad():
s1 = [self.make_example(x) for x in s1]
s2 = [self.make_example(x) for x in s2]
(wx1, wl1, wm1) = self.model.torchify_batch(s1)
(wx2, wl2, wm2) = self.model.torchify_batch(s2)
scores = self.model.scoring_function(wx1, wm1, wl1, wx2, wm2, wl2)
return [x.item() for x in scores]
def find_similarity_batched(self, inputs, preds, batch_size=32):
assert (len(inputs) == len(preds))
sim_scores = []
for i in range(0, len(inputs), batch_size):
sim_scores.extend(self.find_similarity(inputs[i:(i + batch_size)], preds[i:(i + batch_size)]))
return np.array(sim_scores)
def embed_texts(self, texts, batch_size=128):
result = []
for i in range(0, len(texts), batch_size):
(wx, wl, wm) = self.model.torchify_batch([self.make_example(x) for x in texts[i:(i + batch_size)]])
with torch.no_grad():
tensors = torch.nn.functional.normalize(self.model.encode(wx, wm, wl))
result.append(tensors.cpu().numpy())
return np.concatenate(result)
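# Hedged usage sketch (assumes the default checkpoints exist under models/sim/):
# evaluator = SimilarityEvaluator(gpu=False)
# scores = evaluator.find_similarity_batched(['a cat sat on the mat'],
#                                            ['a cat was sitting on the mat'])
# Higher scores mean the paraphrase model judges the pair more similar.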
|
def make_example(sentence, model):
sentence = sentence.lower()
sentence = ' '.join(tok.tokenize(sentence))
sentence = sp.EncodeAsPieces(sentence)
wp1 = Example(' '.join(sentence))
wp1.populate_embeddings(model.vocab)
return wp1
|
def find_similarity(s1, s2):
with torch.no_grad():
s1 = [make_example(x, model) for x in s1]
s2 = [make_example(x, model) for x in s2]
(wx1, wl1, wm1) = model.torchify_batch(s1)
(wx2, wl2, wm2) = model.torchify_batch(s2)
scores = model.scoring_function(wx1, wm1, wl1, wx2, wm2, wl2)
return [x.item() for x in scores]
|
def cosine(v1, v2):
return (np.dot(v1, v2) / np.sqrt(((sum((v1 ** 2)) * sum((v2 ** 2))) + 1e-10)))
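# Minimal sketch: the 1e-10 term guards against division by zero for
# all-zero vectors.
def _demo_cosine():
    return cosine(np.array([1.0, 0.0]), np.array([1.0, 1.0]))  # ~0.707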
|
class EmbeddingSimilarityChooser():
def __init__(self, sim_coef=100, tokenizer=None):
self.glove_embedding = WordEmbeddings('glove')
self.sim_coef = sim_coef
self.tokenizer = tokenizer
def embed(self, text):
toks = self.glove_embedding.embed(Sentence(text))[0]
if (not toks):
return np.zeros(self.glove_embedding.embedding_length)
return np.mean([t.embedding.cpu().numpy() for t in toks], axis=0)
def decode(self, tokens):
if isinstance(tokens, str):
return tokens
if self.tokenizer:
return self.tokenizer.convert_tokens_to_string(tokens)
return ' '.join(tokens).replace(' ##', '')
def __call__(self, hypotheses, original=None, scores=None, **kwargs):
e = self.embed(self.decode(original))
candidates = [(fill_words, score, cosine(e, self.embed(self.decode(fill_words)))) for (fill_words, score) in zip(hypotheses, scores)]
candidates = sorted(candidates, key=(lambda x: (x[1] + (x[2] * self.sim_coef))), reverse=True)
return candidates[0][0]
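# Hedged usage sketch: rerank hypotheses by mixing the model score with GloVe
# cosine similarity to the original text (names below are illustrative):
# chooser = EmbeddingSimilarityChooser(sim_coef=100)
# best = chooser(hypotheses=[['great', 'idea'], ['bad', 'idea']],
#                original='good idea', scores=[0.1, 0.2])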
|
class NgramSalienceCalculator():
def __init__(self, tox_corpus, norm_corpus, use_ngrams=False):
ngrams = ((1, 3) if use_ngrams else (1, 1))
self.vectorizer = CountVectorizer(ngram_range=ngrams)
tox_count_matrix = self.vectorizer.fit_transform(tox_corpus)
self.tox_vocab = self.vectorizer.vocabulary_
self.tox_counts = np.sum(tox_count_matrix, axis=0)
norm_count_matrix = self.vectorizer.fit_transform(norm_corpus)
self.norm_vocab = self.vectorizer.vocabulary_
self.norm_counts = np.sum(norm_count_matrix, axis=0)
def salience(self, feature, attribute='tox', lmbda=0.5):
assert (attribute in ['tox', 'norm'])
if (feature not in self.tox_vocab):
tox_count = 0.0
else:
tox_count = self.tox_counts[(0, self.tox_vocab[feature])]
if (feature not in self.norm_vocab):
norm_count = 0.0
else:
norm_count = self.norm_counts[(0, self.norm_vocab[feature])]
if (attribute == 'tox'):
return ((tox_count + lmbda) / (norm_count + lmbda))
else:
return ((norm_count + lmbda) / (tox_count + lmbda))
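# Minimal sketch: a word frequent in the toxic corpus and absent from the
# normal corpus gets a large 'tox' salience ((2 + 0.5) / (0 + 0.5) = 5.0 here).
def _demo_salience():
    calc = NgramSalienceCalculator(tox_corpus=['you are stupid', 'stupid idea'], norm_corpus=['you are kind', 'good idea'])
    return calc.salience('stupid', attribute='tox')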
|
def adjust_logits(logits, label=0):
return (logits - ((token_toxicities * 100) * (1 - (2 * label))))
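# Note: relies on a module-level 'token_toxicities' array with one toxicity
# score per vocabulary token; label=0 pushes logits away from toxic tokens
# and label=1 pushes them towards toxic tokens.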
|
def add_sys_path(p):
p = os.path.abspath(p)
print(p)
if (p not in sys.path):
sys.path.append(p)
|
def adjust_logits(logits, label):
return (logits - (editor.token_toxicities * 3))
|
def bpe_tokenize(bpe_tokenizer, sentence):
sent_bpe_tokens = []
sent_bpe_offsets = []
for token in sentence:
token_bpes = bpe_tokenizer.tokenize(token.text)
sent_bpe_offsets += [(token.begin, token.end) for _ in range(len(token_bpes))]
sent_bpe_tokens += token_bpes
return (sent_bpe_tokens, sent_bpe_offsets)
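# Note: 'sentence' is expected to be an iterable of token objects exposing
# .text and character offsets .begin/.end, not a plain string.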
|
def nlargest_indexes(arr, n_top):
arr_ids = np.argpartition(arr, (- n_top))[(- n_top):]
sel_arr = arr[arr_ids]
top_ids = arr_ids[np.argsort((- sel_arr))]
return top_ids
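# Minimal sketch (assumes numpy imported as np, as above): indexes of the
# n_top largest values, largest first.
def _demo_nlargest_indexes():
    arr = np.array([0.1, 0.9, 0.3, 0.7])
    return nlargest_indexes(arr, 2)  # -> array([1, 3])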
|
def remove_masked_token_subwords(masked_position, bpe_tokens, bpe_offsets):
'''
If the masked token has been tokenized into multiple subwords (e.g. dieting --> diet + ##ing),
keep the first subword and remove the others.
'''
logger.debug(f'bpe tokens: {bpe_tokens}')
logger.debug(f'bpe offsets: {bpe_offsets}')
if (len(masked_position[1]) > 1):
indexes_to_del = masked_position[1][1:]
del bpe_tokens[masked_position[0]][indexes_to_del[0]:(indexes_to_del[(- 1)] + 1)]
del bpe_offsets[masked_position[0]][indexes_to_del[0]:(indexes_to_del[(- 1)] + 1)]
masked_position = (masked_position[0], masked_position[1][0])
logger.debug(f'bpe tokens: {str(bpe_tokens)}')
logger.debug(f'bpe offsets: {str(bpe_offsets)}')
return (masked_position, bpe_tokens, bpe_offsets)
|
def merge_sorted_results(objects_left, scores_left, objects_right, scores_right, max_elems):
result_objects = []
result_scores = []
j = 0
i = 0
while True:
if (len(result_scores) == max_elems):
break
if (i == len(scores_left)):
result_objects += objects_right[j:((j + max_elems) - len(result_scores))]
result_scores += scores_right[j:((j + max_elems) - len(result_scores))]
break
if (j == len(scores_right)):
result_objects += objects_left[i:((i + max_elems) - len(result_scores))]
result_scores += scores_left[i:((i + max_elems) - len(result_scores))]
break
if (scores_left[i] > scores_right[j]):
result_objects.append(objects_left[i])
result_scores.append(scores_left[i])
i += 1
else:
result_objects.append(objects_right[j])
result_scores.append(scores_right[j])
j += 1
return (result_objects, result_scores)
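# Minimal sketch: both inputs must already be sorted by descending score;
# the merge keeps the max_elems best items overall.
def _demo_merge_sorted_results():
    return merge_sorted_results(['a', 'b'], [0.9, 0.4], ['c', 'd'], [0.7, 0.1], max_elems=3)
    # -> (['a', 'c', 'b'], [0.9, 0.7, 0.4])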
|
class MaskedTokenPredictorBert():
def __init__(self, model, bpe_tokenizer, max_len=250, mask_in_multiunit=False, device=None, label=0, logits_postprocessor=None, contrast_penalty=0, mean=np.mean, confuse_bert_args=False):
self._model = model
self._bpe_tokenizer = bpe_tokenizer
self._max_len = max_len
self._mask_in_multiunit = mask_in_multiunit
self.device = (device or torch.device('cuda'))
self.label = label
self.logits_postprocessor = logits_postprocessor
self.contrast_penalty = contrast_penalty
self.mean = mean
self.confuse_bert_args = confuse_bert_args
def __call__(self, sentences, masked_position, **kwargs):
if (type(masked_position) is not list):
sentences = [sentences]
masked_position = [masked_position]
b_masked_pos = []
b_bpe_tokens = []
for (sent, mask_pos) in zip(sentences, masked_position):
(bpe_tokens, bpe_offsets) = bpe_tokenize(self._bpe_tokenizer, sent)
masked_position = find_bpe_position_by_offset([bpe_offsets], (sent[mask_pos].begin, sent[mask_pos].end))
(masked_position, bpe_tokens, _) = remove_masked_token_subwords(masked_position, [bpe_tokens], [bpe_offsets])
bpe_tokens = bpe_tokens[0]
logger.debug(f'Bpe tokens: {bpe_tokens}')
b_bpe_tokens.append(bpe_tokens)
b_masked_pos.append(masked_position[1])
return self.generate(b_bpe_tokens, b_masked_pos, **kwargs)
def generate(self, b_bpe_tokens, b_masked_pos, mask_token=True, n_top=5, n_units=1, n_tokens=[1], fix_multiunit=True, beam_size=10, multiunit_lookup=100, max_multiunit=10, label=None, **kwargs):
result_preds = [[] for _ in range(len(b_bpe_tokens))]
result_scores = [[] for _ in range(len(b_bpe_tokens))]
if (1 in n_tokens):
(result_preds, result_scores) = self.predict_single_word(b_bpe_tokens, b_masked_pos, mask_token=mask_token, n_top=n_top, n_units=n_units, multiunit_lookup=multiunit_lookup, fix_multiunit=fix_multiunit, max_multiunit=max_multiunit, label=label)
for n_t in n_tokens:
if (n_t == 1):
continue
(pred_tokens, pred_scores) = self.predict_token_sequence(b_bpe_tokens, b_masked_pos, mask_token=mask_token, n_top=n_top, n_units=n_units, seq_len=n_t, multiunit_lookup=multiunit_lookup, fix_multiunit=fix_multiunit, beam_size=beam_size, max_multiunit=max_multiunit, label=label)
for i in range(len(b_bpe_tokens)):
(result_preds[i], result_scores[i]) = merge_sorted_results(result_preds[i], result_scores[i], pred_tokens[i], pred_scores[i], n_top)
return (result_preds, result_scores)
def predict_single_unit(self, bpe_tokens, masked_position, mask_token, n_top, label=None):
if (label is None):
label = self.label
bpe_tokens = copy.deepcopy(bpe_tokens)
max_len = min([(max((len(e) for e in bpe_tokens)) + 2), self._max_len])
token_ids = []
for i in range(len(bpe_tokens)):
bpe_tokens[i] = bpe_tokens[i][:(max_len - 2)]
if mask_token:
if (i >= len(masked_position)):
continue
pos = masked_position[i]
if (pos >= len(bpe_tokens[i])):
continue
bpe_tokens[i][pos] = '[MASK]'
bpe_tokens[i] = ((['[CLS]'] + bpe_tokens[i]) + ['[SEP]'])
logger.debug(f'Masked BPE tokens: {bpe_tokens[i]}')
token_ids.append(self._bpe_tokenizer.convert_tokens_to_ids(bpe_tokens[i]))
token_ids = pad_sequences(token_ids, maxlen=max_len, dtype='long', truncating='post', padding='post')
attention_masks_tensor = torch.tensor((token_ids > 0)).long().to(self.device)
tokens_tensor = torch.tensor(token_ids).to(self.device)
segments_ids = (np.ones_like(token_ids, dtype=int) * label)
segments_tensor = torch.tensor(segments_ids).to(self.device)
self._model.eval()
with torch.no_grad():
if self.confuse_bert_args:
target_sent = self._model(tokens_tensor, attention_mask=segments_tensor, token_type_ids=attention_masks_tensor)[0]
else:
target_sent = self._model(tokens_tensor, token_type_ids=segments_tensor, attention_mask=attention_masks_tensor)[0]
if self.contrast_penalty:
with torch.no_grad():
another = self._model(tokens_tensor, token_type_ids=(1 - segments_tensor), attention_mask=attention_masks_tensor)[0]
diff = (torch.softmax(target_sent, (- 1)) - (self.contrast_penalty * torch.softmax(another, (- 1))))
target_sent = torch.log(torch.clamp(diff, 1e-20))
target_sent = target_sent.detach().cpu().numpy()
final_top_scores = []
final_top_tokens = []
for i in range(target_sent.shape[0]):
row = target_sent[i]
idx = masked_position[i]
if ((idx + 1) >= len(row)):
continue
logits = row[(idx + 1)]
logits = self.adjust_logits(logits, label=label)
top_ids = nlargest_indexes(logits, n_top)
top_scores = [target_sent[i][(masked_position[i] + 1)][j] for j in top_ids]
top_tokens = self._bpe_tokenizer.convert_ids_to_tokens(top_ids)
final_top_scores.append(top_scores)
final_top_tokens.append(top_tokens)
return (final_top_tokens, final_top_scores)
def adjust_logits(self, logits, label=0):
if self.logits_postprocessor:
return self.logits_postprocessor(logits, label=(label or 0))
return logits
def predict_single_word(self, bpe_tokens, masked_position, mask_token, n_top, n_units, fix_multiunit, multiunit_lookup, max_multiunit, label=None):
(pred_tokens, scores) = self.predict_single_unit(bpe_tokens, masked_position, mask_token=mask_token, n_top=n_top, label=label)
final_pred_tokens = []
final_scores = []
for j in range(len(pred_tokens)):
if (n_units > 1):
pred_tokens[j] = list(reversed(pred_tokens[j][:multiunit_lookup]))
scores[j] = list(reversed(scores[j][:multiunit_lookup]))
seq_list = self.generate_multiunit_token(masked_position[j], bpe_tokens[j], n_top=multiunit_lookup, n_units=n_units, label=label)
for seq in seq_list[:max_multiunit]:
(seq_pred, seq_scores) = seq
multiunit_token = '_'.join(seq_pred)
if fix_multiunit:
multiunit_token = multiunit_token.replace('#', '')
multiunit_token = multiunit_token.replace('_', '')
multiunit_score = self.mean(seq_scores)
ind = bisect.bisect(scores[j], multiunit_score)
pred_tokens[j].insert(ind, multiunit_token)
scores[j].insert(ind, multiunit_score)
pred_tokens[j] = list(reversed(pred_tokens[j]))
scores[j] = list(reversed(scores[j]))
logger.debug(f'Predicted words: {pred_tokens[j]}')
final_pred_tokens.append(pred_tokens[j][:n_top])
final_scores.append(scores[j][:n_top])
return (final_pred_tokens, final_scores)
def generate_multiunit_token(self, masked_position, bpe_tokens, n_top, n_units, label=None):
final_result = []
final_result_scores = []
bpe_tokens = copy.deepcopy(bpe_tokens)
bpe_tokens.insert(masked_position, '[MASK]')
(predictions, scores) = self.predict_single_unit([bpe_tokens], [(masked_position + 1)], n_top=n_top, mask_token=self._mask_in_multiunit, label=label)
if (len(predictions) == 0):
return []
predictions = predictions[0]
scores = scores[0]
good_preds = []
b_bpe_tokens = []
for (i, pred) in (e for e in enumerate(predictions) if (e[1][0] == '#')):
tmp = copy.deepcopy(bpe_tokens)
tmp[(masked_position + 1)] = pred
b_bpe_tokens.append(tmp)
good_preds.append((i, pred))
if (not good_preds):
return []
loader = DataLoader(b_bpe_tokens, batch_size=10, collate_fn=(lambda _: _))
preds = []
pred_scores = []
for batch in loader:
(bb_preds, bb_pred_scores) = self.predict_single_unit(batch, [masked_position for _ in range(len(batch))], mask_token=False, n_top=n_top, label=label)
preds += bb_preds
pred_scores += bb_pred_scores
for i in range(len(preds)):
result = [preds[i][0], good_preds[i][1]]
result_scores = [pred_scores[i][0], scores[good_preds[i][0]]]
(tail, tail_scores) = self.generate_from_tail(preds[i][0], b_bpe_tokens[i], masked_position, max_subunits=(n_units - 2), n_top=n_top, label=label)
result = (tail + result)
result_scores = (tail_scores + result_scores)
final_result.append(result)
final_result_scores.append(result_scores)
return list(zip(final_result, final_result_scores))
def generate_from_tail(self, pred, bpe_tokens, masked_position, max_subunits, n_top, label=None):
result = []
result_scores = []
it = 0
while ((pred[0] == '#') and (it < max_subunits)):
bpe_tokens[masked_position] = pred
bpe_tokens.insert(masked_position, '[MASK]')
(preds, pred_scores) = self.predict_single_unit([bpe_tokens], [masked_position], n_top=n_top, mask_token=False, label=label)
pred = preds[0][0]
result.append(pred)
result_scores.append(pred_scores[0][0])
it += 1
return (list(reversed(result)), list(reversed(result_scores)))
def generate_variants(self, bpe_tokens, mask_pos, gen_tokens, gen_scores, seq_len):
batch_size = len(bpe_tokens)
if (not gen_tokens):
(yield (bpe_tokens, ([0.0] * batch_size), [[] for _ in range(batch_size)], mask_pos))
return
for var_num in range(len(gen_tokens[0])):
if (not gen_tokens[0][var_num]):
continue
variant = []
new_mask = []
var_t = []
var_s = []
for i in range(batch_size):
new_bpe = copy.deepcopy(bpe_tokens[i])
for seq_num in range(len(gen_tokens[i][var_num])):
new_bpe[(mask_pos[i] + seq_num)] = gen_tokens[i][var_num][seq_num]
var_t.append(gen_tokens[i][var_num])
var_s.append(gen_scores[i][var_num])
new_mask.append((mask_pos[i] + len(gen_tokens[i][var_num])))
variant.append(new_bpe)
(yield (variant, var_s, var_t, new_mask))
def update_beam(self, prev_tokens, prev_score, new_scores, new_tokens, gen_scores, gen_tokens):
for i in range(len(gen_scores)):
final_gen_score = (prev_score + gen_scores[i])
insert_pos = bisect.bisect(new_scores, final_gen_score)
new_scores.insert(insert_pos, final_gen_score)
del new_scores[0]
new_tokens.insert(insert_pos, (prev_tokens + [gen_tokens[i]]))
if (len(new_tokens) > len(new_scores)):
del new_tokens[0]
def predict_token_sequence(self, bpe_tokens, masked_pos, mask_token, n_top, seq_len, beam_size, n_units, fix_multiunit, multiunit_lookup, max_multiunit, label=None):
bpe_tokens = copy.deepcopy(bpe_tokens)
batch_size = len(bpe_tokens)
for i in range(batch_size):
for seq_num in range((seq_len - 1)):
bpe_tokens[i].insert((masked_pos[i] + 1), '[MASK]')
gen_scores = []
gen_tokens = []
for seq_num in range(seq_len):
gen_scores_seq = [[0.0 for __ in range(beam_size)] for _ in range(batch_size)]
gen_tokens_seq = [[[] for __ in range(beam_size)] for _ in range(batch_size)]
for (variant, variant_score, prev_tokens, new_mask) in self.generate_variants(bpe_tokens, masked_pos, gen_tokens, gen_scores, seq_len=seq_len):
(top_tokens, top_scores) = self.predict_single_word(variant, new_mask, mask_token=True, n_top=n_top, n_units=n_units, fix_multiunit=fix_multiunit, multiunit_lookup=multiunit_lookup, max_multiunit=max_multiunit, label=label)
for i in range(batch_size):
self.update_beam(prev_tokens[i], variant_score[i], gen_scores_seq[i], gen_tokens_seq[i], top_scores[i], top_tokens[i])
gen_tokens = gen_tokens_seq
gen_scores = gen_scores_seq
gen_scores = [[(e / seq_len) for e in l] for l in gen_scores]
return ([list(reversed(e)) for e in gen_tokens], [list(reversed(e)) for e in gen_scores])
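# Hedged usage sketch (assumes a BERT masked-LM 'model' and its matching
# 'bpe_tokenizer' are already loaded; the tokens and masked position below
# are illustrative):
# predictor = MaskedTokenPredictorBert(model, bpe_tokenizer, device=torch.device('cpu'))
# (preds, scores) = predictor.generate([['the', 'cat', 'sat']], [1], n_top=5, n_tokens=[1])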
|
def find_bpe_position_by_offset(bpe_offsets, target_offset):
bpe_nums = []
for (sent_num, sent) in enumerate(bpe_offsets):
if (sent[(- 1)][0] < target_offset[0]):
continue
for (bpe_num, bpe) in enumerate(sent):
if ((target_offset[0] <= bpe[0]) and (bpe[1] <= target_offset[1])):
bpe_nums.append(bpe_num)
return (sent_num, bpe_nums)
|
def generate_seq_indexes(indexes):
if (not indexes):
(yield [])
return
for ind in indexes[0]:
for seq in generate_seq_indexes(indexes[1:]):
(yield ([ind] + seq))
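# Minimal sketch: yields every combination taking one index from each inner
# list, i.e. the Cartesian product in order.
def _demo_generate_seq_indexes():
    return list(generate_seq_indexes([[0, 1], [5, 6]]))
    # -> [[0, 5], [0, 6], [1, 5], [1, 6]]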
|
class PairsDataset(torch.utils.data.Dataset):
def __init__(self, x, y):
self.x = x
self.y = y
def __getitem__(self, idx):
assert (idx < len(self.x['input_ids']))
item = {key: val[idx] for (key, val) in self.x.items()}
item['decoder_attention_mask'] = self.y['attention_mask'][idx]
item['labels'] = self.y['input_ids'][idx]
return item
@property
def n(self):
return len(self.x['input_ids'])
def __len__(self):
return self.n
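# Hedged usage sketch: 'x' and 'y' are tokenizer outputs for source and target
# texts, e.g. x = tokenizer(sources, truncation=True) and
# y = tokenizer(targets, truncation=True); the dataset then pairs encoder
# inputs with decoder labels for a seq2seq Trainer.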
|
class TrAr(TrainingArguments):
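# Hedged note: overrides the cached _setup_devices property so the HuggingFace
# Trainer uses a globally pre-selected 'device' instead of auto-detecting one.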
@cached_property
def _setup_devices(self):
return device
|
class DataCollatorWithPadding():
def __init__(self, tokenizer):
self.tokenizer = tokenizer
def __call__(self, features: List[Dict[(str, Union[(List[int], torch.Tensor)])]]) -> Dict[(str, torch.Tensor)]:
batch = self.tokenizer.pad(features, padding=True)
ybatch = self.tokenizer.pad({'input_ids': batch['labels'], 'attention_mask': batch['decoder_attention_mask']}, padding=True)
batch['labels'] = ybatch['input_ids']
batch['decoder_attention_mask'] = ybatch['attention_mask']
return {k: torch.tensor(v) for (k, v) in batch.items()}
|
def detokenize(text):
text = text.replace(' .', '.').replace(' ,', ',').replace(' !', '!')
text = text.replace(' ?', '?').replace(' )', ')').replace('( ', '(')
return text
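# Minimal sketch of the behaviour:
def _demo_detokenize():
    return detokenize('hello , world ( really ) !')  # -> 'hello, world (really)!'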
|
def drop_bad_words(text, max_len=30, return_digits=None):
parts = re.split('(\\W)', text)
if max_len:
parts = [w for w in parts if (len(w) <= max_len)]
if (return_digits is not None):
parts = [(str(return_digits) if (p == 'DIGIT') else p) for p in parts]
return ''.join(parts)
|
def text_preprocess(text):
text = text.lstrip(punkt)
text = detokenize(text)
text = drop_bad_words(text)
return text
|
def text_postprocess(text):
res2 = text.rstrip(punkt)
if (len(res2) < len(text)):
res2 += text[len(res2)]
return res2
|
class DType(IntEnum):
'Data type definition: each value packs a family base (256 = float, 512 = signed int, 768 = unsigned int) plus the bit width'
BIT = (0 + 1)
BINARY = (0 + 2)
FP16 = (256 + 16)
FP32 = (256 + 32)
FP64 = (256 + 64)
INT8 = (512 + 8)
INT16 = (512 + 16)
INT32 = (512 + 32)
INT64 = (512 + 64)
UINT8 = (768 + 8)
UINT16 = (768 + 16)
UINT32 = (768 + 32)
UINT64 = (768 + 64)
|
class Border(IntEnum):
CONSTANT = 0
REFLECT = 1
REFLECT_101 = 2
REPLICATE = 3
WRAP = 4
|
def dtype_numpy_to_bb(dtype):
if (dtype == np.float32):
return core.TYPE_FP32
elif (dtype == np.float64):
return core.TYPE_FP64
elif (dtype == np.int8):
return core.TYPE_INT8
elif (dtype == np.int16):
return core.TYPE_INT16
elif (dtype == np.int32):
return core.TYPE_INT32
elif (dtype == np.int64):
return core.TYPE_INT64
elif (dtype == np.uint8):
return core.TYPE_UINT8
elif (dtype == np.uint16):
return core.TYPE_UINT16
elif (dtype == np.uint32):
return core.TYPE_UINT32
elif (dtype == np.uint64):
return core.TYPE_UINT64
return None
|