import copy


class Episode(object):

    def __init__(self, task_name, initial_full_state, task_params=None,
                 world_params=None):
        """
        The structure in which the data from each episode will be logged.

        :param task_name: (str) task generator name.
        :param initial_full_state: (dict) dict specifying the full state
                                          variables of the environment.
        :param task_params: (dict) task generator parameters.
        :param world_params: (dict) causal world parameters.
        """
        self.task_name = task_name
        self.task_params = copy.deepcopy(task_params)
        # guard against the default task_params=None before removing the
        # redundant 'task_name' key (the flattened original indexed into
        # task_params unconditionally and would crash on the default).
        if self.task_params is not None and 'task_name' in self.task_params:
            del self.task_params['task_name']
        self.world_params = world_params
        self.initial_full_state = initial_full_state
        self.robot_actions = []
        self.observations = []
        self.rewards = []
        self.infos = []
        self.dones = []
        self.timestamps = []

    def append(self, robot_action, observation, reward, info, done,
               timestamp):
        """
        Logs the outcome of a single environment step in the episode.

        :param robot_action: (nd.array) action passed to the step function.
        :param observation: (nd.array) observations returned after stepping
                                       through the environment.
        :param reward: (float) reward received from the environment.
        :param info: (dict) dictionary specifying all the extra information
                            after stepping through the environment.
        :param done: (bool) true if the environment returns done.
        :param timestamp: (float) time stamp with respect to the beginning of
                                  the episode.

        :return:
        """
        self.robot_actions.append(robot_action)
        self.observations.append(observation)
        self.rewards.append(reward)
        self.timestamps.append(timestamp)
        self.infos.append(info)
        self.dones.append(done)
        return
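A minimal sketch of filling an Episode during a rollout; all state, observation, and info values below are placeholder stand-ins, not outputs of the real environment.

import numpy as np

episode = Episode(task_name='reaching',
                  initial_full_state={'joint_positions': np.zeros(9)},
                  task_params={'task_name': 'reaching',
                               'variables_space': 'space_a_b'},
                  world_params={'skip_frame': 10})
for step in range(3):
    episode.append(robot_action=np.zeros(9),
                   observation=np.zeros(28),
                   reward=0.0,
                   info={'fractional_success': 0.0},
                   done=False,
                   timestamp=step * 0.02)
assert len(episode.rewards) == 3
assert 'task_name' not in episode.task_params  # stripped in __init__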
class TaskStats(object):

    def __init__(self, task):
        """
        Accumulates the time steps and resets executed under a single task.

        :param task: (causal_world.BaseTask) the task whose statistics are
                                             being tracked.
        """
        self.task_name = task._task_name
        self.task_params = task.get_task_params()
        self.time_steps = 0
        self.num_resets = 0

    def add_episode_experience(self, time_steps):
        """
        :param time_steps: (int) time steps executed in the last episode.

        :return:
        """
        self.time_steps += time_steps
        self.num_resets += 1
        return
import pickle


class Tracker(object):

    def __init__(self, task=None, file_path=None, world_params=None):
        """
        Tracks the experience and interventions accumulated in the
        environment, across tasks.

        :param task: (causal_world.BaseTask) task to be tracked.
        :param file_path: (str) path of the tracker to be loaded.
        :param world_params: (dict) causal world parameters.
        """
        self.total_time_steps = 0
        self.total_resets = 0
        self.total_interventions = 0
        self.total_intervention_steps = 0
        self.invalid_intervention_steps = 0
        self.invalid_out_of_bounds_intervention_steps = 0
        self.invalid_robot_intervention_steps = 0
        self.invalid_stage_intervention_steps = 0
        self.invalid_task_generator_intervention_steps = 0
        self.task_stats_log = []
        if task is None:
            self._curr_task_stat = None
        else:
            self._curr_task_stat = TaskStats(task)
        if file_path is not None:
            self.load(file_path)
            if world_params is not None:
                if self.world_params != world_params:
                    raise Exception('Incompatible world params')
        else:
            self.world_params = world_params

    def add_episode_experience(self, time_steps):
        """
        :param time_steps: (int) time steps executed in the last episode.

        :return:
        """
        if self._curr_task_stat is None:
            raise Exception('No current task stat set')
        if time_steps > 0:
            self._curr_task_stat.add_episode_experience(time_steps)

    def switch_task(self, task):
        """
        :param task: (causal_world.BaseTask) the task to switch to.

        :return:
        """
        self.total_time_steps += self._curr_task_stat.time_steps
        self.total_resets += self._curr_task_stat.num_resets
        self.total_intervention_steps += 1
        self.task_stats_log.append(self._curr_task_stat)
        self._curr_task_stat = TaskStats(task)
        return

    def do_intervention(self, task, interventions_dict):
        """
        :param task: (causal_world.BaseTask) the task after the intervention.
        :param interventions_dict: (dict) the intervention that was just
                                          executed in the environment.

        :return:
        """
        self.total_time_steps += self._curr_task_stat.time_steps
        self.total_resets += self._curr_task_stat.num_resets
        self.total_intervention_steps += 1
        self.task_stats_log.append(self._curr_task_stat)
        self._curr_task_stat = TaskStats(task)
        self.total_interventions += len(interventions_dict)

    def add_invalid_intervention(self, interventions_info):
        """
        :param interventions_info: (dict) a dictionary of stats about the
                                          infeasibility of the attempted
                                          interventions.

        :return:
        """
        self.invalid_intervention_steps += 1
        if interventions_info['robot_infeasible']:
            self.invalid_robot_intervention_steps += 1
        if interventions_info['stage_infeasible']:
            self.invalid_stage_intervention_steps += 1
        if interventions_info['task_generator_infeasible']:
            self.invalid_task_generator_intervention_steps += 1
        if interventions_info['out_bounds']:
            self.invalid_out_of_bounds_intervention_steps += 1
        return

    def save(self, file_path):
        """
        :param file_path: (str) file path to save the tracker in.

        :return:
        """
        if self.world_params is None:
            raise Exception('world_params not set')
        tracker_dict = {
            'task_stats_log':
                self.task_stats_log + [self._curr_task_stat],
            'total_time_steps':
                self.total_time_steps + self._curr_task_stat.time_steps,
            'total_resets':
                self.total_resets + self._curr_task_stat.num_resets,
            'total_interventions': self.total_interventions,
            'total_intervention_steps': self.total_intervention_steps,
            'total_invalid_intervention_steps':
                self.invalid_intervention_steps,
            'total_invalid_robot_intervention_steps':
                self.invalid_robot_intervention_steps,
            'total_invalid_stage_intervention_steps':
                self.invalid_stage_intervention_steps,
            'total_invalid_task_generator_intervention_steps':
                self.invalid_task_generator_intervention_steps,
            'total_invalid_out_of_bounds_intervention_steps':
                self.invalid_out_of_bounds_intervention_steps,
            'world_params': self.world_params
        }
        with open(file_path, 'wb') as file_handle:
            pickle.dump(tracker_dict, file_handle)
        return

    def load(self, file_path):
        """
        :param file_path: (str) file path to load the tracker from.

        :return:
        """
        with open(file_path, 'rb') as file:
            tracker_dict = pickle.load(file)
        self.total_interventions += tracker_dict['total_interventions']
        self.total_intervention_steps += \
            tracker_dict['total_intervention_steps']
        self.invalid_intervention_steps += \
            tracker_dict['total_invalid_intervention_steps']
        self.invalid_robot_intervention_steps += \
            tracker_dict['total_invalid_robot_intervention_steps']
        self.invalid_stage_intervention_steps += \
            tracker_dict['total_invalid_stage_intervention_steps']
        self.invalid_task_generator_intervention_steps += \
            tracker_dict['total_invalid_task_generator_intervention_steps']
        self.invalid_out_of_bounds_intervention_steps += \
            tracker_dict['total_invalid_out_of_bounds_intervention_steps']
        self.total_time_steps += tracker_dict['total_time_steps']
        self.total_resets += tracker_dict['total_resets']
        self.task_stats_log = tracker_dict['task_stats_log']
        self.world_params = tracker_dict['world_params']

    def get_total_intervention_steps(self):
        """
        :return: (int) total intervention steps performed so far, where one
                       step may intervene on several variables at once.
        """
        return self.total_intervention_steps

    def get_total_interventions(self):
        """
        :return: (int) total interventions performed so far on the
                       variable level.
        """
        return self.total_interventions

    def get_total_resets(self):
        """
        :return: (int) total resets performed so far.
        """
        return self.total_resets

    def get_total_time_steps(self):
        """
        :return: (int) total time steps since the initialization of the
                       environment.
        """
        return self.total_time_steps

    def get_total_invalid_intervention_steps(self):
        """
        :return: (int) total invalid intervention steps performed so far.
        """
        return self.invalid_intervention_steps

    def get_total_invalid_robot_intervention_steps(self):
        """
        :return: (int) total invalid intervention steps performed so far where
                       the robot reached an infeasible state.
        """
        return self.invalid_robot_intervention_steps

    def get_total_invalid_stage_intervention_steps(self):
        """
        :return: (int) total invalid intervention steps performed so far where
                       the stage reached an infeasible state.
        """
        return self.invalid_stage_intervention_steps

    def get_total_invalid_task_generator_intervention_steps(self):
        """
        :return: (int) total invalid intervention steps performed so far where
                       the task itself reached an infeasible state.
        """
        return self.invalid_task_generator_intervention_steps

    def get_total_invalid_out_of_bounds_intervention_steps(self):
        """
        :return: (int) total intervention steps performed so far where the
                       requested values were out of bounds, invalidating the
                       intervention.
        """
        return self.invalid_out_of_bounds_intervention_steps
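A minimal save/load round trip for the tracker, using a hypothetical stand-in object that exposes only the two members TaskStats reads; the real package would pass a causal_world.BaseTask instead.

import os
import tempfile


class _StubTask:
    # Stand-in exposing only what TaskStats touches (hypothetical).
    _task_name = 'reaching'

    def get_task_params(self):
        return {'variables_space': 'space_a_b'}


path = os.path.join(tempfile.gettempdir(), 'tracker_demo.pkl')
tracker = Tracker(task=_StubTask(), world_params={'skip_frame': 10})
tracker.add_episode_experience(time_steps=500)
tracker.save(path)

restored = Tracker(task=_StubTask(), file_path=path,
                   world_params={'skip_frame': 10})
assert restored.get_total_time_steps() == 500
assert restored.get_total_resets() == 1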
from causal_world.metrics.metric_base import BaseMetric  # assumed import path


class MeanAccumulatedRewardMetric(BaseMetric):

    def __init__(self):
        """
        The MeanAccumulatedRewardMetric is used to calculate the mean
        accumulated reward over all episodes processed.
        """
        super(MeanAccumulatedRewardMetric,
              self).__init__(name='mean_accumulated_reward_rate')
        self.accumulated_reward = 0
        self.total_number_of_episodes = 0
        return

    def process_episode(self, episode_obj):
        """
        Processes the episode to calculate the scores out of it.

        :param episode_obj: (causal_world.loggers.Episode) episode to process
                            and calculate its metric.
        :return:
        """
        self.total_number_of_episodes += 1
        for rew in episode_obj.rewards:
            self.accumulated_reward += rew

    def get_metric_score(self):
        """
        :return: (float) the metric score calculated so far.
        """
        return self.accumulated_reward / float(self.total_number_of_episodes)

    def reset(self):
        """
        Resets the metric calculation over episodes.

        :return:
        """
        self.accumulated_reward = 0
        self.total_number_of_episodes = 0
import numpy as np

from causal_world.metrics.metric_base import BaseMetric  # assumed import path


class MeanFullIntegratedFractionalSuccess(BaseMetric):

    def __init__(self):
        """
        The MeanFullIntegratedFractionalSuccess is used to calculate the mean
        of the fractional success integrated over each full episode, across
        all episodes processed.
        """
        super(MeanFullIntegratedFractionalSuccess,
              self).__init__(name='full_integrated_fractional_success')
        self.per_episode_scores = []
        self.total_number_of_episodes = 0
        return

    def process_episode(self, episode_obj):
        """
        Processes the episode to calculate the scores out of it.

        :param episode_obj: (causal_world.loggers.Episode) episode to process
                            and calculate its metric.
        :return:
        """
        self.total_number_of_episodes += 1
        in_episode_accumulated_success = 0.0
        for info in episode_obj.infos:
            in_episode_accumulated_success += info['fractional_success']
        self.per_episode_scores.append(in_episode_accumulated_success /
                                       len(episode_obj.infos))

    def get_metric_score(self):
        """
        :return: (tuple) the mean of the metric score,
                         the std of the metric score.
        """
        return (np.mean(self.per_episode_scores),
                np.std(self.per_episode_scores))

    def reset(self):
        """
        Resets the metric calculation over episodes.

        :return:
        """
        self.per_episode_scores = []
        self.total_number_of_episodes = 0
import numpy as np

from causal_world.metrics.metric_base import BaseMetric  # assumed import path


class MeanLastFractionalSuccess(BaseMetric):

    def __init__(self):
        """
        The MeanLastFractionalSuccess is used to calculate the mean last
        fractional success over all episodes processed.
        """
        super(MeanLastFractionalSuccess,
              self).__init__(name='last_fractional_success')
        self.per_episode_scores = []
        self.total_number_of_episodes = 0
        return

    def process_episode(self, episode_obj):
        """
        Processes the episode to calculate the scores out of it.

        :param episode_obj: (causal_world.loggers.Episode) episode to process
                            and calculate its metric.
        :return:
        """
        self.total_number_of_episodes += 1
        self.per_episode_scores.append(
            episode_obj.infos[-1]['fractional_success'])

    def get_metric_score(self):
        """
        :return: (tuple) the mean of the metric score,
                         the std of the metric score.
        """
        return (np.mean(self.per_episode_scores),
                np.std(self.per_episode_scores))

    def reset(self):
        """
        Resets the metric calculation over episodes.

        :return:
        """
        self.per_episode_scores = []
        self.total_number_of_episodes = 0
import numpy as np

from causal_world.metrics.metric_base import BaseMetric  # assumed import path


class MeanLastIntegratedFractionalSuccess(BaseMetric):

    def __init__(self):
        """
        The MeanLastIntegratedFractionalSuccess is used to calculate the mean
        over the last 20 fractional successes across all episodes processed.
        """
        super(MeanLastIntegratedFractionalSuccess,
              self).__init__(name='last_integrated_fractional_success')
        self.per_episode_scores = []
        self.total_number_of_episodes = 0
        return

    def process_episode(self, episode_obj):
        """
        Processes the episode to calculate the scores out of it.

        :param episode_obj: (causal_world.loggers.Episode) episode to process
                            and calculate its metric.
        :return:
        """
        self.total_number_of_episodes += 1
        in_episode_accumulated_success = 0.0
        for index in range(20):
            # index from the end of the episode; the flattened original used
            # infos[-index], which reads infos[0] when index == 0 instead of
            # the last entry.
            in_episode_accumulated_success += \
                episode_obj.infos[-(index + 1)]['fractional_success']
        self.per_episode_scores.append(in_episode_accumulated_success / 20)

    def get_metric_score(self):
        """
        :return: (tuple) the mean of the metric score,
                         the std of the metric score.
        """
        return (np.mean(self.per_episode_scores),
                np.std(self.per_episode_scores))

    def reset(self):
        """
        Resets the metric calculation over episodes.

        :return:
        """
        self.per_episode_scores = []
        self.total_number_of_episodes = 0
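A sketch of feeding recorded episodes through the metrics above. The episodes here are simple stand-ins carrying only the fields the metrics read (rewards, and per-step infos with a 'fractional_success' key); note that MeanLastIntegratedFractionalSuccess requires at least 20 infos per episode.

import types

# Two stand-in episodes of 25 steps each (hypothetical data).
episodes = []
for final_success in (0.6, 1.0):
    infos = [{'fractional_success': final_success * t / 24.0}
             for t in range(25)]
    episodes.append(types.SimpleNamespace(rewards=[0.1] * 25, infos=infos))

reward_metric = MeanAccumulatedRewardMetric()
last_success_metric = MeanLastFractionalSuccess()
for episode in episodes:
    reward_metric.process_episode(episode)
    last_success_metric.process_episode(episode)

print(reward_metric.get_metric_score())        # ~2.5 mean accumulated reward
print(last_success_metric.get_metric_score())  # ~(0.8, 0.2) mean and std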
class BaseMetric(object):

    def __init__(self, name):
        """
        The metric base to be used for any metric to calculate over the
        episodes evaluated.

        :param name: (str) metric name.
        """
        self.name = name
        return

    def process_episode(self, episode_obj):
        """
        Processes the episode to calculate the scores out of it.

        :param episode_obj: (causal_world.loggers.Episode) episode to process
                            and calculate its metric.
        :return:
        """
        raise NotImplementedError('not implemented yet')

    def get_metric_score(self):
        """
        :return: (float) the metric score calculated so far.
        """
        raise NotImplementedError('not implemented yet')

    def reset(self):
        """
        Resets the metric calculation over episodes.

        :return:
        """
        # body added to mirror the other abstract methods; the flattened
        # original left this method empty.
        raise NotImplementedError('not implemented yet')
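A subclassing sketch: a hypothetical metric (not part of the original package) that reports the mean episode length, showing the three methods a BaseMetric implementation is expected to provide.

class MeanEpisodeLengthMetric(BaseMetric):
    # Hypothetical example metric for illustration only.

    def __init__(self):
        super(MeanEpisodeLengthMetric,
              self).__init__(name='mean_episode_length')
        self.total_steps = 0
        self.total_number_of_episodes = 0

    def process_episode(self, episode_obj):
        self.total_number_of_episodes += 1
        self.total_steps += len(episode_obj.rewards)

    def get_metric_score(self):
        return self.total_steps / float(self.total_number_of_episodes)

    def reset(self):
        self.total_steps = 0
        self.total_number_of_episodes = 0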
import numpy as np

# These imports require the real-robot software stack to be installed.
import robot_fingers
import robot_interfaces


class TransferReal(object):

    def __init__(self, env):
        """
        This wrapper makes the environment execute actions on the real robot
        instead, to be used when performing sim2real experiments.

        :param env: (causal_world.CausalWorld) the environment to convert.
        """
        self._env = env
        self._real_robot = robot_fingers.Robot(
            robot_interfaces.trifinger,
            robot_fingers.create_trifinger_backend,
            'trifinger.yml')
        self._real_robot.initialize()
        self._frontend = self._real_robot.frontend
        # number of 1 kHz robot actions per environment step; cast to int so
        # it can drive range() below (the flattened original kept a float).
        self._repetitions = int(
            1000.0 / (self._env._simulation_time / self._env._skip_frame))

    def step(self, action):
        """
        Used to step through the real robot.

        :param action: (nd.array) specifies which action should be taken by
                                  the robot.

        :return: (nd.array) specifies the observations returned after stepping
                            through the robot.
        """
        for _ in range(self._repetitions):
            t = self._frontend.append_desired_action(
                robot_interfaces.trifinger.Action(position=action))
            self._frontend.wait_until_time_index(t)
        current_position = self._frontend.get_observation(t).position
        current_velocity = self._frontend.get_observation(t).velocity
        current_torque = self._frontend.get_observation(t).torque
        obs = np.array([current_position, current_velocity, current_torque])
        return obs.flatten()

    def reset(self):
        """
        Resets the real robot to the current starting state of the
        environment.

        :return: (nd.array) specifies the observations returned after
                            resetting the robot.
        """
        raise NotImplementedError('Not implemented yet')
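A small runnable check of the observation packing in step(): three 9-dimensional readings (position, velocity, torque) are stacked and flattened into a single 27-dimensional vector. The readings here are placeholders.

import numpy as np

current_position = np.zeros(9)      # placeholder joint positions
current_velocity = np.ones(9)       # placeholder joint velocities
current_torque = np.full(9, 0.5)    # placeholder joint torques
obs = np.array([current_position, current_velocity, current_torque]).flatten()
assert obs.shape == (27,)
assert np.allclose(obs[9:18], 1.0)  # velocities occupy the middle block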
import gym


class RealisticRobotWrapper(gym.Wrapper):

    def __init__(self, env):
        """
        This wrapper makes the simulated environment closer to the real robot.

        :param env: (causal_world.CausalWorld) the environment to make
                    realistic.
        """
        super(RealisticRobotWrapper, self).__init__(env)
        # Paint all twelve finger links dark grey, as on the real platform;
        # this builds the same key/value dict the flattened original spelled
        # out entry by entry.
        dark_grey = [0.2, 0.2, 0.2]
        starting_state = {
            'robot_finger_{}_link_{}'.format(finger, link):
                {'color': list(dark_grey)}
            for finger in (60, 120, 300)
            for link in range(4)
        }
        self.env.set_starting_state(starting_state)
        return
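A hedged sketch of stacking the wrapper on an environment; the import paths and entry points below follow the causal_world package conventions but should be verified against the installed version.

# Assumed import paths from the causal_world package.
from causal_world.envs import CausalWorld
from causal_world.task_generators import generate_task

env = CausalWorld(task=generate_task(task_generator_id='pushing'))
env = RealisticRobotWrapper(env)
obs = env.reset()
env.close()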
import copy

import numpy as np

# Assumed import paths from the causal_world package.
from causal_world.task_generators.base_task import BaseTask
from causal_world.utils.rotation_utils import euler_to_quaternion


class CreativeStackedBlocksGeneratorTask(BaseTask):

    def __init__(self,
                 variables_space='space_a_b',
                 fractional_reward_weight=1,
                 dense_reward_weights=np.array([]),
                 activate_sparse_reward=False,
                 tool_block_mass=0.08,
                 joint_positions=None,
                 blocks_min_size=0.035,
                 num_of_levels=8,
                 max_level_width=0.12):
        """
        This task generator generates a task in the creative stacked blocks
        family: a random configuration of stacked blocks where only the first
        and the last level are shown explicitly, and the rest is left to the
        "imagination" of the agent itself.

        :param variables_space: (str) space to be used, either 'space_a',
                                      'space_b' or 'space_a_b'.
        :param fractional_reward_weight: (float) weight multiplied by the
                                                 fractional volumetric
                                                 overlap in the reward.
        :param dense_reward_weights: (list float) specifies the reward weights
                                                  for all the other reward
                                                  terms calculated in the
                                                  calculate_dense_rewards
                                                  function.
        :param activate_sparse_reward: (bool) specifies whether to sparsify
                                              the reward to +1 or 0 depending
                                              on whether the volumetric
                                              fraction overlap exceeds 90%.
        :param tool_block_mass: (float) specifies the blocks' mass.
        :param joint_positions: (nd.array) specifies the joint positions to
                                           start the episode with. None if the
                                           default is to be used.
        :param blocks_min_size: (float) specifies the blocks' minimum size/
                                        side length for the goal shape
                                        generator.
        :param num_of_levels: (int) specifies the number of levels to be
                                    generated.
        :param max_level_width: (float) specifies the maximum width of the
                                        goal shape.
        """
        super().__init__(task_name='creative_stacked_blocks',
                         variables_space=variables_space,
                         fractional_reward_weight=fractional_reward_weight,
                         dense_reward_weights=dense_reward_weights,
                         activate_sparse_reward=activate_sparse_reward)
        self._task_robot_observation_keys = [
            'time_left_for_task', 'joint_positions', 'joint_velocities',
            'end_effector_positions'
        ]
        self._task_params['tool_block_mass'] = tool_block_mass
        self._task_params['joint_positions'] = joint_positions
        self._task_params['blocks_min_size'] = blocks_min_size
        self._task_params['num_of_levels'] = num_of_levels
        self._task_params['max_level_width'] = max_level_width
        self.current_stack_levels = self._task_params['num_of_levels']
        self.current_blocks_mass = self._task_params['tool_block_mass']
        self.current_blocks_min_size = self._task_params['blocks_min_size']
        self.current_max_level_width = self._task_params['max_level_width']
        self.current_number_of_obstacles = 0

    def get_description(self):
        """
        :return: (str) returns the description of the task itself.
        """
        return 'Task where the goal is to stack arbitrary shapes of cuboids'

    def _set_up_stage_arena(self):
        """
        Sets up the stage arena.

        :return:
        """
        number_of_blocks_per_level = int(
            self._task_params['max_level_width'] /
            self._task_params['blocks_min_size'])
        default_start_position = \
            -(number_of_blocks_per_level *
              self._task_params['blocks_min_size']) / 2
        default_start_position += self._task_params['blocks_min_size'] / 2
        curr_height = 0 - self._task_params['blocks_min_size'] / 2
        change_per_level = 0.005
        rigid_block_side = 0.1
        silhouettes_creation_dicts = []
        for level in range(self._task_params['num_of_levels']):
            change_per_level *= -1
            curr_height += self._task_params['blocks_min_size']
            start_position = default_start_position + change_per_level
            rigid_block_side *= -1
            for i in range(number_of_blocks_per_level):
                tool_name = 'tool_level_' + str(level) + '_num_' + str(i)
                creation_dict = {
                    'name': tool_name,
                    'shape': 'cube',
                    'initial_position': [start_position, rigid_block_side,
                                         curr_height],
                    'initial_orientation': [0, 0, 0, 1],
                    'size': np.repeat(self._task_params['blocks_min_size'],
                                      3),
                    'mass': self._task_params['tool_block_mass']
                }
                self._stage.add_rigid_general_object(**creation_dict)
                for key in ('_type', '_size', '_cartesian_position',
                            '_orientation', '_linear_velocity',
                            '_angular_velocity'):
                    self._task_stage_observation_keys.append(tool_name + key)
                if level in [0, self._task_params['num_of_levels'] - 1]:
                    goal_name = 'goal_level_' + str(level) + '_num_' + str(i)
                    creation_dict = {
                        'name': goal_name,
                        'shape': 'cube',
                        'position': [start_position, 0, curr_height],
                        'orientation': [0, 0, 0, 1],
                        'size': np.repeat(
                            self._task_params['blocks_min_size'], 3)
                    }
                    silhouettes_creation_dicts.append(
                        copy.deepcopy(creation_dict))
                    for key in ('_type', '_size', '_cartesian_position',
                                '_orientation'):
                        self._task_stage_observation_keys.append(goal_name +
                                                                 key)
                start_position += self._task_params['blocks_min_size']
        for i in range(len(silhouettes_creation_dicts)):
            self._stage.add_silhoutte_general_object(
                **silhouettes_creation_dicts[i])
        return

    def sample_new_goal(self, level=None):
        """
        Used to sample a new goal from the corresponding shape families.

        :param level: (int) specifying the level - not used for now.

        :return: (dict) the corresponding interventions dict that could then
                        be applied to get a new sampled goal.
        """
        intervention_dict = dict()
        if self._task_params['variables_space'] == 'space_a':
            intervention_space = self._intervention_space_a
        elif self._task_params['variables_space'] == 'space_b':
            intervention_space = self._intervention_space_b
        elif self._task_params['variables_space'] == 'space_a_b':
            intervention_space = self._intervention_space_a_b
        intervention_dict['stack_levels'] = np.random.uniform(
            intervention_space['stack_levels'][0],
            intervention_space['stack_levels'][1])
        intervention_dict['blocks_mass'] = np.random.uniform(
            intervention_space['blocks_mass'][0],
            intervention_space['blocks_mass'][1])
        intervention_dict['blocks_min_size'] = np.random.uniform(
            intervention_space['blocks_min_size'][0],
            intervention_space['blocks_min_size'][1])
        intervention_dict['max_level_width'] = np.random.uniform(
            intervention_space['max_level_width'][0],
            intervention_space['max_level_width'][1])
        return intervention_dict

    def get_task_generator_variables_values(self):
        """
        :return: (dict) specifying the variables belonging to the task itself.
        """
        return {
            'stack_levels': self.current_stack_levels,
            'blocks_mass': self.current_blocks_mass,
            'blocks_min_size': self.current_blocks_min_size,
            'max_level_width': self.current_max_level_width,
            'number_of_obstacles': self.current_number_of_obstacles
        }

    def apply_task_generator_interventions(self, interventions_dict):
        """
        :param interventions_dict: (dict) variables and their corresponding
                                          intervention value.

        :return: (tuple) first position indicates whether the intervention
                         was successful, second position indicates whether
                         the observation space needs to be reset.
        """
        if len(interventions_dict) == 0:
            return True, False
        reset_observation_space = True
        if 'number_of_obstacles' in interventions_dict:
            if int(interventions_dict['number_of_obstacles']) > \
                    self.current_number_of_obstacles:
                for i in range(
                        self.current_number_of_obstacles,
                        int(interventions_dict['number_of_obstacles'])):
                    self._stage.add_rigid_general_object(
                        name='obstacle_' + str(i),
                        shape='static_cube',
                        size=np.array([0.01, 0.01, 0.01]),
                        color=np.array([0, 0, 0]),
                        position=np.random.uniform(
                            self._stage.get_arena_bb()[0],
                            self._stage.get_arena_bb()[1]))
                    self.current_number_of_obstacles += 1
                    for key in ('_type', '_size', '_cartesian_position',
                                '_orientation'):
                        self._task_stage_observation_keys.append(
                            'obstacle_' + str(i) + key)
            if len(interventions_dict) == 1:
                return True, True
        if 'max_level_width' in interventions_dict:
            self.current_max_level_width = \
                interventions_dict['max_level_width']
        if 'blocks_min_size' in interventions_dict:
            self.current_blocks_min_size = \
                interventions_dict['blocks_min_size']
        if 'stack_levels' in interventions_dict:
            self.current_stack_levels = interventions_dict['stack_levels']
        if 'blocks_mass' in interventions_dict:
            self.current_blocks_mass = interventions_dict['blocks_mass']
        if 'max_level_width' in interventions_dict or \
                'blocks_min_size' in interventions_dict or \
                'stack_levels' in interventions_dict:
            self._create_new_challenge(
                num_of_levels=int(self.current_stack_levels),
                blocks_min_size=self.current_blocks_min_size,
                blocks_mass=self.current_blocks_mass,
                max_level_width=self.current_max_level_width)
        elif 'blocks_mass' in interventions_dict:
            new_interventions_dict = dict()
            for rigid_object in self._stage.get_rigid_objects():
                if self._stage.get_rigid_objects()[
                        rigid_object].is_not_fixed():
                    new_interventions_dict[rigid_object] = dict()
                    new_interventions_dict[rigid_object]['mass'] = \
                        self.current_blocks_mass
            self._stage.apply_interventions(new_interventions_dict)
        else:
            raise Exception('this task generator variable is not yet defined')
        self._set_intervention_space_b()
        self._set_intervention_space_a()
        self._set_intervention_space_a_b()
        self._stage.finalize_stage()
        return True, reset_observation_space

    def _set_intervention_space_a(self):
        """
        Used to set the space A limits for all the variables.

        :return:
        """
        super(CreativeStackedBlocksGeneratorTask,
              self)._set_intervention_space_a()
        for visual_object in self._stage.get_visual_objects():
            del self._intervention_space_a[visual_object]
        for rigid_object in self._stage.get_rigid_objects():
            del self._intervention_space_a[rigid_object]['size']
        self._intervention_space_a['stack_levels'] = np.array([3, 5])
        self._intervention_space_a['blocks_mass'] = np.array([0.02, 0.06])
        self._intervention_space_a['blocks_min_size'] = \
            np.array([0.035, 0.065])
        self._intervention_space_a['max_level_width'] = \
            np.array([0.035, 0.12])
        self._intervention_space_a['number_of_obstacles'] = np.array([1, 5])
        return

    def _set_intervention_space_b(self):
        """
        Used to set the space B limits for all the variables.

        :return:
        """
        super(CreativeStackedBlocksGeneratorTask,
              self)._set_intervention_space_b()
        for visual_object in self._stage.get_visual_objects():
            del self._intervention_space_b[visual_object]
        for rigid_object in self._stage.get_rigid_objects():
            del self._intervention_space_b[rigid_object]['size']
        self._intervention_space_b['stack_levels'] = np.array([6, 8])
        self._intervention_space_b['blocks_mass'] = np.array([0.06, 0.08])
        self._intervention_space_b['blocks_min_size'] = \
            np.array([0.065, 0.075])
        self._intervention_space_b['max_level_width'] = np.array([0.12, 0.15])
        self._intervention_space_b['number_of_obstacles'] = np.array([1, 5])
        return

    def _create_new_challenge(self, num_of_levels, blocks_min_size,
                              blocks_mass, max_level_width):
        """
        :param num_of_levels: (int) specifies the number of levels to be
                                    generated.
        :param blocks_min_size: (float) specifies the blocks' minimum size/
                                        side length for the goal shape
                                        generator.
        :param blocks_mass: (float) specifies the blocks' mass.
        :param max_level_width: (float) specifies the maximum width of the
                                        goal shape.
        :return:
        """
        self.current_number_of_obstacles = 0
        self._stage.remove_everything()
        self._task_stage_observation_keys = []
        silhouettes_creation_dicts = []
        block_sizes, positions, chosen_y = self._generate_random_target(
            num_of_levels=num_of_levels,
            min_size=blocks_min_size,
            max_level_width=max_level_width)
        for level_num in range(len(block_sizes)):
            for i in range(len(block_sizes[level_num])):
                tool_name = 'tool_level_' + str(level_num) + '_num_' + str(i)
                creation_dict = {
                    'name': tool_name,
                    'shape': 'cube',
                    'mass': blocks_mass,
                    'color': np.random.uniform(0, 1, size=[3]),
                    'size': block_sizes[level_num][i]
                }
                self._stage.add_rigid_general_object(**creation_dict)
                block_position = self._stage.random_position(
                    height_limits=block_sizes[level_num][i][-1] / 2.0)
                block_orientation = euler_to_quaternion(
                    [0, 0, np.random.uniform(-np.pi, np.pi)])
                self._stage.set_objects_pose(
                    names=[tool_name],
                    positions=[block_position],
                    orientations=[block_orientation])
                # resample the pose until the stage is feasible, up to 10
                # trials
                trial_index = 0
                self._robot.step_simulation()
                while not self._stage.check_feasiblity_of_stage() and \
                        trial_index < 10:
                    block_position = self._stage.random_position(
                        height_limits=[
                            block_sizes[level_num][i][-1] / 2.0, 0.15
                        ])
                    block_orientation = euler_to_quaternion(
                        [0, 0, np.random.uniform(-np.pi, np.pi)])
                    self._stage.set_objects_pose(
                        names=[tool_name],
                        positions=[block_position],
                        orientations=[block_orientation])
                    self._robot.step_simulation()
                    trial_index += 1
                silhouette_position = [
                    positions[level_num][i], chosen_y,
                    (level_num + 1) * blocks_min_size - blocks_min_size / 2
                ]
                for key in ('_type', '_size', '_cartesian_position',
                            '_orientation', '_linear_velocity',
                            '_angular_velocity'):
                    self._task_stage_observation_keys.append(tool_name + key)
                if level_num in [0, num_of_levels - 1]:
                    goal_name = \
                        'goal_level_' + str(level_num) + '_num_' + str(i)
                    creation_dict = {
                        'name': goal_name,
                        'shape': 'cube',
                        'position': np.array(silhouette_position),
                        'size': np.array(block_sizes[level_num][i])
                    }
                    silhouettes_creation_dicts.append(
                        copy.deepcopy(creation_dict))
                    for key in ('_type', '_size', '_cartesian_position',
                                '_orientation'):
                        self._task_stage_observation_keys.append(goal_name +
                                                                 key)
        self.current_stack_levels = num_of_levels
        self.current_blocks_mass = blocks_mass
        self.current_blocks_min_size = blocks_min_size
        self.current_max_level_width = max_level_width
        for i in range(len(silhouettes_creation_dicts)):
            self._stage.add_silhoutte_general_object(
                **silhouettes_creation_dicts[i])
        return

    def _generate_random_block(self,
                               allowed_boundaries,
                               start_z,
                               min_size=0.035,
                               max_level_width=0.12):
        """
        Returns a random size and position of a block while respecting the
        allowed boundaries passed.

        :param allowed_boundaries: (list) specifies a bounding box (2, 2).
        :param start_z: (float) z position to start generating the blocks
                                from.
        :param min_size: (float) minimum side length of the block.
        :param max_level_width: (float) maximum width of a level.

        :return: (tuple) size, position.
        """
        allowed_boundaries[0][0] = max(
            self._stage.get_arena_bb()[0][0] + min_size,
            allowed_boundaries[0][0])
        allowed_boundaries[1][0] = min(
            self._stage.get_arena_bb()[1][0] - min_size,
            allowed_boundaries[1][0])
        allowed_boundaries[0][1] = max(
            self._stage.get_arena_bb()[0][1] + min_size,
            allowed_boundaries[0][1])
        allowed_boundaries[1][1] = min(
            self._stage.get_arena_bb()[1][1] - min_size,
            allowed_boundaries[1][1])
        position_x_y = np.random.uniform(allowed_boundaries[0][:2],
                                         allowed_boundaries[1][:2])
        allowed_max_width = min(
            self._stage.get_arena_bb()[1][0] - position_x_y[0],
            position_x_y[0] - self._stage.get_arena_bb()[0][0]) * 2
        allowed_max_width = min(allowed_max_width, max_level_width)
        size = np.random.uniform(min_size,
                                 [allowed_max_width, min_size, min_size])
        position_z = start_z + size[-1] / 2
        position = np.array([position_x_y[0], position_x_y[1], position_z])
        return size, position

    def _generate_random_target(self,
                                num_of_levels=4,
                                min_size=0.035,
                                max_level_width=0.12):
        """
        Generates a randomly sampled target; should be modified for new
        goal-sampling schemes.

        :param num_of_levels: (int) specifies the number of levels to be
                                    generated.
        :param min_size: (float) specifies the blocks' minimum size/
                                 side length for the goal shape generator.
        :param max_level_width: (float) specifies the maximum width of the
                                        goal shape.

        :return: (tuple) block_sizes, positions, chosen_y.
        """
        level_blocks = []
        current_boundaries = np.array([
            self._stage.get_arena_bb()[0][:2],
            self._stage.get_arena_bb()[1][:2]
        ])
        start_z = 0
        size, position = self._generate_random_block(
            allowed_boundaries=current_boundaries,
            start_z=start_z,
            min_size=min_size,
            max_level_width=max_level_width)
        level_blocks.append([[size[0], position[0]]])
        for level_index in range(1, num_of_levels):
            start_z = start_z + size[-1]
            new_allowed_boundaries = [
                position[:2] - size[:2] / 2, position[:2] + size[:2] / 2
            ]
            current_boundaries = [
                np.maximum(current_boundaries[0], new_allowed_boundaries[0]),
                np.minimum(current_boundaries[1], new_allowed_boundaries[1])
            ]
            size, position = self._generate_random_block(
                allowed_boundaries=current_boundaries,
                start_z=start_z,
                min_size=min_size,
                max_level_width=max_level_width)
            level_blocks.append([[size[0], position[0]]])
        chosen_y = position[1]
        new_level_blocks = self._generate_blocks_to_use(level_blocks,
                                                        min_size=min_size)
        new_level_blocks = self._generate_blocks_to_use(new_level_blocks,
                                                        min_size=min_size)
        new_level_blocks = self._generate_blocks_to_use(new_level_blocks,
                                                        min_size=min_size)
        block_sizes, positions = self._get_block_sizes(new_level_blocks,
                                                       min_size)
        return block_sizes, positions, chosen_y

    def _generate_blocks_to_use(self, level_blocks, min_size):
        """
        Splits blocks that are wide enough into two halves whenever the
        resulting structure remains stable.

        :param level_blocks: (list) [width, center] pairs per level.
        :param min_size: (float) minimum side length of a block.

        :return: (list) the updated level blocks.
        """
        new_level_blocks = list(level_blocks)
        for i in range(len(level_blocks)):
            current_level_blocks = level_blocks[i]
            for j in range(len(current_level_blocks)):
                if current_level_blocks[j][0] > min_size * 2:
                    block_1_center = current_level_blocks[j][1] + \
                        current_level_blocks[j][0] / 4
                    block_1_size = current_level_blocks[j][0] / 2
                    block_2_size = current_level_blocks[j][0] - block_1_size
                    block_2_center = block_1_center - block_1_size / 2 - \
                        block_2_size / 2
                    stability_levels_check = copy.deepcopy(
                        new_level_blocks[:i + 1])
                    stability_levels_check[i][j] = \
                        [block_1_size, block_1_center]
                    stability_levels_check[i].append(
                        [block_2_size, block_2_center])
                    if self._is_stable_structure(stability_levels_check):
                        new_level_blocks[:i + 1] = stability_levels_check
        return new_level_blocks

    def _is_stable_structure(self, level_blocks):
        """
        Checks that every block's center lies within the extent of the level
        below it.

        :param level_blocks: (list) [width, center] pairs per level.

        :return: (bool) True if the structure is stable.
        """
        current_min = -0.5
        current_max = 0.5
        for i in range(len(level_blocks)):
            current_level_blocks = level_blocks[i]
            new_min = 0.5
            new_max = -0.5
            for block in current_level_blocks:
                new_min = min(new_min, block[1] - block[0] / 2)
                new_max = max(new_max, block[1] + block[0] / 2)
                if block[1] > current_max or block[1] < current_min:
                    return False
            current_min = new_min
            current_max = new_max
        return True

    def _get_block_sizes(self, level_blocks, min_size):
        """
        Expands the [width, center] representation into full 3D block sizes
        and center positions.

        :param level_blocks: (list) [width, center] pairs per level.
        :param min_size: (float) minimum side length of a block.

        :return: (tuple) block_sizes, positions.
        """
        block_sizes = []
        positions = []
        for i in range(len(level_blocks)):
            block_sizes.append([])
            positions.append([])
            current_level_blocks = level_blocks[i]
            for block in current_level_blocks:
                block_sizes[-1].append([block[0], min_size, min_size])
                positions[-1].append(block[1])
        return block_sizes, positions
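A hedged sketch of running this task generator inside an environment; the import path and constructor arguments follow the causal_world package conventions but should be checked against the installed version.

from causal_world.envs import CausalWorld  # assumed import path

task = CreativeStackedBlocksGeneratorTask(num_of_levels=4,
                                          blocks_min_size=0.035)
env = CausalWorld(task=task, enable_visualization=False)
obs = env.reset()
for _ in range(10):
    obs, reward, done, info = env.step(env.action_space.sample())
env.close()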
import math

import numpy as np

# Assumed import paths from the causal_world package.
from causal_world.task_generators.base_task import BaseTask
from causal_world.utils.rotation_utils import euler_to_quaternion


class GeneralGeneratorTask(BaseTask):

    def __init__(self,
                 variables_space='space_a_b',
                 fractional_reward_weight=1,
                 dense_reward_weights=np.array([]),
                 activate_sparse_reward=False,
                 tool_block_mass=0.08,
                 joint_positions=None,
                 tool_block_size=0.05,
                 nums_objects=5):
        """
        This task generator generates a general/random configuration of
        blocks by dropping random blocks from the air and waiting until they
        come to a rest position; the resulting arrangement becomes the new
        shape/goal that the actor needs to achieve.

        :param variables_space: (str) space to be used, either 'space_a',
                                      'space_b' or 'space_a_b'.
        :param fractional_reward_weight: (float) weight multiplied by the
                                                 fractional volumetric
                                                 overlap in the reward.
        :param dense_reward_weights: (list float) specifies the reward weights
                                                  for all the other reward
                                                  terms calculated in the
                                                  calculate_dense_rewards
                                                  function.
        :param activate_sparse_reward: (bool) specifies whether to sparsify
                                              the reward to +1 or 0 depending
                                              on whether the volumetric
                                              fraction overlap exceeds 90%.
        :param tool_block_mass: (float) specifies the blocks' mass.
        :param joint_positions: (nd.array) specifies the joint positions to
                                           start the episode with. None if the
                                           default is to be used.
        :param tool_block_size: (float) specifies the blocks' size.
        :param nums_objects: (int) specifies the number of objects to be
                                   dropped from the air.
        """
        super().__init__(task_name='general',
                         variables_space=variables_space,
                         fractional_reward_weight=fractional_reward_weight,
                         dense_reward_weights=dense_reward_weights,
                         activate_sparse_reward=activate_sparse_reward)
        self._task_robot_observation_keys = [
            'time_left_for_task', 'joint_positions', 'joint_velocities',
            'end_effector_positions'
        ]
        self._task_params['tool_block_mass'] = tool_block_mass
        self._task_params['joint_positions'] = joint_positions
        self._task_params['nums_objects'] = nums_objects
        self._task_params['tool_block_size'] = tool_block_size
        self.default_drop_positions = [[0.1, 0.1, 0.2], [0, 0, 0.2],
                                       [0.05, 0.05, 0.3],
                                       [-0.05, -0.05, 0.1],
                                       [-0.12, -0.12, 0.2],
                                       [-0.12, 0.12, 0.2],
                                       [0.12, -0.1, 0.3],
                                       [0.09, -0.08, 0.1]]
        self.tool_mass = self._task_params['tool_block_mass']
        self.nums_objects = self._task_params['nums_objects']
        self.tool_block_size = np.array(self._task_params['tool_block_size'])

    def get_description(self):
        """
        :return: (str) returns the description of the task itself.
        """
        return 'Task where the goal is to rearrange available objects ' \
               'into a target configuration'

    def _set_up_stage_arena(self):
        """
        :return:
        """
        self._generate_goal_configuration_with_objects(default_bool=True)
        return

    def _set_intervention_space_a(self):
        """
        :return:
        """
        super(GeneralGeneratorTask, self)._set_intervention_space_a()
        for visual_object in self._stage.get_visual_objects():
            del self._intervention_space_a[visual_object]
        for rigid_object in self._stage.get_rigid_objects():
            del self._intervention_space_a[rigid_object]['size']
        self._intervention_space_a['nums_objects'] = np.array([1, 5])
        self._intervention_space_a['blocks_mass'] = np.array([0.02, 0.06])
        self._intervention_space_a['tool_block_size'] = np.array([0.05, 0.07])
        return

    def _set_intervention_space_b(self):
        """
        :return:
        """
        super(GeneralGeneratorTask, self)._set_intervention_space_b()
        for visual_object in self._stage.get_visual_objects():
            del self._intervention_space_b[visual_object]
        for rigid_object in self._stage.get_rigid_objects():
            del self._intervention_space_b[rigid_object]['size']
        self._intervention_space_b['nums_objects'] = np.array([6, 9])
        self._intervention_space_b['blocks_mass'] = np.array([0.06, 0.08])
        self._intervention_space_b['tool_block_size'] = np.array([0.04, 0.05])
        return

    def sample_new_goal(self, level=None):
        """
        Used to sample a new goal from the corresponding shape families.

        :param level: (int) specifying the level - not used for now.

        :return: (dict) the corresponding interventions dict that could then
                        be applied to get a new sampled goal.
        """
        intervention_dict = dict()
        if self._task_params['variables_space'] == 'space_a':
            intervention_space = self._intervention_space_a
        elif self._task_params['variables_space'] == 'space_b':
            intervention_space = self._intervention_space_b
        elif self._task_params['variables_space'] == 'space_a_b':
            intervention_space = self._intervention_space_a_b
        intervention_dict['nums_objects'] = np.random.randint(
            intervention_space['nums_objects'][0],
            intervention_space['nums_objects'][1])
        intervention_dict['blocks_mass'] = np.random.uniform(
            intervention_space['blocks_mass'][0],
            intervention_space['blocks_mass'][1])
        intervention_dict['tool_block_size'] = np.random.uniform(
            intervention_space['tool_block_size'][0],
            intervention_space['tool_block_size'][1])
        return intervention_dict

    def get_task_generator_variables_values(self):
        """
        :return: (dict) specifying the variables belonging to the task itself.
        """
        return {
            'nums_objects': self.nums_objects,
            'blocks_mass': self.tool_mass,
            'tool_block_size': self.tool_block_size
        }

    def apply_task_generator_interventions(self, interventions_dict):
        """
        :param interventions_dict: (dict) variables and their corresponding
                                          intervention value.

        :return: (tuple) first position indicates whether the intervention
                         was successful, second position indicates whether
                         the observation space needs to be reset.
        """
        if len(interventions_dict) == 0:
            return True, False
        reset_observation_space = True
        if 'nums_objects' in interventions_dict:
            self.nums_objects = interventions_dict['nums_objects']
        if 'tool_block_size' in interventions_dict:
            self.tool_block_size = interventions_dict['tool_block_size']
        if 'blocks_mass' in interventions_dict:
            self.tool_mass = interventions_dict['blocks_mass']
        if 'nums_objects' in interventions_dict or \
                'tool_block_size' in interventions_dict:
            self._generate_goal_configuration_with_objects(default_bool=False)
        elif 'blocks_mass' in interventions_dict:
            new_interventions_dict = dict()
            for rigid_object in self._stage.get_rigid_objects():
                if self._stage.get_rigid_objects()[
                        rigid_object].is_not_fixed():
                    new_interventions_dict[rigid_object] = dict()
                    new_interventions_dict[rigid_object]['mass'] = \
                        self.tool_mass
            self._stage.apply_interventions(new_interventions_dict)
        else:
            raise Exception('this task generator variable is not yet defined')
        self._set_intervention_space_b()
        self._set_intervention_space_a()
        self._set_intervention_space_a_b()
        self._stage.finalize_stage()
        return True, reset_observation_space

    def _generate_goal_configuration_with_objects(self, default_bool):
        """
        Drops blocks into the arena, lets the simulation settle, and records
        the resulting poses as the goal configuration.

        :param default_bool: (bool) whether to use the default drop positions
                                    instead of sampling random ones.

        :return:
        """
        self._stage.remove_everything()
        stage_low_bound = np.array(self._stage.get_arena_bb()[0])
        stage_low_bound[:2] += 0.04
        stage_upper_bound = np.array(self._stage.get_arena_bb()[1])
        stage_upper_bound[:2] -= 0.04
        stage_upper_bound[2] -= 0.08
        self._task_stage_observation_keys = []
        joint_positions = self._robot.get_joint_positions_raised()
        self._robot.reset_state(joint_positions=joint_positions,
                                joint_velocities=np.zeros(9))
        for object_num in range(self.nums_objects):
            if default_bool:
                dropping_position = self.default_drop_positions[
                    object_num % len(self.default_drop_positions)]
                dropping_orientation = [0, 0, 0, 1]
            else:
                dropping_position = np.random.uniform(stage_low_bound,
                                                      stage_upper_bound)
                dropping_orientation = euler_to_quaternion(
                    np.random.uniform(low=0, high=2 * math.pi, size=3))
            creation_dict = {
                'name': 'tool_' + str(object_num),
                'shape': 'cube',
                'initial_position': dropping_position,
                'initial_orientation': dropping_orientation,
                'mass': self.tool_mass,
                'size': np.repeat(self.tool_block_size, 3)
            }
            self._stage.add_rigid_general_object(**creation_dict)
            for key in ('_type', '_size', '_cartesian_position',
                        '_orientation', '_linear_velocity',
                        '_angular_velocity'):
                self._task_stage_observation_keys.append(
                    'tool_' + str(object_num) + key)
        self._robot.forward_simulation(time=0.8)
        for rigid_object in self._stage._rigid_objects:
            goal_name = rigid_object.replace('tool', 'goal')
            creation_dict = {
                'name': goal_name,
                'shape': 'cube',
                'position': self._stage.get_object_state(
                    rigid_object, 'cartesian_position'),
                'orientation': self._stage.get_object_state(
                    rigid_object, 'orientation'),
                'size': np.repeat(self.tool_block_size, 3)
            }
            self._stage.add_silhoutte_general_object(**creation_dict)
            for key in ('_type', '_size', '_cartesian_position',
                        '_orientation'):
                self._task_stage_observation_keys.append(goal_name + key)
            # rescatter the tool block to a feasible pose, up to 10 trials
            trial_index = 1
            block_position = self._stage.random_position(
                height_limits=[self.tool_block_size / 2.0, 0.15])
            block_orientation = euler_to_quaternion(
                [0, 0, np.random.uniform(-np.pi, np.pi)])
            self._stage.set_objects_pose(names=[rigid_object],
                                         positions=[block_position],
                                         orientations=[block_orientation])
            self._robot.step_simulation()
            while not self._stage.check_feasiblity_of_stage() and \
                    trial_index < 10:
                block_position = self._stage.random_position(
                    height_limits=[self.tool_block_size / 2.0, 0.15])
                block_orientation = euler_to_quaternion(
                    [0, 0, np.random.uniform(-np.pi, np.pi)])
                self._stage.set_objects_pose(
                    names=[rigid_object],
                    positions=[block_position],
                    orientations=[block_orientation])
                self._robot.step_simulation()
                trial_index += 1
        self._robot.reset_state(joint_positions=joint_positions,
                                joint_velocities=np.zeros([9]))
        self._robot.update_latest_full_state()
        return
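A hedged sketch of resampling the goal configuration through the environment's intervention API; do_intervention and its return signature follow the causal_world conventions but are worth verifying against the installed version.

from causal_world.envs import CausalWorld  # assumed import path

task = GeneralGeneratorTask(nums_objects=3)
env = CausalWorld(task=task, enable_visualization=False)
env.reset()
# Draw new task-generator variables and apply them as an intervention.
goal_intervention = task.sample_new_goal()
success_signal, obs = env.do_intervention(goal_intervention)  # assumed return
env.close()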
import numpy as np

# Assumed import path from the causal_world package.
from causal_world.task_generators.base_task import BaseTask


class PickingTaskGenerator(BaseTask):

    def __init__(self,
                 variables_space='space_a_b',
                 fractional_reward_weight=1,
                 dense_reward_weights=np.array(
                     [250, 0, 125, 0, 750, 0, 0, 0.005]),
                 activate_sparse_reward=False,
                 tool_block_mass=0.02,
                 joint_positions=None,
                 tool_block_position=np.array([0, 0, 0.0325]),
                 tool_block_orientation=np.array([0, 0, 0, 1]),
                 goal_height=0.15):
        """
        This task generator generates a task for picking an object up in the
        air.

        :param variables_space: (str) space to be used, either 'space_a',
                                      'space_b' or 'space_a_b'.
        :param fractional_reward_weight: (float) weight multiplied by the
                                                 fractional volumetric
                                                 overlap in the reward.
        :param dense_reward_weights: (list float) specifies the reward weights
                                                  for all the other reward
                                                  terms calculated in the
                                                  calculate_dense_rewards
                                                  function.
        :param activate_sparse_reward: (bool) specifies whether to sparsify
                                              the reward to +1 or 0 depending
                                              on whether the volumetric
                                              fraction overlap exceeds 90%.
        :param tool_block_mass: (float) specifies the block's mass.
        :param joint_positions: (nd.array) specifies the joint positions to
                                           start the episode with. None if the
                                           default is to be used.
        :param tool_block_position: (nd.array) specifies the cartesian
                                               position of the tool block,
                                               x, y, z.
        :param tool_block_orientation: (nd.array) specifies the orientation
                                                  of the tool block as a
                                                  quaternion, x, y, z, w (the
                                                  original docstring said
                                                  euler angles, but the
                                                  default is a 4-element
                                                  quaternion).
        :param goal_height: (float) specifies the goal height that needs to
                                    be reached.
        """
        super().__init__(task_name='picking',
                         variables_space=variables_space,
                         fractional_reward_weight=fractional_reward_weight,
                         dense_reward_weights=dense_reward_weights,
                         activate_sparse_reward=activate_sparse_reward)
        self._task_robot_observation_keys = [
            'time_left_for_task', 'joint_positions', 'joint_velocities',
            'end_effector_positions'
        ]
        self._task_params['goal_height'] = goal_height
        self._task_params['tool_block_mass'] = tool_block_mass
        self._task_params['joint_positions'] = joint_positions
        self._task_params['tool_block_position'] = tool_block_position
        self._task_params['tool_block_orientation'] = tool_block_orientation
        self.previous_object_position = None
        self.previous_end_effector_positions = None
        self.previous_joint_velocities = None

    def get_description(self):
        """
        :return: (str) returns the description of the task itself.
        """
        return 'Task where the goal is to pick a cube towards a goal height'

    def _set_up_stage_arena(self):
        """
        :return:
        """
        creation_dict = {
            'name': 'tool_block',
            'shape': 'cube',
            'initial_position': self._task_params['tool_block_position'],
            'initial_orientation':
                self._task_params['tool_block_orientation'],
            'mass': self._task_params['tool_block_mass']
        }
        self._stage.add_rigid_general_object(**creation_dict)
        goal_block_position = np.array(
            self._task_params['tool_block_position'])
        goal_block_position[-1] = self._task_params['goal_height']
        creation_dict = {
            'name': 'goal_block',
            'shape': 'cube',
            'position': goal_block_position,
            'orientation': self._task_params['tool_block_orientation']
        }
        self._stage.add_silhoutte_general_object(**creation_dict)
        self._task_stage_observation_keys = [
            'tool_block_type', 'tool_block_size',
            'tool_block_cartesian_position', 'tool_block_orientation',
            'tool_block_linear_velocity', 'tool_block_angular_velocity',
            'goal_block_type', 'goal_block_size',
            'goal_block_cartesian_position', 'goal_block_orientation'
        ]
        return

    def _set_intervention_space_a(self):
        """
        :return:
        """
        super(PickingTaskGenerator, self)._set_intervention_space_a()
        for visual_object in self._stage.get_visual_objects():
            self._intervention_space_a[visual_object][
                'cylindrical_position'][0][-1] = 0.08
            self._intervention_space_a[visual_object][
                'cylindrical_position'][1][-1] = 0.2
        return

    def _set_intervention_space_b(self):
        """
        :return:
        """
        super(PickingTaskGenerator, self)._set_intervention_space_b()
        for visual_object in self._stage.get_visual_objects():
            self._intervention_space_b[visual_object][
                'cylindrical_position'][0][-1] = 0.2
            self._intervention_space_b[visual_object][
                'cylindrical_position'][1][-1] = 0.25
        return

    def _calculate_dense_rewards(self, desired_goal, achieved_goal):
        """
        :param desired_goal:
        :param achieved_goal:

        :return:
        """
        rewards = list()
        block_position = self._stage.get_object_state(
            'tool_block', 'cartesian_position')
        target_height = self._stage.get_object_state(
            'goal_block', 'cartesian_position')[-1]
        joint_velocities = self._robot.get_latest_full_state()['velocities']
        # progress of the block towards the goal height
        previous_block_to_goal = abs(self.previous_object_position[2] -
                                     target_height)
        current_block_to_goal = abs(block_position[2] - target_height)
        rewards.append(previous_block_to_goal - current_block_to_goal)
        rewards.append(-current_block_to_goal)
        # progress of the block towards the arena center
        previous_block_to_center = np.sqrt(
            self.previous_object_position[0]**2 +
            self.previous_object_position[1]**2)
        current_block_to_center = np.sqrt(block_position[0]**2 +
                                          block_position[1]**2)
        rewards.append(previous_block_to_center - current_block_to_center)
        rewards.append(-current_block_to_center)
        # progress of the end effectors towards the block
        end_effector_positions = \
            self._robot.get_latest_full_state()['end_effector_positions']
        end_effector_positions = end_effector_positions.reshape(-1, 3)
        current_distance_from_block = np.linalg.norm(end_effector_positions -
                                                     block_position)
        previous_distance_from_block = np.linalg.norm(
            self.previous_end_effector_positions -
            self.previous_object_position)
        rewards.append(previous_distance_from_block -
                       current_distance_from_block)
        rewards.append(-current_distance_from_block)
        # penalize the two closest fingers for staying outside the block's
        # bounding ellipsoid
        object_size = self._stage.get_object_state('tool_block', 'size')
        dist_outside_bounding_ellipsoid = np.copy(
            np.abs(end_effector_positions - block_position))
        dist_outside_bounding_ellipsoid[
            dist_outside_bounding_ellipsoid < object_size] = 0
        dist_outside_bounding_ellipsoid = np.mean(
            dist_outside_bounding_ellipsoid, axis=1)
        dist_outside_bounding_ellipsoid.sort()
        rewards.append(-np.sum(dist_outside_bounding_ellipsoid[:2]))
        # penalize jerky joint velocity changes
        rewards.append(-np.linalg.norm(joint_velocities -
                                       self.previous_joint_velocities))
        update_task_info = {
            'current_end_effector_positions': end_effector_positions,
            'current_tool_block_position': block_position,
            'current_velocity': joint_velocities
        }
        return rewards, update_task_info

    def _update_task_state(self, update_task_info):
        """
        :param update_task_info:

        :return:
        """
        self.previous_end_effector_positions = \
            update_task_info['current_end_effector_positions']
        self.previous_object_position = \
            update_task_info['current_tool_block_position']
        self.previous_joint_velocities = \
            update_task_info['current_velocity']
        return

    def _set_task_state(self):
        """
        :return:
        """
        self.previous_end_effector_positions = \
            self._robot.get_latest_full_state()['end_effector_positions']
        self.previous_end_effector_positions = \
            self.previous_end_effector_positions.reshape(-1, 3)
        self.previous_object_position = \
            self._stage.get_object_state('tool_block', 'cartesian_position')
        self.previous_joint_velocities = \
            self._robot.get_latest_full_state()['velocities']
        return

    def _handle_contradictory_interventions(self, interventions_dict):
        """
        :param interventions_dict:

        :return:
        """
        if 'goal_block' in interventions_dict:
            if 'size' in interventions_dict['goal_block']:
                if 'tool_block' not in interventions_dict:
                    interventions_dict['tool_block'] = dict()
                interventions_dict['tool_block']['size'] = \
                    interventions_dict['goal_block']['size']
            if 'cylindrical_position' in interventions_dict['goal_block']:
                # keep the goal centered above the arena origin
                interventions_dict['goal_block'][
                    'cylindrical_position'][0] = 0
                interventions_dict['goal_block'][
                    'cylindrical_position'][1] = 0
        elif 'tool_block' in interventions_dict:
            if 'size' in interventions_dict['tool_block']:
                if 'goal_block' not in interventions_dict:
                    interventions_dict['goal_block'] = dict()
                interventions_dict['goal_block']['size'] = \
                    interventions_dict['tool_block']['size']
        return interventions_dict

    def sample_new_goal(self, level=None):
        """
        Used to sample a new goal from the corresponding shape families.

        :param level: (int) specifying the level - not used for now.

        :return: (dict) the corresponding interventions dict that could then
                        be applied to get a new sampled goal.
        """
        intervention_dict = dict()
        intervention_dict['goal_block'] = dict()
        if self._task_params['variables_space'] == 'space_a':
            intervention_space = self._intervention_space_a
        elif self._task_params['variables_space'] == 'space_b':
            intervention_space = self._intervention_space_b
        elif self._task_params['variables_space'] == 'space_a_b':
            intervention_space = self._intervention_space_a_b
        intervention_dict['goal_block']['cylindrical_position'] = np.array([
            0, 0,
            np.random.uniform(
                intervention_space['goal_block']['cylindrical_position']
                [0][-1],
                intervention_space['goal_block']['cylindrical_position']
                [1][-1])
        ])
        return intervention_dict

    def _adjust_variable_spaces_after_intervention(self, interventions_dict):
        """
        Raises the lower height bounds of the block positions to half the new
        block size after a size intervention.

        :param interventions_dict: (dict) the intervention that was just
                                          applied.

        :return:
        """
        spaces = [
            self._intervention_space_a, self._intervention_space_b,
            self._intervention_space_a_b
        ]
        if 'tool_block' in interventions_dict:
            if 'size' in interventions_dict['tool_block']:
                for variable_space in spaces:
                    variable_space['tool_block'][
                        'cylindrical_position'][0][-1] = \
                        self._stage.get_object_state('tool_block',
                                                     'size')[-1] / 2.0
                    variable_space['goal_block'][
                        'cylindrical_position'][0][-1] = \
                        self._stage.get_object_state('goal_block',
                                                     'size')[-1] / 2.0
        return
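A hedged sketch of running the picking task; the import path and environment constructor follow the causal_world conventions but should be checked against the installed version.

from causal_world.envs import CausalWorld  # assumed import path

task = PickingTaskGenerator(goal_height=0.2, activate_sparse_reward=False)
env = CausalWorld(task=task, enable_visualization=False)
obs = env.reset()
for _ in range(10):
    obs, reward, done, info = env.step(env.action_space.sample())
print(reward)
env.close()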
import numpy as np

# Assumed import path from the causal_world package.
from causal_world.task_generators.base_task import BaseTask


class ReachingTaskGenerator(BaseTask):

    def __init__(self,
                 variables_space='space_a_b',
                 fractional_reward_weight=1,
                 dense_reward_weights=np.array([100000, 0, 0, 0]),
                 default_goal_60=np.array([0, 0, 0.1]),
                 default_goal_120=np.array([0, 0, 0.13]),
                 default_goal_300=np.array([0, 0, 0.16]),
                 joint_positions=None,
                 activate_sparse_reward=False):
        """
        This task generator will generate a task for reaching.

        :param variables_space: (str) space to be used, either 'space_a',
                                      'space_b' or 'space_a_b'.
        :param fractional_reward_weight: (float) weight multiplied by the
                                                 fractional volumetric
                                                 overlap in the reward.
        :param dense_reward_weights: (list float) specifies the reward weights
                                                  for all the other reward
                                                  terms calculated in the
                                                  calculate_dense_rewards
                                                  function.
        :param default_goal_60: (nd.array) the position of the goal for the
                                           first finger, x, y, z.
        :param default_goal_120: (nd.array) the position of the goal for the
                                            second finger, x, y, z.
        :param default_goal_300: (nd.array) the position of the goal for the
                                            third finger, x, y, z.
        :param joint_positions: (nd.array) specifies the joint positions to
                                           start the episode with. None if the
                                           default is to be used.
        :param activate_sparse_reward: (bool) specifies whether to sparsify
                                              the reward to +1 or 0 depending
                                              on whether the mean distance
                                              from the goal is < 0.01.
        """
        super().__init__(task_name='reaching',
                         variables_space=variables_space,
                         fractional_reward_weight=fractional_reward_weight,
                         dense_reward_weights=dense_reward_weights,
                         activate_sparse_reward=activate_sparse_reward)
        self._task_robot_observation_keys = [
            'time_left_for_task', 'joint_positions', 'joint_velocities',
            'end_effector_positions'
        ]
        self._task_params['default_goal_60'] = default_goal_60
        self._task_params['default_goal_120'] = default_goal_120
        self._task_params['default_goal_300'] = default_goal_300
        self._task_params['joint_positions'] = joint_positions
        self.previous_end_effector_positions = None
        self.previous_joint_velocities = None
        self.current_number_of_obstacles = 0

    def _set_up_stage_arena(self):
        """
        :return:
        """
        creation_dict = {
            'name': 'goal_60',
            'shape': 'sphere',
            'color': np.array([1, 0, 0]),
            'position': self._task_params['default_goal_60']
        }
        self._stage.add_silhoutte_general_object(**creation_dict)
        creation_dict = {
            'name': 'goal_120',
            'shape': 'sphere',
            'color': np.array([0, 1, 0]),
            'position': self._task_params['default_goal_120']
        }
        self._stage.add_silhoutte_general_object(**creation_dict)
        creation_dict = {
            'name': 'goal_300',
            'shape': 'sphere',
            'color': np.array([0, 0, 1]),
            'position': self._task_params['default_goal_300']
        }
        self._stage.add_silhoutte_general_object(**creation_dict)
        self._task_stage_observation_keys = [
            'goal_60_cartesian_position', 'goal_120_cartesian_position',
            'goal_300_cartesian_position'
        ]
        return

    def get_description(self):
        """
        :return: (str) returns the description of the task itself.
        """
        return 'Task where the goal is to reach a goal point for each finger'

    def _calculate_dense_rewards(self, desired_goal, achieved_goal):
        """
        :param desired_goal:
        :param achieved_goal:

        :return:
        """
        end_effector_positions_goal = desired_goal
        current_end_effector_positions = achieved_goal
        # reward the decrease in distance to the goal positions
        previous_dist_to_goal = np.linalg.norm(
            end_effector_positions_goal -
            self.previous_end_effector_positions)
        current_dist_to_goal = np.linalg.norm(end_effector_positions_goal -
                                              current_end_effector_positions)
        rewards = list()
        rewards.append(previous_dist_to_goal - current_dist_to_goal)
        rewards.append(-current_dist_to_goal)
        # penalize high torques and jerky velocity changes
        rewards.append(
            -np.linalg.norm(self._robot.get_latest_full_state()['torques']))
        rewards.append(-np.linalg.norm(
            np.abs(self._robot.get_latest_full_state()['velocities'] -
                   self.previous_joint_velocities),
            ord=2))
        update_task_info = {
            'current_end_effector_positions': current_end_effector_positions,
            'current_velocity':
                self._robot.get_latest_full_state()['velocities']
        }
        return rewards, update_task_info

    def _update_task_state(self, update_task_info):
        """
        :param update_task_info:

        :return:
        """
        self.previous_end_effector_positions = \
            update_task_info['current_end_effector_positions']
        self.previous_joint_velocities = \
            update_task_info['current_velocity']
        return

    def _set_task_state(self):
        """
        :return:
        """
        self.previous_end_effector_positions = \
            self._robot.get_latest_full_state()['end_effector_positions']
        self.previous_joint_velocities = \
            self._robot.get_latest_full_state()['velocities']
        return

    def get_desired_goal(self):
        """
        :return: (nd.array) specifies the desired goal as an array of all
                            three positions of the finger goals.
        """
        desired_goal = np.array([])
        desired_goal = np.append(
            desired_goal,
            self._stage.get_object_state('goal_60', 'cartesian_position'))
        desired_goal = np.append(
            desired_goal,
            self._stage.get_object_state('goal_120', 'cartesian_position'))
        desired_goal = np.append(
            desired_goal,
            self._stage.get_object_state('goal_300', 'cartesian_position'))
        return desired_goal

    def get_achieved_goal(self):
        """
        :return: (nd.array) specifies the achieved goal as concatenated
                            end-effector positions.
        """
        achieved_goal = \
            self._robot.get_latest_full_state()['end_effector_positions']
        return np.array(achieved_goal)

    def _goal_reward(self, achieved_goal, desired_goal):
        """
        :param achieved_goal:
        :param desired_goal:

        :return:
        """
        current_end_effector_positions = achieved_goal
        current_dist_to_goal = np.abs(desired_goal -
                                      current_end_effector_positions)
        current_dist_to_goal_mean = np.mean(current_dist_to_goal)
        return np.array(current_dist_to_goal_mean)

    def _check_preliminary_success(self, goal_reward):
        """
        :param goal_reward:

        :return:
        """
        if goal_reward < 0.01:
            return True
        else:
            return False

    def _calculate_fractional_success(self, goal_reward):
        """
        :param goal_reward:
        :return:
        """
        # distances <= 1 cm count as full success, >= 3 cm as zero, with a
        # linear ramp in between.
        clipped_distance = np.clip(goal_reward, 0.01, 0.03)
        distance_from_success = clipped_distance - 0.01
        fractional_success = 1 - distance_from_success / 0.02
        return fractional_success

    def get_info(self):
        """
        :return: (dict) returns the info dictionary after every step of the
                        environment.
        """
        info = dict()
        info['desired_goal'] = self._current_desired_goal
        info['achieved_goal'] = self._current_achieved_goal
        info['success'] = self._task_solved
        if self._is_ground_truth_state_exposed:
            info['ground_truth_current_state_variables'] = \
                self.get_current_variable_values()
        if self._is_partial_solution_exposed:
            info['possible_solution_intervention'] = dict()
            info['possible_solution_intervention']['joint_positions'] = \
                self._robot.get_joint_positions_from_tip_positions(
                    self._current_desired_goal,
                    self._robot.get_latest_full_state()['positions'])
        info['fractional_success'] = \
            self._calculate_fractional_success(self._current_goal_reward)
        return info

    def _set_intervention_space_a(self):
        """
        :return:
        """
        super(ReachingTaskGenerator, self)._set_intervention_space_a()
        self._intervention_space_a['number_of_obstacles'] = np.array([1, 5])
        return

    def _set_intervention_space_b(self):
        """
        :return:
        """
        super(ReachingTaskGenerator, self)._set_intervention_space_b()
        self._intervention_space_b['number_of_obstacles'] = np.array([1, 5])
        # the source row is truncated here; a trailing return is assumed to
        # mirror _set_intervention_space_a above.
        return
return def get_task_generator_variables_values(self): '\n\n :return: (dict) specifying the variables belonging to the task itself.\n ' task_generator_variables = dict() task_generator_variables['number_of_obstacles'] = self.current_number_of_obstacles return task_generator_variables def apply_task_generator_interventions(self, interventions_dict): '\n\n :param interventions_dict: (dict) variables and their corresponding\n intervention value.\n\n :return: (tuple) first position if the intervention was successful or\n not, and second position indicates if\n observation_space needs to be reset.\n ' if (len(interventions_dict) == 0): return (True, False) reset_observation_space = False if ('number_of_obstacles' in interventions_dict): if (int(interventions_dict['number_of_obstacles']) > self.current_number_of_obstacles): reset_observation_space = True for i in range(self.current_number_of_obstacles, int(interventions_dict['number_of_obstacles'])): self._stage.add_rigid_general_object(name=('obstacle_' + str(i)), shape='static_cube', size=np.array([0.01, 0.01, 0.01]), color=np.array([0, 0, 0]), position=np.random.uniform(WorldConstants.ARENA_BB[0], WorldConstants.ARENA_BB[1])) self.current_number_of_obstacles += 1 self._task_stage_observation_keys.append((('obstacle_' + str(i)) + '_type')) self._task_stage_observation_keys.append((('obstacle_' + str(i)) + '_size')) self._task_stage_observation_keys.append((('obstacle_' + str(i)) + '_cartesian_position')) self._task_stage_observation_keys.append((('obstacle_' + str(i)) + '_orientation')) else: return (True, reset_observation_space) else: raise Exception('this task generator variable is not yet defined') self._set_intervention_space_b() self._set_intervention_space_a() self._set_intervention_space_a_b() self._stage.finalize_stage() return (True, reset_observation_space)
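# Illustrative sketch (an assumption, not library code): exercising the
# ``number_of_obstacles`` task-generator intervention defined above on the
# reaching task; ``_example_reaching_obstacles`` is a hypothetical helper.
def _example_reaching_obstacles():
    from causal_world.task_generators import generate_task
    from causal_world.envs import CausalWorld
    task = generate_task(task_generator_id='reaching')
    env = CausalWorld(task=task, skip_frame=10, enable_visualization=False)
    env.reset()
    # adding obstacles extends the stage observation keys, which is why
    # ``apply_task_generator_interventions`` above flags an observation
    # space reset.
    env.do_intervention({'number_of_obstacles': 3})
    for _ in range(10):
        obs, reward, done, info = env.step(env.action_space.sample())
    env.close()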
class StackedBlocksGeneratorTask(BaseTask): def __init__(self, variables_space='space_a_b', fractional_reward_weight=1, dense_reward_weights=np.array([]), activate_sparse_reward=False, tool_block_mass=0.08, joint_positions=None, blocks_min_size=0.035, num_of_levels=5, max_level_width=0.25): "\n This task generator will generate a task for stacking an arbitrary random\n configuration of blocks above each other.\n\n :param variables_space: (str) space to be used either 'space_a' or\n 'space_b' or 'space_a_b'\n :param fractional_reward_weight: (float) weight multiplied by the\n fractional volumetric\n overlap in the reward.\n :param dense_reward_weights: (list float) specifies the reward weights\n for all the other reward\n terms calculated in the\n calculate_dense_rewards\n function.\n :param activate_sparse_reward: (bool) specified if you want to\n sparsify the reward by having\n +1 or 0 if the volumetric\n fraction overlap more than 90%.\n :param tool_block_mass: (float) specifies the blocks mass.\n :param joint_positions: (nd.array) specifies the joints position to start\n the episode with. None if the default\n to be used.\n :param blocks_min_size: (float) specifies the blocks minimum size/\n side length for the goal shape generator.\n :param num_of_levels: (int) specifies the number of levels to be\n generated.\n :param max_level_width: (float) specifies the maximum width of the\n goal shape.\n " super().__init__(task_name='stacked_blocks', variables_space=variables_space, fractional_reward_weight=fractional_reward_weight, dense_reward_weights=dense_reward_weights, activate_sparse_reward=activate_sparse_reward) self._task_robot_observation_keys = ['time_left_for_task', 'joint_positions', 'joint_velocities', 'end_effector_positions'] self._task_params['tool_block_mass'] = tool_block_mass self._task_params['joint_positions'] = joint_positions self._task_params['blocks_min_size'] = blocks_min_size self._task_params['num_of_levels'] = num_of_levels self._task_params['max_level_width'] = max_level_width self.current_stack_levels = self._task_params['num_of_levels'] self.current_blocks_mass = self._task_params['tool_block_mass'] self.current_blocks_min_size = self._task_params['blocks_min_size'] self.current_max_level_width = self._task_params['max_level_width'] def get_description(self): '\n\n :return: (str) returns the description of the task itself.\n ' return 'Task where the goal is to stack arbitrary shapes of cuboids' def _set_up_stage_arena(self): '\n\n :return:\n ' silhouettes_creation_dicts = [] number_of_blocks_per_level = int((self._task_params['max_level_width'] / self._task_params['blocks_min_size'])) default_start_position = ((- (number_of_blocks_per_level * self._task_params['blocks_min_size'])) / 2) default_start_position += (self._task_params['blocks_min_size'] / 2) curr_height = (0 - (self._task_params['blocks_min_size'] / 2)) change_per_level = 0.005 rigid_block_side = 0.1 for level in range(self._task_params['num_of_levels']): change_per_level *= (- 1) curr_height += self._task_params['blocks_min_size'] start_position = (default_start_position + change_per_level) rigid_block_side *= (- 1) for i in range(number_of_blocks_per_level): creation_dict = {'name': (((('tool_' + 'level_') + str(level)) + '_num_') + str(i)), 'shape': 'cube', 'initial_position': [start_position, rigid_block_side, curr_height], 'initial_orientation': [0, 0, 0, 1], 'size': np.repeat(self._task_params['blocks_min_size'], 3), 'mass': self._task_params['tool_block_mass']} 
self._stage.add_rigid_general_object(**creation_dict) self._task_stage_observation_keys.append(((((('tool_' + 'level_') + str(level)) + '_num_') + str(i)) + '_type')) self._task_stage_observation_keys.append(((((('tool_' + 'level_') + str(level)) + '_num_') + str(i)) + '_size')) self._task_stage_observation_keys.append(((((('tool_' + 'level_') + str(level)) + '_num_') + str(i)) + '_cartesian_position')) self._task_stage_observation_keys.append(((((('tool_' + 'level_') + str(level)) + '_num_') + str(i)) + '_orientation')) self._task_stage_observation_keys.append(((((('tool_' + 'level_') + str(level)) + '_num_') + str(i)) + '_linear_velocity')) self._task_stage_observation_keys.append(((((('tool_' + 'level_') + str(level)) + '_num_') + str(i)) + '_angular_velocity')) creation_dict = {'name': (((('goal_' + 'level_') + str(level)) + '_num_') + str(i)), 'shape': 'cube', 'position': [start_position, 0, curr_height], 'orientation': [0, 0, 0, 1], 'size': np.repeat(self._task_params['blocks_min_size'], 3)} silhouettes_creation_dicts.append(copy.deepcopy(creation_dict)) self._task_stage_observation_keys.append(((((('goal_' + 'level_') + str(level)) + '_num_') + str(i)) + '_type')) self._task_stage_observation_keys.append(((((('goal_' + 'level_') + str(level)) + '_num_') + str(i)) + '_size')) self._task_stage_observation_keys.append(((((('goal_' + 'level_') + str(level)) + '_num_') + str(i)) + '_cartesian_position')) self._task_stage_observation_keys.append(((((('goal_' + 'level_') + str(level)) + '_num_') + str(i)) + '_orientation')) start_position += self._task_params['blocks_min_size'] for i in range(len(silhouettes_creation_dicts)): self._stage.add_silhoutte_general_object(**silhouettes_creation_dicts[i]) return def sample_new_goal(self, level=None): '\n\n Used to sample new goal from the corresponding shape families.\n\n :param level: (int) specifying the level - not used for now.\n\n :return: (dict) the corresponding interventions dict that could then\n be applied to get a new sampled goal.\n ' intervention_dict = dict() if (self._task_params['variables_space'] == 'space_a'): intervention_space = self._intervention_space_a elif (self._task_params['variables_space'] == 'space_b'): intervention_space = self._intervention_space_b elif (self._task_params['variables_space'] == 'space_a_b'): intervention_space = self._intervention_space_a_b intervention_dict['stack_levels'] = np.random.uniform(intervention_space['stack_levels'][0], intervention_space['stack_levels'][1]) intervention_dict['blocks_mass'] = np.random.uniform(intervention_space['blocks_mass'][0], intervention_space['blocks_mass'][1]) intervention_dict['blocks_min_size'] = np.random.uniform(intervention_space['blocks_min_size'][0], intervention_space['blocks_min_size'][1]) intervention_dict['max_level_width'] = np.random.uniform(intervention_space['max_level_width'][0], intervention_space['max_level_width'][1]) return intervention_dict def get_task_generator_variables_values(self): '\n\n :return: (dict) specifying the variables belonging to the task itself.\n ' return {'stack_levels': self.current_stack_levels, 'blocks_mass': self.current_blocks_mass, 'blocks_min_size': self.current_blocks_min_size, 'max_level_width': self.current_max_level_width} def apply_task_generator_interventions(self, interventions_dict): '\n\n :param interventions_dict: (dict) variables and their corresponding\n intervention value.\n\n :return: (tuple) first position if the intervention was successful or\n not, and second position indicates if\n observation_space 
needs to be reset.\n ' if (len(interventions_dict) == 0): return (True, False) reset_observation_space = True if ('max_level_width' in interventions_dict): self.current_max_level_width = interventions_dict['max_level_width'] if ('blocks_min_size' in interventions_dict): self.current_blocks_min_size = interventions_dict['blocks_min_size'] if ('stack_levels' in interventions_dict): self.current_stack_levels = interventions_dict['stack_levels'] if ('blocks_mass' in interventions_dict): self.current_blocks_mass = interventions_dict['blocks_mass'] if (('max_level_width' in interventions_dict) or ('blocks_min_size' in interventions_dict) or ('stack_levels' in interventions_dict)): self._create_new_challenge(num_of_levels=int(self.current_stack_levels), blocks_min_size=self.current_blocks_min_size, blocks_mass=self.current_blocks_mass, max_level_width=self.current_max_level_width) elif ('blocks_mass' in interventions_dict): new_interventions_dict = dict() for rigid_object in self._stage._rigid_objects: if self._stage._rigid_objects[rigid_object].is_not_fixed(): new_interventions_dict[rigid_object] = dict() new_interventions_dict[rigid_object]['mass'] = self.current_blocks_mass self._stage.apply_interventions(new_interventions_dict) else: raise Exception('this task generator variable is not yet defined') self._set_intervention_space_b() self._set_intervention_space_a() self._set_intervention_space_a_b() self._stage.finalize_stage() return (True, reset_observation_space) def _set_intervention_space_a(self): '\n\n :return:\n ' super(StackedBlocksGeneratorTask, self)._set_intervention_space_a() for visual_object in self._stage.get_visual_objects(): del self._intervention_space_a[visual_object] for rigid_object in self._stage.get_rigid_objects(): del self._intervention_space_a[rigid_object]['size'] self._intervention_space_a['stack_levels'] = np.array([1, 5]) self._intervention_space_a['blocks_mass'] = np.array([0.02, 0.06]) self._intervention_space_a['blocks_min_size'] = np.array([0.035, 0.065]) self._intervention_space_a['max_level_width'] = np.array([0.035, 0.12]) return def _set_intervention_space_b(self): '\n\n :return:\n ' super(StackedBlocksGeneratorTask, self)._set_intervention_space_b() for visual_object in self._stage.get_visual_objects(): del self._intervention_space_b[visual_object] for rigid_object in self._stage.get_rigid_objects(): del self._intervention_space_b[rigid_object]['size'] self._intervention_space_b['stack_levels'] = np.array([6, 8]) self._intervention_space_b['blocks_mass'] = np.array([0.06, 0.08]) self._intervention_space_b['blocks_min_size'] = np.array([0.065, 0.075]) self._intervention_space_b['max_level_width'] = np.array([0.12, 0.15]) return def _create_new_challenge(self, num_of_levels, blocks_min_size, blocks_mass, max_level_width): '\n\n :param num_of_levels:\n :param blocks_min_size:\n :param blocks_mass:\n :param max_level_width:\n\n :return:\n ' silhouettes_creation_dicts = [] self._stage.remove_everything() self._task_stage_observation_keys = [] (block_sizes, positions, chosen_y) = self._generate_random_target(num_of_levels=num_of_levels, min_size=blocks_min_size, max_level_width=max_level_width) for level_num in range(len(block_sizes)): for i in range(len(block_sizes[level_num])): creation_dict = {'name': (((('tool_' + 'level_') + str(level_num)) + '_num_') + str(i)), 'shape': 'cube', 'mass': blocks_mass, 'color': np.random.uniform(0, 1, size=[3]), 'size': block_sizes[level_num][i]} self._stage.add_rigid_general_object(**creation_dict) block_position = 
self._stage.random_position(height_limits=(block_sizes[level_num][i][(- 1)] / 2.0)) block_orientation = euler_to_quaternion([0, 0, np.random.uniform((- np.pi), np.pi)]) self._stage.set_objects_pose(names=[(((('tool_' + 'level_') + str(level_num)) + '_num_') + str(i))], positions=[block_position], orientations=[block_orientation]) trial_index = 0 self._robot.step_simulation() while ((not self._stage.check_feasiblity_of_stage()) and (trial_index < 10)): block_position = self._stage.random_position(height_limits=[(block_sizes[level_num][i][(- 1)] / 2.0), 0.15]) block_orientation = euler_to_quaternion([0, 0, np.random.uniform((- np.pi), np.pi)]) self._stage.set_objects_pose(names=[(((('tool_' + 'level_') + str(level_num)) + '_num_') + str(i))], positions=[block_position], orientations=[block_orientation]) self._robot.step_simulation() trial_index += 1 silhouette_position = [positions[level_num][i], chosen_y, (((level_num + 1) * blocks_min_size) + (((- blocks_min_size) / 2) + 0))] self._task_stage_observation_keys.append(((((('tool_' + 'level_') + str(level_num)) + '_num_') + str(i)) + '_type')) self._task_stage_observation_keys.append(((((('tool_' + 'level_') + str(level_num)) + '_num_') + str(i)) + '_size')) self._task_stage_observation_keys.append(((((('tool_' + 'level_') + str(level_num)) + '_num_') + str(i)) + '_cartesian_position')) self._task_stage_observation_keys.append(((((('tool_' + 'level_') + str(level_num)) + '_num_') + str(i)) + '_orientation')) self._task_stage_observation_keys.append(((((('tool_' + 'level_') + str(level_num)) + '_num_') + str(i)) + '_linear_velocity')) self._task_stage_observation_keys.append(((((('tool_' + 'level_') + str(level_num)) + '_num_') + str(i)) + '_angular_velocity')) self._task_stage_observation_keys.append(((((('goal_' + 'level_') + str(level_num)) + '_num_') + str(i)) + '_type')) self._task_stage_observation_keys.append(((((('goal_' + 'level_') + str(level_num)) + '_num_') + str(i)) + '_size')) self._task_stage_observation_keys.append(((((('goal_' + 'level_') + str(level_num)) + '_num_') + str(i)) + '_cartesian_position')) self._task_stage_observation_keys.append(((((('goal_' + 'level_') + str(level_num)) + '_num_') + str(i)) + '_orientation')) creation_dict = {'name': (((('goal_' + 'level_') + str(level_num)) + '_num_') + str(i)), 'shape': 'cube', 'position': np.array(silhouette_position), 'size': np.array(block_sizes[level_num][i])} silhouettes_creation_dicts.append(copy.deepcopy(creation_dict)) self.current_stack_levels = num_of_levels self.current_blocks_mass = blocks_mass self.current_blocks_min_size = blocks_min_size self.current_max_level_width = max_level_width for i in range(len(silhouettes_creation_dicts)): self._stage.add_silhoutte_general_object(**silhouettes_creation_dicts[i]) return def _generate_random_block(self, allowed_boundaries, start_z, min_size=0.035, max_level_width=0.12): '\n This function will return a random position and size of a block\n while respecting the allowed boundaries passed\n\n :param allowed_boundaries:\n :param start_z:\n :param min_size:\n\n :return:\n ' allowed_boundaries[0][0] = max((self._stage.get_arena_bb()[0][0] + min_size), allowed_boundaries[0][0]) allowed_boundaries[1][0] = min((self._stage.get_arena_bb()[1][0] - min_size), allowed_boundaries[1][0]) allowed_boundaries[0][1] = max((self._stage.get_arena_bb()[0][1] + min_size), allowed_boundaries[0][1]) allowed_boundaries[1][1] = min((self._stage.get_arena_bb()[1][1] - min_size), allowed_boundaries[1][1]) position_x_y = 
np.random.uniform(allowed_boundaries[0][:2], allowed_boundaries[1][:2]) allowed_max_width = (min((self._stage.get_arena_bb()[1][0] - position_x_y[0]), (position_x_y[0] - self._stage.get_arena_bb()[0][0])) * 2) allowed_max_width = min(allowed_max_width, max_level_width) size = np.random.uniform(min_size, [allowed_max_width, min_size, min_size]) position_z = (start_z + (size[(- 1)] / 2)) position = np.array([position_x_y[0], position_x_y[1], position_z]) return (size, position) def _generate_random_target(self, num_of_levels=4, min_size=0.035, max_level_width=0.12): '\n This function generated a sampled target, should be modified to new\n sample goal\n\n :param levels_num:\n :param min_size:\n\n :return:\n ' level_blocks = [] current_boundaries = np.array([self._stage.get_arena_bb()[0][:2], self._stage.get_arena_bb()[1][:2]]) start_z = 0 (size, position) = self._generate_random_block(allowed_boundaries=current_boundaries, start_z=start_z, min_size=min_size, max_level_width=max_level_width) level_blocks.append([[size[0], position[0]]]) for level_index in range(1, num_of_levels): start_z = (start_z + size[(- 1)]) new_allowed_boundaries = [(position[:2] - (size[:2] / 2)), (position[:2] + (size[:2] / 2))] current_boundaries = [np.maximum(current_boundaries[0], new_allowed_boundaries[0]), np.minimum(current_boundaries[1], new_allowed_boundaries[1])] (size, position) = self._generate_random_block(allowed_boundaries=current_boundaries, start_z=start_z, min_size=min_size, max_level_width=max_level_width) level_blocks.append([[size[0], position[0]]]) chosen_y = position[1] new_level_blocks = self._generate_blocks_to_use(level_blocks, min_size=min_size) new_level_blocks = self._generate_blocks_to_use(new_level_blocks, min_size=min_size) new_level_blocks = self._generate_blocks_to_use(new_level_blocks, min_size=min_size) (block_sizes, positions) = self._get_block_sizes(new_level_blocks, min_size) return (block_sizes, positions, chosen_y) def _generate_blocks_to_use(self, level_blocks, min_size): '\n\n :param level_blocks:\n :param min_size:\n\n :return:\n ' new_level_blocks = list(level_blocks) for i in range(len(level_blocks)): current_level_blocks = level_blocks[i] for j in range(len(current_level_blocks)): if (current_level_blocks[j][0] > (min_size * 2)): block_1_center = (current_level_blocks[j][1] + (current_level_blocks[j][0] / 4)) block_1_size = (current_level_blocks[j][0] / 2) block_2_size = (current_level_blocks[j][0] - block_1_size) block_2_center = ((block_1_center - (block_1_size / 2)) - (block_2_size / 2)) stability_levels_check = copy.deepcopy(new_level_blocks[:(i + 1)]) stability_levels_check[i][j] = [block_1_size, block_1_center] stability_levels_check[i].append([block_2_size, block_2_center]) if self._is_stable_structure(stability_levels_check): new_level_blocks[:(i + 1)] = stability_levels_check return new_level_blocks def _is_stable_structure(self, level_blocks): '\n\n :param level_blocks:\n\n :return:\n ' current_min = (- 0.5) current_max = 0.5 for i in range(len(level_blocks)): current_level_blocks = level_blocks[i] new_min = 0.5 new_max = (- 0.5) for block in current_level_blocks: new_min = min(new_min, (block[1] - (block[0] / 2))) new_max = max(new_max, (block[1] + (block[0] / 2))) if ((block[1] > current_max) or (block[1] < current_min)): return False current_min = new_min current_max = new_max return True def _get_block_sizes(self, level_blocks, min_size): '\n\n :param level_blocks:\n :param min_size:\n\n :return:\n ' block_sizes = [] positions = [] for i in 
range(len(level_blocks)): block_sizes.append([]) positions.append([]) current_level_blocks = level_blocks[i] for block in current_level_blocks: block_sizes[(- 1)].append([block[0], min_size, min_size]) positions[(- 1)].append(block[1]) return (block_sizes, positions)
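# Illustrative sketch (an assumption, not library code): regenerating the
# stacked-blocks goal shape through the task-generator variables handled by
# ``apply_task_generator_interventions`` above.
def _example_stacked_blocks_intervention():
    from causal_world.task_generators import generate_task
    from causal_world.envs import CausalWorld
    task = generate_task(task_generator_id='stacked_blocks',
                         num_of_levels=3,
                         blocks_min_size=0.035)
    env = CausalWorld(task=task, enable_visualization=False)
    env.reset()
    # changing the levels or widths triggers ``_create_new_challenge``
    # internally and rebuilds the stage.
    env.do_intervention({'stack_levels': 2, 'max_level_width': 0.1})
    env.close()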
class Stacking2TaskGenerator(BaseTask): def __init__(self, variables_space='space_a_b', fractional_reward_weight=1, dense_reward_weights=np.array([750, 250, 250, 125, 0.005]), activate_sparse_reward=False, tool_block_mass=0.02, tool_block_size=0.065, joint_positions=None, tool_block_1_position=np.array([0, 0, 0.0325]), tool_block_1_orientation=np.array([0, 0, 0, 1]), tool_block_2_position=np.array([0.01, 0.08, 0.0325]), tool_block_2_orientation=np.array([0, 0, 0, 1]), goal_position=np.array([(- 0.06), (- 0.06), 0.0325]), goal_orientation=np.array([0, 0, 0, 1])): "\n This task generates a task for stacking 2 blocks above each other.\n Note: it belongs to the same shape family of towers, we only provide a\n specific task generator for it to be able to do reward engineering\n and to reproduce the baselines for it in an easy way.\n\n :param variables_space: (str) space to be used either 'space_a' or\n 'space_b' or 'space_a_b'\n :param fractional_reward_weight: (float) weight multiplied by the\n fractional volumetric\n overlap in the reward.\n :param dense_reward_weights: (list float) specifies the reward weights\n for all the other reward\n terms calculated in the\n calculate_dense_rewards\n function.\n :param activate_sparse_reward: (bool) specified if you want to\n sparsify the reward by having\n +1 or 0 if the volumetric\n fraction overlap more than 90%.\n :param tool_block_mass: (float) specifies the blocks mass.\n :param joint_positions: (nd.array) specifies the joints position to start\n the episode with. None if the default\n to be used.\n :param tool_block_1_position: (nd.array) specifies the cartesian position\n of the first tool block, x, y, z.\n :param tool_block_1_orientation: (nd.array) specifies the euler orientation\n of the first tool block, yaw, roll, pitch.\n :param tool_block_2_position: (nd.array) specifies the cartesian position\n of the second tool block, x, y, z.\n :param tool_block_2_orientation: (nd.array) specifies the euler orientation\n of the second tool block, yaw, roll, pitch.\n :param goal_position: (nd.array) specifies the cartesian position\n of the goal stack, x, y, z.\n :param goal_orientation: (nd.array) specifies the euler orientation\n of the goal stack, yaw, roll, pitch.\n " super().__init__(task_name='stacking2', variables_space=variables_space, fractional_reward_weight=fractional_reward_weight, dense_reward_weights=dense_reward_weights, activate_sparse_reward=activate_sparse_reward) self._task_robot_observation_keys = ['time_left_for_task', 'joint_positions', 'joint_velocities', 'end_effector_positions'] self._task_params['tool_block_mass'] = tool_block_mass self._task_params['joint_positions'] = joint_positions self._task_params['tool_block_1_position'] = tool_block_1_position self._task_params['tool_block_1_orientation'] = tool_block_1_orientation self._task_params['tool_block_2_position'] = tool_block_2_position self._task_params['tool_block_2_orientation'] = tool_block_2_orientation self._task_params['goal_position'] = goal_position self._task_params['goal_orientation'] = goal_orientation self._task_params['tool_block_size'] = tool_block_size self.previous_tool_block_1_position = None self.previous_tool_block_2_position = None self.previous_end_effector_positions = None self.previous_joint_velocities = None def get_description(self): '\n\n :return: (str) returns the description of the task itself.\n ' return 'Task where the goal shape is a tower of two blocks' def _set_up_stage_arena(self): '\n\n :return:\n ' creation_dict = {'name': 
'tool_block_1', 'shape': 'cube', 'initial_position': self._task_params['tool_block_1_position'], 'initial_orientation': self._task_params['tool_block_1_orientation'], 'mass': self._task_params['tool_block_mass']} self._stage.add_rigid_general_object(**creation_dict) creation_dict = {'name': 'tool_block_2', 'shape': 'cube', 'initial_position': self._task_params['tool_block_2_position'], 'initial_orientation': self._task_params['tool_block_2_orientation'], 'mass': self._task_params['tool_block_mass']} self._stage.add_rigid_general_object(**creation_dict) creation_dict = {'name': 'goal_block_1', 'shape': 'cube', 'position': self._task_params['goal_position'], 'orientation': self._task_params['goal_orientation']} self._stage.add_silhoutte_general_object(**creation_dict) goal_block_2_position = copy.deepcopy(np.array(self._task_params['goal_position'])) goal_block_2_position[2] += self._task_params['tool_block_size'] creation_dict = {'name': 'goal_block_2', 'shape': 'cube', 'position': goal_block_2_position, 'orientation': self._task_params['goal_orientation']} self._stage.add_silhoutte_general_object(**creation_dict) self._task_stage_observation_keys = ['tool_block_1_type', 'tool_block_1_size', 'tool_block_1_cartesian_position', 'tool_block_1_orientation', 'tool_block_1_linear_velocity', 'tool_block_1_angular_velocity', 'tool_block_2_type', 'tool_block_2_size', 'tool_block_2_cartesian_position', 'tool_block_2_orientation', 'tool_block_2_linear_velocity', 'tool_block_2_angular_velocity', 'goal_block_1_type', 'goal_block_1_size', 'goal_block_1_cartesian_position', 'goal_block_1_orientation', 'goal_block_2_type', 'goal_block_2_size', 'goal_block_2_cartesian_position', 'goal_block_2_orientation'] return def _calculate_dense_rewards(self, desired_goal, achieved_goal): '\n\n :param desired_goal:\n :param achieved_goal:\n\n :return:\n ' rewards = ([0.0] * 5) block_position_1 = self._stage.get_object_state('tool_block_1', 'cartesian_position') block_position_2 = self._stage.get_object_state('tool_block_2', 'cartesian_position') goal_block_1_position = self._stage.get_object_state('goal_block_1', 'cartesian_position') goal_block_2_position = self._stage.get_object_state('goal_block_2', 'cartesian_position') joint_velocities = self._robot.get_latest_full_state()['velocities'] end_effector_positions = self._robot.get_latest_full_state()['end_effector_positions'] end_effector_positions = end_effector_positions.reshape((- 1), 3) lower_block_positioned = False if (np.linalg.norm((block_position_1 - goal_block_1_position)) < 0.02): lower_block_positioned = True if (not lower_block_positioned): current_distance_from_block = np.linalg.norm((end_effector_positions - block_position_1)) previous_distance_from_block = np.linalg.norm((self.previous_end_effector_positions - self.previous_tool_block_1_position)) rewards[0] = (previous_distance_from_block - current_distance_from_block) previous_dist_to_goal = np.linalg.norm((goal_block_1_position - self.previous_tool_block_1_position)) current_dist_to_goal = np.linalg.norm((goal_block_1_position - block_position_1)) rewards[1] = (previous_dist_to_goal - current_dist_to_goal) else: current_distance_from_block = np.linalg.norm((end_effector_positions - block_position_2)) previous_distance_from_block = np.linalg.norm((self.previous_end_effector_positions - self.previous_tool_block_2_position)) rewards[0] = (previous_distance_from_block - current_distance_from_block) block_2_above_block_1 = False if (np.linalg.norm((block_position_1[:2] - block_position_2[:2])) < 0.005): 
block_2_above_block_1 = True previous_block_to_goal_height = abs((self.previous_tool_block_2_position[2] - goal_block_2_position[2])) current_block_to_goal_height = abs((block_position_2[2] - goal_block_2_position[2])) if (not block_2_above_block_1): rewards[2] = (previous_block_to_goal_height - current_block_to_goal_height) else: rewards[2] = 0.0 if (block_position_2[2] > goal_block_2_position[2]): previous_block_1_to_block_2 = np.linalg.norm((self.previous_tool_block_1_position[:2] - self.previous_tool_block_2_position[:2])) current_block_1_to_block_2 = np.linalg.norm((block_position_1[:2] - block_position_2[:2])) rewards[3] = (previous_block_1_to_block_2 - current_block_1_to_block_2) else: rewards[3] = 0.0 rewards[4] = (- np.linalg.norm((joint_velocities - self.previous_joint_velocities))) update_task_info = {'current_end_effector_positions': end_effector_positions, 'current_tool_block_1_position': block_position_1, 'current_tool_block_2_position': block_position_2, 'current_velocity': joint_velocities} return (rewards, update_task_info) def _update_task_state(self, update_task_info): '\n\n :param update_task_info:\n\n :return:\n ' self.previous_end_effector_positions = update_task_info['current_end_effector_positions'] self.previous_tool_block_1_position = update_task_info['current_tool_block_1_position'] self.previous_tool_block_2_position = update_task_info['current_tool_block_2_position'] self.previous_joint_velocities = update_task_info['current_velocity'] return def _set_task_state(self): '\n\n :return:\n ' self.previous_end_effector_positions = self._robot.get_latest_full_state()['end_effector_positions'] self.previous_end_effector_positions = self.previous_end_effector_positions.reshape((- 1), 3) self.previous_tool_block_1_position = self._stage.get_object_state('tool_block_1', 'cartesian_position') self.previous_tool_block_2_position = self._stage.get_object_state('tool_block_2', 'cartesian_position') self.previous_joint_velocities = self._robot.get_latest_full_state()['velocities'] return def _set_intervention_space_a(self): '\n\n :return:\n ' super(Stacking2TaskGenerator, self)._set_intervention_space_a() self._intervention_space_a['goal_tower'] = dict() self._intervention_space_a['goal_tower']['cylindrical_position'] = copy.deepcopy(self._intervention_space_a['goal_block_1']['cylindrical_position']) self._intervention_space_a['goal_tower']['cylindrical_position'][0][(- 1)] = (self._task_params['goal_position'][(- 1)] * 2.0) self._intervention_space_a['goal_tower']['cylindrical_position'][1][(- 1)] = (self._task_params['goal_position'][(- 1)] * 2.0) self._intervention_space_a['goal_tower']['euler_orientation'] = copy.deepcopy(self._intervention_space_a['goal_block_1']['euler_orientation']) for visual_object in self._stage.get_visual_objects(): del self._intervention_space_a[visual_object]['size'] del self._intervention_space_a[visual_object]['euler_orientation'] del self._intervention_space_a[visual_object]['cylindrical_position'] for rigid_object in self._stage.get_rigid_objects(): del self._intervention_space_a[rigid_object]['size'] return def _set_intervention_space_b(self): '\n\n :return:\n ' super(Stacking2TaskGenerator, self)._set_intervention_space_b() self._intervention_space_b['goal_tower'] = dict() self._intervention_space_b['goal_tower']['cylindrical_position'] = copy.deepcopy(self._intervention_space_b['goal_block_1']['cylindrical_position']) self._intervention_space_b['goal_tower']['cylindrical_position'][0][(- 1)] = (self._task_params['goal_position'][(- 1)] * 
2.0) self._intervention_space_b['goal_tower']['cylindrical_position'][1][(- 1)] = (self._task_params['goal_position'][(- 1)] * 2.0) self._intervention_space_b['goal_tower']['euler_orientation'] = copy.deepcopy(self._intervention_space_b['goal_block_1']['euler_orientation']) for visual_object in self._stage.get_visual_objects(): del self._intervention_space_b[visual_object]['size'] del self._intervention_space_b[visual_object]['euler_orientation'] del self._intervention_space_b[visual_object]['cylindrical_position'] for rigid_object in self._stage.get_rigid_objects(): del self._intervention_space_b[rigid_object]['size'] return def get_task_generator_variables_values(self): '\n\n :return: (dict) specifying the variables belonging to the task itself.\n ' return {'goal_tower': {'cylindrical_position': self._stage.get_object_state('goal_block_1', 'cylindrical_position'), 'euler_orientation': self._stage.get_object_state('goal_block_1', 'euler_orientation')}} def apply_task_generator_interventions(self, interventions_dict): '\n\n :param interventions_dict: (dict) variables and their corresponding\n intervention value.\n\n :return: (tuple) first position if the intervention was successful or\n not, and second position indicates if\n observation_space needs to be reset.\n ' if (len(interventions_dict) == 0): return (True, False) reset_observation_space = False if ('goal_tower' in interventions_dict): new_interventions_dict = dict() new_interventions_dict['goal_block_1'] = dict() new_interventions_dict['goal_block_2'] = dict() if ('cylindrical_position' in interventions_dict['goal_tower']): new_interventions_dict['goal_block_1']['cylindrical_position'] = copy.deepcopy(interventions_dict['goal_tower']['cylindrical_position']) new_interventions_dict['goal_block_2']['cylindrical_position'] = copy.deepcopy(interventions_dict['goal_tower']['cylindrical_position']) new_interventions_dict['goal_block_1']['cylindrical_position'][(- 1)] = (interventions_dict['goal_tower']['cylindrical_position'][(- 1)] / 2.0) new_interventions_dict['goal_block_2']['cylindrical_position'][(- 1)] = (interventions_dict['goal_tower']['cylindrical_position'][(- 1)] * (3 / 2.0)) elif ('euler_orientation' in interventions_dict['goal_tower']): new_interventions_dict['goal_block_1']['euler_orientation'] = copy.deepcopy(interventions_dict['goal_tower']['euler_orientation']) new_interventions_dict['goal_block_2']['euler_orientation'] = copy.deepcopy(interventions_dict['goal_tower']['euler_orientation']) else: raise Exception('this task generator variable is not yet defined') self._stage.apply_interventions(new_interventions_dict) else: raise Exception('this task generator variable is not yet defined') return (True, reset_observation_space) def sample_new_goal(self, level=None): '\n Used to sample new goal from the corresponding shape families.\n\n :param level: (int) specifying the level - not used for now.\n\n :return: (dict) the corresponding interventions dict that could then\n be applied to get a new sampled goal.\n ' intervention_space = self.get_variable_space_used() intervention_dict = dict() intervention_dict['goal_tower'] = dict() intervention_dict['goal_tower']['cylindrical_position'] = np.random.uniform(intervention_space['goal_tower']['cylindrical_position'][0], intervention_space['goal_tower']['cylindrical_position'][1]) intervention_dict['goal_tower']['euler_orientation'] = np.random.uniform(intervention_space['goal_tower']['euler_orientation'][0], intervention_space['goal_tower']['euler_orientation'][1]) return 
intervention_dict
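# Illustrative sketch (an assumption, not library code): sampling and applying
# a new tower pose for the stacking2 task via the ``goal_tower`` variable
# handled above; ``_example_stacking2_new_goal`` is a hypothetical helper.
def _example_stacking2_new_goal():
    from causal_world.task_generators import generate_task
    from causal_world.envs import CausalWorld
    task = generate_task(task_generator_id='stacking2')
    env = CausalWorld(task=task, enable_visualization=False)
    env.reset()
    # ``goal_tower`` is split internally into per-block interventions on
    # goal_block_1 and goal_block_2, stacked at half and one-and-a-half
    # block heights.
    goal_intervention_dict = env.sample_new_goal()
    success_signal, obs = env.do_intervention(goal_intervention_dict)
    env.close()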
def generate_task(task_generator_id='reaching', **kwargs):
    """

    :param task_generator_id: picking, pushing, reaching, pick_and_place,
                              stacking2, stacked_blocks, towers, general or
                              creative_stacked_blocks.
    :param kwargs: args that are specific to the task generator

    :return: the task to be used in the CausalWorld
    """
    if task_generator_id == 'picking':
        task = PickingTaskGenerator(**kwargs)
    elif task_generator_id == 'pushing':
        task = PushingTaskGenerator(**kwargs)
    elif task_generator_id == 'reaching':
        task = ReachingTaskGenerator(**kwargs)
    elif task_generator_id == 'pick_and_place':
        task = PickAndPlaceTaskGenerator(**kwargs)
    elif task_generator_id == 'stacking2':
        task = Stacking2TaskGenerator(**kwargs)
    elif task_generator_id == 'stacked_blocks':
        task = StackedBlocksGeneratorTask(**kwargs)
    elif task_generator_id == 'towers':
        task = TowersGeneratorTask(**kwargs)
    elif task_generator_id == 'general':
        task = GeneralGeneratorTask(**kwargs)
    elif task_generator_id == 'creative_stacked_blocks':
        task = CreativeStackedBlocksGeneratorTask(**kwargs)
    else:
        raise Exception('No valid task_generator_id')
    return task
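# Minimal usage sketch for the factory above; the keyword arguments shown are
# forwarded verbatim to the chosen generator's constructor.
def _example_generate_task():
    task = generate_task(task_generator_id='stacking2',
                         variables_space='space_a',
                         tool_block_mass=0.02)
    print(task.get_description())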
class TowersGeneratorTask(BaseTask): def __init__(self, variables_space='space_a_b', fractional_reward_weight=1, dense_reward_weights=np.array([]), activate_sparse_reward=False, tool_block_mass=0.08, number_of_blocks_in_tower=np.array([1, 1, 5]), tower_dims=np.array([0.035, 0.035, 0.175]), tower_center=np.array([0, 0])): "\n This task generator will generate a task for stacking blocks into\n towers.\n :param variables_space: (str) space to be used either 'space_a' or\n 'space_b' or 'space_a_b'\n :param fractional_reward_weight: (float) weight multiplied by the\n fractional volumetric\n overlap in the reward.\n :param dense_reward_weights: (list float) specifies the reward weights\n for all the other reward\n terms calculated in the\n calculate_dense_rewards\n function.\n :param activate_sparse_reward: (bool) specified if you want to\n sparsify the reward by having\n +1 or 0 if the volumetric\n fraction overlap more than 90%.\n :param tool_block_mass: (float) specifies the blocks mass.\n :param number_of_blocks_in_tower: (nd.array) specifies the number of blocks\n in the tower in each\n direction x,y,z.\n :param tower_dims: (nd.array) (nd.array) specifies the dimension of\n the tower in each\n direction x,y,z.\n :param tower_center: (nd.array) specifies the cartesian position\n of the center of the tower,\n x, y, z.\n " super().__init__(task_name='towers', variables_space=variables_space, fractional_reward_weight=fractional_reward_weight, dense_reward_weights=dense_reward_weights, activate_sparse_reward=activate_sparse_reward) self._task_robot_observation_keys = ['time_left_for_task', 'joint_positions', 'joint_velocities', 'end_effector_positions'] self._task_params['tower_dims'] = tower_dims self._task_params['tower_center'] = tower_center self._task_params['tool_block_mass'] = tool_block_mass self._task_params['number_of_blocks_in_tower'] = number_of_blocks_in_tower self.current_tower_dims = np.array(self._task_params['tower_dims']) self.current_number_of_blocks_in_tower = np.array(self._task_params['number_of_blocks_in_tower']) self.current_tool_block_mass = float(self._task_params['tool_block_mass']) self.current_tower_center = np.array(self._task_params['tower_center']) def get_description(self): '\n\n :return: (str) returns the description of the task itself.\n ' return 'Task where the goal is to stack arbitrary number of towers side by side' def _set_up_stage_arena(self): '\n\n :return:\n ' self._set_up_cuboid(self.current_tower_dims, self.current_number_of_blocks_in_tower, self.current_tower_center) return def _set_up_cuboid(self, tower_dims, number_of_blocks_in_tower, center_position): '\n\n :param tower_dims:\n :param number_of_blocks_in_tower:\n :param center_position:\n\n :return:\n ' self._stage.remove_everything() joint_positions = self._robot.get_joint_positions_raised() self._robot.set_full_state(np.append(joint_positions, np.zeros(9))) self._task_stage_observation_keys = [] block_size = (tower_dims / number_of_blocks_in_tower) curr_height = (0 - (block_size[(- 1)] / 2)) rigid_block_position = np.array([(- 0.12), (- 0.12), (0 + (block_size[(- 1)] / 2))]) silhouettes_creation_dicts = [] for level in range(number_of_blocks_in_tower[(- 1)]): curr_height += block_size[(- 1)] curr_y = ((center_position[1] - (tower_dims[1] / 2)) - (block_size[1] / 2)) for col in range(number_of_blocks_in_tower[1]): curr_y += block_size[1] curr_x = ((center_position[0] - (tower_dims[0] / 2)) - (block_size[0] / 2)) for row in range(number_of_blocks_in_tower[0]): curr_x += block_size[0] creation_dict 
= {'name': (((((('tool_' + 'level_') + str(level)) + '_col_') + str(col)) + '_row_') + str(row)), 'shape': 'cube', 'initial_position': np.copy(rigid_block_position), 'initial_orientation': [0, 0, 0, 1], 'mass': self.current_tool_block_mass, 'size': block_size} self._stage.add_rigid_general_object(**creation_dict) creation_dict = {'name': (((((('goal_' + 'level_') + str(level)) + '_col_') + str(col)) + '_row_') + str(row)), 'shape': 'cube', 'position': [curr_x, curr_y, curr_height], 'orientation': [0, 0, 0, 1], 'size': block_size} silhouettes_creation_dicts.append(copy.deepcopy(creation_dict)) self._task_stage_observation_keys.append(((((((('tool_' + 'level_') + str(level)) + '_col_') + str(col)) + '_row_') + str(row)) + '_type')) self._task_stage_observation_keys.append(((((((('tool_' + 'level_') + str(level)) + '_col_') + str(col)) + '_row_') + str(row)) + '_size')) self._task_stage_observation_keys.append(((((((('tool_' + 'level_') + str(level)) + '_col_') + str(col)) + '_row_') + str(row)) + '_cartesian_position')) self._task_stage_observation_keys.append(((((((('tool_' + 'level_') + str(level)) + '_col_') + str(col)) + '_row_') + str(row)) + '_orientation')) self._task_stage_observation_keys.append(((((((('tool_' + 'level_') + str(level)) + '_col_') + str(col)) + '_row_') + str(row)) + '_linear_velocity')) self._task_stage_observation_keys.append(((((((('tool_' + 'level_') + str(level)) + '_col_') + str(col)) + '_row_') + str(row)) + '_angular_velocity')) self._task_stage_observation_keys.append(((((((('goal_' + 'level_') + str(level)) + '_col_') + str(col)) + '_row_') + str(row)) + '_type')) self._task_stage_observation_keys.append(((((((('goal_' + 'level_') + str(level)) + '_col_') + str(col)) + '_row_') + str(row)) + '_size')) self._task_stage_observation_keys.append(((((((('goal_' + 'level_') + str(level)) + '_col_') + str(col)) + '_row_') + str(row)) + '_cartesian_position')) self._task_stage_observation_keys.append(((((((('goal_' + 'level_') + str(level)) + '_col_') + str(col)) + '_row_') + str(row)) + '_orientation')) rigid_block_position[:2] += block_size[:2] rigid_block_position[:2] += 0.005 if np.any((rigid_block_position[:2] > np.array([0.12, 0.12]))): rigid_block_position[0] = (- 0.12) rigid_block_position[1] = (- 0.12) rigid_block_position[2] = (rigid_block_position[2] + (block_size[(- 1)] / 2)) for i in range(len(silhouettes_creation_dicts)): self._stage.add_silhoutte_general_object(**silhouettes_creation_dicts[i]) return def _set_intervention_space_a(self): '\n\n :return:\n ' super(TowersGeneratorTask, self)._set_intervention_space_a() for visual_object in self._stage.get_visual_objects(): del self._intervention_space_a[visual_object] for rigid_object in self._stage.get_rigid_objects(): del self._intervention_space_a[rigid_object]['size'] self._intervention_space_a['number_of_blocks_in_tower'] = np.array([[1, 1, 1], [4, 4, 4]]) self._intervention_space_a['blocks_mass'] = np.array([0.02, 0.06]) self._intervention_space_a['tower_dims'] = np.array([[0.08, 0.08, 0.08], [0.12, 0.12, 0.12]]) self._intervention_space_a['tower_center'] = np.array([[(- 0.1), (- 0.1)], [0.05, 0.05]]) return def _set_intervention_space_b(self): '\n\n :return:\n ' super(TowersGeneratorTask, self)._set_intervention_space_b() for visual_object in self._stage.get_visual_objects(): del self._intervention_space_b[visual_object] for rigid_object in self._stage.get_rigid_objects(): del self._intervention_space_b[rigid_object]['size'] self._intervention_space_b['number_of_blocks_in_tower'] = np.array([[4, 
4, 4], [6, 6, 6]]) self._intervention_space_b['blocks_mass'] = np.array([0.06, 0.08]) self._intervention_space_b['tower_dims'] = np.array([[0.12, 0.12, 0.12], [0.2, 0.2, 0.2]]) self._intervention_space_b['tower_center'] = np.array([[0.05, 0.05], [0.1, 0.1]]) return def sample_new_goal(self, level=None): '\n Used to sample new goal from the corresponding shape families.\n\n :param level: (int) specifying the level - not used for now.\n\n :return: (dict) the corresponding interventions dict that could then\n be applied to get a new sampled goal.\n ' intervention_dict = dict() if (self._task_params['variables_space'] == 'space_a'): intervention_space = self._intervention_space_a elif (self._task_params['variables_space'] == 'space_b'): intervention_space = self._intervention_space_b elif (self._task_params['variables_space'] == 'space_a_b'): intervention_space = self._intervention_space_a_b intervention_dict['number_of_blocks_in_tower'] = [np.random.randint(intervention_space['number_of_blocks_in_tower'][0][i], intervention_space['number_of_blocks_in_tower'][1][i]) for i in range(3)] intervention_dict['blocks_mass'] = np.random.uniform(intervention_space['blocks_mass'][0], intervention_space['blocks_mass'][1]) intervention_dict['tower_dims'] = np.random.uniform(intervention_space['tower_dims'][0], intervention_space['tower_dims'][1]) intervention_dict['tower_center'] = np.random.uniform(intervention_space['tower_center'][0], intervention_space['tower_center'][1]) return intervention_dict def get_task_generator_variables_values(self): '\n\n :return: (dict) specifying the variables belonging to the task itself.\n ' return {'tower_dims': self.current_tower_dims, 'blocks_mass': self.current_tool_block_mass, 'number_of_blocks_in_tower': self.current_number_of_blocks_in_tower, 'tower_center': self.current_tower_center} def apply_task_generator_interventions(self, interventions_dict): '\n\n :param interventions_dict: (dict) variables and their corresponding\n intervention value.\n\n :return: (tuple) first position if the intervention was successful or\n not, and second position indicates if\n observation_space needs to be reset.\n ' if (len(interventions_dict) == 0): return (True, False) reset_observation_space = True if ('tower_dims' in interventions_dict): self.current_tower_dims = interventions_dict['tower_dims'] if ('number_of_blocks_in_tower' in interventions_dict): self.current_number_of_blocks_in_tower = interventions_dict['number_of_blocks_in_tower'] if ('blocks_mass' in interventions_dict): self.current_tool_block_mass = interventions_dict['blocks_mass'] if ('tower_center' in interventions_dict): self.current_tower_center = interventions_dict['tower_center'] if (('tower_dims' in interventions_dict) or ('number_of_blocks_in_tower' in interventions_dict) or ('tower_center' in interventions_dict) or ('tower_orientation' in interventions_dict)): self._set_up_cuboid(tower_dims=self.current_tower_dims, number_of_blocks_in_tower=self.current_number_of_blocks_in_tower, center_position=self.current_tower_center) elif ('blocks_mass' in interventions_dict): new_interventions_dict = dict() for rigid_object in self._stage.get_rigid_objects(): if self._stage.get_rigid_objects()[rigid_object].is_not_fixed(): new_interventions_dict[rigid_object] = dict() new_interventions_dict[rigid_object]['mass'] = self.current_tool_block_mass self._stage.apply_interventions(new_interventions_dict) else: raise Exception('this task generator variable is not yet defined') self._set_intervention_space_b() 
self._set_intervention_space_a() self._set_intervention_space_a_b() self._stage.finalize_stage() return (True, reset_observation_space)
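# Illustrative sketch (an assumption, not library code): resizing the towers
# goal shape through the task-generator variables handled above.
def _example_towers_intervention():
    import numpy as np
    from causal_world.task_generators import generate_task
    from causal_world.envs import CausalWorld
    task = generate_task(task_generator_id='towers')
    env = CausalWorld(task=task, enable_visualization=False)
    env.reset()
    # changing the dims or block counts rebuilds the whole cuboid via
    # ``_set_up_cuboid`` above.
    env.do_intervention({'tower_dims': np.array([0.1, 0.1, 0.1]),
                         'number_of_blocks_in_tower': np.array([2, 2, 3])})
    env.close()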
def save_config_file(section_names, config_dicts, file_path):
    """

    :param section_names: (list) section name per config dict.
    :param config_dicts: (list) dict of option/value pairs per section.
    :param file_path: (str) path of the config file to write.

    :return:
    """
    config = ConfigParser()
    for i in range(len(section_names)):
        section_name = section_names[i]
        config.add_section(section_name)
        # iterate over the key/value pairs of the section's dict;
        # ConfigParser only accepts string values, so everything is
        # stringified on the way in.
        for key, value in config_dicts[i].items():
            config.set(section_name, key, str(value))
    with open(file_path, 'w') as f:
        config.write(f)
    return
def read_config_file(file_path):
    """

    :param file_path: (str) path of the config file to read.

    :return: (tuple) list of section names and list of the corresponding
             config dicts, with every value parsed back as a float.
    """
    section_names = []
    config_dicts = []
    config = ConfigParser()
    config.read(file_path)
    for section in config.sections():
        section_names.append(section)
        config_dicts.append(dict())
        for option in config.options(section):
            config_dicts[-1][option] = float(config.get(section, option))
    return section_names, config_dicts
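# Round-trip sketch for the two config helpers above. ``save_config_file``
# stringifies every value and ``read_config_file`` parses everything back as
# a float, so only numeric settings survive unchanged; the path below is
# hypothetical.
def _example_config_round_trip():
    save_config_file(['simulation'],
                     [{'skip_frame': 10, 'seed': 0}],
                     '/tmp/example_config.ini')
    section_names, config_dicts = read_config_file('/tmp/example_config.ini')
    assert config_dicts[0]['skip_frame'] == 10.0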
def load_world(tracker_relative_path, enable_visualization=False):
    """
    Loads a world again at the same state as when it was saved.

    :param tracker_relative_path: (str) path specifying where the tracker
                                  was saved.
    :param enable_visualization: (bool) True if enabling visualization is
                                 needed.

    :return: (causal_world.CausalWorld) loaded CausalWorld env instance.
    """
    tracker = Tracker(
        file_path=os.path.join(tracker_relative_path, 'tracker'))
    task_stats = tracker.task_stats_log[0]
    wrapper_dict = copy.deepcopy(tracker.world_params['wrappers'])
    del tracker.world_params['wrappers']
    if 'task_name' in task_stats.task_params:
        del task_stats.task_params['task_name']
    task = generate_task(task_generator_id=task_stats.task_name,
                         **task_stats.task_params)
    env = CausalWorld(task,
                      **tracker.world_params,
                      enable_visualization=enable_visualization)
    # re-apply the wrappers in the order they were logged.
    for wrapper in wrapper_dict:
        if wrapper == 'object_selector':
            env = ObjectSelectorWrapper(env, **wrapper_dict[wrapper])
        elif wrapper == 'delta_action':
            env = DeltaActionEnvWrapper(env, **wrapper_dict[wrapper])
        elif wrapper == 'moving_average_action':
            env = MovingAverageActionEnvWrapper(env, **wrapper_dict[wrapper])
        elif wrapper == 'her_environment':
            env = HERGoalEnvWrapper(env, **wrapper_dict[wrapper])
        elif wrapper == 'curriculum_environment':
            intervention_actors = initialize_intervention_actors(
                wrapper_dict[wrapper]['actor_params'])
            env = CurriculumWrapper(
                env,
                intervention_actors=intervention_actors,
                actives=wrapper_dict[wrapper]['actives'])
        else:
            raise Exception('wrapper is not known to be loaded')
    return env
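# Illustrative save/load cycle (an assumption, not library code): this
# presumes ``CausalWorld.save_world`` dumps the tracker into the given
# directory, which is the counterpart ``load_world`` above expects; the path
# below is hypothetical.
def _example_save_and_load_world():
    from causal_world.task_generators import generate_task
    from causal_world.envs import CausalWorld
    env = CausalWorld(task=generate_task(task_generator_id='reaching'))
    env.reset()
    env.save_world('/tmp/saved_world')
    env.close()
    env = load_world('/tmp/saved_world', enable_visualization=False)
    env.reset()
    env.close()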
def scale(x, space):
    """
    Linearly maps x from the bounds of the given space to [-1, 1].

    :param x: (nd.array) value within the bounds of the space.
    :param space: (gym.spaces.Box) space specifying the low/high bounds.

    :return: (nd.array) the scaled value in [-1, 1].
    """
    return 2.0 * (x - space.low) / (space.high - space.low) - 1.0
def unscale(y, space):
    """
    Inverse of ``scale``: maps y from [-1, 1] back to the space bounds.

    :param y: (nd.array) value in [-1, 1].
    :param space: (gym.spaces.Box) space specifying the low/high bounds.

    :return: (nd.array) the value mapped back into the space bounds.
    """
    return space.low + (y + 1.0) / 2.0 * (space.high - space.low)
def combine_spaces(space_1, space_2):
    """
    Concatenates two box spaces into a single box space.

    :param space_1: (gym.spaces.Box) first space.
    :param space_2: (gym.spaces.Box) second space.

    :return: (gym.spaces.Box) the concatenated space.
    """
    lower_bound = np.concatenate((space_1.low, space_2.low))
    upper_bound = np.concatenate((space_1.high, space_2.high))
    return spaces.Box(low=lower_bound, high=upper_bound, dtype=np.float64)
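# Sketch demonstrating the three space utilities above: ``unscale`` inverts
# ``scale`` exactly, and ``combine_spaces`` concatenates the bounds.
def _example_space_utils():
    box_1 = spaces.Box(low=np.zeros(3), high=np.ones(3), dtype=np.float64)
    box_2 = spaces.Box(low=-np.ones(2), high=np.ones(2), dtype=np.float64)
    x = box_1.sample()
    assert np.allclose(unscale(scale(x, box_1), box_1), x)
    combined = combine_spaces(box_1, box_2)
    assert combined.shape == (5,)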
def initialize_intervention_actors(actors_params):
    """
    Instantiates the intervention actors specified in the params dict.

    :param actors_params: (dict) mapping from actor name to its kwargs.

    :return: (list) the initialized intervention actor policies.
    """
    intervention_actors_list = []
    for actor_param in actors_params:
        if actor_param == 'random_actor':
            intervention_actors_list.append(
                RandomInterventionActorPolicy(**actors_params[actor_param]))
        elif actor_param == 'goal_actor':
            intervention_actors_list.append(
                GoalInterventionActorPolicy(**actors_params[actor_param]))
        elif actor_param == 'joints_actor':
            intervention_actors_list.append(
                JointsInterventionActorPolicy(**actors_params[actor_param]))
        elif actor_param == 'physical_properties_actor':
            intervention_actors_list.append(
                PhysicalPropertiesInterventionActorPolicy(
                    **actors_params[actor_param]))
        elif actor_param == 'rigid_pose_actor':
            intervention_actors_list.append(
                RigidPoseInterventionActorPolicy(
                    **actors_params[actor_param]))
        elif actor_param == 'visual_actor':
            intervention_actors_list.append(
                VisualInterventionActorPolicy(**actors_params[actor_param]))
        else:
            raise Exception("The intervention actor {} can't be loaded".format(
                actor_param))
    return intervention_actors_list
class CrossEntropyMethod(object):

    def __init__(self,
                 planning_horizon,
                 max_iterations,
                 population_size,
                 num_elite,
                 action_upper_bound,
                 action_lower_bound,
                 model,
                 epsilon=0.001,
                 alpha=0.25):
        """
        Cross entropy method optimizer to be used.

        :param planning_horizon: (int) horizon for planning.
        :param max_iterations: (int) number of iterations for CEM.
        :param population_size: (int) population size per iteration.
        :param num_elite: (int) number of elites per iteration.
        :param action_upper_bound: (nd.array) action upper bound to sample.
        :param action_lower_bound: (nd.array) action lower bound to sample.
        :param model: (causal_world.dynamics_model.SimulatorModel) model to
                      be used.
        :param epsilon: (float) threshold on the action variance below which
                        iteration stops early.
        :param alpha: (float) alpha to be used when moving the unimodal
                      gaussian.
        """
        self.max_iterations = max_iterations
        self.population_size = population_size
        self.num_elite = num_elite
        self.action_upper_bound = action_upper_bound
        self.action_lower_bound = action_lower_bound
        self.model = model
        self.epsilon = epsilon
        self.alpha = alpha
        self.planning_horizon = planning_horizon
        # mean initialized at the center of the action bounds, tiled over
        # the planning horizon.
        self.actions_mean = (self.action_upper_bound +
                             self.action_lower_bound) / 2
        self.actions_mean = np.tile(np.expand_dims(self.actions_mean, 0),
                                    [self.planning_horizon, 1])
        self.actions_variance = np.square(self.action_upper_bound -
                                          self.action_lower_bound) / 16
        self.actions_variance = np.tile(
            np.expand_dims(self.actions_variance, 0),
            [self.planning_horizon, 1])

    def get_actions(self):
        """

        :return: (nd.array) getting the best actions after performing
                 CEM optimization.
        """
        best_reward = -np.inf
        best_action = None
        iteration_index = 0
        current_actions_mean = np.array(self.actions_mean)
        current_actions_var = np.array(self.actions_variance)
        while iteration_index < self.max_iterations:
            print('cem iteration number: ', iteration_index)
            action_samples = np.random.normal(
                current_actions_mean,
                np.sqrt(current_actions_var),
                size=[self.population_size, *self.actions_mean.shape])
            rewards = self.model.evaluate_trajectories(action_samples)
            elites_indices = rewards.argsort(axis=0)[-self.num_elite:][::-1]
            best_current_reward = np.max(rewards)
            if best_current_reward > best_reward:
                best_reward = best_current_reward
                best_action = action_samples[np.argmax(rewards)]
            print("iteration's best reward is ", best_current_reward)
            elites = action_samples[elites_indices]
            new_mean = np.mean(elites, axis=0)
            new_variance = np.var(elites, axis=0)
            # move the unimodal gaussian towards the elite statistics.
            current_actions_mean = (self.alpha * current_actions_mean +
                                    (1 - self.alpha) * new_mean)
            current_actions_var = (self.alpha * current_actions_var +
                                   (1 - self.alpha) * new_variance)
            # stop early once the sampling distribution has collapsed.
            if np.max(current_actions_var) < self.epsilon:
                break
            iteration_index += 1
        return best_action
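# Toy demonstration of the optimizer above with a quadratic surrogate model;
# ``_ToyModel`` is hypothetical but matches the ``evaluate_trajectories``
# interface that ``CrossEntropyMethod`` expects (one scalar reward per
# sampled action trajectory).
class _ToyModel(object):

    def evaluate_trajectories(self, action_samples):
        # reward peaks when every action over the horizon equals 0.3.
        return -np.sum(np.square(action_samples - 0.3), axis=(1, 2))


def _example_cem():
    optimizer = CrossEntropyMethod(planning_horizon=5,
                                   max_iterations=20,
                                   population_size=128,
                                   num_elite=16,
                                   action_upper_bound=np.ones(2),
                                   action_lower_bound=-np.ones(2),
                                   model=_ToyModel())
    best_actions = optimizer.get_actions()  # shape (planning_horizon, 2)
    return best_actions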
def get_intersection(bb1, bb2):
    """
    Computes the volume of the intersection of two axis-aligned bounding
    boxes, each given as a (lower corner, upper corner) pair.

    :param bb1: (nd.array) first bounding box.
    :param bb2: (nd.array) second bounding box.

    :return: (float) the intersection volume, 0.0 if the boxes are disjoint.
    """
    x_left = max(bb1[0][0], bb2[0][0])
    x_right = min(bb1[1][0], bb2[1][0])
    y_near = max(bb1[0][1], bb2[0][1])
    y_far = min(bb1[1][1], bb2[1][1])
    z_bottom = max(bb1[0][2], bb2[0][2])
    z_top = min(bb1[1][2], bb2[1][2])
    if x_right < x_left or y_far < y_near or z_top < z_bottom:
        return 0.0
    intersection_volume = ((x_right - x_left) * (y_far - y_near) *
                           (z_top - z_bottom))
    return intersection_volume
def get_iou(bb1, bb2, area1, area2):
    """
    Computes the intersection over union of two axis-aligned bounding boxes.

    :param bb1: (nd.array) first bounding box.
    :param bb2: (nd.array) second bounding box.
    :param area1: (float) volume of the first bounding box.
    :param area2: (float) volume of the second bounding box.

    :return: (float) the intersection over union.
    """
    intersection_area = get_intersection(bb1, bb2)
    return intersection_area / float(area1 + area2 - intersection_area)
def get_bounding_box_volume(bb): '\n Computes the volume of an axis-aligned bounding box.\n\n :param bb: (tuple) bounding box as (min corner, max corner), each an (x, y, z) triple.\n :return: (float) the volume of the bounding box.\n ' width = (bb[1][0] - bb[0][0]) depth = (bb[1][1] - bb[0][1]) height = (bb[1][2] - bb[0][2]) return ((width * depth) * height)
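# Sanity check for the three bounding-box helpers above: two 2x2x2 boxes
# offset by (1, 1, 1) overlap in exactly a unit cube, so the IoU is
# 1 / (8 + 8 - 1) = 1/15.
bb_a = ((0, 0, 0), (2, 2, 2))
bb_b = ((1, 1, 1), (3, 3, 3))
vol_a = get_bounding_box_volume(bb_a)  # 8
vol_b = get_bounding_box_volume(bb_b)  # 8
assert get_intersection(bb_a, bb_b) == 1.0
assert abs(get_iou(bb_a, bb_b, vol_a, vol_b) - 1.0 / 15) < 1e-9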
def view_episode(episode, env_wrappers=np.array([]), env_wrappers_args=np.array([])): '\n Visualizes a logged episode in the GUI\n\n :param episode: (Episode) the logged episode\n :param env_wrappers: (list) a list of gym wrappers\n :param env_wrappers_args: (list) a list of kwargs for the gym wrappers\n :return:\n ' actual_skip_frame = episode.world_params['skip_frame'] env = get_world(episode.task_name, episode.task_params, episode.world_params, enable_visualization=True, env_wrappers=env_wrappers, env_wrappers_args=env_wrappers_args) env.reset() env.set_starting_state(episode.initial_full_state, check_bounds=False) for (time, observation, reward, action) in zip(episode.timestamps, episode.observations, episode.rewards, episode.robot_actions): for _ in range(actual_skip_frame): env.step(action) env.close()
def view_policy(task, world_params, policy_fn, max_time_steps, number_of_resets, env_wrappers=np.array([]), env_wrappers_args=np.array([])): '\n Visualizes a policy for a specified environment in the GUI\n\n :param task: (Task) the task of the environment\n :param world_params: (dict) the world_params of the environment\n :param policy_fn: the policy to be evaluated\n :param max_time_steps: (int) the maximum number of time steps per episode\n :param number_of_resets: (int) the number of resets/episodes to be viewed\n :param env_wrappers: (list) a list of gym wrappers\n :param env_wrappers_args: (list) a list of kwargs for the gym wrappers\n :return:\n ' actual_skip_frame = world_params['skip_frame'] env = get_world(task.get_task_name(), task.get_task_params(), world_params, enable_visualization=True, env_wrappers=env_wrappers, env_wrappers_args=env_wrappers_args) for reset_idx in range(number_of_resets): obs = env.reset() for time in range(int((max_time_steps / number_of_resets))): desired_action = policy_fn(obs) for _ in range(actual_skip_frame): (obs, reward, done, info) = env.step(action=desired_action) env.close()
def record_video_of_policy(task, world_params, policy_fn, file_name, number_of_resets, max_time_steps=100, env_wrappers=np.array([]), env_wrappers_args=np.array([])): '\n Records a video of a policy for a specified environment\n\n :param task: (Task) the task of the environment\n :param world_params: (dict) the world_params of the environment\n :param policy_fn: the policy to be evaluated\n :param file_name: (str) full path where the video is being stored.\n :param number_of_resets: (int) the number of resets/episodes to be viewed\n :param max_time_steps: (int) the maximum number of time steps per episode\n :param env_wrappers: (list) a list of gym wrappers\n :param env_wrappers_args: (list) a list of kwargs for the gym wrappers\n :return:\n ' actual_skip_frame = world_params['skip_frame'] env = get_world(task.get_task_name(), task.get_task_params(), world_params, enable_visualization=False, env_wrappers=env_wrappers, env_wrappers_args=env_wrappers_args) recorder = VideoRecorder(env, '{}.mp4'.format(file_name)) for reset_idx in range(number_of_resets): obs = env.reset() recorder.capture_frame() for i in range(max_time_steps): desired_action = policy_fn(obs) for _ in range(actual_skip_frame): (obs, reward, done, info) = env.step(action=desired_action) recorder.capture_frame() recorder.close() env.close()
def record_video_of_random_policy(task, world_params, file_name, number_of_resets, max_time_steps=100, env_wrappers=np.array([]), env_wrappers_args=np.array([])): '\n Records a video of a random policy for a specified environment\n\n :param task: (Task) the task of the environment\n :param world_params: (dict) the world_params of the environment\n :param file_name: (str) full path where the video is being stored.\n :param number_of_resets: (int) the number of resets/episodes to be viewed\n :param max_time_steps: (int) the maximum number of time steps per episode\n :param env_wrappers: (list) a list of gym wrappers\n :param env_wrappers_args: (list) a list of kwargs for the gym wrappers\n :return:\n ' actual_skip_frame = world_params['skip_frame'] env = get_world(task.get_task_name(), task.get_task_params(), world_params, enable_visualization=False, env_wrappers=env_wrappers, env_wrappers_args=env_wrappers_args) recorder = VideoRecorder(env, '{}.mp4'.format(file_name)) for reset_idx in range(number_of_resets): obs = env.reset() recorder.capture_frame() for i in range(max_time_steps): for _ in range(actual_skip_frame): (obs, reward, done, info) = env.step(action=env.action_space.sample()) recorder.capture_frame() recorder.close() env.close()
def record_video_of_episode(episode, file_name, env_wrappers=np.array([]), env_wrappers_args=np.array([])): '\n Records a video of a logged episode for a specified environment\n\n :param episode: (Episode) the logged episode\n :param file_name: (str) full path where the video is being stored.\n :param env_wrappers: (list) a list of gym wrappers\n :param env_wrappers_args: (list) a list of kwargs for the gym wrappers\n :return:\n ' actual_skip_frame = episode.world_params['skip_frame'] env = get_world(episode.task_name, episode.task_params, episode.world_params, enable_visualization=False, env_wrappers=env_wrappers, env_wrappers_args=env_wrappers_args) env.set_starting_state(episode.initial_full_state, check_bounds=False) recorder = VideoRecorder(env, '{}.mp4'.format(file_name)) recorder.capture_frame() for (time, observation, reward, action) in zip(episode.timestamps, episode.observations, episode.rewards, episode.robot_actions): for _ in range(actual_skip_frame): env.step(action) recorder.capture_frame() recorder.close() env.close()
def get_world(task_generator_id, task_params, world_params, enable_visualization=False, env_wrappers=np.array([]), env_wrappers_args=np.array([])): '\n Returns a particular CausalWorld instance with optional wrappers\n\n :param task_generator_id: (str) id of the task of the environment\n :param task_params: (dict) task params of the environment\n :param world_params: (dict) world_params of the environment\n :param enable_visualization: (bool) if GUI visualization is enabled\n :param env_wrappers: (list) a list of gym wrappers\n :param env_wrappers_args: (list) a list of kwargs for the gym wrappers\n :return: (CausalWorld) a CausalWorld environment instance\n ' world_params['skip_frame'] = 1 if (task_params is None): task = generate_task(task_generator_id) else: if ('task_name' in task_params): del task_params['task_name'] task = generate_task(task_generator_id, **task_params) if ('enable_visualization' in world_params.keys()): world_params_temp = dict(world_params) del world_params_temp['enable_visualization'] env = CausalWorld(task, **world_params_temp, enable_visualization=enable_visualization) else: env = CausalWorld(task, **world_params, enable_visualization=enable_visualization) for i in range(len(env_wrappers)): env = env_wrappers[i](env, **env_wrappers_args[i]) return env
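# A hedged usage sketch for get_world. Note that it forces skip_frame to 1
# internally, which is why the episode-replay helpers above repeat each
# logged action world_params['skip_frame'] times themselves. 'pushing' is
# assumed to be a registered task generator id (it is used elsewhere in
# this codebase).
env = get_world('pushing',
                task_params=None,
                world_params={'skip_frame': 3},
                enable_visualization=False)
obs = env.reset()
env.close()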
def record_video(env, policy, file_name, number_of_resets=1, max_time_steps=None): '\n Records a video of a policy for a specified environment\n :param env: (causal_world.CausalWorld) the environment to use for\n recording.\n :param policy: the policy to be evaluated\n :param file_name: (str) full path where the video is being stored.\n :param number_of_resets: (int) the number of resets/episodes to be viewed\n :param max_time_steps: (int) the maximum number of time steps per episode\n :return:\n ' recorder = VideoRecorder(env, '{}.mp4'.format(file_name)) for reset_idx in range(number_of_resets): policy.reset() obs = env.reset() recorder.capture_frame() if (max_time_steps is not None): for i in range(max_time_steps): desired_action = policy.act(obs) (obs, reward, done, info) = env.step(action=desired_action) recorder.capture_frame() else: while True: desired_action = policy.act(obs) (obs, reward, done, info) = env.step(action=desired_action) recorder.capture_frame() if done: break recorder.close() return
class DeltaActionEnvWrapper(gym.ActionWrapper): def __init__(self, env): '\n A delta action wrapper for the environment that turns actions\n into deltas w.r.t. the previously executed action.\n\n :param env: (causal_world.CausalWorld) the environment to convert.\n ' super(DeltaActionEnvWrapper, self).__init__(env) self.env.add_wrapper_info({'delta_action': dict()}) def action(self, action): '\n Processes the action to transform to a delta action.\n\n :param action: (nd.array) the raw action to be processed.\n :return: (nd.array) the delta action.\n ' if (self.env.get_action_mode() == 'joint_positions'): offset = self.env.get_robot().get_last_applied_joint_positions() elif (self.env.get_action_mode() == 'joint_torques'): offset = self.env.get_robot().get_latest_full_state()['torques'] elif (self.env.get_action_mode() == 'end_effector_positions'): offset = self.env.get_robot().get_latest_full_state()['end_effector_positions'] else: raise Exception('action mode is not known') if self.env.are_actions_normalized(): offset = self.env.get_robot().normalize_observation_for_key(observation=offset, key=self.env.get_action_mode()) return (action + offset) def reverse_action(self, action): '\n Reverses processing the action to transform to a raw action again.\n\n :param action: (nd.array) the delta action.\n :return: (nd.array) the raw action before processing.\n ' if (self.env.get_action_mode() == 'joint_positions'): offset = self.env.get_robot().get_last_applied_joint_positions() elif (self.env.get_action_mode() == 'joint_torques'): offset = self.env.get_robot().get_latest_full_state()['torques'] elif (self.env.get_action_mode() == 'end_effector_positions'): offset = self.env.get_robot().get_latest_full_state()['end_effector_positions'] else: raise Exception('action mode is not known') if self.env.are_actions_normalized(): offset = self.env.get_robot().normalize_observation_for_key(observation=offset, key=self.env.get_action_mode()) return (action - offset)
class MovingAverageActionEnvWrapper(gym.ActionWrapper): def __init__(self, env, widow_size=8, initial_value=0): '\n\n :param env: (causal_world.CausalWorld) the environment to convert.\n :param widow_size: (int) the window size for averaging and smoothing\n the actions.\n :param initial_value: (float) initial value to fill the window with.\n ' super(MovingAverageActionEnvWrapper, self).__init__(env) self._policy = DummyActorPolicy() self._policy = MovingAverageActionWrapperActorPolicy(self._policy, widow_size=widow_size, initial_value=initial_value) self.env.add_wrapper_info({'moving_average_action': {'widow_size': widow_size, 'initial_value': initial_value}}) return def action(self, action): '\n Processes the action to transform to a smoothed action.\n\n :param action: (nd.array) the raw action to be processed.\n :return: (nd.array) the smoothed action.\n ' self._policy.policy.add_action(action) return self._policy.act(obs=None) def reverse_action(self, action): '\n Reverses processing the action to transform to a raw action again.\n\n :param action: (nd.array) the smoothed action.\n :return: (nd.array) the raw action before processing.\n ' raise NotImplementedError('reversing a smoothed action is not supported')
class CurriculumWrapper(gym.Wrapper): def __init__(self, env, intervention_actors, actives): '\n\n :param env: (causal_world.CausalWorld) the environment to convert.\n :param intervention_actors: (list) list of intervention actors\n :param actives: (list of tuples) each tuple indicates (episode_start,\n episode_end, episode_periodicity,\n time_step_for_intervention)\n ' super(CurriculumWrapper, self).__init__(env) self.interventions_curriculum = Curriculum(intervention_actors=intervention_actors, actives=actives) self.interventions_curriculum.initialize_actors(env=env) self.env.add_wrapper_info({'curriculum_environment': self.interventions_curriculum.get_params()}) self._elapsed_episodes = (- 1) self._elapsed_timesteps = 0 return def step(self, action): '\n Used to step through the environment.\n\n :param action: (nd.array) specifies which action should be taken by\n the robot, should follow the same action\n mode specified.\n\n :return: (nd.array) specifies the observations returned after stepping\n through the environment. Again, it follows the\n observation_mode specified.\n ' (observation, reward, done, info) = self.env.step(action) invalid_interventions = 0 self._elapsed_timesteps += 1 interventions_dict = self.interventions_curriculum.get_interventions(current_task_params=self.env.get_current_state_variables(), episode=self._elapsed_episodes, time_step=self._elapsed_timesteps) if (interventions_dict is not None): (success_signal, observation) = self.env.do_intervention(interventions_dict=interventions_dict) while ((not success_signal) and (invalid_interventions < 5)): invalid_interventions += 1 interventions_dict = self.interventions_curriculum.get_interventions(current_task_params=self.env.get_current_state_variables(), episode=self._elapsed_episodes, time_step=self._elapsed_timesteps) if (interventions_dict is not None): (success_signal, observation) = self.env.do_intervention(interventions_dict=interventions_dict) else: break return (observation, reward, done, info) def reset(self): '\n Resets the environment to the current starting state of the environment.\n\n :return: (nd.array) specifies the observations returned after resetting\n the environment. Again, it follows the\n observation_mode specified.\n ' self._elapsed_episodes += 1 invalid_interventions = 0 interventions_dict = self.interventions_curriculum.get_interventions(current_task_params=self.env.get_current_state_variables(), episode=self._elapsed_episodes, time_step=0) if (interventions_dict is not None): (success_signal, obs) = self.env.set_starting_state(interventions_dict) while ((not success_signal) and (invalid_interventions < 5)): invalid_interventions += 1 interventions_dict = self.interventions_curriculum.get_interventions(current_task_params=self.env.get_current_state_variables(), episode=self._elapsed_episodes, time_step=0) if (interventions_dict is not None): (success_signal, obs) = self.env.set_starting_state(interventions_dict) else: obs = self.env.reset() break else: obs = self.env.reset() return obs
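# Sketch of the intended wiring for the curriculum wrapper above, mirroring
# its usage later in this codebase and assuming generate_task/CausalWorld are
# imported. The single active tuple applies the goal actor once per episode
# (every episode from 0 to 1e9, at time step 0, i.e. on reset).
env = CausalWorld(task=generate_task('pushing'), enable_visualization=False)
env = CurriculumWrapper(env,
                        intervention_actors=[GoalInterventionActorPolicy()],
                        actives=[(0, 1000000000, 1, 0)])
obs = env.reset()  # goal intervention is sampled and applied here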
class HERGoalEnvWrapper(gym.Env): def __init__(self, env, activate_sparse_reward=False): '\n\n :param env: (causal_world.CausalWorld) the environment to convert.\n :param activate_sparse_reward: (bool) True to activate sparse rewards.\n ' super(HERGoalEnvWrapper, self).__init__() self.env = env self.metadata = self.env.metadata self.action_space = env.action_space current_goal = self.env.get_task().get_achieved_goal().flatten() goal_space_shape = current_goal.shape if activate_sparse_reward: self.env.get_task().activate_sparse_reward() self.observation_space = spaces.Dict(dict(desired_goal=spaces.Box((- np.inf), np.inf, shape=goal_space_shape, dtype=np.float64), achieved_goal=spaces.Box((- np.inf), np.inf, shape=goal_space_shape, dtype=np.float64), observation=self.env.observation_space)) self.reward_range = self.env.reward_range self.env.add_wrapper_info({'her_environment': {'activate_sparse_reward': activate_sparse_reward}}) def __getattr__(self, name): if name.startswith('_'): raise AttributeError("attempted to get missing private attribute '{}'".format(name)) return getattr(self.env, name) @property def spec(self): '\n\n :return:\n ' return self.env.spec @classmethod def class_name(cls): '\n\n :return:\n ' return cls.__name__ def step(self, action): '\n Used to step through the environment.\n\n :param action: (nd.array) specifies which action should be taken by\n the robot, should follow the same action\n mode specified.\n\n :return: (nd.array) specifies the observations returned after stepping\n through the environment. Again, it follows the\n observation_mode specified.\n ' obs_dict = dict() (normal_obs, reward, done, info) = self.env.step(action) obs_dict['observation'] = normal_obs obs_dict['achieved_goal'] = info['achieved_goal'].flatten() obs_dict['desired_goal'] = info['desired_goal'].flatten() return (obs_dict, reward, done, info) def reset(self): '\n Resets the environment to the current starting state of the environment.\n\n :return: (nd.array) specifies the observations returned after resetting\n the environment.
Again, it follows the\n observation_mode specified.\n ' obs_dict = dict() normal_obs = self.env.reset() obs_dict['observation'] = normal_obs obs_dict['achieved_goal'] = self.env.get_task().get_achieved_goal().flatten() obs_dict['desired_goal'] = self.env.get_task().get_desired_goal().flatten() return obs_dict def render(self, mode='human', **kwargs): '\n Returns an RGB image taken from above the platform.\n\n :param mode: (str) not taken into account for now.\n\n :return: (nd.array) an RGB image taken from above the platform.\n ' return self.env.render(mode, **kwargs) def close(self): '\n Closes the environment in a safe manner; should be called at the\n end of the program.\n\n :return: None\n ' return self.env.close() def seed(self, seed=None): '\n Used to set the seed of the environment,\n to reproduce the same randomness.\n\n :param seed: (int) specifies the seed number\n\n :return: (int in list) the numpy seed that you can use further.\n ' return self.env.seed(seed) def compute_reward(self, achieved_goal, desired_goal, info): '\n Used to calculate the reward given a hypothetical situation that could\n be used in hindsight experience replay algorithm variants.\n Can only be used reliably in the sparse reward setting; in the\n dense reward setting the reward cannot generally be recomputed\n from the goals alone.\n\n :param achieved_goal: (nd.array) specifies the achieved goal as bounding boxes of\n objects by default.\n :param desired_goal: (nd.array) specifies the desired goal as bounding boxes of\n goal shapes by default.\n :param info: (dict) not used for now.\n\n :return: (float) the final reward achieved given the hypothetical\n situation.\n ' return self.env.get_task().compute_reward(achieved_goal, desired_goal, info) def __str__(self): '\n\n :return:\n ' return '<{}{}>'.format(type(self).__name__, self.env) def __repr__(self): '\n\n :return:\n ' return str(self) @property def unwrapped(self): '\n\n :return:\n ' return self.env.unwrapped
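# Sketch of the goal-dict interface exposed by the wrapper above, assuming a
# 'picking' task generator (used elsewhere in this codebase). compute_reward
# allows HER-style relabeling of transitions with hypothetical
# achieved/desired goal pairs.
env = HERGoalEnvWrapper(CausalWorld(task=generate_task('picking'),
                                    enable_visualization=False),
                        activate_sparse_reward=True)
obs = env.reset()  # dict with 'observation', 'achieved_goal', 'desired_goal'
relabeled = env.compute_reward(obs['achieved_goal'],
                               obs['desired_goal'],
                               info={})
env.close()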
class ObjectSelectorActorPolicy(BaseInterventionActorPolicy): def __init__(self): '\n An intervention actor that nudges the position or orientation of a\n selected rigid object by small increments, driven by the discrete\n actions of the ObjectSelectorWrapper below.\n ' super(ObjectSelectorActorPolicy, self).__init__() self.low_joint_positions = None self.current_action = None self.selected_object = None def initialize_actor(self, env): '\n\n :param env: (causal_world.CausalWorld) environment used to query the\n raised joint positions.\n :return:\n ' self.low_joint_positions = env.get_joint_positions_raised() return def add_action(self, action, selected_object): '\n\n :param action: (tuple) discrete action triple (object index,\n position sub-action, orientation sub-action).\n :param selected_object: (str) name of the rigid object to intervene on.\n :return:\n ' self.current_action = action self.selected_object = selected_object def _act(self, variables_dict): '\n\n :param variables_dict: (dict) the current state variables of the\n environment.\n :return: (dict) an interventions dict that nudges the selected object.\n ' interventions_dict = dict() interventions_dict[self.selected_object] = dict() if (self.current_action[1] != 0): interventions_dict[self.selected_object]['cartesian_position'] = variables_dict[self.selected_object]['cartesian_position'] if (self.current_action[1] == 1): interventions_dict[self.selected_object]['cartesian_position'][(- 1)] += 0.002 elif (self.current_action[1] == 2): interventions_dict[self.selected_object]['cartesian_position'][(- 1)] -= 0.002 elif (self.current_action[1] == 3): interventions_dict[self.selected_object]['cartesian_position'][0] += 0.002 elif (self.current_action[1] == 4): interventions_dict[self.selected_object]['cartesian_position'][0] -= 0.002 elif (self.current_action[1] == 5): interventions_dict[self.selected_object]['cartesian_position'][1] += 0.002 elif (self.current_action[1] == 6): interventions_dict[self.selected_object]['cartesian_position'][1] -= 0.002 else: raise Exception('The passed position sub-action is not supported') if (self.current_action[2] != 0): euler_orientation = quaternion_to_euler(variables_dict[self.selected_object]['orientation']) if (self.current_action[2] == 1): euler_orientation[(- 1)] += 0.1 elif (self.current_action[2] == 2): euler_orientation[(- 1)] -= 0.1 elif (self.current_action[2] == 3): euler_orientation[1] += 0.1 elif (self.current_action[2] == 4): euler_orientation[1] -= 0.1 elif (self.current_action[2] == 5): euler_orientation[0] += 0.1 elif (self.current_action[2] == 6): euler_orientation[0] -= 0.1 else: raise Exception('The passed orientation sub-action is not supported') interventions_dict[self.selected_object]['orientation'] = euler_to_quaternion(euler_orientation) return interventions_dict
class ObjectSelectorWrapper(gym.Wrapper): def __init__(self, env): '\n\n :param env: (causal_world.CausalWorld) the environment to convert.\n ' super(ObjectSelectorWrapper, self).__init__(env) self.env = env self.env.set_skip_frame(1) self.intervention_actor = ObjectSelectorActorPolicy() self.intervention_actor.initialize_actor(self.env) self.observation_space = gym.spaces.Box(self.env.observation_space.low[28:], self.env.observation_space.high[28:], dtype=np.float64) self.objects_order = list(self.env.get_stage().get_rigid_objects().keys()) self.objects_order.sort() number_of_objects = len(self.objects_order) self.action_space = gym.spaces.Tuple((gym.spaces.Discrete(number_of_objects), gym.spaces.Discrete(7), gym.spaces.Discrete(7))) self.env.add_wrapper_info({'object_selector': dict()}) def step(self, action): '\n Used to step through the environment.\n\n :param action: (nd.array) specifies which action should be taken by\n the robot, should follow the same action\n mode specified.\n\n :return: (nd.array) specifies the observations returned after stepping\n through the environment. Again, it follows the\n observation_mode specified.\n ' self.intervention_actor.add_action(action, self.objects_order[action[0]]) intervention_dict = self.intervention_actor.act(self.env.get_current_state_variables()) self.env.do_intervention(intervention_dict, check_bounds=False) (obs, reward, done, info) = self.env.step(self.env.action_space.low) obs = obs[28:] return (obs, reward, done, info) def reset(self): '\n Resets the environment to the current starting state of the environment.\n\n :return: (nd.array) specifies the observations returned after resetting\n the environment. Again, it follows the\n observation_mode specified.\n ' result = self.env.reset() interventions_dict = dict() interventions_dict['joint_positions'] = self.intervention_actor.low_joint_positions self.env.do_intervention(interventions_dict, check_bounds=False) return result
class MovingAverageActionWrapperActorPolicy(BaseActorPolicy): def __init__(self, policy, widow_size=8, initial_value=0): '\n\n :param policy: (causal_world.actors.BaseActorPolicy) policy to be used.\n :param widow_size: (int) the window size for averaging and smoothing\n the actions.\n :param initial_value: (float) initial value to fill the window with.\n ' super(MovingAverageActionWrapperActorPolicy, self).__init__() self.__widow_size = widow_size self.__buffer = ([(initial_value / widow_size)] * widow_size) self.__avg = initial_value self.__p = 0 self.__start_smoothing = False self.__initial_counter = 0 self.__policy = policy @property def avg(self): '\n\n :return: (float) the current windowed average.\n ' return self.__avg @property def policy(self): '\n\n :return: (causal_world.actors.BaseActorPolicy) the wrapped policy.\n ' return self.__policy def act(self, obs): '\n The function is called for the agent to act in the world.\n\n :param obs: (nd.array) defines the observations received by the agent\n at time step t\n\n :return: (nd.array) defines the action to be executed at time step t\n ' unsmoothed_action = self.__policy.act(obs) self.__avg -= self.__buffer[self.__p] self.__buffer[self.__p] = (unsmoothed_action / self.__widow_size) self.__avg += self.__buffer[self.__p] self.__p = ((self.__p + 1) % self.__widow_size) if (not self.__start_smoothing): self.__initial_counter += 1 if (self.__initial_counter >= self.__widow_size): self.__start_smoothing = True if self.__start_smoothing: return self.__avg else: return unsmoothed_action
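# Standalone check of the ring-buffer average used above, independent of the
# policy classes: after the window fills with 1.0s, a spike to 5.0 is
# smoothed to (1 + 1 + 1 + 5) / 4 = 2.0.
window = 4
buffer = [0.0] * window
avg, p = 0.0, 0
for raw in [1.0, 1.0, 1.0, 1.0, 5.0]:
    avg -= buffer[p]
    buffer[p] = raw / window
    avg += buffer[p]
    p = (p + 1) % window
assert avg == 2.0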
class ProtocolWrapper(gym.Wrapper): def __init__(self, env, protocol): '\n\n :param env: (causal_world.CausalWorld) the environment to convert.\n :param protocol: (causal_world.evaluation.ProtocolBase) protocol to evaluate.\n ' super(ProtocolWrapper, self).__init__(env) self.protocol = protocol self.env.add_wrapper_info({'evaluation_environment': self.protocol.get_name()}) self._elapsed_episodes = 0 self._elapsed_timesteps = 0 return def step(self, action): '\n Used to step through the environment.\n\n :param action: (nd.array) specifies which action should be taken by\n the robot, should follow the same action\n mode specified.\n\n :return: (nd.array) specifies the observations returned after stepping\n through the environment. Again, it follows the\n observation_mode specified.\n ' (observation, reward, done, info) = self.env.step(action) self._elapsed_timesteps += 1 invalid_interventions = 0 interventions_dict = self.protocol.get_intervention(episode=self._elapsed_episodes, timestep=self._elapsed_timesteps) if (interventions_dict is not None): (success_signal, observation) = self.env.do_intervention(interventions_dict=interventions_dict) while ((not success_signal) and (invalid_interventions < 5)): invalid_interventions += 1 interventions_dict = self.protocol.get_intervention(episode=self._elapsed_episodes, timestep=self._elapsed_timesteps) if (interventions_dict is not None): (success_signal, observation) = self.env.do_intervention(interventions_dict=interventions_dict) else: break return (observation, reward, done, info) def reset(self): '\n Resets the environment to the current starting state of the environment.\n\n :return: (nd.array) specifies the observations returned after resetting\n the environment. Again, it follows the\n observation_mode specified.\n ' self._elapsed_episodes += 1 self._elapsed_timesteps = 0 invalid_interventions = 0 observation = self.env.reset() interventions_dict = self.protocol.get_intervention(episode=self._elapsed_episodes, timestep=0) if (interventions_dict is not None): (success_signal, observation) = self.env.do_intervention(interventions_dict) while ((not success_signal) and (invalid_interventions < 5)): invalid_interventions += 1 interventions_dict = self.protocol.get_intervention(episode=self._elapsed_episodes, timestep=0) if (interventions_dict is not None): (success_signal, observation) = self.env.do_intervention(interventions_dict) else: break return observation
def train_policy(num_of_envs, log_relative_path, maximum_episode_length, skip_frame, seed_num, her_config, total_time_steps, validate_every_timesteps, task_name): task = generate_task(task_generator_id=task_name, dense_reward_weights=np.array([100000, 0, 0, 0]), fractional_reward_weight=0) env = CausalWorld(task=task, skip_frame=skip_frame, enable_visualization=False, seed=seed_num, max_episode_length=maximum_episode_length) env = HERGoalEnvWrapper(env) env = CurriculumWrapper(env, intervention_actors=[GoalInterventionActorPolicy()], actives=[(0, 1000000000, 1, 0)]) set_global_seeds(seed_num) checkpoint_callback = CheckpointCallback(save_freq=int((validate_every_timesteps / num_of_envs)), save_path=log_relative_path, name_prefix='model') model = HER(MlpPolicy, env, SAC, verbose=1, policy_kwargs=dict(layers=[256, 256, 256]), **her_config, seed=seed_num) model.learn(total_timesteps=total_time_steps, tb_log_name='her_sac', callback=checkpoint_callback) return
def train_policy(num_of_envs, log_relative_path, maximum_episode_length, skip_frame, seed_num, ppo_config, total_time_steps, validate_every_timesteps, task_name): def _make_env(rank): def _init(): task = generate_task(task_generator_id=task_name, dense_reward_weights=np.array([100000, 0, 0, 0]), fractional_reward_weight=0) env = CausalWorld(task=task, skip_frame=skip_frame, enable_visualization=False, seed=(seed_num + rank), max_episode_length=maximum_episode_length) env = CurriculumWrapper(env, intervention_actors=[GoalInterventionActorPolicy()], actives=[(0, 1000000000, 1, 0)]) return env set_global_seeds(seed_num) return _init policy_kwargs = dict(act_fun=tf.nn.tanh, net_arch=[256, 128]) env = SubprocVecEnv([_make_env(rank=i) for i in range(num_of_envs)]) checkpoint_callback = CheckpointCallback(save_freq=int((validate_every_timesteps / num_of_envs)), save_path=log_relative_path, name_prefix='model') model = PPO2(MlpPolicy, env, _init_setup_model=True, policy_kwargs=policy_kwargs, verbose=1, **ppo_config) model.learn(total_timesteps=total_time_steps, tb_log_name='ppo2', callback=checkpoint_callback) return
def train_policy(num_of_envs, log_relative_path, maximum_episode_length, skip_frame, seed_num, ddpg_config, total_time_steps, validate_every_timesteps, task_name): print('Using MPI for multiprocessing with {} workers'.format(MPI.COMM_WORLD.Get_size())) rank = MPI.COMM_WORLD.Get_rank() print('Worker rank: {}'.format(rank)) task = generate_task(task_generator_id=task_name, dense_reward_weights=np.array([250, 0, 125, 0, 750, 0, 0, 0.005]), fractional_reward_weight=1, goal_height=0.15, tool_block_mass=0.02) env = CausalWorld(task=task, skip_frame=skip_frame, enable_visualization=False, seed=0, max_episode_length=maximum_episode_length, normalize_actions=False, normalize_observations=False) n_actions = env.action_space.shape[(- 1)] param_noise = None action_noise = OrnsteinUhlenbeckActionNoise(mean=np.zeros(n_actions), sigma=(float(0.5) * np.ones(n_actions))) policy_kwargs = dict(layers=[256, 256]) checkpoint_callback = CheckpointCallback(save_freq=int((validate_every_timesteps / num_of_envs)), save_path=log_relative_path, name_prefix='model') model = DDPG(MlpPolicy, env, verbose=2, param_noise=param_noise, action_noise=action_noise, policy_kwargs=policy_kwargs, **ddpg_config) model.learn(total_timesteps=total_time_steps, tb_log_name='ddpg', callback=checkpoint_callback) return
def train_policy(num_of_envs, log_relative_path, maximum_episode_length, skip_frame, seed_num, her_config, total_time_steps, validate_every_timesteps, task_name): task = generate_task(task_generator_id=task_name, dense_reward_weights=np.array([0, 0, 0, 0, 0, 0, 0, 0]), fractional_reward_weight=1, goal_height=0.15, tool_block_mass=0.02) env = CausalWorld(task=task, skip_frame=skip_frame, enable_visualization=False, seed=seed_num, max_episode_length=maximum_episode_length) env = HERGoalEnvWrapper(env) set_global_seeds(seed_num) checkpoint_callback = CheckpointCallback(save_freq=int((validate_every_timesteps / num_of_envs)), save_path=log_relative_path, name_prefix='model') model = HER(MlpPolicy, env, SAC, verbose=1, policy_kwargs=dict(layers=[256, 256]), **her_config, seed=seed_num) model.learn(total_timesteps=total_time_steps, tb_log_name='her_sac', callback=checkpoint_callback) return
def train_policy(num_of_envs, log_relative_path, maximum_episode_length, skip_frame, seed_num, ppo_config, total_time_steps, validate_every_timesteps, task_name): def _make_env(rank): def _init(): task = generate_task(task_generator_id=task_name, dense_reward_weights=np.array([250, 0, 125, 0, 750, 0, 0, 0.005]), fractional_reward_weight=1, goal_height=0.15, tool_block_mass=0.02) env = CausalWorld(task=task, skip_frame=skip_frame, enable_visualization=False, seed=(seed_num + rank), max_episode_length=maximum_episode_length) return env set_global_seeds(seed_num) return _init policy_kwargs = dict(act_fun=tf.nn.tanh, net_arch=[256, 256]) env = SubprocVecEnv([_make_env(rank=i) for i in range(num_of_envs)]) checkpoint_callback = CheckpointCallback(save_freq=int((validate_every_timesteps / num_of_envs)), save_path=log_relative_path, name_prefix='model') model = PPO2(MlpPolicy, env, _init_setup_model=True, policy_kwargs=policy_kwargs, verbose=1, **ppo_config) model.learn(total_timesteps=total_time_steps, tb_log_name='ppo2', callback=checkpoint_callback) return
def train_policy(num_of_envs, log_relative_path, maximum_episode_length, skip_frame, seed_num, sac_config, total_time_steps, validate_every_timesteps, task_name): task = generate_task(task_generator_id=task_name, dense_reward_weights=np.array([250, 0, 125, 0, 750, 0, 0, 0.005]), fractional_reward_weight=1, goal_height=0.15, tool_block_mass=0.02) env = CausalWorld(task=task, skip_frame=skip_frame, enable_visualization=False, seed=seed_num, max_episode_length=maximum_episode_length) set_global_seeds(seed_num) policy_kwargs = dict(layers=[256, 256]) checkpoint_callback = CheckpointCallback(save_freq=int((validate_every_timesteps / num_of_envs)), save_path=log_relative_path, name_prefix='model') model = SAC(MlpPolicy, env, verbose=1, policy_kwargs=policy_kwargs, **sac_config, seed=seed_num) model.learn(total_timesteps=total_time_steps, tb_log_name='sac', callback=checkpoint_callback) return
def _make_env(rank): task = generate_task(task_generator_id='picking', dense_reward_weights=np.array([250, 0, 125, 0, 750, 0, 0, 0.005]), fractional_reward_weight=1, goal_height=0.15, tool_block_mass=0.02) env = CausalWorld(task=task, skip_frame=3, enable_visualization=False, seed=0, max_episode_length=600) env = GymEnvWrapper(env) return env
def build_and_train(): p = psutil.Process() cpus = p.cpu_affinity() affinity = dict(cuda_idx=None, master_cpus=cpus, workers_cpus=list(([x] for x in cpus)), set_affinity=True) sampler = CpuSampler(EnvCls=_make_env, env_kwargs=dict(rank=0), batch_T=1, batch_B=4, max_decorrelation_steps=0, CollectorCls=CpuResetCollector) algo = SAC(batch_size=256, min_steps_learn=10000, replay_size=1000000, replay_ratio=(256 / 4), target_update_interval=1, target_entropy=(- 9), target_update_tau=0.01, learning_rate=0.00025, action_prior='uniform', reward_scale=1, reparameterize=True, clip_grad_norm=1000000000.0, n_step_return=1, updates_per_sync=1, bootstrap_timelimit=False) agent = SacAgent(model_kwargs={'hidden_sizes': [256, 256]}) runner = MinibatchRl(algo=algo, agent=agent, sampler=sampler, n_steps=50000000.0, log_interval_steps=10000, affinity=affinity) config = dict(env_id='picking') name = 'sac_rlpyt_picking' log_dir = os.path.join(os.path.dirname(__file__), 'sac_rlpyt_picking') with logger_context(log_dir, 0, name, config, use_summary_writer=True, snapshot_mode='all'): runner.train()
def _make_env(rank): task = generate_task('pushing', dense_reward_weights=np.array([2500, 2500, 0]), variables_space='space_a', fractional_reward_weight=100) env = CausalWorld(task=task, skip_frame=3, enable_visualization=False, seed=(0 + rank)) env = CurriculumWrapper(env, intervention_actors=[GoalInterventionActorPolicy()], actives=[(0, 1000000000.0, 2, 0)]) env = GymEnvWrapper(env) return env
def build_and_train(): p = psutil.Process() cpus = p.cpu_affinity() affinity = dict(cuda_idx=None, master_cpus=cpus, workers_cpus=list(([x] for x in cpus)), set_affinity=True) sampler = CpuSampler(EnvCls=_make_env, env_kwargs=dict(rank=0), max_decorrelation_steps=0, batch_T=6000, batch_B=len(cpus)) model_kwargs = dict(model_kwargs=dict(hidden_sizes=[256, 256])) ppo_config = {'discount': 0.98, 'entropy_loss_coeff': 0.01, 'learning_rate': 0.00025, 'value_loss_coeff': 0.5, 'clip_grad_norm': 0.5, 'minibatches': 40, 'gae_lambda': 0.95, 'ratio_clip': 0.2, 'epochs': 4} algo = PPO(**ppo_config) agent = MujocoFfAgent(**model_kwargs) runner = MinibatchRl(algo=algo, agent=agent, sampler=sampler, n_steps=int(60000000.0), log_interval_steps=int(1000000.0), affinity=affinity) config = dict(rank=0, env_id='picking') name = 'ppo_rlpyt_pushing' log_dir = os.path.join(os.path.dirname(__file__), name) with logger_context(log_dir, 0, name, config, use_summary_writer=True, snapshot_mode='all'): runner.train()
def baseline_model(model_num, task): if (task == 'pushing'): benchmarks = utils.sweep('benchmarks', [PUSHING_BENCHMARK]) task_configs = [{'task_configs': {'variables_space': 'space_a', 'fractional_reward_weight': 1, 'dense_reward_weights': [750, 250, 0]}}] elif (task == 'picking'): benchmarks = utils.sweep('benchmarks', [PICKING_BENCHMARK]) task_configs = [{'task_configs': {'variables_space': 'space_a', 'fractional_reward_weight': 1, 'dense_reward_weights': [250, 0, 125, 0, 750, 0, 0, 0.005]}}] elif (task == 'pick_and_place'): benchmarks = utils.sweep('benchmarks', [PICK_AND_PLACE_BENCHMARK]) task_configs = [{'task_configs': {'variables_space': 'space_a', 'fractional_reward_weight': 1, 'dense_reward_weights': [750, 50, 250, 0, 0.005]}}] elif (task == 'stacking2'): benchmarks = utils.sweep('benchmarks', [STACKING2_BENCHMARK]) task_configs = [{'task_configs': {'variables_space': 'space_a', 'fractional_reward_weight': 1, 'dense_reward_weights': [750, 250, 250, 125, 0.005]}}] else: benchmarks = utils.sweep('benchmarks', [PUSHING_BENCHMARK]) task_configs = [{'task_configs': {'variables_space': 'space_a', 'fractional_reward_weight': 1, 'dense_reward_weights': [750, 250, 0]}}] world_params = [{'world_params': {'skip_frame': 3, 'enable_visualization': False, 'observation_mode': 'structured', 'normalize_observations': True, 'action_mode': 'joint_positions'}}] net_layers = utils.sweep('NET_LAYERS', [[256, 256]]) world_seed = utils.sweep('world_seed', [0]) NUM_RANDOM_SEEDS = 5 random_seeds = utils.sweep('seed', list(range(NUM_RANDOM_SEEDS))) ppo = {'num_of_envs': 20, 'algorithm': 'PPO', 'validate_every_timesteps': int(2000000), 'total_time_steps': int(100000000), 'train_configs': {'gamma': 0.99, 'n_steps': int((120000 / 20)), 'ent_coef': 0.01, 'learning_rate': 0.00025, 'vf_coef': 0.5, 'max_grad_norm': 0.5, 'nminibatches': 40, 'noptepochs': 4}} sac = {'num_of_envs': 1, 'algorithm': 'SAC', 'validate_every_timesteps': int(500000), 'total_time_steps': int(10000000), 'train_configs': {'gamma': 0.95, 'tau': 0.001, 'ent_coef': 0.001, 'target_entropy': 'auto', 'learning_rate': 0.0001, 'buffer_size': 1000000, 'learning_starts': 1000, 'batch_size': 256}} td3 = {'num_of_envs': 1, 'algorithm': 'TD3', 'validate_every_timesteps': int(500000), 'total_time_steps': int(10000000), 'train_configs': {'gamma': 0.96, 'tau': 0.02, 'learning_rate': 0.0001, 'buffer_size': 500000, 'learning_starts': 1000, 'batch_size': 128}} algorithms = [ppo, sac, td3] curriculum_kwargs_1 = {'intervention_actors': [], 'actives': []} curriculum_kwargs_2 = {'intervention_actors': [GoalInterventionActorPolicy()], 'actives': [(0, 1000000000.0, 1, 0)]} curriculum_kwargs_3 = {'intervention_actors': [RandomInterventionActorPolicy()], 'actives': [(0, 1000000000.0, 1, 0)]} curriculum_kwargs = [curriculum_kwargs_1, curriculum_kwargs_2, curriculum_kwargs_3] return utils.outer_product([benchmarks, world_params, task_configs, algorithms, curriculum_kwargs, random_seeds, world_seed, net_layers])[model_num]
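# Worked example for the sweep above, assuming the module-level imports of
# this file: the outer product enumerates
# 1 (benchmark) * 1 (world params) * 1 (task config) * 3 (algorithms)
# * 3 (curricula) * 5 (seeds) * 1 (world seed) * 1 (net layers) = 45
# configurations, so model_num selects one of 45 baseline models.
settings = baseline_model(0, 'pushing')  # settings dict for the first combination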
def checkpoints_in_folder(folder): def is_checkpoint_file(f): full_path = os.path.join(folder, f) return (os.path.isfile(full_path) and f.startswith('model_') and f.endswith('_steps.zip')) filenames = [f for f in os.listdir(folder) if is_checkpoint_file(f)] regex = re.compile('\\d+') numbers = list([int(regex.search(n).group(0)) for n in filenames]) assert (len(filenames) == len(numbers)) sorted_idx = np.argsort(numbers) numbers = list([numbers[i] for i in sorted_idx]) filenames = list([filenames[i] for i in sorted_idx]) return (filenames, numbers)
def get_latest_checkpoint_path(model_path): (filenames, numbers) = checkpoints_in_folder(model_path) if (len(filenames) == 0): return (None, 0) else: ckpt_name = filenames[np.argmax(numbers)] ckpt_step = numbers[np.argmax(numbers)] ckpt_path = os.path.join(model_path, ckpt_name) return (ckpt_path, ckpt_step)
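# Usage sketch for the two checkpoint helpers above, using a throwaway
# directory populated with empty files named like the real checkpoints.
import os
import tempfile

tmp = tempfile.mkdtemp()
for steps in (500, 1500, 1000):
    open(os.path.join(tmp, 'model_{}_steps.zip'.format(steps)), 'w').close()
filenames, numbers = checkpoints_in_folder(tmp)  # numbers == [500, 1000, 1500]
ckpt_path, ckpt_step = get_latest_checkpoint_path(tmp)  # ckpt_step == 1500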
def save_model_settings(file_path, model_settings): model_settings['intervention_actors'] = [actor.__class__.__name__ for actor in model_settings['intervention_actors']] with open(file_path, 'w') as fout: json.dump(model_settings, fout, indent=4, default=(lambda x: x.__dict__))
def sweep(key, values): return [{key: value} for value in values]
def outer_product(list_of_settings): if (len(list_of_settings) == 1): return list_of_settings[0] result = [] other_items = outer_product(list_of_settings[1:]) for first_dict in list_of_settings[0]: for second_dict in other_items: result_dict = dict() result_dict.update(first_dict) result_dict.update(second_dict) result.append(result_dict) return result
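# Worked example of the two sweep helpers above: the outer product merges
# one dict from each sweep into every possible combination.
algos = sweep('algorithm', ['PPO', 'SAC'])  # [{'algorithm': 'PPO'}, {'algorithm': 'SAC'}]
seeds = sweep('seed', [0, 1])
grid = outer_product([algos, seeds])
# 4 dicts: {'algorithm': 'PPO', 'seed': 0}, {'algorithm': 'PPO', 'seed': 1}, ...
assert len(grid) == 4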
class PrintTimestepCallback(BaseCallback): def _on_step(self) -> bool: print(self.model.num_timesteps, flush=True) return True
def get_single_process_env(model_settings, model_path, ckpt_step): task = generate_task(model_settings['benchmarks']['task_generator_id'], **model_settings['task_configs']) env = CausalWorld(task=task, **model_settings['world_params'], seed=model_settings['world_seed']) env = CurriculumWrapper(env, intervention_actors=model_settings['intervention_actors'], actives=model_settings['actives']) if (ckpt_step is None): prefix = 0 else: prefix = ckpt_step monitor_file = os.path.join(model_path, str(prefix)) env = Monitor(env, filename=monitor_file, info_keywords=('fractional_success',)) return env
def get_multi_process_env(model_settings, model_path, num_of_envs, ckpt_step): def _make_env(rank): def _init(): task = generate_task(model_settings['benchmarks']['task_generator_id'], **model_settings['task_configs']) env = CausalWorld(task=task, **model_settings['world_params'], seed=(model_settings['world_seed'] + rank)) env = CurriculumWrapper(env, intervention_actors=model_settings['intervention_actors'], actives=model_settings['actives']) if (ckpt_step is None): prefix = 0 else: prefix = ckpt_step monitor_file = os.path.join(model_path, ((str(rank) + '_') + str(prefix))) env = Monitor(env, filename=monitor_file, info_keywords=('fractional_success',)) return env return _init return SubprocVecEnv([_make_env(rank=i) for i in range(num_of_envs)])
def get_TD3_model(model_settings, model_path, ckpt_path, ckpt_step, tb_path): policy_kwargs = dict(layers=model_settings['NET_LAYERS']) env = get_single_process_env(model_settings, model_path, ckpt_step) n_actions = env.action_space.shape[(- 1)] action_noise = NormalActionNoise(mean=np.zeros(n_actions), sigma=(0.1 * np.ones(n_actions))) if (ckpt_path is not None): print("Loading model from checkpoint '{}'".format(ckpt_path)) model = TD3.load(ckpt_path, env=env, _init_setup_model=True, policy_kwargs=policy_kwargs, **model_settings['train_configs'], action_noise=action_noise, verbose=1, tensorboard_log=tb_path) model.num_timesteps = ckpt_step else: model = TD3(TD3MlpPolicy, env, _init_setup_model=True, policy_kwargs=policy_kwargs, action_noise=action_noise, **model_settings['train_configs'], verbose=1, tensorboard_log=tb_path) return (model, env)
def get_SAC_model(model_settings, model_path, ckpt_path, ckpt_step, tb_path): policy_kwargs = dict(layers=model_settings['NET_LAYERS']) env = get_single_process_env(model_settings, model_path, ckpt_step) if (ckpt_path is not None): print("Loading model from checkpoint '{}'".format(ckpt_path)) model = SAC.load(ckpt_path, env=env, _init_setup_model=True, policy_kwargs=policy_kwargs, **model_settings['train_configs'], verbose=1, tensorboard_log=tb_path) model.num_timesteps = ckpt_step else: model = SAC(SACMlpPolicy, env, _init_setup_model=True, policy_kwargs=policy_kwargs, **model_settings['train_configs'], verbose=1, tensorboard_log=tb_path) return (model, env)
def get_PPO_model(model_settings, model_path, ckpt_path, ckpt_step, num_of_envs, tb_path): policy_kwargs = dict(act_fun=tf.nn.tanh, net_arch=model_settings['NET_LAYERS']) env = get_multi_process_env(model_settings, model_path, num_of_envs, ckpt_step) if (ckpt_path is not None): print("Loading model from checkpoint '{}'".format(ckpt_path)) model = PPO2.load(ckpt_path, env=env, _init_setup_model=True, policy_kwargs=policy_kwargs, **model_settings['train_configs'], verbose=1, tensorboard_log=tb_path) model.num_timesteps = ckpt_step else: model = PPO2(MlpPolicy, env, _init_setup_model=True, policy_kwargs=policy_kwargs, **model_settings['train_configs'], verbose=1, tensorboard_log=tb_path) return (model, env)
def train_model(model_settings, output_path, tensorboard_logging=False): num_of_envs = model_settings['num_of_envs'] model_path = os.path.join(output_path, 'model') if tensorboard_logging: tb_path = model_path else: tb_path = None try: os.makedirs(model_path) ckpt_path = None ckpt_step = 0 except FileExistsError: print("Folder '{}' already exists".format(model_path)) (ckpt_path, ckpt_step) = utils.get_latest_checkpoint_path(model_path) set_global_seeds(model_settings['seed']) if (model_settings['algorithm'] == 'PPO'): (model, env) = get_PPO_model(model_settings, model_path, ckpt_path, ckpt_step, num_of_envs, tb_path) num_of_active_envs = num_of_envs total_time_steps = (model_settings['total_time_steps'] - ckpt_step) validate_every_timesteps = model_settings['validate_every_timesteps'] elif (model_settings['algorithm'] == 'SAC'): (model, env) = get_SAC_model(model_settings, model_path, ckpt_path, ckpt_step, tb_path) num_of_active_envs = num_of_envs total_time_steps = (model_settings['total_time_steps'] - ckpt_step) validate_every_timesteps = model_settings['validate_every_timesteps'] elif (model_settings['algorithm'] == 'TD3'): (model, env) = get_TD3_model(model_settings, model_path, ckpt_path, ckpt_step, tb_path) num_of_active_envs = num_of_envs total_time_steps = (model_settings['total_time_steps'] - ckpt_step) validate_every_timesteps = model_settings['validate_every_timesteps'] else: raise Exception('{} is not supported for training in the baselines'.format(model_settings['algorithm'])) ckpt_frequency = int((validate_every_timesteps / num_of_active_envs)) checkpoint_callback = CheckpointCallback(save_freq=ckpt_frequency, save_path=model_path, name_prefix='model') if (ckpt_path is None): utils.save_model_settings(os.path.join(model_path, 'model_settings.json'), model_settings) model.learn(int(total_time_steps), callback=checkpoint_callback, reset_num_timesteps=(ckpt_path is None)) model.save(save_path=os.path.join(model_path, 'model_{}_steps'.format(total_time_steps))) if (env.__class__.__name__ == 'SubprocVecEnv'): env.env_method('save_world', output_path) else: env.save_world(output_path) env.close() return model
def get_mean_scores(scores_list): scores_mean = dict() num_scores = len(scores_list) for key in list(scores_list[0].keys())[:]: scores_mean[key] = {} for sub_key in scores_list[0][key].keys(): scores_mean[key][sub_key] = np.mean([scores_list[i][key][sub_key] for i in range(num_scores)]) scores_mean[key][(sub_key + '_std')] = np.std([scores_list[i][key][sub_key] for i in range(num_scores)]) return scores_mean
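# Example with two hypothetical evaluation runs: each metric is averaged
# across runs and a matching '<metric>_std' entry is appended. 0.5 and 0.75
# are chosen to be exactly representable in binary floating point.
runs = [{'protocol_0': {'fractional_success': 0.5}},
        {'protocol_0': {'fractional_success': 0.75}}]
mean_scores = get_mean_scores(runs)
assert mean_scores['protocol_0']['fractional_success'] == 0.625
assert mean_scores['protocol_0']['fractional_success_std'] == 0.125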
@pytest.fixture(scope='module') def as_jp_norm(): return TriFingerAction(action_mode='joint_positions', normalize_actions=True)
@pytest.fixture(scope='module') def as_jp_full(): return TriFingerAction(action_mode='joint_positions', normalize_actions=False)
@pytest.fixture(scope='module') def as_jt_norm(): return TriFingerAction(action_mode='joint_torques', normalize_actions=True)
@pytest.fixture(scope='module') def as_jt_full(): return TriFingerAction(action_mode='joint_torques', normalize_actions=False)
@pytest.fixture(scope='module') def as_default(): return TriFingerAction()
@pytest.fixture(scope='module') def as_custom(): return TriFingerAction(normalize_actions=False)
def test_set_action_space(as_custom): as_custom.set_action_space(custom_action_lower_bound, custom_action_upper_bound) assert (as_custom.get_action_space().low == custom_action_lower_bound).all() assert (as_custom.get_action_space().high == custom_action_upper_bound).all() assert (as_custom.normalize_action(custom_action_upper_bound) == custom_action_norm_upper_bound).all() assert (as_custom.normalize_action(custom_action_lower_bound) == custom_action_norm_lower_bound).all()
def test_get_action_space(as_default, as_jt_full, as_jt_norm, as_jp_full, as_jp_norm): assert (as_default.get_action_space().low == (- 1.0)).all() assert (as_jp_norm.get_action_space().low == (- 1.0)).all() assert (as_jt_norm.get_action_space().low == (- 1.0)).all() assert (as_default.get_action_space().high == 1.0).all() assert (as_jp_norm.get_action_space().high == 1.0).all() assert (as_jt_norm.get_action_space().high == 1.0).all() assert (as_jp_full.get_action_space().low == lower_100_denormalized_jp_action).all() assert (as_jt_full.get_action_space().low == lower_100_denormalized_jt_action).all() assert (as_jp_full.get_action_space().high == upper_100_denormalized_jp_action).all() assert (as_jt_full.get_action_space().high == upper_100_denormalized_jt_action).all()
def test_is_normalized(as_default, as_jt_full, as_jt_norm, as_jp_full, as_jp_norm): assert as_default.is_normalized() assert as_jt_norm.is_normalized() assert (not as_jt_full.is_normalized()) assert as_jp_norm.is_normalized() assert (not as_jp_full.is_normalized())
def test_satisfy_constraints(as_default, as_jt_full, as_jt_norm, as_jp_full, as_jp_norm): assert as_default.satisfy_constraints(upper_99_normalized_action) assert (not as_default.satisfy_constraints(upper_100_normalized_action)) assert (not as_default.satisfy_constraints(upper_101_normalized_action)) assert as_jt_norm.satisfy_constraints(upper_99_normalized_action) assert (not as_jt_norm.satisfy_constraints(upper_100_normalized_action)) assert (not as_jt_norm.satisfy_constraints(upper_101_normalized_action)) assert as_jt_full.satisfy_constraints(as_jt_full.denormalize_action(upper_99_normalized_action)) assert (not as_jt_full.satisfy_constraints(as_jt_full.denormalize_action(upper_100_normalized_action))) assert (not as_jt_full.satisfy_constraints(as_jt_full.denormalize_action(upper_101_normalized_action))) assert as_jp_norm.satisfy_constraints(upper_99_normalized_action) assert (not as_jp_norm.satisfy_constraints(upper_100_normalized_action)) assert (not as_jp_norm.satisfy_constraints(upper_101_normalized_action)) assert as_jp_full.satisfy_constraints(as_jp_full.denormalize_action(upper_99_normalized_action)) assert (not as_jp_full.satisfy_constraints(as_jp_full.denormalize_action(upper_100_normalized_action))) assert (not as_jp_full.satisfy_constraints(as_jp_full.denormalize_action(upper_101_normalized_action))) assert as_default.satisfy_constraints(lower_99_normalized_action) assert (not as_default.satisfy_constraints(lower_100_normalized_action)) assert (not as_default.satisfy_constraints(lower_101_normalized_action)) assert as_jt_norm.satisfy_constraints(lower_99_normalized_action) assert (not as_jt_norm.satisfy_constraints(lower_100_normalized_action)) assert (not as_jt_norm.satisfy_constraints(lower_101_normalized_action)) assert as_jt_full.satisfy_constraints(as_jt_full.denormalize_action(lower_99_normalized_action)) assert (not as_jt_full.satisfy_constraints(as_jt_full.denormalize_action(lower_100_normalized_action))) assert (not as_jt_full.satisfy_constraints(as_jt_full.denormalize_action(lower_101_normalized_action))) assert as_jp_norm.satisfy_constraints(lower_99_normalized_action) assert (not as_jp_norm.satisfy_constraints(lower_100_normalized_action)) assert (not as_jp_norm.satisfy_constraints(lower_101_normalized_action)) assert as_jp_full.satisfy_constraints(as_jp_full.denormalize_action(lower_99_normalized_action)) assert (not as_jp_full.satisfy_constraints(as_jp_full.denormalize_action(lower_100_normalized_action))) assert (not as_jp_full.satisfy_constraints(as_jp_full.denormalize_action(lower_101_normalized_action)))
def test_clip_action(as_default, as_jt_full, as_jt_norm, as_jp_full, as_jp_norm): assert (as_default.clip_action(upper_99_normalized_action) == upper_99_normalized_action).all() assert (as_default.clip_action(lower_99_normalized_action) == lower_99_normalized_action).all() assert (as_default.clip_action(upper_100_normalized_action) == upper_100_normalized_action).all() assert (as_default.clip_action(lower_100_normalized_action) == lower_100_normalized_action).all() assert (as_default.clip_action(upper_101_normalized_action) == upper_100_normalized_action).all() assert (as_default.clip_action(lower_101_normalized_action) == lower_100_normalized_action).all() assert (as_default.clip_action(normalized_action_to_be_clipped) == normalized_action_clipped).all() assert (as_jt_norm.clip_action(normalized_action_to_be_clipped) == normalized_action_clipped).all() assert (as_jp_norm.clip_action(normalized_action_to_be_clipped) == normalized_action_clipped).all() assert (as_jp_full.clip_action(as_jp_full.denormalize_action(normalized_action_to_be_clipped)) == pytest.approx(as_jp_full.denormalize_action(normalized_action_clipped)))
def test_normalize_action(as_default, as_jt_full, as_jt_norm, as_jp_full, as_jp_norm): assert (as_default.normalize_action(upper_100_denormalized_jp_action) == upper_100_normalized_action).all() assert (as_jp_full.normalize_action(upper_100_denormalized_jp_action) == upper_100_normalized_action).all() assert (as_jp_norm.normalize_action(upper_100_denormalized_jp_action) == upper_100_normalized_action).all() assert (as_jt_full.normalize_action(upper_100_denormalized_jt_action) == upper_100_normalized_action).all() assert (as_jt_norm.normalize_action(upper_100_denormalized_jt_action) == upper_100_normalized_action).all() assert (as_jt_full.normalize_action(upper_100_denormalized_jp_action) != upper_100_normalized_action).all() assert (as_jt_norm.normalize_action(upper_100_denormalized_jp_action) != upper_100_normalized_action).all() assert (as_jp_full.normalize_action(upper_100_denormalized_jt_action) != upper_100_normalized_action).all() assert (as_jp_norm.normalize_action(upper_100_denormalized_jt_action) != upper_100_normalized_action).all() assert (as_default.normalize_action(lower_100_denormalized_jp_action) == lower_100_normalized_action).all() assert (as_jp_full.normalize_action(lower_100_denormalized_jp_action) == lower_100_normalized_action).all() assert (as_jp_norm.normalize_action(lower_100_denormalized_jp_action) == lower_100_normalized_action).all() assert (as_jt_full.normalize_action(lower_100_denormalized_jt_action) == lower_100_normalized_action).all() assert (as_jt_norm.normalize_action(lower_100_denormalized_jt_action) == lower_100_normalized_action).all() assert (as_jt_full.normalize_action(lower_100_denormalized_jp_action) != lower_100_normalized_action).all() assert (as_jt_norm.normalize_action(lower_100_denormalized_jp_action) != lower_100_normalized_action).all() assert (as_jp_full.normalize_action(lower_100_denormalized_jt_action) != lower_100_normalized_action).all() assert (as_jp_norm.normalize_action(lower_100_denormalized_jt_action) != lower_100_normalized_action).all() assert (as_jp_norm.denormalize_action(as_jp_norm.normalize_action(upper_100_denormalized_jp_action)) == pytest.approx(upper_100_denormalized_jp_action)) assert (as_jt_norm.denormalize_action(as_jt_norm.normalize_action(upper_100_denormalized_jt_action)) == pytest.approx(upper_100_denormalized_jt_action)) assert (as_jp_norm.denormalize_action(as_jp_norm.normalize_action(lower_100_denormalized_jp_action)) == pytest.approx(lower_100_denormalized_jp_action)) assert (as_jt_norm.denormalize_action(as_jt_norm.normalize_action(lower_100_denormalized_jt_action)) == pytest.approx(lower_100_denormalized_jt_action))
def test_denormalize_action(as_default, as_jt_full, as_jt_norm, as_jp_full, as_jp_norm): assert (as_default.denormalize_action(upper_100_normalized_action) == pytest.approx(upper_100_denormalized_jp_action)) assert (as_jp_full.denormalize_action(upper_100_normalized_action) == pytest.approx(upper_100_denormalized_jp_action)) assert (as_jp_norm.denormalize_action(upper_100_normalized_action) == pytest.approx(upper_100_denormalized_jp_action)) assert (as_jt_norm.denormalize_action(upper_100_normalized_action) == pytest.approx(upper_100_denormalized_jt_action)) assert (as_jt_full.denormalize_action(upper_100_normalized_action) == pytest.approx(upper_100_denormalized_jt_action)) assert (as_jt_norm.denormalize_action(upper_100_normalized_action) != upper_100_denormalized_jp_action).all() assert (as_jt_full.denormalize_action(upper_100_normalized_action) != upper_100_denormalized_jp_action).all() assert (as_jp_full.denormalize_action(upper_100_normalized_action) != upper_100_denormalized_jt_action).all() assert (as_jp_norm.denormalize_action(upper_100_normalized_action) != upper_100_denormalized_jt_action).all() assert (as_default.denormalize_action(lower_100_normalized_action) == pytest.approx(lower_100_denormalized_jp_action)) assert (as_jp_full.denormalize_action(lower_100_normalized_action) == pytest.approx(lower_100_denormalized_jp_action)) assert (as_jp_norm.denormalize_action(lower_100_normalized_action) == pytest.approx(lower_100_denormalized_jp_action)) assert (as_jt_full.denormalize_action(lower_100_normalized_action) == pytest.approx(lower_100_denormalized_jt_action)) assert (as_jt_norm.denormalize_action(lower_100_normalized_action) == pytest.approx(lower_100_denormalized_jt_action)) assert (as_jt_full.denormalize_action(lower_100_normalized_action) != lower_100_denormalized_jp_action).all() assert (as_jt_norm.denormalize_action(lower_100_normalized_action) != lower_100_denormalized_jp_action).all() assert (as_jp_full.denormalize_action(lower_100_normalized_action) != lower_100_denormalized_jt_action).all() assert (as_jp_norm.denormalize_action(lower_100_normalized_action) != lower_100_denormalized_jt_action).all() assert (as_jp_norm.denormalize_action(as_jp_norm.normalize_action(upper_100_denormalized_jp_action)) == pytest.approx(upper_100_denormalized_jp_action)) assert (as_jt_norm.denormalize_action(as_jt_norm.normalize_action(upper_100_denormalized_jt_action)) == pytest.approx(upper_100_denormalized_jt_action)) assert (as_jp_norm.denormalize_action(as_jp_norm.normalize_action(lower_100_denormalized_jp_action)) == pytest.approx(lower_100_denormalized_jp_action)) assert (as_jt_norm.denormalize_action(as_jt_norm.normalize_action(lower_100_denormalized_jt_action)) == pytest.approx(lower_100_denormalized_jt_action))
@pytest.fixture(scope='module')
def os_camera_full():
    return TriFingerObservations(observation_mode='pixel',
                                 normalize_observations=False)
@pytest.fixture(scope='module')
def os_structured_full():
    return TriFingerObservations(observation_mode='structured',
                                 observation_keys=['action_joint_positions',
                                                   'joint_velocities',
                                                   'joint_torques'],
                                 normalize_observations=False)
@pytest.fixture(scope='module')
def os_default():
    return TriFingerObservations()
@pytest.fixture(scope='module')
def os_custom_keys_norm():
    return TriFingerObservations(observation_mode='structured',
                                 normalize_observations=True,
                                 observation_keys=['end_effector_positions',
                                                   'action_joint_positions'])
def test_get_observation_spaces(os_default, os_camera_full, os_structured_full, os_custom_keys_norm):
    assert (os_default.get_observation_spaces().low == -1.0).all()
    assert (os_custom_keys_norm.get_observation_spaces().low == -1.0).all()
    assert (os_default.get_observation_spaces().high == 1.0).all()
    assert (os_custom_keys_norm.get_observation_spaces().high == 1.0).all()
    assert (os_structured_full.get_observation_spaces().low == lower_obs_space_structured_full).all()
    assert (os_camera_full.get_observation_spaces().low == 0).all()
    assert (os_structured_full.get_observation_spaces().high == upper_obs_space_structured_full).all()
    assert (os_camera_full.get_observation_spaces().high == 255).all()
def test_is_normalized(os_default, os_camera_full, os_structured_full, os_custom_keys_norm):
    assert os_default.is_normalized()
    assert os_custom_keys_norm.is_normalized()
    assert not os_camera_full.is_normalized()
    assert not os_structured_full.is_normalized()
def test_normalize_observation(os_structured_full, os_custom_keys_norm):
    assert (os_structured_full.normalize_observation(upper_obs_space_structured_full) == upper_100_structured_norm).all()
    assert (os_structured_full.normalize_observation(lower_obs_space_structured_full) == lower_100_structured_norm).all()
    assert (os_custom_keys_norm.normalize_observation(upper_obs_space_custom) == 1).all()
    assert (os_custom_keys_norm.normalize_observation(lower_obs_space_custom) == -1).all()
def test_denormalize_observation(os_structured_full, os_custom_keys_norm):
    assert os_structured_full.denormalize_observation(np.array([1.0] * 27)) == pytest.approx(upper_obs_space_structured_full)
    assert os_structured_full.denormalize_observation(np.array([-1.0] * 27)) == pytest.approx(lower_obs_space_structured_full)
    assert os_custom_keys_norm.denormalize_observation(np.array([1.0] * 18)) == pytest.approx(upper_obs_space_custom)
    assert os_custom_keys_norm.denormalize_observation(np.array([-1.0] * 18)) == pytest.approx(lower_obs_space_custom)
def test_satisfy_constraints(os_default, os_structured_full):
    # satisfy_constraints is exclusive at the bounds: values strictly inside
    # the space pass, values at or beyond the bounds fail.
    assert os_default.satisfy_constraints(upper_99_structured_norm)
    assert not os_default.satisfy_constraints(upper_100_structured_norm)
    assert not os_default.satisfy_constraints(upper_101_structured_norm)
    assert os_structured_full.satisfy_constraints(lower_obs_space_structured_full * 0.5)
    assert not os_structured_full.satisfy_constraints(lower_obs_space_structured_full * 1.01)
    assert os_default.satisfy_constraints(lower_99_structured_norm)
    assert not os_default.satisfy_constraints(lower_100_structured_norm)
    assert not os_default.satisfy_constraints(lower_101_structured_norm)
def test_clip_observation(os_default, os_structured_full):
    assert (os_default.clip_observation(upper_99_structured_norm) == upper_99_structured_norm).all()
    assert (os_default.clip_observation(lower_99_structured_norm) == lower_99_structured_norm).all()
    assert (os_default.clip_observation(upper_100_structured_norm) == upper_100_structured_norm).all()
    assert (os_default.clip_observation(lower_100_structured_norm) == lower_100_structured_norm).all()
    assert (os_default.clip_observation(upper_101_structured_norm) == upper_100_structured_norm).all()
    assert (os_default.clip_observation(lower_101_structured_norm) == lower_100_structured_norm).all()
    assert os_structured_full.clip_observation(
        os_structured_full.denormalize_observation(normalized_struct_obs_to_be_clipped)) == pytest.approx(
            os_structured_full.denormalize_observation(normalized_struct_obs_clipped))
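# Editorial sketch (not in the original suite): the two tests above imply
# that clipping maps out-of-range observations onto the space boundary,
# while satisfy_constraints rejects the boundary itself (strict inequality).
# A minimal check of that interaction, assuming the fixtures above:
def test_clip_then_satisfy_boundary(os_default):
    clipped = os_default.clip_observation(upper_101_structured_norm)
    assert (clipped == upper_100_structured_norm).all()
    # the clipped value sits exactly on the bound, which still fails the
    # strict constraint check
    assert not os_default.satisfy_constraints(clipped)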
def test_add_and_remove_observation(os_custom_keys_norm):
    os_custom_keys_norm.add_observation('joint_torques')
    assert os_custom_keys_norm._observations_keys == ['end_effector_positions', 'action_joint_positions', 'joint_torques']
    assert len(os_custom_keys_norm.get_observation_spaces().low) == 27
    assert len(os_custom_keys_norm.get_observation_spaces().high) == 27
    # the original duplicated the `high == 1.0` assertion; the second check
    # presumably targets the lower bound of the normalized space
    assert (os_custom_keys_norm.get_observation_spaces().high == 1.0).all()
    assert (os_custom_keys_norm.get_observation_spaces().low == -1.0).all()
    os_custom_keys_norm.remove_observations(['joint_torques'])
    assert os_custom_keys_norm._observations_keys == ['end_effector_positions', 'action_joint_positions']
    assert len(os_custom_keys_norm.get_observation_spaces().low) == 18
    assert len(os_custom_keys_norm.get_observation_spaces().high) == 18
    assert (os_custom_keys_norm.get_observation_spaces().high == 1.0).all()
    assert (os_custom_keys_norm.get_observation_spaces().low == -1.0).all()

    def dummy_fn(robot):
        return [1, 1, 1]

    os_custom_keys_norm.add_observation('dummy_key',
                                        lower_bound=[-4, -4, -4],
                                        upper_bound=[2, 2, 2],
                                        observation_fn=dummy_fn)
    assert os_custom_keys_norm._observations_keys == ['end_effector_positions', 'action_joint_positions', 'dummy_key']
    assert len(os_custom_keys_norm.get_observation_spaces().low) == 21
    assert len(os_custom_keys_norm.get_observation_spaces().high) == 21
    assert (os_custom_keys_norm.get_observation_spaces().high == 1.0).all()
    assert (os_custom_keys_norm.get_observation_spaces().low == -1.0).all()
    os_custom_keys_norm.remove_observations(['dummy_key'])
    assert os_custom_keys_norm._observations_keys == ['end_effector_positions', 'action_joint_positions']
    assert len(os_custom_keys_norm.get_observation_spaces().low) == 18
    assert len(os_custom_keys_norm.get_observation_spaces().high) == 18
    assert (os_custom_keys_norm.get_observation_spaces().high == 1.0).all()
    assert (os_custom_keys_norm.get_observation_spaces().low == -1.0).all()
@pytest.fixture(scope='module')
def robot_jp_structured():
    task = generate_task(task_generator_id='pushing')
    return CausalWorld(task=task, enable_visualization=False,
                       observation_mode='structured')
@pytest.fixture(scope='module')
def robot_jp_camera():
    task = generate_task(task_generator_id='pushing')
    return CausalWorld(task=task, enable_visualization=False,
                       observation_mode='pixel')
def test_action_mode_switching(robot_jp_structured):
    robot_jp_structured.set_action_mode('joint_torques')
    assert robot_jp_structured.get_action_mode() == 'joint_torques'
    robot_jp_structured.set_action_mode('joint_positions')
    assert robot_jp_structured.get_action_mode() == 'joint_positions'
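# Editorial sketch (not in the original suite): the test above only exercises
# the getter; a stricter variant would also step the environment in each mode
# to confirm the switched action space is actually consumed. Uses only the
# CausalWorld calls already present in this file.
def test_action_mode_switching_steps():
    task = generate_task(task_generator_id='pushing')
    env = CausalWorld(task=task, enable_visualization=False)
    env.reset()
    for mode in ['joint_torques', 'joint_positions']:
        env.set_action_mode(mode)
        assert env.get_action_mode() == mode
        # stepping should succeed with a sample from the current action space
        env.step(env.action_space.sample())
    env.close()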
def test_pd_gains():
    np.random.seed(0)
    task = generate_task(task_generator_id='pushing')
    skip_frame = 1
    env = CausalWorld(task=task, enable_visualization=False,
                      skip_frame=skip_frame, normalize_observations=False,
                      normalize_actions=False, seed=0)
    zero_hold = int(5000 / skip_frame)
    obs = env.reset()
    # hold the target long enough for the PD controller to converge, then
    # check the steady-state error; the absolute error is used here since the
    # original one-sided check could never fire when approaching the upper
    # joint limits from below
    for _ in range(zero_hold):
        chosen_action = np.zeros(9)
        obs, reward, done, info = env.step(chosen_action)
    current_joint_positions = obs[1:10]
    if (np.abs(current_joint_positions - chosen_action) > 0.1).any():
        raise AssertionError(
            'The pd controller failed to reach these values {} but reached '
            'instead {}'.format(chosen_action, current_joint_positions))
    for _ in range(zero_hold):
        chosen_action = env.action_space.high
        obs, reward, done, info = env.step(chosen_action)
    current_joint_positions = obs[1:10]
    if (np.abs(current_joint_positions - chosen_action) > 0.1).any():
        raise AssertionError(
            'The pd controller failed to reach these values {} but reached '
            'instead {}'.format(chosen_action, current_joint_positions))
    env.close()
class TestWorld(unittest.TestCase):

    def setUp(self):
        return

    def tearDown(self):
        return

    def test_determinism(self):
        task = generate_task(task_generator_id='stacked_blocks')
        observations_v1 = []
        observations_v2 = []
        observations_v3 = []
        rewards_v1 = []
        rewards_v2 = []
        rewards_v3 = []
        horizon = 30
        env_v1 = CausalWorld(task=task, enable_visualization=False, seed=27)
        obs = env_v1.reset()
        observations_v1.append(obs)
        for _ in range(horizon):
            obs, reward, done, info = env_v1.step(env_v1.action_space.low)
            observations_v1.append(obs)
            rewards_v1.append(reward)
        env_v1.close()
        task = generate_task(task_generator_id='stacked_blocks')
        env_v2 = CausalWorld(task=task, enable_visualization=False, seed=27)
        obs = env_v2.reset()
        observations_v2.append(obs)
        for _ in range(horizon):
            obs, reward, done, info = env_v2.step(env_v2.action_space.low)
            observations_v2.append(obs)
            rewards_v2.append(reward)
        env_v2.close()
        task = generate_task(task_generator_id='stacked_blocks')
        env_v3 = CausalWorld(task=task, enable_visualization=False, seed=54)
        obs = env_v3.reset()
        observations_v3.append(obs)
        for _ in range(horizon):
            obs, reward, done, info = env_v3.step(env_v3.action_space.low)
            observations_v3.append(obs)
            rewards_v3.append(reward)
        env_v3.close()
        # identical seeds must match exactly; a different seed must match too,
        # since the initial state and the action sequence are deterministic
        assert all(np.array_equal(observations_v1[i], observations_v2[i])
                   for i in range(horizon))
        assert rewards_v1 == rewards_v2
        assert all(np.array_equal(observations_v1[i], observations_v3[i])
                   for i in range(horizon))
        assert rewards_v1 == rewards_v3

    def test_parallelism(self):
        # two environments stepped side by side must not interfere
        task = generate_task(task_generator_id='stacked_blocks')
        env1 = CausalWorld(task=task, enable_visualization=False, seed=0)
        env1.reset()
        task2 = generate_task(task_generator_id='stacked_blocks')
        env2 = CausalWorld(task=task2, enable_visualization=False, seed=0)
        observations_env1_v1, rewards_env1_v1, _, _ = env1.step(env1.action_space.low)
        env2.reset()
        observations_env2_v1, rewards_env2_v1, _, _ = env2.step(env2.action_space.low)
        env1.close()
        env2.close()
        assert np.array_equal(observations_env2_v1, observations_env1_v1)
        return

    def test_timing_profile(self):
        from pybullet_envs.bullet.kukaGymEnv import KukaGymEnv
        import time
        kuka_env = KukaGymEnv(renders=False, isDiscrete=False)
        task = generate_task(task_generator_id='pushing')
        causal_rl_env = CausalWorld(task=task, enable_visualization=False,
                                    seed=0, skip_frame=10,
                                    normalize_actions=False,
                                    normalize_observations=False)
        start = time.time()
        kuka_env.reset()
        end = time.time()
        kuka_reset_time = end - start
        start = time.time()
        causal_rl_env.reset()
        end = time.time()
        causal_rl_reset_time = end - start
        self.assertLess(causal_rl_reset_time, kuka_reset_time * 1.25)
        start = time.time()
        kuka_env.step(kuka_env.action_space.sample())
        end = time.time()
        kuka_step_time = end - start
        start = time.time()
        causal_rl_env.step(causal_rl_env.action_space.sample())
        end = time.time()
        causal_rl_step_time = end - start
        print('time 1', causal_rl_step_time)
        print('time 2', kuka_step_time)
        self.assertLess(causal_rl_step_time, kuka_step_time * 10)
        start = time.time()
        kuka_env.render()
        end = time.time()
        kuka_render_time = end - start
        start = time.time()
        causal_rl_env.render()
        end = time.time()
        causal_rl_render_time = end - start
        self.assertLess(causal_rl_render_time, kuka_render_time * 1.25)
        causal_rl_env.close()
        kuka_env.close()
        return

    def test_camera_timing_profile(self):
        from pybullet_envs.bullet.kukaCamGymEnv import KukaCamGymEnv
        import time
        kuka_env = KukaCamGymEnv(renders=False, isDiscrete=False)
        task = generate_task(task_generator_id='pushing')
        causal_rl_env = CausalWorld(task=task, enable_visualization=False,
                                    seed=0, skip_frame=10,
                                    normalize_actions=False,
                                    normalize_observations=False)
        start = time.time()
        kuka_env.reset()
        end = time.time()
        kuka_reset_time = end - start
        start = time.time()
        causal_rl_env.reset()
        end = time.time()
        causal_rl_reset_time = end - start
        self.assertLess(causal_rl_reset_time, kuka_reset_time * 3)
        start = time.time()
        kuka_env.step(kuka_env.action_space.sample())
        end = time.time()
        kuka_step_time = end - start
        start = time.time()
        causal_rl_env.step(causal_rl_env.action_space.sample())
        end = time.time()
        causal_rl_step_time = end - start
        self.assertLess(causal_rl_step_time, kuka_step_time * 1)
        causal_rl_env.close()
        kuka_env.close()
        return

    @unittest.skip('This test will fail because of a potential bug in pybullet')
    def test_save_state(self):
        task = generate_task(task_generator_id='creative_stacked_blocks')
        env = CausalWorld(task=task, enable_visualization=False, seed=0)
        actions = [env.action_space.sample() for _ in range(200)]
        env.reset()
        observations_1 = []
        rewards_1 = []
        for i in range(200):
            observations, rewards, _, _ = env.step(actions[i])
            if i == 100:
                state = env.get_state()
            observations_1.append(observations)
            rewards_1.append(rewards)
        env.set_state(state)
        for i in range(101, 200):
            observations, rewards, _, _ = env.step(actions[i])
            if not np.array_equal(observations_1[i], observations):
                print('step', i)
                print(observations_1[i] - observations)
            assert np.array_equal(observations_1[i], observations)
        env.close()
        return

    def test_reset_default_state(self):
        task = generate_task(task_generator_id='picking')
        env = CausalWorld(task=task, enable_visualization=False, seed=0)
        actions = [env.action_space.sample() for _ in range(200)]
        observations_1 = []
        rewards_1 = []
        env.reset()
        for i in range(200):
            observations, rewards, _, _ = env.step(actions[i])
            observations_1.append(observations)
            rewards_1.append(rewards)
        env.set_starting_state(
            {'goal_block': {'cylindrical_position': [0.1, np.pi, 0.1]}})
        for i in range(200):
            observations, rewards, _, _ = env.step(actions[i])
        # reverting to the default state must reproduce the first episode
        env.reset_default_state()
        for i in range(200):
            observations, rewards, _, _ = env.step(actions[i])
            assert np.array_equal(observations_1[i], observations)
        env.close()
        return
class TestCreativeStackedBlocks(unittest.TestCase):

    def setUp(self):
        self.task = generate_task(task_generator_id='creative_stacked_blocks')
        self.env = CausalWorld(task=self.task, enable_visualization=False)
        return

    def tearDown(self):
        self.env.close()
        return

    def test_determinism(self):
        observations_1 = []
        rewards_1 = []
        horizon = 100
        actions = [self.env.action_space.sample() for _ in range(horizon)]
        actions = np.array(actions)
        obs = self.env.reset()
        observations_1.append(obs)
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            observations_1.append(obs)
            rewards_1.append(reward)
        for _ in range(10):
            observations_2 = []
            rewards_2 = []
            obs = self.env.reset()
            observations_2.append(obs)
            for i in range(horizon):
                obs, reward, done, info = self.env.step(actions[i])
                observations_2.append(obs)
                rewards_2.append(reward)
                if not np.array_equal(observations_1[i], observations_2[i]):
                    print(np.array(observations_1[i]) - np.array(observations_2[i]))
                assert np.array_equal(observations_1[i], observations_2[i])
            assert rewards_1 == rewards_2

    def test_determinism_w_interventions(self):
        observations_1 = []
        rewards_1 = []
        horizon = 100
        actions = [self.env.action_space.sample() for _ in range(horizon)]
        actions = np.array(actions)
        new_goal = self.env.sample_new_goal()
        self.env.set_starting_state(interventions_dict=new_goal)
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            observations_1.append(obs)
            rewards_1.append(reward)
        for _ in range(10):
            observations_2 = []
            rewards_2 = []
            self.env.reset()
            for i in range(horizon):
                obs, reward, done, info = self.env.step(actions[i])
                observations_2.append(obs)
                rewards_2.append(reward)
                if not np.array_equal(observations_1[i], observations_2[i]):
                    print(np.array(observations_1[i]) - np.array(observations_2[i]))
                assert np.array_equal(observations_1[i], observations_2[i])
            assert rewards_1 == rewards_2

    def test_determinism_w_in_episode_interventions(self):
        observations_1 = []
        rewards_1 = []
        horizon = 100
        actions = [self.env.action_space.sample() for _ in range(horizon)]
        actions = np.array(actions)
        self.env.reset()
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            observations_1.append(obs)
            rewards_1.append(reward)
        self.env.reset()
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            if i == 50:
                success_signal = self.env.do_intervention(
                    {'tool_level_0_num_1': {'cylindrical_position': [0, 0, 0.2]}})
        # an in-episode intervention must not leak into subsequent episodes
        observations_2 = []
        rewards_2 = []
        self.env.reset()
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            observations_2.append(obs)
            rewards_2.append(reward)
            assert np.array_equal(observations_1[i], observations_2[i])
        assert rewards_1 == rewards_2
class TestGeneral(unittest.TestCase):

    def setUp(self):
        self.task = generate_task(task_generator_id='general')
        self.env = CausalWorld(task=self.task, enable_visualization=False)
        self.env.reset()
        return

    def tearDown(self):
        self.env.close()
        return

    def test_determinism(self):
        observations_1 = []
        rewards_1 = []
        horizon = 100
        actions = [self.env.action_space.sample() for _ in range(horizon)]
        actions = np.array(actions)
        obs = self.env.reset()
        observations_1.append(obs)
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            observations_1.append(obs)
            rewards_1.append(reward)
        for _ in range(10):
            observations_2 = []
            rewards_2 = []
            obs = self.env.reset()
            observations_2.append(obs)
            for i in range(horizon):
                obs, reward, done, info = self.env.step(actions[i])
                observations_2.append(obs)
                rewards_2.append(reward)
                if not np.array_equal(observations_1[i], observations_2[i]):
                    print(np.array(observations_1[i]) - np.array(observations_2[i]))
                assert np.array_equal(observations_1[i], observations_2[i])
            assert rewards_1 == rewards_2

    def test_determinism_w_interventions(self):
        observations_1 = []
        rewards_1 = []
        horizon = 100
        actions = [self.env.action_space.sample() for _ in range(horizon)]
        actions = np.array(actions)
        new_goal = self.env.sample_new_goal()
        self.env.set_starting_state(interventions_dict=new_goal)
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            observations_1.append(obs)
            rewards_1.append(reward)
        for _ in range(10):
            observations_2 = []
            rewards_2 = []
            self.env.reset()
            for i in range(horizon):
                obs, reward, done, info = self.env.step(actions[i])
                observations_2.append(obs)
                rewards_2.append(reward)
                assert np.array_equal(observations_1[i], observations_2[i])
            assert rewards_1 == rewards_2

    def test_determinism_w_in_episode_interventions(self):
        observations_1 = []
        rewards_1 = []
        horizon = 100
        actions = [self.env.action_space.sample() for _ in range(horizon)]
        actions = np.array(actions)
        self.env.reset()
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            observations_1.append(obs)
            rewards_1.append(reward)
        self.env.reset()
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            if i == 50:
                success_signal = self.env.do_intervention(
                    {'tool_0': {'cylindrical_position': [0, 0, 0.2]}})
        observations_2 = []
        rewards_2 = []
        self.env.reset()
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            observations_2.append(obs)
            rewards_2.append(reward)
            assert np.array_equal(observations_1[i], observations_2[i])
        assert rewards_1 == rewards_2
class TestPickAndPlace(unittest.TestCase):

    def setUp(self):
        self.task = generate_task(task_generator_id='pick_and_place')
        self.env = CausalWorld(task=self.task, enable_visualization=False)
        return

    def tearDown(self):
        self.env.close()
        return

    def test_determinism(self):
        observations_1 = []
        rewards_1 = []
        horizon = 100
        actions = [self.env.action_space.sample() for _ in range(horizon)]
        actions = np.array(actions)
        obs = self.env.reset()
        observations_1.append(obs)
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            observations_1.append(obs)
            rewards_1.append(reward)
        for _ in range(10):
            observations_2 = []
            rewards_2 = []
            obs = self.env.reset()
            observations_2.append(obs)
            for i in range(horizon):
                obs, reward, done, info = self.env.step(actions[i])
                observations_2.append(obs)
                rewards_2.append(reward)
                if not np.array_equal(observations_1[i], observations_2[i]):
                    print(np.array(observations_1[i]) - np.array(observations_2[i]))
                assert np.array_equal(observations_1[i], observations_2[i])
            assert rewards_1 == rewards_2

    def test_determinism_w_interventions(self):
        observations_1 = []
        rewards_1 = []
        horizon = 100
        actions = [self.env.action_space.sample() for _ in range(horizon)]
        actions = np.array(actions)
        new_goal = self.env.sample_new_goal()
        self.env.set_starting_state(interventions_dict=new_goal)
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            observations_1.append(obs)
            rewards_1.append(reward)
        for _ in range(10):
            observations_2 = []
            rewards_2 = []
            self.env.reset()
            for i in range(horizon):
                obs, reward, done, info = self.env.step(actions[i])
                observations_2.append(obs)
                rewards_2.append(reward)
                if not np.array_equal(observations_1[i], observations_2[i]):
                    print(i)
                    print(np.array(observations_1[i]) - np.array(observations_2[i]))
                assert np.array_equal(observations_1[i], observations_2[i])
            assert rewards_1 == rewards_2

    def test_determinism_w_in_episode_interventions(self):
        observations_1 = []
        rewards_1 = []
        horizon = 100
        actions = [self.env.action_space.sample() for _ in range(horizon)]
        actions = np.array(actions)
        self.env.reset()
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            observations_1.append(obs)
            rewards_1.append(reward)
        self.env.reset()
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            if i == 50:
                success_signal = self.env.do_intervention(
                    {'tool_block': {'cylindrical_position': [0.1, np.pi / 2, 0.0325]}})
        observations_2 = []
        rewards_2 = []
        self.env.reset()
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            observations_2.append(obs)
            rewards_2.append(reward)
            assert np.array_equal(observations_1[i], observations_2[i])
        assert rewards_1 == rewards_2

    def test_goal_intervention(self):
        task = generate_task(task_generator_id='pick_and_place')
        env = CausalWorld(task=task, enable_visualization=False,
                          normalize_observations=False)
        for _ in range(10):
            invalid_interventions_before = env.get_tracker().invalid_intervention_steps
            new_goal = env.sample_new_goal()
            env.set_starting_state(interventions_dict=new_goal)
            invalid_interventions_after = env.get_tracker().invalid_intervention_steps
            for _ in range(2):
                for _ in range(100):
                    obs, reward, done, info = env.step(env.action_space.low)
                if invalid_interventions_before == invalid_interventions_after:
                    assert np.array_equal(
                        cyl2cart(new_goal['goal_block']['cylindrical_position']),
                        obs[-18:-15])
                env.reset()
        env.close()
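# Editorial addition: the file mixes pytest-style fixtures with
# unittest.TestCase classes; pytest collects both. A hypothetical entry point
# for running this file directly, assuming pytest is installed:
if __name__ == '__main__':
    import pytest
    pytest.main([__file__, '-q'])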