This is called whenever a trial makes progress. When all live trials in the bracket have no more iterations left, trials are successively halved. If the bracket is finished, all non-running trials are stopped and cleaned up; during each halving phase, bad trials are stopped while good trials return to "PENDING".

def _process_bracket(self, trial_runner, bracket, trial):
    """This is called whenever a trial makes progress.

    When all live trials in the bracket have no more iterations left,
    trials are successively halved. If the bracket is finished, all
    non-running trials are stopped and cleaned up; during each halving
    phase, bad trials are stopped while good trials return to "PENDING"."""

    action = TrialScheduler.PAUSE
    if bracket.cur_iter_done():
        if bracket.finished():
            bracket.cleanup_full(trial_runner)
            return TrialScheduler.STOP

        good, bad = bracket.successive_halving(self._reward_attr)
        # kill bad trials
        self._num_stopped += len(bad)
        for t in bad:
            if t.status == Trial.PAUSED:
                trial_runner.stop_trial(t)
            elif t.status == Trial.RUNNING:
                bracket.cleanup_trial(t)
                action = TrialScheduler.STOP
            else:
                raise Exception("Trial with unexpected status encountered")

        # ready the good trials - if trial is too far ahead, don't continue
        for t in good:
            if t.status not in [Trial.PAUSED, Trial.RUNNING]:
                raise Exception("Trial with unexpected status encountered")
            if bracket.continue_trial(t):
                if t.status == Trial.PAUSED:
                    trial_runner.trial_executor.unpause_trial(t)
                elif t.status == Trial.RUNNING:
                    action = TrialScheduler.CONTINUE
    return action
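The successive-halving step itself is implemented on the bracket object and is not shown above. As a rough, minimal sketch of the idea (not Ray's implementation), assuming each live trial's latest result exposes the reward attribute as a plain number and a reduction factor eta:

def successive_halving_sketch(trial_results, reward_attr, eta=3):
    # trial_results: dict mapping each live trial to its latest result dict.
    # Rank trials by reward and keep the top 1/eta fraction as "good".
    ranked = sorted(
        trial_results.items(),
        key=lambda item: item[1][reward_attr],
        reverse=True)
    cutoff = max(1, len(ranked) // eta)
    good = [trial for trial, _ in ranked[:cutoff]]
    bad = [trial for trial, _ in ranked[cutoff:]]
    return good, bad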
Notification when trial terminates. Trial info is removed from bracket. Triggers halving if bracket is not finished. def on_trial_remove(self, trial_runner, trial): """Notification when trial terminates. Trial info is removed from bracket. Triggers halving if bracket is not finished.""" bracket, _ = self._trial_info[trial] bracket.cleanup_trial(trial) if not bracket.finished(): self._process_bracket(trial_runner, bracket, trial)
Fair scheduling within iteration by completion percentage. List of trials not used since all trials are tracked as state of scheduler. If iteration is occupied (ie, no trials to run), then look into next iteration. def choose_trial_to_run(self, trial_runner): """Fair scheduling within iteration by completion percentage. List of trials not used since all trials are tracked as state of scheduler. If iteration is occupied (ie, no trials to run), then look into next iteration. """ for hyperband in self._hyperbands: # band will have None entries if no resources # are to be allocated to that bracket. scrubbed = [b for b in hyperband if b is not None] for bracket in sorted( scrubbed, key=lambda b: b.completion_percentage()): for trial in bracket.current_trials(): if (trial.status == Trial.PENDING and trial_runner.has_resources(trial.resources)): return trial return None
This provides a progress notification for the algorithm. For each bracket, the algorithm will output a string as follows: Bracket(Max Size (n)=5, Milestone (r)=33, completed=14.6%): {PENDING: 2, RUNNING: 3, TERMINATED: 2} "Max Size" indicates the max number of pending/running experiments set according to the Hyperband algorithm. "Milestone" indicates the iterations a trial will run for before the next halving will occur. "Completed" indicates an approximate progress metric. Some brackets, like ones that are unfilled, will not reach 100%. def debug_string(self): """This provides a progress notification for the algorithm. For each bracket, the algorithm will output a string as follows: Bracket(Max Size (n)=5, Milestone (r)=33, completed=14.6%): {PENDING: 2, RUNNING: 3, TERMINATED: 2} "Max Size" indicates the max number of pending/running experiments set according to the Hyperband algorithm. "Milestone" indicates the iterations a trial will run for before the next halving will occur. "Completed" indicates an approximate progress metric. Some brackets, like ones that are unfilled, will not reach 100%. """ out = "Using HyperBand: " out += "num_stopped={} total_brackets={}".format( self._num_stopped, sum(len(band) for band in self._hyperbands)) for i, band in enumerate(self._hyperbands): out += "\nRound #{}:".format(i) for bracket in band: out += "\n {}".format(bracket) return out
Add trial to bracket assuming bracket is not filled. At a later iteration, a newly added trial will be given equal opportunity to catch up. def add_trial(self, trial): """Add trial to bracket assuming bracket is not filled. At a later iteration, a newly added trial will be given equal opportunity to catch up.""" assert not self.filled(), "Cannot add trial to filled bracket!" self._live_trials[trial] = None self._all_trials.append(trial)
Checks if all iterations have completed. TODO(rliaw): also check that `t.iterations == self._r` def cur_iter_done(self): """Checks if all iterations have completed. TODO(rliaw): also check that `t.iterations == self._r`""" return all( self._get_result_time(result) >= self._cumul_r for result in self._live_trials.values())
Update result for trial. Called after trial has finished an iteration - will decrement iteration count. TODO(rliaw): The other alternative is to keep the trials in and make sure they're not set as pending later. def update_trial_stats(self, trial, result): """Update result for trial. Called after trial has finished an iteration - will decrement iteration count. TODO(rliaw): The other alternative is to keep the trials in and make sure they're not set as pending later.""" assert trial in self._live_trials assert self._get_result_time(result) >= 0 delta = self._get_result_time(result) - \ self._get_result_time(self._live_trials[trial]) assert delta >= 0 self._completed_progress += delta self._live_trials[trial] = result
Cleans up bracket after bracket is completely finished. Lets the last trial continue to run until termination condition kicks in. def cleanup_full(self, trial_runner): """Cleans up bracket after bracket is completely finished. Lets the last trial continue to run until termination condition kicks in.""" for trial in self.current_trials(): if (trial.status == Trial.PAUSED): trial_runner.stop_trial(trial)
Read the client table. Args: redis_client: A client to the primary Redis shard. Returns: A list of information about the nodes in the cluster. def parse_client_table(redis_client): """Read the client table. Args: redis_client: A client to the primary Redis shard. Returns: A list of information about the nodes in the cluster. """ NIL_CLIENT_ID = ray.ObjectID.nil().binary() message = redis_client.execute_command("RAY.TABLE_LOOKUP", ray.gcs_utils.TablePrefix.CLIENT, "", NIL_CLIENT_ID) # Handle the case where no clients are returned. This should only # occur potentially immediately after the cluster is started. if message is None: return [] node_info = {} gcs_entry = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(message, 0) ordered_client_ids = [] # Since GCS entries are append-only, we override so that # only the latest entries are kept. for i in range(gcs_entry.EntriesLength()): client = (ray.gcs_utils.ClientTableData.GetRootAsClientTableData( gcs_entry.Entries(i), 0)) resources = { decode(client.ResourcesTotalLabel(i)): client.ResourcesTotalCapacity(i) for i in range(client.ResourcesTotalLabelLength()) } client_id = ray.utils.binary_to_hex(client.ClientId()) # If this client is being removed, then it must # have previously been inserted, and # it cannot have previously been removed. if not client.IsInsertion(): assert client_id in node_info, "Client removed not found!" assert node_info[client_id]["IsInsertion"], ( "Unexpected duplicate removal of client.") else: ordered_client_ids.append(client_id) node_info[client_id] = { "ClientID": client_id, "IsInsertion": client.IsInsertion(), "NodeManagerAddress": decode( client.NodeManagerAddress(), allow_none=True), "NodeManagerPort": client.NodeManagerPort(), "ObjectManagerPort": client.ObjectManagerPort(), "ObjectStoreSocketName": decode( client.ObjectStoreSocketName(), allow_none=True), "RayletSocketName": decode( client.RayletSocketName(), allow_none=True), "Resources": resources } # NOTE: We return the list comprehension below instead of simply doing # 'list(node_info.values())' in order to have the nodes appear in the order # that they joined the cluster. Python dictionaries do not preserve # insertion order. We could use an OrderedDict, but then we'd have to be # sure to only insert a given node a single time (clients that die appear # twice in the GCS log). return [node_info[client_id] for client_id in ordered_client_ids]
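A hypothetical usage sketch, assuming a cluster is already running and the driver's primary Redis client is reachable through ray.worker.global_worker (as in flush_redis_unsafe further below); the address is illustrative:

import ray

ray.init(redis_address="localhost:6379")  # illustrative address
nodes = parse_client_table(ray.worker.global_worker.redis_client)
for node in nodes:
    print(node["ClientID"], node["NodeManagerAddress"], node["Resources"])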
Initialize the GlobalState object by connecting to Redis. It's possible that certain keys in Redis may not have been fully populated yet. In this case, we will retry this method until they have been populated or we exceed a timeout. Args: redis_address: The Redis address to connect. redis_password: The password of the redis server. def _initialize_global_state(self, redis_address, redis_password=None, timeout=20): """Initialize the GlobalState object by connecting to Redis. It's possible that certain keys in Redis may not have been fully populated yet. In this case, we will retry this method until they have been populated or we exceed a timeout. Args: redis_address: The Redis address to connect. redis_password: The password of the redis server. """ self.redis_client = services.create_redis_client( redis_address, redis_password) start_time = time.time() num_redis_shards = None redis_shard_addresses = [] while time.time() - start_time < timeout: # Attempt to get the number of Redis shards. num_redis_shards = self.redis_client.get("NumRedisShards") if num_redis_shards is None: print("Waiting longer for NumRedisShards to be populated.") time.sleep(1) continue num_redis_shards = int(num_redis_shards) if num_redis_shards < 1: raise Exception("Expected at least one Redis shard, found " "{}.".format(num_redis_shards)) # Attempt to get all of the Redis shards. redis_shard_addresses = self.redis_client.lrange( "RedisShards", start=0, end=-1) if len(redis_shard_addresses) != num_redis_shards: print("Waiting longer for RedisShards to be populated.") time.sleep(1) continue # If we got here then we successfully got all of the information. break # Check to see if we timed out. if time.time() - start_time >= timeout: raise Exception("Timed out while attempting to initialize the " "global state. num_redis_shards = {}, " "redis_shard_addresses = {}".format( num_redis_shards, redis_shard_addresses)) # Get the rest of the information. self.redis_clients = [] for shard_address in redis_shard_addresses: self.redis_clients.append( services.create_redis_client(shard_address.decode(), redis_password))
Execute a Redis command on the appropriate Redis shard based on key. Args: key: The object ID or the task ID that the query is about. args: The command to run. Returns: The value returned by the Redis command. def _execute_command(self, key, *args): """Execute a Redis command on the appropriate Redis shard based on key. Args: key: The object ID or the task ID that the query is about. args: The command to run. Returns: The value returned by the Redis command. """ client = self.redis_clients[key.redis_shard_hash() % len( self.redis_clients)] return client.execute_command(*args)
Execute the KEYS command on all Redis shards. Args: pattern: The KEYS pattern to query. Returns: The concatenated list of results from all shards. def _keys(self, pattern): """Execute the KEYS command on all Redis shards. Args: pattern: The KEYS pattern to query. Returns: The concatenated list of results from all shards. """ result = [] for client in self.redis_clients: result.extend(list(client.scan_iter(match=pattern))) return result
Fetch and parse the object table information for a single object ID. Args: object_id: An object ID to get information about. Returns: A dictionary with information about the object ID in question. def _object_table(self, object_id): """Fetch and parse the object table information for a single object ID. Args: object_id: An object ID to get information about. Returns: A dictionary with information about the object ID in question. """ # Allow the argument to be either an ObjectID or a hex string. if not isinstance(object_id, ray.ObjectID): object_id = ray.ObjectID(hex_to_binary(object_id)) # Return information about a single object ID. message = self._execute_command(object_id, "RAY.TABLE_LOOKUP", ray.gcs_utils.TablePrefix.OBJECT, "", object_id.binary()) if message is None: return {} gcs_entry = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry( message, 0) assert gcs_entry.EntriesLength() > 0 entry = ray.gcs_utils.ObjectTableData.GetRootAsObjectTableData( gcs_entry.Entries(0), 0) object_info = { "DataSize": entry.ObjectSize(), "Manager": entry.Manager(), } return object_info
Fetch and parse the object table info for one or more object IDs. Args: object_id: An object ID to fetch information about. If this is None, then the entire object table is fetched. Returns: Information from the object table. def object_table(self, object_id=None): """Fetch and parse the object table info for one or more object IDs. Args: object_id: An object ID to fetch information about. If this is None, then the entire object table is fetched. Returns: Information from the object table. """ self._check_connected() if object_id is not None: # Return information about a single object ID. return self._object_table(object_id) else: # Return the entire object table. object_keys = self._keys(ray.gcs_utils.TablePrefix_OBJECT_string + "*") object_ids_binary = { key[len(ray.gcs_utils.TablePrefix_OBJECT_string):] for key in object_keys } results = {} for object_id_binary in object_ids_binary: results[binary_to_object_id(object_id_binary)] = ( self._object_table(binary_to_object_id(object_id_binary))) return results
Fetch and parse the task table information for a single task ID. Args: task_id: A task ID to get information about. Returns: A dictionary with information about the task ID in question. def _task_table(self, task_id): """Fetch and parse the task table information for a single task ID. Args: task_id: A task ID to get information about. Returns: A dictionary with information about the task ID in question. """ assert isinstance(task_id, ray.TaskID) message = self._execute_command(task_id, "RAY.TABLE_LOOKUP", ray.gcs_utils.TablePrefix.RAYLET_TASK, "", task_id.binary()) if message is None: return {} gcs_entries = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry( message, 0) assert gcs_entries.EntriesLength() == 1 task_table_message = ray.gcs_utils.Task.GetRootAsTask( gcs_entries.Entries(0), 0) execution_spec = task_table_message.TaskExecutionSpec() task_spec = task_table_message.TaskSpecification() task = ray._raylet.Task.from_string(task_spec) function_descriptor_list = task.function_descriptor_list() function_descriptor = FunctionDescriptor.from_bytes_list( function_descriptor_list) task_spec_info = { "DriverID": task.driver_id().hex(), "TaskID": task.task_id().hex(), "ParentTaskID": task.parent_task_id().hex(), "ParentCounter": task.parent_counter(), "ActorID": (task.actor_id().hex()), "ActorCreationID": task.actor_creation_id().hex(), "ActorCreationDummyObjectID": ( task.actor_creation_dummy_object_id().hex()), "ActorCounter": task.actor_counter(), "Args": task.arguments(), "ReturnObjectIDs": task.returns(), "RequiredResources": task.required_resources(), "FunctionID": function_descriptor.function_id.hex(), "FunctionHash": binary_to_hex(function_descriptor.function_hash), "ModuleName": function_descriptor.module_name, "ClassName": function_descriptor.class_name, "FunctionName": function_descriptor.function_name, } return { "ExecutionSpec": { "Dependencies": [ execution_spec.Dependencies(i) for i in range(execution_spec.DependenciesLength()) ], "LastTimestamp": execution_spec.LastTimestamp(), "NumForwards": execution_spec.NumForwards() }, "TaskSpec": task_spec_info }
Fetch and parse the task table information for one or more task IDs. Args: task_id: A hex string of the task ID to fetch information about. If this is None, then the task object table is fetched. Returns: Information from the task table. def task_table(self, task_id=None): """Fetch and parse the task table information for one or more task IDs. Args: task_id: A hex string of the task ID to fetch information about. If this is None, then the task object table is fetched. Returns: Information from the task table. """ self._check_connected() if task_id is not None: task_id = ray.TaskID(hex_to_binary(task_id)) return self._task_table(task_id) else: task_table_keys = self._keys( ray.gcs_utils.TablePrefix_RAYLET_TASK_string + "*") task_ids_binary = [ key[len(ray.gcs_utils.TablePrefix_RAYLET_TASK_string):] for key in task_table_keys ] results = {} for task_id_binary in task_ids_binary: results[binary_to_hex(task_id_binary)] = self._task_table( ray.TaskID(task_id_binary)) return results
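A hypothetical usage sketch, assuming this method is reachable through the global state object (ray.global_state in this version of Ray):

tasks = ray.global_state.task_table()
for task_id_hex, info in tasks.items():
    print(task_id_hex, info["TaskSpec"]["FunctionName"],
          info["ExecutionSpec"]["NumForwards"])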
Fetch and parse the function table. Returns: A dictionary that maps function IDs to information about the function. def function_table(self, function_id=None): """Fetch and parse the function table. Returns: A dictionary that maps function IDs to information about the function. """ self._check_connected() function_table_keys = self.redis_client.keys( ray.gcs_utils.FUNCTION_PREFIX + "*") results = {} for key in function_table_keys: info = self.redis_client.hgetall(key) function_info_parsed = { "DriverID": binary_to_hex(info[b"driver_id"]), "Module": decode(info[b"module"]), "Name": decode(info[b"name"]) } results[binary_to_hex(info[b"function_id"])] = function_info_parsed return results
Get the profile events for a given batch of profile events. Args: batch_id: An identifier for a batch of profile events. Returns: A list of the profile events for the specified batch. def _profile_table(self, batch_id): """Get the profile events for a given batch of profile events. Args: batch_id: An identifier for a batch of profile events. Returns: A list of the profile events for the specified batch. """ # TODO(rkn): This method should support limiting the number of log # events and should also support returning a window of events. message = self._execute_command(batch_id, "RAY.TABLE_LOOKUP", ray.gcs_utils.TablePrefix.PROFILE, "", batch_id.binary()) if message is None: return [] gcs_entries = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry( message, 0) profile_events = [] for i in range(gcs_entries.EntriesLength()): profile_table_message = ( ray.gcs_utils.ProfileTableData.GetRootAsProfileTableData( gcs_entries.Entries(i), 0)) component_type = decode(profile_table_message.ComponentType()) component_id = binary_to_hex(profile_table_message.ComponentId()) node_ip_address = decode( profile_table_message.NodeIpAddress(), allow_none=True) for j in range(profile_table_message.ProfileEventsLength()): profile_event_message = profile_table_message.ProfileEvents(j) profile_event = { "event_type": decode(profile_event_message.EventType()), "component_id": component_id, "node_ip_address": node_ip_address, "component_type": component_type, "start_time": profile_event_message.StartTime(), "end_time": profile_event_message.EndTime(), "extra_data": json.loads( decode(profile_event_message.ExtraData())), } profile_events.append(profile_event) return profile_events
Return a list of profiling events that can be viewed as a timeline. To view this information as a timeline, simply dump it as a json file by passing in "filename" or using json.dump, and then go to chrome://tracing in the Chrome web browser and load the dumped file. Make sure to enable "Flow events" in the "View Options" menu. Args: filename: If a filename is provided, the timeline is dumped to that file. Returns: If filename is not provided, this returns a list of profiling events. Each profile event is a dictionary.

def chrome_tracing_dump(self, filename=None):
    """Return a list of profiling events that can be viewed as a timeline.

    To view this information as a timeline, simply dump it as a json file
    by passing in "filename" or using json.dump, and then go to
    chrome://tracing in the Chrome web browser and load the dumped file.
    Make sure to enable "Flow events" in the "View Options" menu.

    Args:
        filename: If a filename is provided, the timeline is dumped to that
            file.

    Returns:
        If filename is not provided, this returns a list of profiling
            events. Each profile event is a dictionary.
    """
    # TODO(rkn): Support including the task specification data in the
    # timeline.
    # TODO(rkn): This should support viewing just a window of time or a
    # limited number of events.
    profile_table = self.profile_table()
    all_events = []

    for component_id_hex, component_events in profile_table.items():
        # Only consider workers and drivers.
        component_type = component_events[0]["component_type"]
        if component_type not in ["worker", "driver"]:
            continue

        for event in component_events:
            new_event = {
                # The category of the event.
                "cat": event["event_type"],
                # The string displayed on the event.
                "name": event["event_type"],
                # The identifier for the group of rows that the event
                # appears in.
                "pid": event["node_ip_address"],
                # The identifier for the row that the event appears in.
                "tid": event["component_type"] + ":" +
                       event["component_id"],
                # The start time in microseconds.
                "ts": self._seconds_to_microseconds(event["start_time"]),
                # The duration in microseconds.
                "dur": self._seconds_to_microseconds(
                    event["end_time"] - event["start_time"]),
                # What is this?
                "ph": "X",
                # This is the name of the color to display the box in.
                "cname": self._default_color_mapping[event["event_type"]],
                # The extra user-defined data.
                "args": event["extra_data"],
            }

            # Modify the json with the additional user-defined extra data.
            # This can be used to add fields or override existing fields.
            if "cname" in event["extra_data"]:
                new_event["cname"] = event["extra_data"]["cname"]
            if "name" in event["extra_data"]:
                new_event["name"] = event["extra_data"]["name"]

            all_events.append(new_event)

    if filename is not None:
        with open(filename, "w") as outfile:
            json.dump(all_events, outfile)
    else:
        return all_events
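A hypothetical usage sketch, assuming the method is exposed on ray.global_state; the file path is illustrative:

# Dump the timeline to a file...
ray.global_state.chrome_tracing_dump(filename="/tmp/ray_timeline.json")
# ...then open chrome://tracing in Chrome, click "Load", select the file,
# and enable "Flow events" under "View Options".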
Return a list of transfer events that can be viewed as a timeline. To view this information as a timeline, simply dump it as a json file by passing in "filename" or using json.dump, and then go to chrome://tracing in the Chrome web browser and load the dumped file. Make sure to enable "Flow events" in the "View Options" menu. Args: filename: If a filename is provided, the timeline is dumped to that file. Returns: If filename is not provided, this returns a list of profiling events. Each profile event is a dictionary.

def chrome_tracing_object_transfer_dump(self, filename=None):
    """Return a list of transfer events that can be viewed as a timeline.

    To view this information as a timeline, simply dump it as a json file
    by passing in "filename" or using json.dump, and then go to
    chrome://tracing in the Chrome web browser and load the dumped file.
    Make sure to enable "Flow events" in the "View Options" menu.

    Args:
        filename: If a filename is provided, the timeline is dumped to that
            file.

    Returns:
        If filename is not provided, this returns a list of profiling
            events. Each profile event is a dictionary.
    """
    client_id_to_address = {}
    for client_info in ray.global_state.client_table():
        client_id_to_address[client_info["ClientID"]] = "{}:{}".format(
            client_info["NodeManagerAddress"],
            client_info["ObjectManagerPort"])

    all_events = []

    for key, items in self.profile_table().items():
        # Only consider object manager events.
        if items[0]["component_type"] != "object_manager":
            continue

        for event in items:
            if event["event_type"] == "transfer_send":
                object_id, remote_client_id, _, _ = event["extra_data"]
            elif event["event_type"] == "transfer_receive":
                object_id, remote_client_id, _, _ = event["extra_data"]
            elif event["event_type"] == "receive_pull_request":
                object_id, remote_client_id = event["extra_data"]
            else:
                assert False, "This should be unreachable."
            # Choose a color by reading the first couple of hex digits of
            # the object ID as an integer and turning that into a color.
            object_id_int = int(object_id[:2], 16)
            color = self._chrome_tracing_colors[object_id_int % len(
                self._chrome_tracing_colors)]

            new_event = {
                # The category of the event.
                "cat": event["event_type"],
                # The string displayed on the event.
                "name": event["event_type"],
                # The identifier for the group of rows that the event
                # appears in.
                "pid": client_id_to_address[key],
                # The identifier for the row that the event appears in.
                "tid": client_id_to_address[remote_client_id],
                # The start time in microseconds.
                "ts": self._seconds_to_microseconds(event["start_time"]),
                # The duration in microseconds.
                "dur": self._seconds_to_microseconds(
                    event["end_time"] - event["start_time"]),
                # What is this?
                "ph": "X",
                # This is the name of the color to display the box in.
                "cname": color,
                # The extra user-defined data.
                "args": event["extra_data"],
            }
            all_events.append(new_event)

            # Add another box with a color indicating whether it was a send
            # or a receive event.
            if event["event_type"] == "transfer_send":
                additional_event = new_event.copy()
                additional_event["cname"] = "black"
                all_events.append(additional_event)
            elif event["event_type"] == "transfer_receive":
                additional_event = new_event.copy()
                additional_event["cname"] = "grey"
                all_events.append(additional_event)
            else:
                pass

    if filename is not None:
        with open(filename, "w") as outfile:
            json.dump(all_events, outfile)
    else:
        return all_events
Get a dictionary mapping worker ID to worker information. def workers(self): """Get a dictionary mapping worker ID to worker information.""" worker_keys = self.redis_client.keys("Worker*") workers_data = {} for worker_key in worker_keys: worker_info = self.redis_client.hgetall(worker_key) worker_id = binary_to_hex(worker_key[len("Workers:"):]) workers_data[worker_id] = { "node_ip_address": decode(worker_info[b"node_ip_address"]), "plasma_store_socket": decode( worker_info[b"plasma_store_socket"]) } if b"stderr_file" in worker_info: workers_data[worker_id]["stderr_file"] = decode( worker_info[b"stderr_file"]) if b"stdout_file" in worker_info: workers_data[worker_id]["stdout_file"] = decode( worker_info[b"stdout_file"]) return workers_data
Get the current total cluster resources. Note that this information can grow stale as nodes are added to or removed from the cluster. Returns: A dictionary mapping resource name to the total quantity of that resource in the cluster. def cluster_resources(self): """Get the current total cluster resources. Note that this information can grow stale as nodes are added to or removed from the cluster. Returns: A dictionary mapping resource name to the total quantity of that resource in the cluster. """ resources = defaultdict(int) clients = self.client_table() for client in clients: # Only count resources from live clients. if client["IsInsertion"]: for key, value in client["Resources"].items(): resources[key] += value return dict(resources)
Get the current available cluster resources. This is different from `cluster_resources` in that this will return idle (available) resources rather than total resources. Note that this information can grow stale as tasks start and finish. Returns: A dictionary mapping resource name to the currently available quantity of that resource in the cluster.

def available_resources(self):
    """Get the current available cluster resources.

    This is different from `cluster_resources` in that this will return
    idle (available) resources rather than total resources.

    Note that this information can grow stale as tasks start and finish.

    Returns:
        A dictionary mapping resource name to the currently available
            quantity of that resource in the cluster.
    """
    available_resources_by_id = {}

    subscribe_clients = [
        redis_client.pubsub(ignore_subscribe_messages=True)
        for redis_client in self.redis_clients
    ]
    for subscribe_client in subscribe_clients:
        subscribe_client.subscribe(ray.gcs_utils.XRAY_HEARTBEAT_CHANNEL)

    client_ids = self._live_client_ids()

    while set(available_resources_by_id.keys()) != client_ids:
        for subscribe_client in subscribe_clients:
            # Parse client message
            raw_message = subscribe_client.get_message()
            if (raw_message is None or raw_message["channel"] !=
                    ray.gcs_utils.XRAY_HEARTBEAT_CHANNEL):
                continue
            data = raw_message["data"]
            gcs_entries = (
                ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(data, 0))
            heartbeat_data = gcs_entries.Entries(0)
            message = (ray.gcs_utils.HeartbeatTableData.
                       GetRootAsHeartbeatTableData(heartbeat_data, 0))

            # Calculate available resources for this client
            num_resources = message.ResourcesAvailableLabelLength()
            dynamic_resources = {}
            for i in range(num_resources):
                resource_id = decode(message.ResourcesAvailableLabel(i))
                dynamic_resources[resource_id] = (
                    message.ResourcesAvailableCapacity(i))

            # Update available resources for this client
            client_id = ray.utils.binary_to_hex(message.ClientId())
            available_resources_by_id[client_id] = dynamic_resources

        # Update clients in cluster
        client_ids = self._live_client_ids()

        # Remove disconnected clients. Iterate over a copy of the keys so
        # the dictionary is not mutated while it is being iterated.
        for client_id in list(available_resources_by_id.keys()):
            if client_id not in client_ids:
                del available_resources_by_id[client_id]

    # Calculate total available resources
    total_available_resources = defaultdict(int)
    for available_resources in available_resources_by_id.values():
        for resource_id, num_available in available_resources.items():
            total_available_resources[resource_id] += num_available

    # Close the pubsub clients to avoid leaking file descriptors.
    for subscribe_client in subscribe_clients:
        subscribe_client.close()

    return dict(total_available_resources)
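A hypothetical usage sketch comparing the two resource views, assuming both methods are exposed on ray.global_state:

total = ray.global_state.cluster_resources()
idle = ray.global_state.available_resources()
for name, quantity in total.items():
    print("{}: {} of {} available".format(name, idle.get(name, 0), quantity))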
Get the error messages for a specific driver. Args: driver_id: The ID of the driver to get the errors for. Returns: A list of the error messages for this driver. def _error_messages(self, driver_id): """Get the error messages for a specific driver. Args: driver_id: The ID of the driver to get the errors for. Returns: A list of the error messages for this driver. """ assert isinstance(driver_id, ray.DriverID) message = self.redis_client.execute_command( "RAY.TABLE_LOOKUP", ray.gcs_utils.TablePrefix.ERROR_INFO, "", driver_id.binary()) # If there are no errors, return early. if message is None: return [] gcs_entries = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry( message, 0) error_messages = [] for i in range(gcs_entries.EntriesLength()): error_data = ray.gcs_utils.ErrorTableData.GetRootAsErrorTableData( gcs_entries.Entries(i), 0) assert driver_id.binary() == error_data.DriverId() error_message = { "type": decode(error_data.Type()), "message": decode(error_data.ErrorMessage()), "timestamp": error_data.Timestamp(), } error_messages.append(error_message) return error_messages
Get the error messages for all drivers or a specific driver. Args: driver_id: The specific driver to get the errors for. If this is None, then this method retrieves the errors for all drivers. Returns: A dictionary mapping driver ID to a list of the error messages for that driver. def error_messages(self, driver_id=None): """Get the error messages for all drivers or a specific driver. Args: driver_id: The specific driver to get the errors for. If this is None, then this method retrieves the errors for all drivers. Returns: A dictionary mapping driver ID to a list of the error messages for that driver. """ if driver_id is not None: assert isinstance(driver_id, ray.DriverID) return self._error_messages(driver_id) error_table_keys = self.redis_client.keys( ray.gcs_utils.TablePrefix_ERROR_INFO_string + "*") driver_ids = [ key[len(ray.gcs_utils.TablePrefix_ERROR_INFO_string):] for key in error_table_keys ] return { binary_to_hex(driver_id): self._error_messages( ray.DriverID(driver_id)) for driver_id in driver_ids }
Get checkpoint info for the given actor id. Args: actor_id: Actor's ID. Returns: A dictionary with information about the actor's checkpoint IDs and their timestamps. def actor_checkpoint_info(self, actor_id): """Get checkpoint info for the given actor id. Args: actor_id: Actor's ID. Returns: A dictionary with information about the actor's checkpoint IDs and their timestamps. """ self._check_connected() message = self._execute_command( actor_id, "RAY.TABLE_LOOKUP", ray.gcs_utils.TablePrefix.ACTOR_CHECKPOINT_ID, "", actor_id.binary(), ) if message is None: return None gcs_entry = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry( message, 0) entry = ( ray.gcs_utils.ActorCheckpointIdData.GetRootAsActorCheckpointIdData( gcs_entry.Entries(0), 0)) checkpoint_ids_str = entry.CheckpointIds() num_checkpoints = len(checkpoint_ids_str) // ID_SIZE assert len(checkpoint_ids_str) % ID_SIZE == 0 checkpoint_ids = [ ray.ActorCheckpointID( checkpoint_ids_str[(i * ID_SIZE):((i + 1) * ID_SIZE)]) for i in range(num_checkpoints) ] return { "ActorID": ray.utils.binary_to_hex(entry.ActorId()), "CheckpointIds": checkpoint_ids, "Timestamps": [ entry.Timestamps(i) for i in range(num_checkpoints) ], }
Returns the total length of all of the flattened variables. Returns: The length of all flattened variables concatenated. def get_flat_size(self): """Returns the total length of all of the flattened variables. Returns: The length of all flattened variables concatenated. """ return sum( np.prod(v.get_shape().as_list()) for v in self.variables.values())
Gets the weights and returns them as a flat array. Returns: 1D Array containing the flattened weights. def get_flat(self): """Gets the weights and returns them as a flat array. Returns: 1D Array containing the flattened weights. """ self._check_sess() return np.concatenate([ v.eval(session=self.sess).flatten() for v in self.variables.values() ])
Sets the weights to new_weights, converting from a flat array. Note: You can only set all weights in the network using this function, i.e., the length of the array must match get_flat_size. Args: new_weights (np.ndarray): Flat array containing weights. def set_flat(self, new_weights): """Sets the weights to new_weights, converting from a flat array. Note: You can only set all weights in the network using this function, i.e., the length of the array must match get_flat_size. Args: new_weights (np.ndarray): Flat array containing weights. """ self._check_sess() shapes = [v.get_shape().as_list() for v in self.variables.values()] arrays = unflatten(new_weights, shapes) placeholders = [ self.placeholders[k] for k, v in self.variables.items() ] self.sess.run( list(self.assignment_nodes.values()), feed_dict=dict(zip(placeholders, arrays)))
Returns a dictionary containing the weights of the network. Returns: Dictionary mapping variable names to their weights. def get_weights(self): """Returns a dictionary containing the weights of the network. Returns: Dictionary mapping variable names to their weights. """ self._check_sess() return { k: v.eval(session=self.sess) for k, v in self.variables.items() }
Sets the weights to new_weights. Note: Can set subsets of variables as well, by only passing in the variables you want to be set. Args: new_weights (Dict): Dictionary mapping variable names to their weights. def set_weights(self, new_weights): """Sets the weights to new_weights. Note: Can set subsets of variables as well, by only passing in the variables you want to be set. Args: new_weights (Dict): Dictionary mapping variable names to their weights. """ self._check_sess() assign_list = [ self.assignment_nodes[name] for name in new_weights.keys() if name in self.assignment_nodes ] assert assign_list, ("No variables in the input matched those in the " "network. Possible cause: Two networks were " "defined in the same TensorFlow graph. To fix " "this, place each network definition in its own " "tf.Graph.") self.sess.run( assign_list, feed_dict={ self.placeholders[name]: value for (name, value) in new_weights.items() if name in self.placeholders })
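A hypothetical usage sketch of the flat and dict-based get/set round trips, assuming these methods belong to Ray's TensorFlowVariables-style helper (the import path below is an assumption) and TensorFlow 1.x:

import numpy as np
import tensorflow as tf
from ray.experimental import TensorFlowVariables  # assumed import path

x = tf.placeholder(tf.float32, [None, 4])
w = tf.Variable(tf.zeros([4, 2]))
loss = tf.reduce_sum(tf.matmul(x, w))
sess = tf.Session()
sess.run(tf.global_variables_initializer())

variables = TensorFlowVariables(loss, sess)
flat = variables.get_flat()              # 1D array, length == get_flat_size()
variables.set_flat(np.ones_like(flat))   # write every weight back at once
weights = variables.get_weights()        # dict mapping name -> array
variables.set_weights(weights)           # a partial dict is also accepted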
Construct a serialized ErrorTableData object. Args: driver_id: The ID of the driver that the error should go to. If this is nil, then the error will go to all drivers. error_type: The type of the error. message: The error message. timestamp: The time of the error. Returns: The serialized object. def construct_error_message(driver_id, error_type, message, timestamp): """Construct a serialized ErrorTableData object. Args: driver_id: The ID of the driver that the error should go to. If this is nil, then the error will go to all drivers. error_type: The type of the error. message: The error message. timestamp: The time of the error. Returns: The serialized object. """ builder = flatbuffers.Builder(0) driver_offset = builder.CreateString(driver_id.binary()) error_type_offset = builder.CreateString(error_type) message_offset = builder.CreateString(message) ray.core.generated.ErrorTableData.ErrorTableDataStart(builder) ray.core.generated.ErrorTableData.ErrorTableDataAddDriverId( builder, driver_offset) ray.core.generated.ErrorTableData.ErrorTableDataAddType( builder, error_type_offset) ray.core.generated.ErrorTableData.ErrorTableDataAddErrorMessage( builder, message_offset) ray.core.generated.ErrorTableData.ErrorTableDataAddTimestamp( builder, timestamp) error_data_offset = ray.core.generated.ErrorTableData.ErrorTableDataEnd( builder) builder.Finish(error_data_offset) return bytes(builder.Output())
Initialize synchronously. def init(): """ Initialize synchronously. """ loop = asyncio.get_event_loop() if loop.is_running(): raise Exception("You must initialize the Ray async API by calling " "async_api.init() or async_api.as_future(obj) before " "the event loop starts.") else: asyncio.get_event_loop().run_until_complete(_async_init())
Manually shutdown the async API. Cancels all related tasks and all the socket transportation. def shutdown(): """Manually shutdown the async API. Cancels all related tasks and all the socket transportation. """ global handler, transport, protocol if handler is not None: handler.close() transport.close() handler = None transport = None protocol = None
This removes some non-critical state from the primary Redis shard. This removes the log files as well as the event log from Redis. This can be used to try to address out-of-memory errors caused by the accumulation of metadata in Redis. However, it will only partially address the issue as much of the data is in the task table (and object table), which are not flushed. Args: redis_client: optional, if not provided then ray.init() must have been called. def flush_redis_unsafe(redis_client=None): """This removes some non-critical state from the primary Redis shard. This removes the log files as well as the event log from Redis. This can be used to try to address out-of-memory errors caused by the accumulation of metadata in Redis. However, it will only partially address the issue as much of the data is in the task table (and object table), which are not flushed. Args: redis_client: optional, if not provided then ray.init() must have been called. """ if redis_client is None: ray.worker.global_worker.check_connected() redis_client = ray.worker.global_worker.redis_client # Delete the log files from the primary Redis shard. keys = redis_client.keys("LOGFILE:*") if len(keys) > 0: num_deleted = redis_client.delete(*keys) else: num_deleted = 0 print("Deleted {} log files from Redis.".format(num_deleted)) # Delete the event log from the primary Redis shard. keys = redis_client.keys("event_log:*") if len(keys) > 0: num_deleted = redis_client.delete(*keys) else: num_deleted = 0 print("Deleted {} event logs from Redis.".format(num_deleted))
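A hypothetical usage sketch, assuming a cluster is already running at the given (illustrative) address:

import ray

ray.init(redis_address="localhost:6379")  # illustrative address
flush_redis_unsafe()  # drops only the LOGFILE:* and event_log:* keys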
This removes some critical state from the Redis shards. In a multitenant environment, this will flush metadata for all jobs, which may be undesirable. This removes all of the object and task metadata. This can be used to try to address out-of-memory errors caused by the accumulation of metadata in Redis. However, after running this command, fault tolerance will most likely not work. def flush_task_and_object_metadata_unsafe(): """This removes some critical state from the Redis shards. In a multitenant environment, this will flush metadata for all jobs, which may be undesirable. This removes all of the object and task metadata. This can be used to try to address out-of-memory errors caused by the accumulation of metadata in Redis. However, after running this command, fault tolerance will most likely not work. """ ray.worker.global_worker.check_connected() def flush_shard(redis_client): # Flush the task table. Note that this also flushes the driver tasks # which may be undesirable. num_task_keys_deleted = 0 for key in redis_client.scan_iter(match=TASK_PREFIX + b"*"): num_task_keys_deleted += redis_client.delete(key) print("Deleted {} task keys from Redis.".format(num_task_keys_deleted)) # Flush the object information. num_object_keys_deleted = 0 for key in redis_client.scan_iter(match=OBJECT_INFO_PREFIX + b"*"): num_object_keys_deleted += redis_client.delete(key) print("Deleted {} object info keys from Redis.".format( num_object_keys_deleted)) # Flush the object locations. num_object_location_keys_deleted = 0 for key in redis_client.scan_iter(match=OBJECT_LOCATION_PREFIX + b"*"): num_object_location_keys_deleted += redis_client.delete(key) print("Deleted {} object location keys from Redis.".format( num_object_location_keys_deleted)) # Loop over the shards and flush all of them. for redis_client in ray.worker.global_state.redis_clients: flush_shard(redis_client)
This removes some critical state from the Redis shards. In a multitenant environment, this will flush metadata for all jobs, which may be undesirable. This removes all of the metadata for finished tasks. This can be used to try to address out-of-memory errors caused by the accumulation of metadata in Redis. However, after running this command, fault tolerance will most likely not work. def flush_finished_tasks_unsafe(): """This removes some critical state from the Redis shards. In a multitenant environment, this will flush metadata for all jobs, which may be undesirable. This removes all of the metadata for finished tasks. This can be used to try to address out-of-memory errors caused by the accumulation of metadata in Redis. However, after running this command, fault tolerance will most likely not work. """ ray.worker.global_worker.check_connected() for shard_index in range(len(ray.global_state.redis_clients)): _flush_finished_tasks_unsafe_shard(shard_index)
This removes some critical state from the Redis shards. In a multitenant environment, this will flush metadata for all jobs, which may be undesirable. This removes all of the metadata for objects that have been evicted. This can be used to try to address out-of-memory errors caused by the accumulation of metadata in Redis. However, after running this command, fault tolerance will most likely not work. def flush_evicted_objects_unsafe(): """This removes some critical state from the Redis shards. In a multitenant environment, this will flush metadata for all jobs, which may be undesirable. This removes all of the metadata for objects that have been evicted. This can be used to try to address out-of-memory errors caused by the accumulation of metadata in Redis. However, after running this command, fault tolerance will most likely not work. """ ray.worker.global_worker.check_connected() for shard_index in range(len(ray.global_state.redis_clients)): _flush_evicted_objects_unsafe_shard(shard_index)
Creates a copy of self using existing input placeholders. def copy(self, existing_inputs): """Creates a copy of self using existing input placeholders.""" return PPOPolicyGraph( self.observation_space, self.action_space, self.config, existing_inputs=existing_inputs)
deepnn builds the graph for a deep net for classifying digits. Args: x: an input tensor with the dimensions (N_examples, 784), where 784 is the number of pixels in a standard MNIST image. Returns: A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with values equal to the logits of classifying the digit into one of 10 classes (the digits 0-9). keep_prob is a scalar placeholder for the probability of dropout. def deepnn(x): """deepnn builds the graph for a deep net for classifying digits. Args: x: an input tensor with the dimensions (N_examples, 784), where 784 is the number of pixels in a standard MNIST image. Returns: A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with values equal to the logits of classifying the digit into one of 10 classes (the digits 0-9). keep_prob is a scalar placeholder for the probability of dropout. """ # Reshape to use within a convolutional neural net. # Last dimension is for "features" - there is only one here, since images # are grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc. with tf.name_scope("reshape"): x_image = tf.reshape(x, [-1, 28, 28, 1]) # First convolutional layer - maps one grayscale image to 32 feature maps. with tf.name_scope("conv1"): W_conv1 = weight_variable([5, 5, 1, 32]) b_conv1 = bias_variable([32]) h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1) # Pooling layer - downsamples by 2X. with tf.name_scope("pool1"): h_pool1 = max_pool_2x2(h_conv1) # Second convolutional layer -- maps 32 feature maps to 64. with tf.name_scope("conv2"): W_conv2 = weight_variable([5, 5, 32, 64]) b_conv2 = bias_variable([64]) h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) # Second pooling layer. with tf.name_scope("pool2"): h_pool2 = max_pool_2x2(h_conv2) # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image # is down to 7x7x64 feature maps -- maps this to 1024 features. with tf.name_scope("fc1"): W_fc1 = weight_variable([7 * 7 * 64, 1024]) b_fc1 = bias_variable([1024]) h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64]) h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) # Dropout - controls the complexity of the model, prevents co-adaptation of # features. with tf.name_scope("dropout"): keep_prob = tf.placeholder(tf.float32) h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) # Map the 1024 features to 10 classes, one for each digit with tf.name_scope("fc2"): W_fc2 = weight_variable([1024, 10]) b_fc2 = bias_variable([10]) y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2 return y_conv, keep_prob
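A minimal sketch of how deepnn is typically wired into a training graph; the loss and optimizer below are illustrative and not part of the function itself:

x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.int64, [None])
y_conv, keep_prob = deepnn(x)
cross_entropy = tf.reduce_mean(
    tf.losses.sparse_softmax_cross_entropy(labels=y_, logits=y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# At training time feed keep_prob < 1.0; at evaluation time feed 1.0.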
Get signature parameters. Support Cython functions by grabbing relevant attributes from the Cython function and attaching them to a no-op function. This is somewhat brittle, since funcsigs may change, but given that funcsigs is written to a PEP, we hope it is relatively stable. Future versions of Python may allow overloading the inspect 'isfunction' and 'ismethod' functions / create ABC for Python functions. Until then, it appears that Cython won't do anything about compatibility with the inspect module. Args: func: The function whose signature should be checked. Raises: TypeError: A type error if the signature is not supported.

def get_signature_params(func):
    """Get signature parameters.

    Support Cython functions by grabbing relevant attributes from the
    Cython function and attaching them to a no-op function. This is
    somewhat brittle, since funcsigs may change, but given that funcsigs
    is written to a PEP, we hope it is relatively stable. Future versions
    of Python may allow overloading the inspect 'isfunction' and
    'ismethod' functions / create ABC for Python functions. Until then, it
    appears that Cython won't do anything about compatibility with the
    inspect module.

    Args:
        func: The function whose signature should be checked.

    Raises:
        TypeError: A type error if the signature is not supported.
    """
    # The first condition is for Cython functions, the latter for Cython
    # instance methods.
    if is_cython(func):
        attrs = [
            "__code__", "__annotations__", "__defaults__", "__kwdefaults__"
        ]

        if all(hasattr(func, attr) for attr in attrs):
            original_func = func

            def func():
                return

            for attr in attrs:
                setattr(func, attr, getattr(original_func, attr))
        else:
            raise TypeError("{!r} is not a Python function we can process"
                            .format(func))

    return list(funcsigs.signature(func).parameters.items())
Check if we support the signature of this function. We currently do not allow remote functions to have **kwargs. We also do not support keyword arguments in conjunction with a *args argument. Args: func: The function whose signature should be checked. warn: If this is true, a warning will be printed if the signature is not supported. If it is false, an exception will be raised if the signature is not supported. Raises: Exception: An exception is raised if the signature is not supported. def check_signature_supported(func, warn=False): """Check if we support the signature of this function. We currently do not allow remote functions to have **kwargs. We also do not support keyword arguments in conjunction with a *args argument. Args: func: The function whose signature should be checked. warn: If this is true, a warning will be printed if the signature is not supported. If it is false, an exception will be raised if the signature is not supported. Raises: Exception: An exception is raised if the signature is not supported. """ function_name = func.__name__ sig_params = get_signature_params(func) has_kwargs_param = False has_kwonly_param = False for keyword_name, parameter in sig_params: if parameter.kind == Parameter.VAR_KEYWORD: has_kwargs_param = True if parameter.kind == Parameter.KEYWORD_ONLY: has_kwonly_param = True if has_kwargs_param: message = ("The function {} has a **kwargs argument, which is " "currently not supported.".format(function_name)) if warn: logger.warning(message) else: raise Exception(message) if has_kwonly_param: message = ("The function {} has a keyword only argument " "(defined after * or *args), which is currently " "not supported.".format(function_name)) if warn: logger.warning(message) else: raise Exception(message)
Extract the function signature from the function. Args: func: The function whose signature should be extracted. ignore_first: True if the first argument should be ignored. This should be used when func is a method of a class. Returns: A function signature object, which includes the names of the keyword arguments as well as their default values. def extract_signature(func, ignore_first=False): """Extract the function signature from the function. Args: func: The function whose signature should be extracted. ignore_first: True if the first argument should be ignored. This should be used when func is a method of a class. Returns: A function signature object, which includes the names of the keyword arguments as well as their default values. """ sig_params = get_signature_params(func) if ignore_first: if len(sig_params) == 0: raise Exception("Methods must take a 'self' argument, but the " "method '{}' does not have one.".format( func.__name__)) sig_params = sig_params[1:] # Construct the argument default values and other argument information. arg_names = [] arg_defaults = [] arg_is_positionals = [] keyword_names = set() for arg_name, parameter in sig_params: arg_names.append(arg_name) arg_defaults.append(parameter.default) arg_is_positionals.append(parameter.kind == parameter.VAR_POSITIONAL) if parameter.kind == Parameter.POSITIONAL_OR_KEYWORD: # Note KEYWORD_ONLY arguments currently unsupported. keyword_names.add(arg_name) return FunctionSignature(arg_names, arg_defaults, arg_is_positionals, keyword_names, func.__name__)
Extend the arguments that were passed into a function. This extends the arguments that were passed into a function with the default arguments provided in the function definition. Args: function_signature: The function signature of the function being called. args: The non-keyword arguments passed into the function. kwargs: The keyword arguments passed into the function. Returns: An extended list of arguments to pass into the function. Raises: Exception: An exception may be raised if the function cannot be called with these arguments.

def extend_args(function_signature, args, kwargs):
    """Extend the arguments that were passed into a function.

    This extends the arguments that were passed into a function with the
    default arguments provided in the function definition.

    Args:
        function_signature: The function signature of the function being
            called.
        args: The non-keyword arguments passed into the function.
        kwargs: The keyword arguments passed into the function.

    Returns:
        An extended list of arguments to pass into the function.

    Raises:
        Exception: An exception may be raised if the function cannot be
            called with these arguments.
    """
    arg_names = function_signature.arg_names
    arg_defaults = function_signature.arg_defaults
    arg_is_positionals = function_signature.arg_is_positionals
    keyword_names = function_signature.keyword_names
    function_name = function_signature.function_name

    args = list(args)

    for keyword_name in kwargs:
        if keyword_name not in keyword_names:
            raise Exception("The name '{}' is not a valid keyword argument "
                            "for the function '{}'.".format(
                                keyword_name, function_name))

    # Fill in the remaining arguments.
    for skipped_name in arg_names[0:len(args)]:
        if skipped_name in kwargs:
            raise Exception("Positional and keyword value provided for the "
                            "argument '{}' for the function '{}'".format(
                                skipped_name, function_name))

    zipped_info = zip(arg_names, arg_defaults, arg_is_positionals)
    zipped_info = list(zipped_info)[len(args):]
    for keyword_name, default_value, is_positional in zipped_info:
        if keyword_name in kwargs:
            args.append(kwargs[keyword_name])
        else:
            if default_value != funcsigs._empty:
                args.append(default_value)
            else:
                # This means that there is a missing argument. Unless this
                # is the last argument and it is a *args argument in which
                # case it can be omitted.
                if not is_positional:
                    raise Exception("No value was provided for the argument "
                                    "'{}' for the function '{}'.".format(
                                        keyword_name, function_name))

    no_positionals = len(arg_is_positionals) == 0 or not arg_is_positionals[-1]
    too_many_arguments = len(args) > len(arg_names) and no_positionals
    if too_many_arguments:
        raise Exception("Too many arguments were passed to the function '{}'"
                        .format(function_name))
    return args
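A small worked example of extract_signature and extend_args together, using a hypothetical function f; the expected results are shown in comments:

def f(a, b=2, *args):
    return a, b, args

sig = extract_signature(f)
print(extend_args(sig, (1,), {}))         # [1, 2]      default for b filled in
print(extend_args(sig, (1,), {"b": 5}))   # [1, 5]      keyword value appended
print(extend_args(sig, (1, 5, 7), {}))    # [1, 5, 7]   extras absorbed by *args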
Poll for cloud resource manager operation until finished. def wait_for_crm_operation(operation): """Poll for cloud resource manager operation until finished.""" logger.info("wait_for_crm_operation: " "Waiting for operation {} to finish...".format(operation)) for _ in range(MAX_POLLS): result = crm.operations().get(name=operation["name"]).execute() if "error" in result: raise Exception(result["error"]) if "done" in result and result["done"]: logger.info("wait_for_crm_operation: Operation done.") break time.sleep(POLL_INTERVAL) return result
Poll for global compute operation until finished. def wait_for_compute_global_operation(project_name, operation): """Poll for global compute operation until finished.""" logger.info("wait_for_compute_global_operation: " "Waiting for operation {} to finish...".format( operation["name"])) for _ in range(MAX_POLLS): result = compute.globalOperations().get( project=project_name, operation=operation["name"], ).execute() if "error" in result: raise Exception(result["error"]) if result["status"] == "DONE": logger.info("wait_for_compute_global_operation: " "Operation done.") break time.sleep(POLL_INTERVAL) return result
Returns the ith default gcp_key_pair_name.

def key_pair_name(i, region, project_id, ssh_user):
    """Returns the ith default gcp_key_pair_name."""
    key_name = "{}_gcp_{}_{}_{}_{}".format(RAY, region, project_id, ssh_user,
                                           i)
    return key_name
Returns public and private key paths for a given key_name. def key_pair_paths(key_name): """Returns public and private key paths for a given key_name.""" public_key_path = os.path.expanduser("~/.ssh/{}.pub".format(key_name)) private_key_path = os.path.expanduser("~/.ssh/{}.pem".format(key_name)) return public_key_path, private_key_path
Create public and private ssh-keys. def generate_rsa_key_pair(): """Create public and private ssh-keys.""" key = rsa.generate_private_key( backend=default_backend(), public_exponent=65537, key_size=2048) public_key = key.public_key().public_bytes( serialization.Encoding.OpenSSH, serialization.PublicFormat.OpenSSH).decode("utf-8") pem = key.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.TraditionalOpenSSL, encryption_algorithm=serialization.NoEncryption()).decode("utf-8") return public_key, pem
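A hypothetical usage sketch combining generate_rsa_key_pair with key_pair_paths, mirroring what _configure_key_pair does below; the key name is illustrative:

import os

public_key, private_key = generate_rsa_key_pair()
public_key_path, private_key_path = key_pair_paths("ray_gcp_example_key")
with open(private_key_path, "w") as f:
    f.write(private_key)
os.chmod(private_key_path, 0o600)  # private keys must not be world-readable
with open(public_key_path, "w") as f:
    f.write(public_key)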
Set up a Google Cloud Platform project. Google Cloud Platform organizes all the resources, such as storage buckets, users, and instances under projects. This is different from aws ec2 where everything is global.

def _configure_project(config):
    """Set up a Google Cloud Platform project.

    Google Cloud Platform organizes all the resources, such as storage
    buckets, users, and instances under projects. This is different from
    aws ec2 where everything is global.
    """
    project_id = config["provider"].get("project_id")
    assert config["provider"]["project_id"] is not None, (
        "'project_id' must be set in the 'provider' section of the autoscaler"
        " config. Notice that the project id must be globally unique.")

    project = _get_project(project_id)

    if project is None:
        # Project not found, try creating it
        _create_project(project_id)
        project = _get_project(project_id)

    assert project is not None, "Failed to create project"
    assert project["lifecycleState"] == "ACTIVE", (
        "Project status needs to be ACTIVE, got {}".format(
            project["lifecycleState"]))

    config["provider"]["project_id"] = project["projectId"]

    return config
Set up a GCP service account with IAM roles. Creates a GCP service account and binds IAM roles which allow it to control storage/compute services. Specifically, the head node needs to have an IAM role that allows it to create further GCE instances and store items in Google Cloud Storage. TODO: Allow the name/id of the service account to be configured

def _configure_iam_role(config):
    """Set up a GCP service account with IAM roles.

    Creates a GCP service account and binds IAM roles which allow it to
    control storage/compute services. Specifically, the head node needs to
    have an IAM role that allows it to create further GCE instances and
    store items in Google Cloud Storage.

    TODO: Allow the name/id of the service account to be configured
    """
    email = SERVICE_ACCOUNT_EMAIL_TEMPLATE.format(
        account_id=DEFAULT_SERVICE_ACCOUNT_ID,
        project_id=config["provider"]["project_id"])
    service_account = _get_service_account(email, config)

    if service_account is None:
        logger.info("_configure_iam_role: "
                    "Creating new service account {}".format(
                        DEFAULT_SERVICE_ACCOUNT_ID))

        service_account = _create_service_account(
            DEFAULT_SERVICE_ACCOUNT_ID, DEFAULT_SERVICE_ACCOUNT_CONFIG, config)

    assert service_account is not None, "Failed to create service account"

    _add_iam_policy_binding(service_account, DEFAULT_SERVICE_ACCOUNT_ROLES)

    config["head_node"]["serviceAccounts"] = [{
        "email": service_account["email"],
        # NOTE: The amount of access is determined by the scope + IAM
        # role of the service account. Even if the cloud-platform scope
        # gives (scope) access to the whole cloud-platform, the service
        # account is limited by the IAM rights specified below.
        "scopes": ["https://www.googleapis.com/auth/cloud-platform"]
    }]

    return config
Configure SSH access, using an existing key pair if possible. Creates a project-wide ssh key that can be used to access all the instances unless explicitly prohibited by instance config. The ssh-keys created by ray are of format: [USERNAME]:ssh-rsa [KEY_VALUE] [USERNAME] where: [USERNAME] is the user for the SSH key, specified in the config. [KEY_VALUE] is the public SSH key value. def _configure_key_pair(config): """Configure SSH access, using an existing key pair if possible. Creates a project-wide ssh key that can be used to access all the instances unless explicitly prohibited by instance config. The ssh-keys created by ray are of format: [USERNAME]:ssh-rsa [KEY_VALUE] [USERNAME] where: [USERNAME] is the user for the SSH key, specified in the config. [KEY_VALUE] is the public SSH key value. """ if "ssh_private_key" in config["auth"]: return config ssh_user = config["auth"]["ssh_user"] project = compute.projects().get( project=config["provider"]["project_id"]).execute() # Key pairs associated with project meta data. The key pairs are general, # and not just ssh keys. ssh_keys_str = next( (item for item in project["commonInstanceMetadata"].get("items", []) if item["key"] == "ssh-keys"), {}).get("value", "") ssh_keys = ssh_keys_str.split("\n") if ssh_keys_str else [] # Try a few times to get or create a good key pair. key_found = False for i in range(10): key_name = key_pair_name(i, config["provider"]["region"], config["provider"]["project_id"], ssh_user) public_key_path, private_key_path = key_pair_paths(key_name) for ssh_key in ssh_keys: key_parts = ssh_key.split(" ") if len(key_parts) != 3: continue if key_parts[2] == ssh_user and os.path.exists(private_key_path): # Found a key key_found = True break # Create a key since it doesn't exist locally or in GCP if not key_found and not os.path.exists(private_key_path): logger.info("_configure_key_pair: " "Creating new key pair {}".format(key_name)) public_key, private_key = generate_rsa_key_pair() _create_project_ssh_key_pair(project, public_key, ssh_user) with open(private_key_path, "w") as f: f.write(private_key) os.chmod(private_key_path, 0o600) with open(public_key_path, "w") as f: f.write(public_key) key_found = True break if key_found: break assert key_found, "SSH keypair for user {} not found for {}".format( ssh_user, private_key_path) assert os.path.exists(private_key_path), ( "Private key file {} not found for user {}" "".format(private_key_path, ssh_user)) logger.info("_configure_key_pair: " "Private key not specified in config, using" "{}".format(private_key_path)) config["auth"]["ssh_private_key"] = private_key_path return config
Pick a reasonable subnet if not specified by the config. def _configure_subnet(config): """Pick a reasonable subnet if not specified by the config.""" # Rationale: avoid subnet lookup if the network is already # completely manually configured if ("networkInterfaces" in config["head_node"] and "networkInterfaces" in config["worker_nodes"]): return config subnets = _list_subnets(config) if not subnets: raise NotImplementedError("Should be able to create subnet.") # TODO: make sure that we have usable subnet. Maybe call # compute.subnetworks().listUsable? For some reason it didn't # work out-of-the-box default_subnet = subnets[0] if "networkInterfaces" not in config["head_node"]: config["head_node"]["networkInterfaces"] = [{ "subnetwork": default_subnet["selfLink"], "accessConfigs": [{ "name": "External NAT", "type": "ONE_TO_ONE_NAT", }], }] if "networkInterfaces" not in config["worker_nodes"]: config["worker_nodes"]["networkInterfaces"] = [{ "subnetwork": default_subnet["selfLink"], "accessConfigs": [{ "name": "External NAT", "type": "ONE_TO_ONE_NAT", }], }] return config
Add new IAM roles for the service account. def _add_iam_policy_binding(service_account, roles): """Add new IAM roles for the service account.""" project_id = service_account["projectId"] email = service_account["email"] member_id = "serviceAccount:" + email policy = crm.projects().getIamPolicy(resource=project_id).execute() already_configured = True for role in roles: role_exists = False for binding in policy["bindings"]: if binding["role"] == role: if member_id not in binding["members"]: binding["members"].append(member_id) already_configured = False role_exists = True if not role_exists: already_configured = False policy["bindings"].append({ "members": [member_id], "role": role, }) if already_configured: # In some managed environments, an admin needs to grant the # roles, so only call setIamPolicy if needed. return result = crm.projects().setIamPolicy( resource=project_id, body={ "policy": policy, }).execute() return result
Inserts an ssh-key into project commonInstanceMetadata def _create_project_ssh_key_pair(project, public_key, ssh_user): """Inserts an ssh-key into project commonInstanceMetadata""" key_parts = public_key.split(" ") # Sanity checks to make sure that the generated key matches expectation assert len(key_parts) == 2, key_parts assert key_parts[0] == "ssh-rsa", key_parts new_ssh_meta = "{ssh_user}:ssh-rsa {key_value} {ssh_user}".format( ssh_user=ssh_user, key_value=key_parts[1]) common_instance_metadata = project["commonInstanceMetadata"] items = common_instance_metadata.get("items", []) ssh_keys_i = next( (i for i, item in enumerate(items) if item["key"] == "ssh-keys"), None) if ssh_keys_i is None: items.append({"key": "ssh-keys", "value": new_ssh_meta}) else: ssh_keys = items[ssh_keys_i] ssh_keys["value"] += "\n" + new_ssh_meta items[ssh_keys_i] = ssh_keys common_instance_metadata["items"] = items operation = compute.projects().setCommonInstanceMetadata( project=project["name"], body=common_instance_metadata).execute() response = wait_for_compute_global_operation(project["name"], operation) return response
An experimental alternate way to submit remote functions. def _remote(self, args=None, kwargs=None, num_return_vals=None, num_cpus=None, num_gpus=None, resources=None): """An experimental alternate way to submit remote functions.""" worker = ray.worker.get_global_worker() worker.check_connected() if self._last_export_session < worker._session_index: # If this function was exported in a previous session, we need to # export this function again, because current GCS doesn't have it. self._last_export_session = worker._session_index worker.function_actor_manager.export(self) kwargs = {} if kwargs is None else kwargs args = [] if args is None else args args = ray.signature.extend_args(self._function_signature, args, kwargs) if num_return_vals is None: num_return_vals = self._num_return_vals resources = ray.utils.resources_from_resource_arguments( self._num_cpus, self._num_gpus, self._resources, num_cpus, num_gpus, resources) if worker.mode == ray.worker.LOCAL_MODE: # In LOCAL_MODE, remote calls simply execute the function. # We copy the arguments to prevent the function call from # mutating them and to match the usual behavior of # immutable remote objects. result = self._function(*copy.deepcopy(args)) return result object_ids = worker.submit_task( self._function_descriptor, args, num_return_vals=num_return_vals, resources=resources) if len(object_ids) == 1: return object_ids[0] elif len(object_ids) > 1: return object_ids
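A hedged sketch of calling _remote directly; it assumes `add` is the remote-function wrapper this method belongs to and that a Ray session has already been started:

import ray

ray.init()

@ray.remote
def add(x, y=1):
    return x + y

# Equivalent to add.remote(2, y=3), spelled through the lower-level API.
object_id = add._remote(args=[2], kwargs={"y": 3}, num_return_vals=1)
print(ray.get(object_id))  # 5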
Append an object to the linked list. Args: future (PlasmaObjectFuture): A PlasmaObjectFuture instance. def append(self, future): """Append an object to the linked list. Args: future (PlasmaObjectFuture): A PlasmaObjectFuture instance. """ future.prev = self.tail if self.tail is None: assert self.head is None self.head = future else: self.tail.next = future self.tail = future # Once done, it will be removed from the list. future.add_done_callback(self.remove)
Remove an object from the linked list. Args: future (PlasmaObjectFuture): A PlasmaObjectFuture instance.

def remove(self, future):
    """Remove an object from the linked list.

    Args:
        future (PlasmaObjectFuture): A PlasmaObjectFuture instance.
    """
    if self._loop.get_debug():
        logger.debug("Removing %s from the linked list.", future)
    if future.prev is None:
        assert future is self.head
        self.head = future.next
        if self.head is None:
            self.tail = None
            if not self.cancelled():
                self.set_result(None)
        else:
            self.head.prev = None
    elif future.next is None:
        assert future is self.tail
        self.tail = future.prev
        if self.tail is None:
            self.head = None
            if not self.cancelled():
                self.set_result(None)
        else:
            # Clear the new tail's forward link; its backward link must
            # stay intact so the rest of the list remains reachable.
            self.tail.next = None
    else:
        # The future sits in the middle of the list; unlink it from both
        # of its neighbors.
        future.prev.next = future.next
        future.next.prev = future.prev
Manually cancel all tasks assigned to this event loop.

def cancel(self, *args, **kwargs):
    """Manually cancel all tasks assigned to this event loop."""
    # Because removing all futures will trigger `set_result`,
    # we cancel this future itself first.
    super().cancel()

    for future in self.traverse():
        # All cancelled futures should have callbacks that remove them
        # from this linked list. However, these callbacks are scheduled
        # in an event loop, so we could still find them in our list.
        if not future.cancelled():
            future.cancel()
Complete all tasks.

def set_result(self, result):
    """Complete all tasks."""
    for future in self.traverse():
        # All cancelled futures should have callbacks that remove them
        # from this linked list. However, these callbacks are scheduled
        # in an event loop, so we could still find them in our list;
        # skip anything that is already done.
        if not future.done():
            future.set_result(result)
    if not self.done():
        super().set_result(result)
Traverse this linked list. Yields: PlasmaObjectFuture: PlasmaObjectFuture instances. def traverse(self): """Traverse this linked list. Yields: PlasmaObjectFuture: PlasmaObjectFuture instances. """ current = self.head while current is not None: yield current current = current.next
Process notifications. def process_notifications(self, messages): """Process notifications.""" for object_id, object_size, metadata_size in messages: if object_size > 0 and object_id in self._waiting_dict: linked_list = self._waiting_dict[object_id] self._complete_future(linked_list)
Turn an object_id into a Future object. Args: object_id: A Ray's object_id. check_ready (bool): If true, check if the object_id is ready. Returns: PlasmaObjectFuture: A future object that waits the object_id. def as_future(self, object_id, check_ready=True): """Turn an object_id into a Future object. Args: object_id: A Ray's object_id. check_ready (bool): If true, check if the object_id is ready. Returns: PlasmaObjectFuture: A future object that waits the object_id. """ if not isinstance(object_id, ray.ObjectID): raise TypeError("Input should be an ObjectID.") plain_object_id = plasma.ObjectID(object_id.binary()) fut = PlasmaObjectFuture(loop=self._loop, object_id=plain_object_id) if check_ready: ready, _ = ray.wait([object_id], timeout=0) if ready: if self._loop.get_debug(): logger.debug("%s has been ready.", plain_object_id) self._complete_future(fut) return fut if plain_object_id not in self._waiting_dict: linked_list = PlasmaObjectLinkedList(self._loop, plain_object_id) linked_list.add_done_callback(self._unregister_callback) self._waiting_dict[plain_object_id] = linked_list self._waiting_dict[plain_object_id].append(fut) if self._loop.get_debug(): logger.debug("%s added to the waiting list.", fut) return fut
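A sketch of awaiting a Ray object through this future API. The import path of the helper that exposes as_future is an assumption here and may differ in practice:

import asyncio

import ray
from ray.experimental import async_api  # assumed module exposing as_future

ray.init()

@ray.remote
def square(x):
    return x * x

async def main():
    fut = async_api.as_future(square.remote(4))
    result = await fut  # resolves once square.remote(4) has finished
    print(result)

asyncio.get_event_loop().run_until_complete(main())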
Returns a list of all trials' information. def get_all_trials(self): """Returns a list of all trials' information.""" response = requests.get(urljoin(self._path, "trials")) return self._deserialize(response)
Returns trial information by trial_id. def get_trial(self, trial_id): """Returns trial information by trial_id.""" response = requests.get( urljoin(self._path, "trials/{}".format(trial_id))) return self._deserialize(response)
Adds a trial by name and specification (dict). def add_trial(self, name, specification): """Adds a trial by name and specification (dict).""" payload = {"name": name, "spec": specification} response = requests.post(urljoin(self._path, "trials"), json=payload) return self._deserialize(response)
Requests to stop trial by trial_id. def stop_trial(self, trial_id): """Requests to stop trial by trial_id.""" response = requests.put( urljoin(self._path, "trials/{}".format(trial_id))) return self._deserialize(response)
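A hedged end-to-end sketch of these client methods, assuming the class is constructed with the address of a running Tune server and that responses deserialize to dicts (the constructor signature and response layout are assumptions):

client = TuneClient("http://localhost:4321")  # address/constructor assumed

# Launch a new trial, then inspect and stop it.
client.add_trial("my_trial", {"run": "my_trainable", "config": {"lr": 0.01}})
trials = client.get_all_trials()
trial_id = trials["trials"][0]["id"]          # response layout assumed
print(client.get_trial(trial_id))
client.stop_trial(trial_id)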
Apply the given function to each remote worker. Returns: List of results from applying the function. def foreach_worker(self, fn): """Apply the given function to each remote worker. Returns: List of results from applying the function. """ results = ray.get([w.foreach_worker.remote(fn) for w in self.workers]) return results
Apply the given function to each model replica in each worker. Returns: List of results from applying the function. def foreach_model(self, fn): """Apply the given function to each model replica in each worker. Returns: List of results from applying the function. """ results = ray.get([w.foreach_model.remote(fn) for w in self.workers]) out = [] for r in results: out.extend(r) return out
Apply the given function to a single model replica. Returns: Result from applying the function. def for_model(self, fn): """Apply the given function to a single model replica. Returns: Result from applying the function. """ return ray.get(self.workers[0].for_model.remote(fn))
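Illustrative calls, assuming `sgd` is an instance of the distributed-SGD wrapper these methods belong to and that its model replicas expose a get_loss() method (hypothetical):

import socket

# Run on every remote worker process.
hostnames = sgd.foreach_worker(lambda worker: socket.gethostname())

# Run on every model replica across all workers (flattened list).
losses = sgd.foreach_model(lambda model: model.get_loss())  # get_loss assumed

# Run on a single replica only.
first_loss = sgd.for_model(lambda model: model.get_loss())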
Run a single SGD step. Arguments: fetch_stats (bool): Whether to return stats from the step. This can slow down the computation by acting as a global barrier. def step(self, fetch_stats=False): """Run a single SGD step. Arguments: fetch_stats (bool): Whether to return stats from the step. This can slow down the computation by acting as a global barrier. """ if self.strategy == "ps": return _distributed_sgd_step( self.workers, self.ps_list, write_timeline=False, fetch_stats=fetch_stats) else: return _simple_sgd_step(self.workers)
Wrapper for starting a router and registering it. Args: router_class: The router class to instantiate. router_name: The name to give to the router. Returns: A handle to the newly started router actor.

def start_router(router_class, router_name):
    """Wrapper for starting a router and registering it.

    Args:
        router_class: The router class to instantiate.
        router_name: The name to give to the router.

    Returns:
        A handle to the newly started router actor.
    """
    handle = router_class.remote(router_name)
    ray.experimental.register_actor(router_name, handle)
    handle.start.remote()
    return handle
Returns a list of one-hot encodings for all parameters: one one-hot np.array per parameter, with the position of the 1 chosen uniformly at random.

def generate_random_one_hot_encoding(self):
    """Returns a list of one-hot encodings for all parameters.

    One one-hot np.array per parameter, with the position of the 1 chosen
    uniformly at random.
    """
    encoding = []
    for ps in self.param_list:
        one_hot = np.zeros(ps.choices_count())
        choice = random.randrange(ps.choices_count())
        one_hot[choice] = 1
        encoding.append(one_hot)
    return encoding
Apply one hot encoding to generate a specific config. Arguments: one_hot_encoding (list): A list of one hot encodings, one for each parameter. The shape of each encoding should match that of the corresponding ``ParameterSpace``. Returns: A dict config with specific <name, value> pairs.

def apply_one_hot_encoding(self, one_hot_encoding):
    """Apply one hot encoding to generate a specific config.

    Arguments:
        one_hot_encoding (list): A list of one hot encodings, one for each
            parameter. The shape of each encoding should match that of the
            corresponding ``ParameterSpace``.

    Returns:
        A dict config with specific <name, value> pairs.
    """
    config = {}
    for ps, one_hot in zip(self.param_list, one_hot_encoding):
        index = np.argmax(one_hot)
        config[ps.name] = ps.choices[index]
    return config
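A tiny numpy illustration of the round trip the two methods above implement; the parameter choices here are made up:

import numpy as np

choices = [0.1, 0.01, 0.001]         # hypothetical choices for one parameter
one_hot = np.array([0.0, 1.0, 0.0])  # as produced by the random generator above
value = choices[int(np.argmax(one_hot))]
assert value == 0.01                 # the config entry for this parameter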
Pin an object in the object store. It will be available as long as the pinning process is alive. The pinned object can be retrieved by calling get_pinned_object on the identifier returned by this call. def pin_in_object_store(obj): """Pin an object in the object store. It will be available as long as the pinning process is alive. The pinned object can be retrieved by calling get_pinned_object on the identifier returned by this call. """ obj_id = ray.put(_to_pinnable(obj)) _pinned_objects.append(ray.get(obj_id)) return "{}{}".format(PINNED_OBJECT_PREFIX, base64.b64encode(obj_id.binary()).decode("utf-8"))
Retrieve a pinned object from the object store. def get_pinned_object(pinned_id): """Retrieve a pinned object from the object store.""" from ray import ObjectID return _from_pinnable( ray.get( ObjectID(base64.b64decode(pinned_id[len(PINNED_OBJECT_PREFIX):]))))
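Usage sketch for the pin/get pair above; it assumes a running Ray session and that the helpers are used from their own module context:

import ray

ray.init()

data = list(range(1000))
pinned_id = pin_in_object_store(data)   # opaque string id, safe to embed in configs
restored = get_pinned_object(pinned_id)
assert restored == data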
Returns a new dict that is d1 and d2 deep merged. def merge_dicts(d1, d2): """Returns a new dict that is d1 and d2 deep merged.""" merged = copy.deepcopy(d1) deep_update(merged, d2, True, []) return merged
Updates original dict with values from new_dict recursively. If a new key is introduced in new_dict and new_keys_allowed is not True, an error will be thrown. Further, for sub-dicts, if the key is in the whitelist, then new subkeys can be introduced. Args: original (dict): Dictionary with default values. new_dict (dict): Dictionary with values to be updated. new_keys_allowed (bool): Whether new keys are allowed. whitelist (list): List of keys that correspond to dict values where new subkeys can be introduced. This is only at the top level.

def deep_update(original, new_dict, new_keys_allowed, whitelist):
    """Updates original dict with values from new_dict recursively.

    If a new key is introduced in new_dict and new_keys_allowed is not
    True, an error will be thrown. Further, for sub-dicts, if the key is
    in the whitelist, then new subkeys can be introduced.

    Args:
        original (dict): Dictionary with default values.
        new_dict (dict): Dictionary with values to be updated.
        new_keys_allowed (bool): Whether new keys are allowed.
        whitelist (list): List of keys that correspond to dict values
            where new subkeys can be introduced. This is only at the top
            level.
    """
    for k, value in new_dict.items():
        if k not in original:
            if not new_keys_allowed:
                raise Exception("Unknown config parameter `{}` ".format(k))
        if isinstance(original.get(k), dict):
            if k in whitelist:
                deep_update(original[k], value, True, [])
            else:
                deep_update(original[k], value, new_keys_allowed, [])
        else:
            original[k] = value
    return original
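A small worked example of the deep-merge behavior defined by the two helpers above:

defaults = {"optimizer": {"lr": 0.01, "momentum": 0.9}, "epochs": 10}
overrides = {"optimizer": {"lr": 0.001}, "epochs": 20}

merged = merge_dicts(defaults, overrides)
# {"optimizer": {"lr": 0.001, "momentum": 0.9}, "epochs": 20}
# `defaults` is left untouched because merge_dicts deep-copies it first.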
Similar to completed but only returns once the object is local. Assumes each obj_id is a single id.

def completed_prefetch(self, blocking_wait=False, max_yield=999):
    """Similar to completed but only returns once the object is local.

    Assumes each obj_id is a single id."""
    for worker, obj_id in self.completed(blocking_wait=blocking_wait):
        (ray.worker.global_worker.raylet_client.fetch_or_reconstruct(
            [obj_id], True))
        self._fetching.append((worker, obj_id))

    remaining = []
    num_yielded = 0
    for worker, obj_id in self._fetching:
        plasma_id = ray.pyarrow.plasma.ObjectID(obj_id.binary())
        if (num_yielded < max_yield
                and ray.worker.global_worker.plasma_client.contains(
                    plasma_id)):
            yield (worker, obj_id)
            num_yielded += 1
        else:
            remaining.append((worker, obj_id))
    self._fetching = remaining
Notify that some evaluators may be removed. def reset_evaluators(self, evaluators): """Notify that some evaluators may be removed.""" for obj_id, ev in self._tasks.copy().items(): if ev not in evaluators: del self._tasks[obj_id] del self._objects[obj_id] ok = [] for ev, obj_id in self._fetching: if ev in evaluators: ok.append((ev, obj_id)) self._fetching = ok
Iterate over train batches. Arguments: max_yield (int): Max number of batches to iterate over in this cycle. Setting this avoids iter_train_batches returning too much data at once. def iter_train_batches(self, max_yield=999): """Iterate over train batches. Arguments: max_yield (int): Max number of batches to iterate over in this cycle. Setting this avoids iter_train_batches returning too much data at once. """ for ev, sample_batch in self._augment_with_replay( self.sample_tasks.completed_prefetch( blocking_wait=True, max_yield=max_yield)): sample_batch.decompress_if_needed() self.batch_buffer.append(sample_batch) if sum(b.count for b in self.batch_buffer) >= self.train_batch_size: train_batch = self.batch_buffer[0].concat_samples( self.batch_buffer) yield train_batch self.batch_buffer = [] # If the batch was replayed, skip the update below. if ev is None: continue # Put in replay buffer if enabled if self.replay_buffer_num_slots > 0: if len(self.replay_batches) < self.replay_buffer_num_slots: self.replay_batches.append(sample_batch) else: self.replay_batches[self.replay_index] = sample_batch self.replay_index += 1 self.replay_index %= self.replay_buffer_num_slots ev.set_weights.remote(self.broadcasted_weights) self.num_weight_syncs += 1 self.num_sent_since_broadcast += 1 # Kick off another sample request self.sample_tasks.add(ev, ev.sample.remote())
Creates or updates an autoscaling Ray cluster from a config json.

def create_or_update_cluster(config_file, override_min_workers,
                             override_max_workers, no_restart, restart_only,
                             yes, override_cluster_name):
    """Creates or updates an autoscaling Ray cluster from a config json."""
    config = yaml.load(open(config_file).read())
    if override_min_workers is not None:
        config["min_workers"] = override_min_workers
    if override_max_workers is not None:
        config["max_workers"] = override_max_workers
    if override_cluster_name is not None:
        config["cluster_name"] = override_cluster_name
    config = _bootstrap_config(config)
    get_or_create_head_node(config, config_file, no_restart, restart_only,
                            yes, override_cluster_name)
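Calling the helper programmatically looks like this; the yaml path is illustrative, and the same operation is normally reached through the `ray up` CLI:

create_or_update_cluster(
    config_file="cluster.yaml",
    override_min_workers=None,
    override_max_workers=None,
    no_restart=False,
    restart_only=False,
    yes=True,
    override_cluster_name=None)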
Destroys all nodes of a Ray cluster described by a config json. def teardown_cluster(config_file, yes, workers_only, override_cluster_name): """Destroys all nodes of a Ray cluster described by a config json.""" config = yaml.load(open(config_file).read()) if override_cluster_name is not None: config["cluster_name"] = override_cluster_name validate_config(config) config = fillout_defaults(config) confirm("This will destroy your cluster", yes) provider = get_node_provider(config["provider"], config["cluster_name"]) try: def remaining_nodes(): if workers_only: A = [] else: A = [ node_id for node_id in provider.non_terminated_nodes({ TAG_RAY_NODE_TYPE: "head" }) ] A += [ node_id for node_id in provider.non_terminated_nodes({ TAG_RAY_NODE_TYPE: "worker" }) ] return A # Loop here to check that both the head and worker nodes are actually # really gone A = remaining_nodes() with LogTimer("teardown_cluster: Termination done."): while A: logger.info("teardown_cluster: " "Terminating {} nodes...".format(len(A))) provider.terminate_nodes(A) time.sleep(1) A = remaining_nodes() finally: provider.cleanup()
Kills a random Raylet worker. def kill_node(config_file, yes, override_cluster_name): """Kills a random Raylet worker.""" config = yaml.load(open(config_file).read()) if override_cluster_name is not None: config["cluster_name"] = override_cluster_name config = _bootstrap_config(config) confirm("This will kill a node in your cluster", yes) provider = get_node_provider(config["provider"], config["cluster_name"]) try: nodes = provider.non_terminated_nodes({TAG_RAY_NODE_TYPE: "worker"}) node = random.choice(nodes) logger.info("kill_node: Terminating worker {}".format(node)) updater = NodeUpdaterThread( node_id=node, provider_config=config["provider"], provider=provider, auth_config=config["auth"], cluster_name=config["cluster_name"], file_mounts=config["file_mounts"], initialization_commands=[], setup_commands=[], runtime_hash="") _exec(updater, "ray stop", False, False) time.sleep(5) if config.get("provider", {}).get("use_internal_ips", False) is True: node_ip = provider.internal_ip(node) else: node_ip = provider.external_ip(node) finally: provider.cleanup() return node_ip
Create the cluster head node, which in turn creates the workers.

def get_or_create_head_node(config, config_file, no_restart, restart_only,
                            yes, override_cluster_name):
    """Create the cluster head node, which in turn creates the workers."""
    provider = get_node_provider(config["provider"], config["cluster_name"])
    try:
        head_node_tags = {
            TAG_RAY_NODE_TYPE: "head",
        }
        nodes = provider.non_terminated_nodes(head_node_tags)
        if len(nodes) > 0:
            head_node = nodes[0]
        else:
            head_node = None

        if not head_node:
            confirm("This will create a new cluster", yes)
        elif not no_restart:
            confirm("This will restart cluster services", yes)

        launch_hash = hash_launch_conf(config["head_node"], config["auth"])
        if head_node is None or provider.node_tags(head_node).get(
                TAG_RAY_LAUNCH_CONFIG) != launch_hash:
            if head_node is not None:
                confirm("Head node config out-of-date. It will be terminated",
                        yes)
                logger.info(
                    "get_or_create_head_node: "
                    "Terminating outdated head node {}".format(head_node))
                provider.terminate_node(head_node)
            logger.info("get_or_create_head_node: Launching new head node...")
            head_node_tags[TAG_RAY_LAUNCH_CONFIG] = launch_hash
            head_node_tags[TAG_RAY_NODE_NAME] = "ray-{}-head".format(
                config["cluster_name"])
            provider.create_node(config["head_node"], head_node_tags, 1)

        nodes = provider.non_terminated_nodes(head_node_tags)
        assert len(nodes) == 1, "Failed to create head node."
        head_node = nodes[0]

        # TODO(ekl) right now we always update the head node even if the hash
        # matches. We could prompt the user for what they want to do here.
        runtime_hash = hash_runtime_conf(config["file_mounts"], config)
        logger.info("get_or_create_head_node: Updating files on head node...")

        # Rewrite the auth config so that the head node can update the workers
        remote_key_path = "~/ray_bootstrap_key.pem"
        remote_config = copy.deepcopy(config)
        remote_config["auth"]["ssh_private_key"] = remote_key_path

        # Adjust for new file locations
        new_mounts = {}
        for remote_path in config["file_mounts"]:
            new_mounts[remote_path] = remote_path
        remote_config["file_mounts"] = new_mounts
        remote_config["no_restart"] = no_restart

        # Now inject the rewritten config and SSH key into the head node
        remote_config_file = tempfile.NamedTemporaryFile(
            "w", prefix="ray-bootstrap-")
        remote_config_file.write(json.dumps(remote_config))
        remote_config_file.flush()
        config["file_mounts"].update({
            remote_key_path: config["auth"]["ssh_private_key"],
            "~/ray_bootstrap_config.yaml": remote_config_file.name
        })

        if restart_only:
            init_commands = config["head_start_ray_commands"]
        elif no_restart:
            init_commands = config["head_setup_commands"]
        else:
            init_commands = (config["head_setup_commands"] +
                             config["head_start_ray_commands"])

        updater = NodeUpdaterThread(
            node_id=head_node,
            provider_config=config["provider"],
            provider=provider,
            auth_config=config["auth"],
            cluster_name=config["cluster_name"],
            file_mounts=config["file_mounts"],
            initialization_commands=config["initialization_commands"],
            setup_commands=init_commands,
            runtime_hash=runtime_hash,
        )
        updater.start()
        updater.join()

        # Refresh the node cache so we see the external ip if available
        provider.non_terminated_nodes(head_node_tags)

        if config.get("provider", {}).get("use_internal_ips", False) is True:
            head_node_ip = provider.internal_ip(head_node)
        else:
            head_node_ip = provider.external_ip(head_node)

        if updater.exitcode != 0:
            logger.error("get_or_create_head_node: "
                         "Updating {} failed".format(head_node_ip))
            sys.exit(1)
        logger.info(
            "get_or_create_head_node: "
            "Head node up-to-date, IP address is: {}".format(head_node_ip))

        monitor_str = "tail -n 100 -f /tmp/ray/session_*/logs/monitor*"
        use_docker = bool(config["docker"]["container_name"])
        if override_cluster_name:
            modifiers = " --cluster-name={}".format(
                quote(override_cluster_name))
        else:
            modifiers = ""
        print("To monitor auto-scaling activity, you can run:\n\n"
              "  ray exec {} {}{}{}\n".format(
                  config_file, "--docker " if use_docker else " ",
                  quote(monitor_str), modifiers))
        print("To open a console on the cluster:\n\n"
              "  ray attach {}{}\n".format(config_file, modifiers))
        print("To ssh manually to the cluster, run:\n\n"
              "  ssh -i {} {}@{}\n".format(config["auth"]["ssh_private_key"],
                                           config["auth"]["ssh_user"],
                                           head_node_ip))
    finally:
        provider.cleanup()
Attaches to a screen for the specified cluster. Arguments: config_file: path to the cluster yaml start: whether to start the cluster if it isn't up use_tmux: whether to use tmux as multiplexer override_cluster_name: set the name of the cluster new: whether to force a new screen def attach_cluster(config_file, start, use_tmux, override_cluster_name, new): """Attaches to a screen for the specified cluster. Arguments: config_file: path to the cluster yaml start: whether to start the cluster if it isn't up use_tmux: whether to use tmux as multiplexer override_cluster_name: set the name of the cluster new: whether to force a new screen """ if use_tmux: if new: cmd = "tmux new" else: cmd = "tmux attach || tmux new" else: if new: cmd = "screen -L" else: cmd = "screen -L -xRR" exec_cluster(config_file, cmd, False, False, False, False, start, override_cluster_name, None)
Runs a command on the specified cluster. Arguments: config_file: path to the cluster yaml cmd: command to run docker: whether to run command in docker container of config screen: whether to run in a screen tmux: whether to run in a tmux session stop: whether to stop the cluster after command run start: whether to start the cluster if it isn't up override_cluster_name: set the name of the cluster port_forward: port to forward def exec_cluster(config_file, cmd, docker, screen, tmux, stop, start, override_cluster_name, port_forward): """Runs a command on the specified cluster. Arguments: config_file: path to the cluster yaml cmd: command to run docker: whether to run command in docker container of config screen: whether to run in a screen tmux: whether to run in a tmux session stop: whether to stop the cluster after command run start: whether to start the cluster if it isn't up override_cluster_name: set the name of the cluster port_forward: port to forward """ assert not (screen and tmux), "Can specify only one of `screen` or `tmux`." config = yaml.load(open(config_file).read()) if override_cluster_name is not None: config["cluster_name"] = override_cluster_name config = _bootstrap_config(config) head_node = _get_head_node( config, config_file, override_cluster_name, create_if_needed=start) provider = get_node_provider(config["provider"], config["cluster_name"]) try: updater = NodeUpdaterThread( node_id=head_node, provider_config=config["provider"], provider=provider, auth_config=config["auth"], cluster_name=config["cluster_name"], file_mounts=config["file_mounts"], initialization_commands=[], setup_commands=[], runtime_hash="", ) def wrap_docker(command): container_name = config["docker"]["container_name"] if not container_name: raise ValueError("Docker container not specified in config.") return with_docker_exec( [command], container_name=container_name)[0] cmd = wrap_docker(cmd) if docker else cmd if stop: shutdown_cmd = ( "ray stop; ray teardown ~/ray_bootstrap_config.yaml " "--yes --workers-only") if docker: shutdown_cmd = wrap_docker(shutdown_cmd) cmd += ("; {}; sudo shutdown -h now".format(shutdown_cmd)) _exec( updater, cmd, screen, tmux, expect_error=stop, port_forward=port_forward) if tmux or screen: attach_command_parts = ["ray attach", config_file] if override_cluster_name is not None: attach_command_parts.append( "--cluster-name={}".format(override_cluster_name)) if tmux: attach_command_parts.append("--tmux") elif screen: attach_command_parts.append("--screen") attach_command = " ".join(attach_command_parts) attach_info = "Use `{}` to check on command status.".format( attach_command) logger.info(attach_info) finally: provider.cleanup()
Rsyncs files. Arguments: config_file: path to the cluster yaml source: source dir target: target dir override_cluster_name: set the name of the cluster down: whether we're syncing remote -> local def rsync(config_file, source, target, override_cluster_name, down): """Rsyncs files. Arguments: config_file: path to the cluster yaml source: source dir target: target dir override_cluster_name: set the name of the cluster down: whether we're syncing remote -> local """ config = yaml.load(open(config_file).read()) if override_cluster_name is not None: config["cluster_name"] = override_cluster_name config = _bootstrap_config(config) head_node = _get_head_node( config, config_file, override_cluster_name, create_if_needed=False) provider = get_node_provider(config["provider"], config["cluster_name"]) try: updater = NodeUpdaterThread( node_id=head_node, provider_config=config["provider"], provider=provider, auth_config=config["auth"], cluster_name=config["cluster_name"], file_mounts=config["file_mounts"], initialization_commands=[], setup_commands=[], runtime_hash="", ) if down: rsync = updater.rsync_down else: rsync = updater.rsync_up rsync(source, target, check_error=False) finally: provider.cleanup()
Returns head node IP for given configuration file if exists. def get_head_node_ip(config_file, override_cluster_name): """Returns head node IP for given configuration file if exists.""" config = yaml.load(open(config_file).read()) if override_cluster_name is not None: config["cluster_name"] = override_cluster_name provider = get_node_provider(config["provider"], config["cluster_name"]) try: head_node = _get_head_node(config, config_file, override_cluster_name) if config.get("provider", {}).get("use_internal_ips", False) is True: head_node_ip = provider.internal_ip(head_node) else: head_node_ip = provider.external_ip(head_node) finally: provider.cleanup() return head_node_ip
Returns worker node IPs for given configuration file. def get_worker_node_ips(config_file, override_cluster_name): """Returns worker node IPs for given configuration file.""" config = yaml.load(open(config_file).read()) if override_cluster_name is not None: config["cluster_name"] = override_cluster_name provider = get_node_provider(config["provider"], config["cluster_name"]) try: nodes = provider.non_terminated_nodes({TAG_RAY_NODE_TYPE: "worker"}) if config.get("provider", {}).get("use_internal_ips", False) is True: return [provider.internal_ip(node) for node in nodes] else: return [provider.external_ip(node) for node in nodes] finally: provider.cleanup()
Implements train() for a Function API. If the RunnerThread finishes without reporting "done", Tune will automatically provide a magic keyword __duplicate__ along with a result with "done=True". The TrialRunner will handle the result accordingly (see tune/trial_runner.py).

def _train(self):
    """Implements train() for a Function API.

    If the RunnerThread finishes without reporting "done",
    Tune will automatically provide a magic keyword __duplicate__
    along with a result with "done=True". The TrialRunner will handle the
    result accordingly (see tune/trial_runner.py).
    """
    if self._runner.is_alive():
        # if started and alive, inform the reporter to continue and
        # generate the next result
        self._continue_semaphore.release()
    else:
        # if not alive, try to start
        self._status_reporter._start()
        try:
            self._runner.start()
        except RuntimeError:
            # If this is reached, it means the thread was started and is
            # now done or has raised an exception.
            pass

    result = None
    while result is None and self._runner.is_alive():
        # fetch the next produced result
        try:
            result = self._results_queue.get(
                block=True, timeout=RESULT_FETCH_TIMEOUT)
        except queue.Empty:
            pass

    # If no result was found, then the runner must no longer be alive.
    if result is None:
        # Try one last time to fetch results in case results were reported
        # in between the time of the last check and the termination of the
        # thread runner.
        try:
            result = self._results_queue.get(block=False)
        except queue.Empty:
            pass

    # Check whether an error occurred inside the thread runner.
    if result is None:
        # Only raise an error from the runner if all results are consumed.
        self._report_thread_runner_error(block=True)

        # Under normal conditions, this code should never be reached since
        # this branch should only be visited if the runner thread raised
        # an exception. If no exception was raised, it means that the
        # runner thread never reported any results which should not be
        # possible when wrapping functions with `wrap_function`.
        raise TuneError(
            ("Wrapped function ran until completion without reporting "
             "results or raising an exception."))

    else:
        if not self._error_queue.empty():
            logger.warning(
                ("Runner error waiting to be raised in main thread. "
                 "Logging all available results first."))

    # This keyword appears if the train_func using the Function API
    # finishes without "done=True". This duplicates the last result, but
    # the TrialRunner will not log this result again.
    if "__duplicate__" in result:
        new_result = self._last_result.copy()
        new_result.update(result)
        result = new_result

    self._last_result = result
    return result
Returns logits and aux_logits from images. def build_network(self, images, phase_train=True, nclass=1001, image_depth=3, data_type=tf.float32, data_format="NCHW", use_tf_layers=True, fp16_vars=False): """Returns logits and aux_logits from images.""" if data_format == "NCHW": images = tf.transpose(images, [0, 3, 1, 2]) var_type = tf.float32 if data_type == tf.float16 and fp16_vars: var_type = tf.float16 network = convnet_builder.ConvNetBuilder( images, image_depth, phase_train, use_tf_layers, data_format, data_type, var_type) with tf.variable_scope( "cg", custom_getter=network.get_custom_getter()): self.add_inference(network) # Add the final fully-connected class layer logits = (network.affine(nclass, activation="linear") if not self.skip_final_affine_layer() else network.top_layer) aux_logits = None if network.aux_top_layer is not None: with network.switch_to_aux_top_layer(): aux_logits = network.affine( nclass, activation="linear", stddev=0.001) if data_type == tf.float16: # TODO(reedwm): Determine if we should do this cast here. logits = tf.cast(logits, tf.float32) if aux_logits is not None: aux_logits = tf.cast(aux_logits, tf.float32) return logits, aux_logits
Helper function for renaming Agent => Trainer with a warning.

def renamed_class(cls):
    """Helper function for renaming Agent => Trainer with a warning."""

    class DeprecationWrapper(cls):
        def __init__(self, config=None, env=None, logger_creator=None):
            old_name = cls.__name__.replace("Trainer", "Agent")
            new_name = cls.__name__
            logger.warn("DeprecationWarning: {} has been renamed to {}. ".
                        format(old_name, new_name) +
                        "This will raise an error in the future.")
            cls.__init__(self, config, env, logger_creator)

    DeprecationWrapper.__name__ = cls.__name__

    return DeprecationWrapper
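A sketch of how the wrapper is meant to be used. PPOTrainer here is a hypothetical stand-in for any class whose legacy name ended in "Agent", and the module-level logger used by renamed_class is assumed to be configured:

class PPOTrainer:  # hypothetical Trainer class
    def __init__(self, config=None, env=None, logger_creator=None):
        self.config = config

# Keep the old name importable while warning users to migrate.
PPOAgent = renamed_class(PPOTrainer)
agent = PPOAgent(config={"lr": 0.001})  # logs "PPOAgent has been renamed ..."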
Profile a span of time so that it appears in the timeline visualization. Note that this only works in the raylet code path. This function can be used as follows (both on the driver or within a task). .. code-block:: python with ray.profile("custom event", extra_data={'key': 'value'}): # Do some computation here. Optionally, a dictionary can be passed as the "extra_data" argument, and it can have keys "name" and "cname" if you want to override the default timeline display text and box color. Other values will appear at the bottom of the chrome tracing GUI when you click on the box corresponding to this profile span. Args: event_type: A string describing the type of the event. extra_data: This must be a dictionary mapping strings to strings. This data will be added to the json objects that are used to populate the timeline, so if you want to set a particular color, you can simply set the "cname" attribute to an appropriate color. Similarly, if you set the "name" attribute, then that will set the text displayed on the box in the timeline. Returns: An object that can profile a span of time via a "with" statement. def profile(event_type, extra_data=None): """Profile a span of time so that it appears in the timeline visualization. Note that this only works in the raylet code path. This function can be used as follows (both on the driver or within a task). .. code-block:: python with ray.profile("custom event", extra_data={'key': 'value'}): # Do some computation here. Optionally, a dictionary can be passed as the "extra_data" argument, and it can have keys "name" and "cname" if you want to override the default timeline display text and box color. Other values will appear at the bottom of the chrome tracing GUI when you click on the box corresponding to this profile span. Args: event_type: A string describing the type of the event. extra_data: This must be a dictionary mapping strings to strings. This data will be added to the json objects that are used to populate the timeline, so if you want to set a particular color, you can simply set the "cname" attribute to an appropriate color. Similarly, if you set the "name" attribute, then that will set the text displayed on the box in the timeline. Returns: An object that can profile a span of time via a "with" statement. """ worker = ray.worker.global_worker return RayLogSpanRaylet(worker.profiler, event_type, extra_data=extra_data)
Drivers run this as a thread to flush profile data in the background. def _periodically_flush_profile_events(self): """Drivers run this as a thread to flush profile data in the background.""" # Note(rkn): This is run on a background thread in the driver. It uses # the raylet client. This should be ok because it doesn't read # from the raylet client and we have the GIL here. However, # if either of those things changes, then we could run into issues. while True: # Sleep for 1 second. This will be interrupted if # self.threads_stopped is set. self.threads_stopped.wait(timeout=1) # Exit if we received a signal that we should stop. if self.threads_stopped.is_set(): return self.flush_profile_data()
Push the logged profiling data to the global control store. def flush_profile_data(self): """Push the logged profiling data to the global control store.""" with self.lock: events = self.events self.events = [] if self.worker.mode == ray.WORKER_MODE: component_type = "worker" else: component_type = "driver" self.worker.raylet_client.push_profile_events( component_type, ray.UniqueID(self.worker.worker_id), self.worker.node_ip_address, events)
Add a key-value pair to the extra_data dict. This can be used to add attributes that are not available when ray.profile was called. Args: key: The attribute name. value: The attribute value. def set_attribute(self, key, value): """Add a key-value pair to the extra_data dict. This can be used to add attributes that are not available when ray.profile was called. Args: key: The attribute name. value: The attribute value. """ if not isinstance(key, str) or not isinstance(value, str): raise ValueError("The arguments 'key' and 'value' must both be " "strings. Instead they are {} and {}.".format( key, value)) self.extra_data[key] = value
Syncs the local logdir on driver to worker if possible. Requires ray cluster to be started with the autoscaler. Also requires rsync to be installed. def sync_to_worker_if_possible(self): """Syncs the local logdir on driver to worker if possible. Requires ray cluster to be started with the autoscaler. Also requires rsync to be installed. """ if self.worker_ip == self.local_ip: return ssh_key = get_ssh_key() ssh_user = get_ssh_user() global _log_sync_warned if ssh_key is None or ssh_user is None: if not _log_sync_warned: logger.error("Log sync requires cluster to be setup with " "`ray up`.") _log_sync_warned = True return if not distutils.spawn.find_executable("rsync"): logger.error("Log sync requires rsync to be installed.") return source = "{}/".format(self.local_dir) target = "{}@{}:{}/".format(ssh_user, self.worker_ip, self.local_dir) final_cmd = (("""rsync -savz -e "ssh -i {} -o ConnectTimeout=120s """ """-o StrictHostKeyChecking=no" {} {}""").format( quote(ssh_key), quote(source), quote(target))) logger.info("Syncing results to %s", str(self.worker_ip)) sync_process = subprocess.Popen( final_cmd, shell=True, stdout=self.logfile) sync_process.wait()
Forward pass for the mixer. Arguments: agent_qs: Tensor of shape [B, T, n_agents] (the chosen per-agent Q-values) states: Tensor of shape [B, T, state_dim]

def forward(self, agent_qs, states):
    """Forward pass for the mixer.

    Arguments:
        agent_qs: Tensor of shape [B, T, n_agents] (the chosen per-agent
            Q-values)
        states: Tensor of shape [B, T, state_dim]
    """
    bs = agent_qs.size(0)
    states = states.reshape(-1, self.state_dim)
    agent_qs = agent_qs.view(-1, 1, self.n_agents)
    # First layer
    w1 = th.abs(self.hyper_w_1(states))
    b1 = self.hyper_b_1(states)
    w1 = w1.view(-1, self.n_agents, self.embed_dim)
    b1 = b1.view(-1, 1, self.embed_dim)
    hidden = F.elu(th.bmm(agent_qs, w1) + b1)
    # Second layer
    w_final = th.abs(self.hyper_w_final(states))
    w_final = w_final.view(-1, self.embed_dim, 1)
    # State-dependent bias
    v = self.V(states).view(-1, 1, 1)
    # Compute final output
    y = th.bmm(hidden, w_final) + v
    # Reshape and return
    q_tot = y.view(bs, -1, 1)
    return q_tot
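A standalone shape walk-through of the mixing computation above, with random stand-ins for the hypernetwork outputs; the batch/time/agent sizes are arbitrary and chosen only to make the reshapes concrete:

import torch as th
import torch.nn.functional as F

B, T, n_agents, embed_dim, state_dim = 4, 5, 3, 32, 10
agent_qs = th.rand(B, T, n_agents)
flat_qs = agent_qs.view(-1, 1, n_agents)              # [B*T, 1, n_agents]

w1 = th.rand(B * T, n_agents, embed_dim)              # abs(hyper_w_1(states)), reshaped
b1 = th.rand(B * T, 1, embed_dim)
hidden = F.elu(th.bmm(flat_qs, w1) + b1)              # [B*T, 1, embed_dim]

w_final = th.rand(B * T, embed_dim, 1)                # abs(hyper_w_final(states))
v = th.rand(B * T, 1, 1)                              # state-dependent bias
q_tot = (th.bmm(hidden, w_final) + v).view(B, -1, 1)  # [B, T, 1]
assert q_tot.shape == (B, T, 1)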
Passes the result to SigOpt unless early terminated or errored. If a trial fails, it will be reported as a failed Observation, telling the optimizer that the Suggestion led to a metric failure, which updates the feasible region and improves parameter recommendation. Creates SigOpt Observation object for trial. def on_trial_complete(self, trial_id, result=None, error=False, early_terminated=False): """Passes the result to SigOpt unless early terminated or errored. If a trial fails, it will be reported as a failed Observation, telling the optimizer that the Suggestion led to a metric failure, which updates the feasible region and improves parameter recommendation. Creates SigOpt Observation object for trial. """ if result: self.conn.experiments(self.experiment.id).observations().create( suggestion=self._live_trial_mapping[trial_id].id, value=result[self._reward_attr], ) # Update the experiment object self.experiment = self.conn.experiments(self.experiment.id).fetch() elif error or early_terminated: # Reports a failed Observation self.conn.experiments(self.experiment.id).observations().create( failed=True, suggestion=self._live_trial_mapping[trial_id].id) del self._live_trial_mapping[trial_id]