docstring
stringlengths
52
499
function
stringlengths
67
35.2k
__index_level_0__
int64
52.6k
1.16M
Mark additional columns as being part of the superkey. Supplements the Keys already extracted from the FSM template. Useful when adding new columns to existing tables. Note: This will impact attempts to further 'extend' the table as the superkey must be common between tables for successful extension. ...
def AddKeys(self, key_list): for keyname in key_list: if keyname not in self.header: raise KeyError("'%s'" % keyname) self._keys = self._keys.union(set(key_list))
86,093
Support for [] notation. Args: column: Tuple of column names, or a (str) column name, or positional column number, 0-indexed. Returns: A list or string with column value(s). Raises: IndexError: The given column(s) were not found.
def __getitem__(self, column): if isinstance(column, (list, tuple)): ret = [] for col in column: ret.append(self[col]) return ret try: return self._values[self._index[column]] except (KeyError, TypeError, ValueError): ...
86,250
Get an item from the Row by column name. Args: column: Tuple of column names, or a (str) column name, or positional column number, 0-indexed. default_value: The value to use if the key is not found. Returns: A list or string with column value(s) or default_value if not found.
def get(self, column, default_value=None): if isinstance(column, (list, tuple)): ret = [] for col in column: ret.append(self.get(col, default_value)) return ret # Perhaps we have a range like '1', ':-1' or '1:'. try: return...
86,252
Fetches the column number (0 indexed). Args: column: A string, column to fetch the index of. Returns: An int, the row index number. Raises: ValueError: The specified column was not found.
def index(self, column): # pylint: disable=C6409 for i, key in enumerate(self._keys): if key == column: return i raise ValueError('Column "%s" not found.' % column)
86,253
Inserts new values at a specified offset. Args: key: string for header value. value: string for a data value. row_index: Offset into row for data. Raises: IndexError: If the offset is out of bands.
def Insert(self, key, value, row_index): if row_index < 0: row_index += len(self) if not 0 <= row_index < len(self): raise IndexError('Index "%s" is out of bounds.' % row_index) new_row = Row() for idx in self.header: if self.index(idx) == r...
86,255
Applies the function to every row in the table. Args: function: A function applied to each row. Returns: A new TextTable() Raises: TableError: When transform is not invalid row entry. The transform must be compatible with Append().
def Map(self, function): new_table = self.__class__() # pylint: disable=protected-access new_table._table = [self.header] for row in self: filtered_row = function(row) if filtered_row: new_table.Append(filtered_row) return new_tabl...
86,259
Sorts rows in the texttable. Args: cmp: func, non default sort algorithm to use. key: func, applied to each element before sorting. reverse: bool, reverse order of sort.
def sort(self, cmp=None, key=None, reverse=False): def _DefaultKey(value): result = [] for key in self.header: # Try sorting as numerical value if possible. try: result.append(float(value[key])) ex...
86,260
Extends all rows in the texttable. The rows are extended with the new columns from the table. Args: table: A texttable, the table to extend this table by. keys: A set, the set of columns to use as the key. If None, the row index is used. Raises: IndexError: If key is not a valid...
def extend(self, table, keys=None): if keys: for k in keys: if k not in self._Header(): raise IndexError("Unknown key: '%s'", k) extend_with = [] for column in table.header: if column not in self.header: extend...
86,261
Removes a row from the table. Args: row: int, the row number to delete. Must be >= 1, as the header cannot be removed. Raises: TableError: Attempt to remove nonexistent or header row.
def Remove(self, row): if row == 0 or row > self.size: raise TableError("Attempt to remove header row") new_table = [] # pylint: disable=E1103 for t_row in self._table: if t_row.row != row: new_table.append(t_row) if t_row....
86,262
Sets the current row to new list. Args: new_values: List|dict of new values to insert into row. row: int, Row to insert values into. Raises: TableError: If number of new values is not equal to row size.
def _SetRow(self, new_values, row=0): if not row: row = self._row_index if row > self.size: raise TableError("Entry %s beyond table size %s." % (row, self.size)) self._table[row].values = new_values
86,264
Sets header of table to the given tuple. Args: new_values: Tuple of new header values.
def _SetHeader(self, new_values): row = self.row_class() row.row = 0 for v in new_values: row[v] = v self._table[0] = row
86,265
Finds the largest indivisible word of a string. ...and thus the smallest possible column width that can contain that word unsplit over rows. Args: text: A string of text potentially consisting of words. Returns: Integer size of the largest single word in the text.
def _SmallestColSize(self, text): if not text: return 0 stripped = terminal.StripAnsiText(text) return max(len(word) for word in stripped.split())
86,266
Retrieves the first non header row with the column of the given value. Args: column: str, the name of the column to check. value: str, The value of the column to check. Returns: A Row() of the first row found, None otherwise. Raises: IndexError: The specified column does not exist...
def RowWith(self, column, value): for row in self._table[1:]: if row[column] == value: return row return None
86,267
Appends a new column to the table. Args: column: A string, name of the column to add. default: Default value for entries. Defaults to ''. col_index: Integer index for where to insert new column. Raises: TableError: Column name already exists.
def AddColumn(self, column, default="", col_index=-1): if column in self.table: raise TableError("Column %r already in table." % column) if col_index == -1: self._table[0][column] = column for i in range(1, len(self._table)): self._table[i][co...
86,268
Adds a new row (list) to the table. Args: new_values: Tuple, dict, or Row() of new values to append as a row. Raises: TableError: Supplied tuple not equal to table width.
def Append(self, new_values): newrow = self.NewRow() newrow.values = new_values self._table.append(newrow)
86,269
Fetches a new, empty row, with headers populated. Args: value: Initial value to set each row entry to. Returns: A Row() object.
def NewRow(self, value=""): newrow = self.row_class() newrow.row = self.size + 1 newrow.table = self headers = self._Header() for header in headers: newrow[header] = value return newrow
86,270
Parses buffer into tabular format. Strips off comments (preceded by '#'). Optionally parses and indexes by first line (header). Args: buf: String file buffer containing CSV data. header: Is the first line of buffer a header. separator: String that CSV is separated by. Returns: ...
def CsvToTable(self, buf, header=True, separator=","): self.Reset() header_row = self.row_class() if header: line = buf.readline() header_str = "" while not header_str: # Remove comments. header_str = line.split("#")[0...
86,271
Returns windows interfaces through GetAdaptersAddresses. params: - extended: include anycast and multicast IPv6 (default False)
def get_windows_if_list(extended=False): # Should work on Windows XP+ def _get_mac(x): size = x["physical_address_length"] if size != 6: return "" data = bytearray(x["physical_address"]) return str2mac(bytes(data)[:size]) def _get_ips(x): unicast = x...
86,465
This function extracts the source/destination address of a 6LoWPAN from its upper Dot15d4Data (802.15.4 data) layer. params: - source: if True, the address is the source one. Otherwise, it is the destination. returns: the packed & processed address
def _extract_dot15d4address(pkt, source=True): underlayer = pkt.underlayer while underlayer is not None and not isinstance(underlayer, Dot15d4Data): # noqa: E501 underlayer = underlayer.underlayer if type(underlayer) == Dot15d4Data: addr = underlayer.src_addr if source else underlayer....
87,344
List available layers, or infos on a given layer class or name. params: - obj: Packet / packet name to use - case_sensitive: if obj is a string, is it case sensitive? - verbose
def ls(obj=None, case_sensitive=False, verbose=False): is_string = isinstance(obj, six.string_types) if obj is None or is_string: tip = False if obj is None: tip = True all_layers = sorted(conf.layers, key=lambda x: x.__name__) else: pattern = re...
87,400
Find all MACs registered to a OUI params: - name: the OUI name - case_sensitive: default to False returns: a dict of mac:tuples (Name, Extended Name)
def reverse_lookup(self, name, case_sensitive=False): if case_sensitive: filtr = lambda x, l: any(x == z for z in l) else: name = name.lower() filtr = lambda x, l: any(x == z.lower() for z in l) return {k: v for k, v in six.iteritems(self.__dict__) ...
87,573
Process all NetflowV9/10 Packets to match IDs of the DataFlowsets with the Headers params: - plist: the list of mixed NetflowV9/10 packets. - verb: verbose print (0/1)
def netflowv9_defragment(plist, verb=1): if not isinstance(plist, (PacketList, list)): plist = [plist] # We need the whole packet to be dissected to access field def in # NetflowFlowsetV9 or NetflowOptionsFlowsetV9/10 definitions = {} definitions_opts = {} ignored = set() # Iter...
87,645
Return the interface mode. params: - iface: the iwconfig interface
def get_iface_mode(iface): p = subprocess.Popen(["iwconfig", iface], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) output, err = p.communicate() match = re.search(br"mode:([a-zA-Z]*)", output.lower()) if match: return plain_str(match.group(1)) return "unknow...
87,914
Sets the monitor mode (or remove it) from an interface. params: - iface: the iwconfig interface - monitor: True if the interface should be set in monitor mode, False if it should be in managed mode
def set_iface_monitor(iface, monitor): mode = get_iface_mode(iface) if mode == "unknown": warning("Could not parse iwconfig !") current_monitor = mode == "monitor" if monitor == current_monitor: # Already correct return True s_mode = "monitor" if monitor else "managed" ...
87,915
Checks that module has a higher version that minver. params: - module: a module to test - minver: a tuple of versions
def _version_checker(module, minver): # We could use LooseVersion, but distutils imports imp which is deprecated version_regexp = r'[a-z]?((?:\d|\.)+\d+)(?:\.dev[0-9]+)?' version_tags = re.match(version_regexp, module.__version__) if not version_tags: return False version_tags = version...
88,087
Show the list of all existing contribs. Params: - name: filter to search the contribs - ret: whether the function should return a dict instead of printing it
def list_contrib(name=None, ret=False, _debug=False): # _debug: checks that all contrib modules have correctly defined: # # scapy.contrib.description = [...] # # scapy.contrib.status = [...] # # scapy.contrib.name = [...] (optional) # or set the flag: # # scapy.contrib.description = skip ...
88,228
Save current Scapy session to the file specified in the fname arg. params: - fname: file to save the scapy session in - session: scapy session to use. If None, the console one will be used - pickleProto: pickle proto version (default: -1 = latest)
def save_session(fname=None, session=None, pickleProto=-1): from scapy import utils if fname is None: fname = conf.session if not fname: conf.session = fname = utils.get_temp_file(keep=True) log_interactive.info("Use [%s] as session file" % fname) if session is None: ...
88,229
Load current Scapy session from the file specified in the fname arg. This will erase any existing session. params: - fname: file to load the scapy session from
def load_session(fname=None): if fname is None: fname = conf.session try: s = six.moves.cPickle.load(gzip.open(fname, "rb")) except IOError: try: s = six.moves.cPickle.load(open(fname, "rb")) except IOError: # Raise "No such file exception" ...
88,230
Update current Scapy session from the file specified in the fname arg. params: - fname: file to load the scapy session from
def update_session(fname=None): if fname is None: fname = conf.session try: s = six.moves.cPickle.load(gzip.open(fname, "rb")) except IOError: s = six.moves.cPickle.load(open(fname, "rb")) scapy_session = six.moves.builtins.__dict__["scapy_session"] scapy_session.update(...
88,231
Create an interactive session and execute the commands passed as "cmds" and return all output params: - cmds: a list of commands to run returns: (output, returned) The output contains both sys.stdout and sys.stderr logs
def autorun_get_interactive_session(cmds, **kargs): sstdout, sstderr = sys.stdout, sys.stderr sw = StringWriter() try: try: sys.stdout = sys.stderr = sw res = autorun_commands(cmds, **kargs) except StopAutorun as e: e.code_run = sw.s raise...
88,237
This function is called during sendrecv() routine to select the available sockets. params: - sockets: an array of sockets that need to be selected returns: - an array of sockets that were selected - the function to be called next to get the packets (i.g. recv)
def select(sockets, remain=conf.recv_poll_rate): try: inp, _, _ = select(sockets, [], [], remain) except (IOError, select_error) as exc: # select.error has no .errno attribute if exc.args[0] != errno.EINTR: raise return inp, None
88,431
Returns the IPv4 routes to a host. parameters: - dst: the IPv4 of the destination host returns: (iface, output_ip, gateway_ip) - iface: the interface used to connect to the host - output_ip: the outgoing IP that will be used - gateway_ip: the gateway IP that will be ...
def route(self, dst=None, verbose=conf.verb): dst = dst or "0.0.0.0" # Enable route(None) to return default route if isinstance(dst, bytes): try: dst = plain_str(dst) except UnicodeDecodeError: raise TypeError("Unknown IP address input (b...
88,563
Initialize an ``TensorFlowPredictor``. Args: endpoint_name (str): The name of the endpoint to perform inference on. sagemaker_session (sagemaker.session.Session): Session object which manages interactions with Amazon SageMaker APIs and any other AWS services needed. If n...
def __init__(self, endpoint_name, sagemaker_session=None): super(TensorFlowPredictor, self).__init__(endpoint_name, sagemaker_session, tf_json_serializer, tf_json_deserializer)
100,222
Stream the output of a process to stdout This function takes an existing process that will be polled for output. Only stdout will be polled and sent to sys.stdout. Args: process(subprocess.Popen): a process that has been started with stdout=PIPE and stderr=STDOUT Returns (int): pr...
def _stream_output(process): exit_code = None while exit_code is None: stdout = process.stdout.readline().decode("utf-8") sys.stdout.write(stdout) exit_code = process.poll() if exit_code != 0: raise RuntimeError("Process exited with code: %s" % exit_code) return e...
100,236
Run a training job locally using docker-compose. Args: input_data_config (dict): The Input Data Configuration, this contains data such as the channels to be used for training. hyperparameters (dict): The HyperParameters for the training job. job_name (str): Na...
def train(self, input_data_config, output_data_config, hyperparameters, job_name): self.container_root = self._create_tmp_folder() os.mkdir(os.path.join(self.container_root, 'output')) # create output/data folder since sagemaker-containers 2.0 expects it os.mkdir(os.path.join(se...
100,246
Host a local endpoint using docker-compose. Args: primary_container (dict): dictionary containing the container runtime settings for serving. Expected keys: - 'ModelDataUrl' pointing to a file or s3:// location. - 'Environment' a dictionary of environm...
def serve(self, model_dir, environment): logger.info("serving") self.container_root = self._create_tmp_folder() logger.info('creating hosting dir in {}'.format(self.container_root)) volumes = self._prepare_serving_volumes(model_dir) # If the user script was passed as ...
100,247
Get the model artifacts from all the container nodes. Used after training completes to gather the data from all the individual containers. As the official SageMaker Training Service, it will override duplicate files if multiple containers have the same file names. Args: com...
def retrieve_artifacts(self, compose_data, output_data_config, job_name): # We need a directory to store the artfiacts from all the nodes # and another one to contained the compressed final artifacts artifacts = os.path.join(self.container_root, 'artifacts') compressed_artifacts...
100,249
Write the config files for the training containers. This method writes the hyperparameters, resources and input data configuration files. Args: host (str): Host to write the configuration for hyperparameters (dict): Hyperparameters for training. input_data_config (d...
def write_config_files(self, host, hyperparameters, input_data_config): config_path = os.path.join(self.container_root, host, 'input', 'config') resource_config = { 'current_host': host, 'hosts': self.hosts } json_input_data_config = {} for c in...
100,250
Create a Volume instance the container path can be provided as a container_dir or as a channel name but not both. Args: host_dir (str): path to the volume data in the host container_dir (str): path inside the container that host_dir will be mapped to channel (str): c...
def __init__(self, host_dir, container_dir=None, channel=None): if not container_dir and not channel: raise ValueError('Either container_dir or channel must be declared.') if container_dir and channel: raise ValueError('container_dir and channel cannot be declared toget...
100,262
Initialize an ``PyTorchPredictor``. Args: endpoint_name (str): The name of the endpoint to perform inference on. sagemaker_session (sagemaker.session.Session): Session object which manages interactions with Amazon SageMaker APIs and any other AWS services needed. If not ...
def __init__(self, endpoint_name, sagemaker_session=None): super(PyTorchPredictor, self).__init__(endpoint_name, sagemaker_session, npy_serializer, numpy_deserializer)
100,265
Set hyperparameters needed for training. Args: * records (:class:`~RecordSet`): The records to train this ``Estimator`` on. * mini_batch_size (int or None): The size of each mini-batch to use when training. If ``None``, a default value will be used. * job_nam...
def _prepare_for_training(self, records, mini_batch_size=None, job_name=None): num_records = None if isinstance(records, list): for record in records: if record.channel == 'train': num_records = record.num_records break ...
100,268
Validate that the source directory exists and it contains the user script Args: script (str): Script filename. directory (str): Directory containing the source file. Raises: ValueError: If ``directory`` does not exist, is not a directory, or does not contain ``script``.
def validate_source_dir(script, directory): if directory: if not os.path.isfile(os.path.join(directory, script)): raise ValueError('No file named "{}" was found in directory "{}".'.format(script, directory)) return True
100,272
Extract the framework version from the image tag. Args: image_tag (str): Image tag, which should take the form '<framework_version>-<device>-<py_version>' Returns: str: The framework version.
def framework_version_from_tag(image_tag): tag_pattern = re.compile('^(.*)-(cpu|gpu)-(py2|py3)$') tag_match = tag_pattern.match(image_tag) return None if tag_match is None else tag_match.group(1)
100,276
Returns an (s3 bucket, key name/prefix) tuple from a url with an s3 scheme Args: url (str): Returns: tuple: A tuple containing: str: S3 bucket name str: S3 key
def parse_s3_url(url): parsed_url = urlparse(url) if parsed_url.scheme != "s3": raise ValueError("Expecting 's3' scheme, got: {} in {}".format(parsed_url.scheme, url)) return parsed_url.netloc, parsed_url.path.lstrip('/')
100,277
Describe a local training job. Args: TrainingJobName (str): Training job name to describe. Returns: (dict) DescribeTrainingJob Response.
def describe_training_job(self, TrainingJobName): if TrainingJobName not in LocalSagemakerClient._training_jobs: error_response = {'Error': {'Code': 'ValidationException', 'Message': 'Could not find local training job'}} raise ClientError(error_response, 'describe_training_job')...
100,281
Create a Local Model Object Args: ModelName (str): the Model Name PrimaryContainer (dict): a SageMaker primary container definition
def create_model(self, ModelName, PrimaryContainer, *args, **kwargs): # pylint: disable=unused-argument LocalSagemakerClient._models[ModelName] = _LocalModel(ModelName, PrimaryContainer)
100,284
Initializes a LocalSageMakerRuntimeClient Args: config (dict): Optional configuration for this client. In particular only the local port is read.
def __init__(self, config=None): self.http = urllib3.PoolManager() self.serving_port = 8080 self.config = config self.serving_port = get_config_value('local.serving_port', config) or 8080
100,291
Initialize an ``SKLearnPredictor``. Args: endpoint_name (str): The name of the endpoint to perform inference on. sagemaker_session (sagemaker.session.Session): Session object which manages interactions with Amazon SageMaker APIs and any other AWS services needed. If not ...
def __init__(self, endpoint_name, sagemaker_session=None): super(SKLearnPredictor, self).__init__(endpoint_name, sagemaker_session, npy_serializer, numpy_deserializer)
100,296
Initialize an AmazonAlgorithmEstimatorBase. Args: data_location (str or None): The s3 prefix to upload RecordSet objects to, expressed as an S3 url. For example "s3://example-bucket/some-key-prefix/". Objects will be saved in a unique sub-directory of the specified l...
def __init__(self, role, train_instance_count, train_instance_type, data_location=None, **kwargs): super(AmazonAlgorithmEstimatorBase, self).__init__(role, train_instance_count, train_instance_type, **kwargs) data_location = data_locat...
100,303
Convert the job description to init params that can be handled by the class constructor Args: job_details: the returned job details from a describe_training_job API call. model_channel_name (str): Name of the channel where pre-trained model data will be downloaded. Returns: ...
def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None): init_params = super(AmazonAlgorithmEstimatorBase, cls)._prepare_init_params_from_job_description( job_details, model_channel_name) # The hyperparam names may not be the same as the class attri...
100,306
Set hyperparameters needed for training. Args: * records (:class:`~RecordSet`): The records to train this ``Estimator`` on. * mini_batch_size (int or None): The size of each mini-batch to use when training. If ``None``, a default value will be used. * job_nam...
def _prepare_for_training(self, records, mini_batch_size=None, job_name=None): super(AmazonAlgorithmEstimatorBase, self)._prepare_for_training(job_name=job_name) feature_dim = None if isinstance(records, list): for record in records: if record.channel == 't...
100,307
Return an Instance of :class:`sagemaker.local.data.DataSource` that can handle the provided data_source URI. data_source can be either file:// or s3:// Args: data_source (str): a valid URI that points to a data source. sagemaker_session (:class:`sagemaker.session.Session`): a SageMaker Ses...
def get_data_source_instance(data_source, sagemaker_session): parsed_uri = urlparse(data_source) if parsed_uri.scheme == 'file': return LocalFileDataSource(parsed_uri.netloc + parsed_uri.path) elif parsed_uri.scheme == 's3': return S3DataSource(parsed_uri.netloc, parsed_uri.path, sagema...
100,311
Return an Instance of :class:`sagemaker.local.data.Splitter` according to the specified `split_type`. Args: split_type (str): either 'Line' or 'RecordIO'. Can be left as None to signal no data split will happen. Returns :class:`sagemaker.local.data.Splitter`: an Instance of a S...
def get_splitter_instance(split_type): if split_type is None: return NoneSplitter() elif split_type == 'Line': return LineSplitter() elif split_type == 'RecordIO': return RecordIOSplitter() else: raise ValueError('Invalid Split Type: %s' % split_type)
100,312
Return an Instance of :class:`sagemaker.local.data.BatchStrategy` according to `strategy` Args: strategy (str): Either 'SingleRecord' or 'MultiRecord' splitter (:class:`sagemaker.local.data.Splitter): splitter to get the data from. Returns :class:`sagemaker.local.data.BatchStrategy`: a...
def get_batch_strategy_instance(strategy, splitter): if strategy == 'SingleRecord': return SingleRecordStrategy(splitter) elif strategy == 'MultiRecord': return MultiRecordStrategy(splitter) else: raise ValueError('Invalid Batch Strategy: %s - Valid Strategies: "SingleRecord", "...
100,313
Create an S3DataSource instance Args: bucket (str): S3 bucket name prefix (str): S3 prefix path to the data sagemaker_session (:class:`sagemaker.session.Session`): a sagemaker_session with the desired settings to talk to S3
def __init__(self, bucket, prefix, sagemaker_session): # Create a temporary dir to store the S3 contents root_dir = sagemaker.utils.get_config_value('local.container_root', sagemaker_session.config) if root_dir: root_dir = os.path.abspath(root_dir) working_dir = te...
100,318
Split a file into records using a specific strategy This RecordIOSplitter splits the data into individual RecordIO records. Args: file (str): path to the file to split Returns: generator for the individual records that were split from the file
def split(self, file): with open(file, 'rb') as f: for record in sagemaker.amazon.common.read_recordio(f): yield record
100,319
Group together as many records as possible to fit in the specified size Args: file (str): file path to read the records from. size (int): maximum size in MB that each group of records will be fitted to. passing 0 means unlimited size. Returns: genera...
def pad(self, file, size=6): buffer = '' for element in self.splitter.split(file): if _payload_size_within_limit(buffer + element, size): buffer += element else: tmp = buffer buffer = element yield tmp ...
100,320
Group together as many records as possible to fit in the specified size This SingleRecordStrategy will not group any record and will return them one by one as long as they are within the maximum size. Args: file (str): file path to read the records from. size (int): max...
def pad(self, file, size=6): for element in self.splitter.split(file): if _validate_payload_size(element, size): yield element
100,321
Initialize an ``MXNetPredictor``. Args: endpoint_name (str): The name of the endpoint to perform inference on. sagemaker_session (sagemaker.session.Session): Session object which manages interactions with Amazon SageMaker APIs and any other AWS services needed. If not sp...
def __init__(self, endpoint_name, sagemaker_session=None): super(MXNetPredictor, self).__init__(endpoint_name, sagemaker_session, json_serializer, json_deserializer)
100,322
Convert the job description to init params that can be handled by the class constructor Args: job_details: the returned job details from a describe_training_job API call. model_channel_name (str): Name of the channel where pre-trained model data will be downloaded. Returns: ...
def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None): init_params = super(Chainer, cls)._prepare_init_params_from_job_description(job_details, model_channel_name) for argument in [Chainer._use_mpi, Chainer._num_processes, Chainer._process_slots_per_host, ...
100,328
Create a SageMaker Model Entity Args: *args: Arguments coming from the caller. This class does not require any so they are ignored.
def _create_sagemaker_model(self, *args): # pylint: disable=unused-argument if self.algorithm_arn: # When ModelPackage is created using an algorithm_arn we need to first # create a ModelPackage. If we had already created one then its fine to re-use it. if self._crea...
100,356
Delete the Amazon SageMaker endpoint backing this predictor. Also delete the endpoint configuration attached to it if delete_endpoint_config is True. Args: delete_endpoint_config (bool, optional): Flag to indicate whether to delete endpoint configuration together with endpoi...
def delete_endpoint(self, delete_endpoint_config=True): if delete_endpoint_config: self._delete_endpoint_config() self.sagemaker_session.delete_endpoint(self.endpoint)
100,372
Take data of various data formats and serialize them into CSV. Args: data (object): Data to be serialized. Returns: object: Sequence of bytes to be used for the request body.
def __call__(self, data): # For inputs which represent multiple "rows", the result should be newline-separated CSV rows if _is_mutable_sequence_like(data) and len(data) > 0 and _is_sequence_like(data[0]): return '\n'.join([_CsvSerializer._serialize_row(row) for row in data]) ...
100,376
Take data of various formats and serialize them into the expected request body. This uses information about supported input formats for the deployed model. Args: data (object): Data to be serialized. Returns: object: Serialized data used for the request.
def __call__(self, data): if isinstance(data, dict): # convert each value in dict from a numpy array to a list if necessary, so they can be json serialized return json.dumps({k: _ndarray_to_list(v) for k, v in six.iteritems(data)}) # files and buffers if hasattr...
100,381
Decode a JSON object into the corresponding Python object. Args: stream (stream): The response stream to be deserialized. content_type (str): The content type of the response. Returns: object: Body of the response deserialized into a JSON object.
def __call__(self, stream, content_type): try: return json.load(codecs.getreader('utf-8')(stream)) finally: stream.close()
100,382
Decode from serialized data into a Numpy array. Args: stream (stream): The response stream to be deserialized. content_type (str): The content type of the response. Can accept CSV, JSON, or NPY data. Returns: object: Body of the response deserialized into a Numpy ar...
def __call__(self, stream, content_type=CONTENT_TYPE_NPY): try: if content_type == CONTENT_TYPE_CSV: return np.genfromtxt(codecs.getreader('utf-8')(stream), delimiter=',', dtype=self.dtype) elif content_type == CONTENT_TYPE_JSON: return np.array(j...
100,384
Serialize data into the request body in NPY format. Args: data (object): Data to be serialized. Can be a numpy array, list, file, or buffer. Returns: object: NPY serialized data used for the request.
def __call__(self, data, dtype=None): if isinstance(data, np.ndarray): if not data.size > 0: raise ValueError("empty array can't be serialized") return _npy_serialize(data) if isinstance(data, list): if not len(data) > 0: rais...
100,385
A pandas dataframe with lots of interesting results about this object. Created by calling SageMaker List and Describe APIs and converting them into a convenient tabular summary. Args: force_refresh (bool): Set to True to fetch the latest data from SageMaker API.
def dataframe(self, force_refresh=False): if force_refresh: self.clear_cache() if self._dataframe is None: self._dataframe = self._fetch_dataframe() return self._dataframe
100,387
Initialize a ``HyperparameterTuningJobAnalytics`` instance. Args: hyperparameter_tuning_job_name (str): name of the HyperparameterTuningJob to analyze. sagemaker_session (sagemaker.session.Session): Session object which manages interactions with Amazon SageMaker APIs and...
def __init__(self, hyperparameter_tuning_job_name, sagemaker_session=None): sagemaker_session = sagemaker_session or Session() self._sage_client = sagemaker_session.sagemaker_client self._tuning_job_name = hyperparameter_tuning_job_name self.clear_cache()
100,388
Call ``DescribeHyperParameterTuningJob`` for the hyperparameter tuning job. Args: force_refresh (bool): Set to True to fetch the latest data from SageMaker API. Returns: dict: The Amazon SageMaker response for ``DescribeHyperParameterTuningJob``.
def description(self, force_refresh=False): if force_refresh: self.clear_cache() if not self._tuning_job_describe_result: self._tuning_job_describe_result = self._sage_client.describe_hyper_parameter_tuning_job( HyperParameterTuningJobName=self.name ...
100,392
A (paginated) list of everything from ``ListTrainingJobsForTuningJob``. Args: force_refresh (bool): Set to True to fetch the latest data from SageMaker API. Returns: dict: The Amazon SageMaker response for ``ListTrainingJobsForTuningJob``.
def training_job_summaries(self, force_refresh=False): if force_refresh: self.clear_cache() if self._training_job_summaries is not None: return self._training_job_summaries output = [] next_args = {} for count in range(100): logging.de...
100,393
Append a timestamp to the provided string. This function assures that the total length of the resulting string is not longer than the specified max length, trimming the input parameter if necessary. Args: base (str): String used as prefix to generate the unique name. max_length (int): Maxi...
def name_from_base(base, max_length=63, short=False):
    """Append a timestamp to *base*, trimming so the result fits max_length.

    Args:
        base (str): Prefix used to generate the unique name.
        max_length (int): Maximum total length of the returned string
            (default: 63); *base* is trimmed if necessary.
        short (bool): Use the short timestamp format when True.

    Returns:
        str: ``<trimmed base>-<timestamp>``, at most ``max_length`` chars.
    """
    suffix = sagemaker_short_timestamp() if short else sagemaker_timestamp()
    room_for_base = max_length - len(suffix) - 1  # 1 for the joining dash
    return '{}-{}'.format(base[:room_for_base], suffix)
100,401
Extract the base name of the image to use as the 'algorithm name' for the job. Args: image (str): Image name. Returns: str: Algorithm name, as extracted from the image name.
def base_name_from_image(image):
    """Extract the base image name to use as the 'algorithm name' for a job.

    Args:
        image (str): Image name, possibly including a registry/repository
            prefix and a ``:tag`` suffix.

    Returns:
        str: The repository component of the image name, or the input
        unchanged when it does not match the expected pattern.
    """
    match = re.match("^(.+/)?([^:/]+)(:[^:]+)?$", image)
    if match is None:
        return image
    return match.group(2)
100,403
Convert the input to a string, unless it is a unicode string in Python 2. Unicode strings are supported as native strings in Python 3, but ``str()`` cannot be invoked on unicode strings in Python 2, so we need to check for that case when converting user-specified values to strings. Args: value...
def to_str(value):
    """Convert *value* to a string, passing Python 2 unicode through as-is.

    ``str()`` cannot be applied to unicode strings under Python 2, so such
    input is returned unchanged there; everything else goes through ``str``.

    Args:
        value: Object to convert.

    Returns:
        str: String form of *value* (or the original unicode on Python 2).
    """
    is_py2_text = sys.version_info.major < 3 and isinstance(value, six.string_types)
    return value if is_py2_text else str(value)
100,407
Returns a string contains last modified time and the secondary training job status message. Args: job_description: Returned response from DescribeTrainingJob call prev_description: Previous job description from DescribeTrainingJob call Returns: str: Job status string to be printed.
def secondary_training_status_message(job_description, prev_description): if job_description is None or job_description.get('SecondaryStatusTransitions') is None\ or len(job_description.get('SecondaryStatusTransitions')) == 0: return '' prev_description_secondary_transitions = prev_de...
100,409
Download a folder from S3 to a local path Args: bucket_name (str): S3 bucket name prefix (str): S3 prefix within the bucket that will be downloaded. Can be a single file. target (str): destination path where the downloaded items will be placed sagemaker_session (:class:`sagemaker.se...
def download_folder(bucket_name, prefix, target, sagemaker_session): boto_session = sagemaker_session.boto_session s3 = boto_session.resource('s3') bucket = s3.Bucket(bucket_name) prefix = prefix.lstrip('/') # there is a chance that the prefix points to a file and not a 'directory' if that i...
100,410
Create a tar file containing all the source_files Args: source_files (List[str]): List of file paths that will be contained in the tar file Returns: (str): path to created tar file
def create_tar_file(source_files, target=None): if target: filename = target else: _, filename = tempfile.mkstemp() with tarfile.open(filename, mode='w:gz') as t: for sf in source_files: # Add all files from the directory into the root of the directory structure of ...
100,411
Download a Single File from S3 into a local path Args: bucket_name (str): S3 bucket name path (str): file path within the bucket target (str): destination directory for the downloaded file. sagemaker_session (:class:`sagemaker.session.Session`): a sagemaker session to interact with ...
def download_file(bucket_name, path, target, sagemaker_session):
    """Download a single file from S3 to a local path.

    Args:
        bucket_name (str): S3 bucket name.
        path (str): Key of the file within the bucket; a leading slash is
            stripped before the download.
        target (str): Local destination for the downloaded file.
        sagemaker_session (sagemaker.session.Session): Session providing the
            boto session used to talk to S3.
    """
    s3_resource = sagemaker_session.boto_session.resource('s3')
    s3_resource.Bucket(bucket_name).download_file(path.lstrip('/'), target)
100,412
Initialize ``Tensorboard`` instance. Args: estimator (sagemaker.estimator.Framework): A SageMaker ``Estimator``. logdir (str): Directory for logs (default: None). If not specified, a temporary directory is made.
def __init__(self, estimator, logdir=None):
    """Initialize a ``Tensorboard`` thread.

    Args:
        estimator (sagemaker.estimator.Framework): A SageMaker ``Estimator``.
        logdir (str): Directory for logs (default: None). A temporary
            directory is created when one is not supplied.
    """
    threading.Thread.__init__(self)
    self.event = threading.Event()
    self.estimator = estimator
    if logdir:
        self.logdir = logdir
    else:
        self.logdir = tempfile.mkdtemp()
100,413
Convert the job description to init params that can be handled by the class constructor Args: job_details: the returned job details from a describe_training_job API call. Returns: dictionary: The transformed init_params
def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None): init_params = super(TensorFlow, cls)._prepare_init_params_from_job_description(job_details, model_channel_name) # Mov...
100,422
Initialize an ``ChainerPredictor``. Args: endpoint_name (str): The name of the endpoint to perform inference on. sagemaker_session (sagemaker.session.Session): Session object which manages interactions with Amazon SageMaker APIs and any other AWS services needed. If not ...
def __init__(self, endpoint_name, sagemaker_session=None):
    """Initialize a ``ChainerPredictor``.

    Args:
        endpoint_name (str): Name of the endpoint to perform inference on.
        sagemaker_session (sagemaker.session.Session): Session managing
            interactions with SageMaker APIs and other AWS services; base
            class defaults apply when omitted.
    """
    parent = super(ChainerPredictor, self)
    parent.__init__(endpoint_name, sagemaker_session,
                    npy_serializer, numpy_deserializer)
100,432
Initializes the ``WarmStartConfig`` with the provided ``WarmStartTypes`` and parents. Args: warm_start_type (sagemaker.tuner.WarmStartTypes): This should be one of the supported warm start types in WarmStartType parents (set{str}): Set of parent tuning jobs which will be use...
def __init__(self, warm_start_type, parents): if warm_start_type not in WarmStartTypes: raise ValueError( "Invalid type: {}, valid warm start types are: [{}]".format(warm_start_type, [t for t in War...
100,443
Delete an Amazon SageMaker endpoint. If an endpoint name is not specified, this defaults to looking for an endpoint that shares a name with the best training job for deletion. Args: endpoint_name (str): Name of the endpoint to delete
def delete_endpoint(self, endpoint_name=None):
    """Delete an Amazon SageMaker endpoint.

    When no endpoint name is given, this falls back to the endpoint that
    shares a name with the best training job of this tuner.

    Args:
        endpoint_name (str): Name of the endpoint to delete (default: the
            best training job's endpoint).
    """
    if not endpoint_name:
        endpoint_name = self.best_training_job()
    self.sagemaker_session.delete_endpoint(endpoint_name)
100,452
Create a new Amazon SageMaker hyperparameter tuning job from the HyperparameterTuner. Args: tuner (sagemaker.tuner.HyperparameterTuner): HyperparameterTuner object created by the user. inputs (str): Parameters used when called :meth:`~sagemaker.estimator.EstimatorBase.fit`. Ret...
def start_new(cls, tuner, inputs): config = _Job._load_config(inputs, tuner.estimator) warm_start_config_req = None if tuner.warm_start_config: warm_start_config_req = tuner.warm_start_config.to_input_req() tuner_args = config.copy() tuner_args['job_name']...
100,463
Initialize the class. Args: force (bool): If True, render colorizes output no matter where the output is (default: False).
def __init__(self, force=False):
    """Initialize the color wrapper.

    Output is colorized when forced, when stdout is a TTY, or when the
    JPY_PARENT_PID environment variable is set (presumably a Jupyter
    kernel — TODO confirm).

    Args:
        force (bool): If True, colorize output no matter where it goes
            (default: False).
    """
    in_notebook = os.environ.get('JPY_PARENT_PID', None)
    self.colorize = force or sys.stdout.isatty() or in_notebook
100,470
Print the output, colorized or not, depending on the environment. Args: index (int): The instance number. s (str): The string to print.
def __call__(self, index, s):
    """Print the output, colorized or not, depending on the environment.

    Args:
        index (int): The instance number, used to pick a color.
        s (str): The string to print.
    """
    if not self.colorize:
        print(s)
    else:
        self._color_wrap(index, s)
100,471
Convert the job description to init params that can be handled by the class constructor Args: job_details: the returned job details from a describe_training_job API call. model_channel_name (str): Name of the channel where pre-trained model data will be downloaded. ...
def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None): init_params = super(RLEstimator, cls)\ ._prepare_init_params_from_job_description(job_details, model_channel_name) image_name = init_params.pop('image') framework, _, tag, _ = fw_utils...
100,476
Provides default metric definitions based on provided toolkit. Args: toolkit(sagemaker.rl.RLToolkit): RL Toolkit to be used for training. Returns: list: metric definitions
def default_metric_definitions(cls, toolkit): if toolkit is RLToolkit.COACH: return [ {'Name': 'reward-training', 'Regex': '^Training>.*Total reward=(.*?),'}, {'Name': 'reward-testing', 'Regex': '^Testing>.*Total reward=(.*?)...
100,484
Prepare S3 operations (specify where to upload `source_dir`) and environment variables related to framework. Args: estimator (sagemaker.estimator.Estimator): The framework estimator to get information from and update. s3_operations (dict): The dict to specify s3 operations (upload `source_dir`)...
def prepare_framework(estimator, s3_operations): if estimator.code_location is not None: bucket, key = fw_utils.parse_s3_url(estimator.code_location) key = os.path.join(key, estimator._current_job_name, 'source', 'sourcedir.tar.gz') else: bucket = estimator.sagemaker_session._defaul...
100,485
Updated the S3 URI of the framework source directory in given estimator. Args: estimator (sagemaker.estimator.Framework): The Framework estimator to update. job_name (str): The new job name included in the submit S3 URI Returns: str: The updated S3 URI of framework source directory
def update_submit_s3_uri(estimator, job_name): if estimator.uploaded_code is None: return pattern = r'(?<=/)[^/]+?(?=/source/sourcedir.tar.gz)' # update the S3 URI with the latest training job. # s3://path/old_job/source/sourcedir.tar.gz will become s3://path/new_job/source/sourcedir.tar....
100,490
Prepare the framework model container information. Specify related S3 operations for Airflow to perform. (Upload `source_dir`) Args: model (sagemaker.model.FrameworkModel): The framework model instance_type (str): The EC2 instance type to deploy this Model to. For example, 'ml.p2.xlarge'. ...
def prepare_framework_container_def(model, instance_type, s3_operations): deploy_image = model.image if not deploy_image: region_name = model.sagemaker_session.boto_session.region_name deploy_image = fw_utils.create_image_uri( region_name, model.__framework_name__, instance_type...
100,492
Set any values in the estimator that need to be set before training. Args: * job_name (str): Name of the training job to be created. If not specified, one is generated, using the base name given to the constructor if applicable.
def _prepare_for_training(self, job_name=None): if job_name is not None: self._current_job_name = job_name else: # honor supplied base_job_name or generate it if self.base_job_name: base_name = self.base_job_name elif isinstance(se...
100,522
Convert the job description to init params that can be handled by the class constructor Args: job_details: the returned job details from a describe_training_job API call. model_channel_name (str): Name of the channel where pre-trained model data will be downloaded. Returns: ...
def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None): init_params = dict() init_params['role'] = job_details['RoleArn'] init_params['train_instance_count'] = job_details['ResourceConfig']['InstanceCount'] init_params['train_instance_type'] = ...
100,529
Create a new Amazon SageMaker training job from the estimator. Args: estimator (sagemaker.estimator.EstimatorBase): Estimator object created by the user. inputs (str): Parameters used when called :meth:`~sagemaker.estimator.EstimatorBase.fit`. Returns: sagemaker.es...
def start_new(cls, estimator, inputs): local_mode = estimator.sagemaker_session.local_mode model_uri = estimator.model_uri # Allow file:// input only in local mode if cls._is_local_channel(inputs) or cls._is_local_channel(model_uri): if not local_mode: ...
100,534
Convert the job description to init params that can be handled by the class constructor Args: job_details: the returned job details from a describe_training_job API call. model_channel_name (str): Name of the channel where pre-trained model data will be downloaded Returns: ...
def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None):
    """Convert a training job description into constructor init params.

    Args:
        job_details: Response of a ``describe_training_job`` API call.
        model_channel_name (str): Name of the channel where pre-trained
            model data will be downloaded.

    Returns:
        dict: The transformed init_params.
    """
    params = super(Estimator, cls)._prepare_init_params_from_job_description(
        job_details, model_channel_name)
    # The base class stores the container under 'image'; this class exposes
    # it through the 'image_name' constructor argument instead.
    params['image_name'] = params.pop('image')
    return params
100,539
Set hyperparameters needed for training. This method will also validate ``source_dir``. Args: * job_name (str): Name of the training job to be created. If not specified, one is generated, using the base name given to the constructor if applicable.
def _prepare_for_training(self, job_name=None): super(Framework, self)._prepare_for_training(job_name=job_name) # validate source dir will raise a ValueError if there is something wrong with the # source directory. We are intentionally not handling it because this is a critical error. ...
100,541
Convert the job description to init params that can be handled by the class constructor Args: job_details: the returned job details from a describe_training_job API call. model_channel_name (str): Name of the channel where pre-trained model data will be downloaded Returns: ...
def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None): init_params = super(Framework, cls)._prepare_init_params_from_job_description(job_details, model_channel_name) init_params['entry_point'] = json.loads(init_params['hyperparameters'].get(SCRIPT_PARAM_NAME)...
100,544
Constructor. Args: channel: A grpc.Channel.
def __init__(self, channel): self.GetModelStatus = channel.unary_unary( '/tensorflow.serving.ModelService/GetModelStatus', request_serializer=tensorflow__serving_dot_apis_dot_get__model__status__pb2.GetModelStatusRequest.SerializeToString, response_deserializer=tensorflow__serving_dot_a...
100,558
Start the Local Transform Job Args: input_data (dict): Describes the dataset to be transformed and the location where it is stored. output_data (dict): Identifies the location where to save the results from the transform job transform_resources (dict): compute instances for ...
def start(self, input_data, output_data, transform_resources, **kwargs): self.transform_resources = transform_resources self.input_data = input_data self.output_data = output_data image = self.primary_container['Image'] instance_type = transform_resources['InstanceType'...
100,565
Get all the Environment variables that will be passed to the container Certain input fields such as BatchStrategy have different values for the API vs the Environment variables, such as SingleRecord vs SINGLE_RECORD. This method also handles this conversion. Args: **kwargs: existin...
def _get_container_environment(self, **kwargs): environment = {} environment.update(self.primary_container['Environment']) environment['SAGEMAKER_BATCH'] = 'True' if 'MaxPayloadInMB' in kwargs: environment['SAGEMAKER_MAX_PAYLOAD_IN_MB'] = str(kwargs['MaxPayloadInMB']...
100,567