text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def close(self, end_time=None): """ Close the trace entity by setting `end_time` and flip the in progress flag to False. Also decrement parent segment's ref counter by 1. :param int end_time: Epoch in seconds. If not specified current time will be used. """
super(Subsegment, self).close(end_time) self.parent_segment.decrement_ref_counter()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fetch_sampling_rules(self): """ Use X-Ray botocore client to get the centralized sampling rules from X-Ray service. The call is proxied and signed by X-Ray Daemon. """
new_rules = [] resp = self._xray_client.get_sampling_rules() records = resp['SamplingRuleRecords'] for record in records: rule_def = record['SamplingRule'] if self._is_rule_valid(rule_def): rule = SamplingRule(name=rule_def['RuleName'], priority=rule_def['Priority'], rate=rule_def['FixedRate'], reservoir_size=rule_def['ReservoirSize'], host=rule_def['Host'], service=rule_def['ServiceName'], method=rule_def['HTTPMethod'], path=rule_def['URLPath'], service_type=rule_def['ServiceType']) new_rules.append(rule) return new_rules
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def setup_xray_client(self, ip, port, client): """ Setup the xray client based on ip and port. If a preset client is specified, ip and port will be ignored. """
if not client: client = self._create_xray_client(ip, port) self._xray_client = client
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _dt_to_epoch(self, dt): """ Convert a offset-aware datetime to POSIX time. """
if PY2: # The input datetime is from botocore unmarshalling and it is # offset-aware so the timedelta of subtracting this time # to 01/01/1970 using the same tzinfo gives us # Unix Time (also known as POSIX Time). time_delta = dt - datetime(1970, 1, 1).replace(tzinfo=dt.tzinfo) return int(time_delta.total_seconds()) else: # Added in python 3.3+ and directly returns POSIX time. return int(dt.timestamp())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_stacktrace(limit=None): """ Get a full stacktrace for the current state of execution. Include the current state of the stack, minus this function. If there is an active exception, include the stacktrace information from the exception as well. :param int limit: Optionally limit stack trace size results. This parmaeters has the same meaning as the `limit` parameter in `traceback.print_stack`. :returns: List of stack trace objects, in the same form as `traceback.extract_stack`. """
if limit is not None and limit == 0: # Nothing to return. This is consistent with the behavior of the # functions in the `traceback` module. return [] stack = traceback.extract_stack() # Remove this `get_stacktrace()` function call from the stack info. # For what we want to report, this is superfluous information and arguably # adds garbage to the report. # Also drop the `traceback.extract_stack()` call above from the returned # stack info, since this is also superfluous. stack = stack[:-2] _exc_type, _exc, exc_traceback = sys.exc_info() if exc_traceback is not None: # If and only if there is a currently triggered exception, combine the # exception traceback information with the current stack state to get a # complete trace. exc_stack = traceback.extract_tb(exc_traceback) stack += exc_stack # Limit the stack trace size, if a limit was specified: if limit is not None: # Copy the behavior of `traceback` functions with a `limit` argument. # See https://docs.python.org/3/library/traceback.html. if limit > 0: # limit > 0: include the last `limit` items stack = stack[-limit:] else: # limit < 0: include the first `abs(limit)` items stack = stack[:abs(limit)] return stack
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def should_trace(self, sampling_req=None): """ Return True if the sampler decide to sample based on input information and sampling rules. It will first check if any custom rule should be applied, if not it falls back to the default sampling rule. All optional arugments are extracted from incoming requests by X-Ray middleware to perform path based sampling. """
if sampling_req is None: return self._should_trace(self._default_rule) host = sampling_req.get('host', None) method = sampling_req.get('method', None) path = sampling_req.get('path', None) for rule in self._rules: if rule.applies(host, method, path): return self._should_trace(rule) return self._should_trace(self._default_rule)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process_exception(self, request, exception): """ Add exception information and fault flag to the current segment. """
if self.in_lambda_ctx: segment = xray_recorder.current_subsegment() else: segment = xray_recorder.current_segment() segment.put_http_meta(http.STATUS, 500) stack = stacktrace.get_stacktrace(limit=xray_recorder._max_trace_back) segment.add_exception(exception, stack)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def take(self): """ Returns True if there are segments left within the current second, otherwise return False. """
with self._lock: now = int(time.time()) if now != self.this_sec: self.used_this_sec = 0 self.this_sec = now if self.used_this_sec >= self.traces_per_sec: return False self.used_this_sec = self.used_this_sec + 1 return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_default_connection(): """Returns the default datastore connection. Defaults endpoint to helper.get_project_endpoint_from_env() and credentials to helper.get_credentials_from_env(). Use set_options to override defaults. """
tid = id(threading.current_thread()) conn = _conn_holder.get(tid) if not conn: with(_rlock): # No other thread would insert a value in our slot, so no need # to recheck existence inside the lock. if 'project_endpoint' not in _options and 'project_id' not in _options: _options['project_endpoint'] = helper.get_project_endpoint_from_env() if 'credentials' not in _options: _options['credentials'] = helper.get_credentials_from_env() # We still need the lock when caching the thread local connection so we # don't race with _conn_holder.clear() in set_options(). _conn_holder[tid] = conn = connection.Datastore(**_options) return conn
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_all(cls): """Query for all Todo items ordered by creation date. This method is eventually consistent to avoid the need for an extra index. """
req = datastore.RunQueryRequest() q = req.query set_kind(q, kind='Todo') add_property_orders(q, 'created') resp = datastore.run_query(req) todos = [Todo.from_proto(r.entity) for r in resp.batch.entity_results] return todos
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def archive(cls): """Delete all Todo items that are done."""
req = datastore.BeginTransactionRequest() resp = datastore.begin_transaction(req) tx = resp.transaction req = datastore.RunQueryRequest() req.read_options.transaction = tx q = req.query set_kind(q, kind='Todo') add_projection(q, '__key__') set_composite_filter(q.filter, datastore.CompositeFilter.AND, set_property_filter( datastore.Filter(), 'done', datastore.PropertyFilter.EQUAL, True), set_property_filter( datastore.Filter(), '__key__', datastore.PropertyFilter.HAS_ANCESTOR, default_todo_list.key)) resp = datastore.run_query(req) req = datastore.CommitRequest() req.transaction = tx for result in resp.batch.entity_results: req.mutations.add().delete.CopyFrom(result.entity.key) resp = datastore.commit(req) return ''
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self): """Update or insert a Todo item."""
req = datastore.CommitRequest() req.mode = datastore.CommitRequest.NON_TRANSACTIONAL req.mutations.add().upsert.CopyFrom(self.to_proto()) resp = datastore.commit(req) if not self.id: self.id = resp.mutation_results[0].key.path[-1].id return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def Get(self, project_id): """Returns an existing emulator instance for the provided project_id. If an emulator instance doesn't yet exist, it creates one. Args: project_id: project ID Returns: a DatastoreEmulator """
if project_id in self._emulators: return self._emulators[project_id] emulator = self.Create(project_id) self._emulators[project_id] = emulator return emulator
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def Create(self, project_id, start_options=None, deadline=10): """Creates an emulator instance. This method will wait for up to 'deadline' seconds for the emulator to start. Args: project_id: project ID start_options: a list of additional command-line options to pass to the emulator 'start' command deadline: number of seconds to wait for the datastore to respond Returns: a DatastoreEmulator Raises: IOError: if the emulator could not be started within the deadline """
return DatastoreEmulator(self._emulator_cmd, self._working_directory, project_id, deadline, start_options)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _WaitForStartup(self, deadline): """Waits for the emulator to start. Args: deadline: deadline in seconds Returns: True if the emulator responds within the deadline, False otherwise. """
start = time.time() sleep = 0.05 def Elapsed(): return time.time() - start while True: try: response, _ = self._http.request(self._host) if response.status == 200: logging.info('emulator responded after %f seconds', Elapsed()) return True except (socket.error, httplib.ResponseNotReady): pass if Elapsed() >= deadline: # Out of time; give up. return False else: time.sleep(sleep) sleep *= 2
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def Clear(self): """Clears all data from the emulator instance. Returns: True if the data was successfully cleared, False otherwise. """
headers = {'Content-length': '0'} response, _ = self._http.request('%s/reset' % self._host, method='POST', headers=headers) if response.status == 200: return True else: logging.warning('failed to clear emulator; response was: %s', response)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def Stop(self): """Stops the emulator instance."""
if not self.__running: return logging.info('shutting down the emulator running at %s', self._host) headers = {'Content-length': '0'} response, _ = self._http.request('%s/shutdown' % self._host, method='POST', headers=headers) if response.status != 200: logging.warning('failed to shut down emulator; response: %s', response) self.__running = False # Delete temp files. shutil.rmtree(self._tmp_dir)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _call_method(self, method, req, resp_class): """_call_method call the given RPC method over HTTP. It uses the given protobuf message request as the payload and returns the deserialized protobuf message response. Args: method: RPC method name to be called. req: protobuf message for the RPC request. resp_class: protobuf message class for the RPC response. Returns: Deserialized resp_class protobuf message instance. Raises: RPCError: The rpc method call failed. """
payload = req.SerializeToString() headers = { 'Content-Type': 'application/x-protobuf', 'Content-Length': str(len(payload)), 'X-Goog-Api-Format-Version': '2' } response, content = self._http.request( '%s:%s' % (self._url, method), method='POST', body=payload, headers=headers) if response.status != 200: raise _make_rpc_error(method, response, content) resp = resp_class() resp.ParseFromString(content) return resp
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_credentials_from_env(): """Get credentials from environment variables. Preference of credentials is: - No credentials if DATASTORE_EMULATOR_HOST is set. - Google APIs Signed JWT credentials based on DATASTORE_SERVICE_ACCOUNT and DATASTORE_PRIVATE_KEY_FILE environments variables - Google Application Default https://developers.google.com/identity/protocols/application-default-credentials Returns: credentials or None. """
if os.getenv(_DATASTORE_USE_STUB_CREDENTIAL_FOR_TEST_ENV): logging.info('connecting without credentials because %s is set.', _DATASTORE_USE_STUB_CREDENTIAL_FOR_TEST_ENV) return None if os.getenv(_DATASTORE_EMULATOR_HOST_ENV): logging.info('connecting without credentials because %s is set.', _DATASTORE_EMULATOR_HOST_ENV) return None if (os.getenv(_DATASTORE_SERVICE_ACCOUNT_ENV) and os.getenv(_DATASTORE_PRIVATE_KEY_FILE_ENV)): with open(os.getenv(_DATASTORE_PRIVATE_KEY_FILE_ENV), 'rb') as f: key = f.read() credentials = client.SignedJwtAssertionCredentials( os.getenv(_DATASTORE_SERVICE_ACCOUNT_ENV), key, SCOPE) logging.info('connecting using private key file.') return credentials try: credentials = client.GoogleCredentials.get_application_default() credentials = credentials.create_scoped(SCOPE) logging.info('connecting using Google Application Default Credentials.') return credentials except client.ApplicationDefaultCredentialsError, e: logging.error('Unable to find any credentials to use. ' 'If you are running locally, make sure to set the ' '%s environment variable.', _DATASTORE_EMULATOR_HOST_ENV) raise e
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_project_endpoint_from_env(project_id=None, host=None): """Get Datastore project endpoint from environment variables. Args: project_id: The Cloud project, defaults to the environment variable DATASTORE_PROJECT_ID. host: The Cloud Datastore API host to use. Returns: the endpoint to use, for example https://datastore.googleapis.com/v1/projects/my-project Raises: ValueError: if the wrong environment variable was set or a project_id was not provided. """
project_id = project_id or os.getenv(_DATASTORE_PROJECT_ID_ENV) if not project_id: raise ValueError('project_id was not provided. Either pass it in ' 'directly or set DATASTORE_PROJECT_ID.') # DATASTORE_HOST is deprecated. if os.getenv(_DATASTORE_HOST_ENV): logging.warning('Ignoring value of environment variable DATASTORE_HOST. ' 'To point datastore to a host running locally, use the ' 'environment variable DATASTORE_EMULATOR_HOST') url_override = os.getenv(_DATASTORE_URL_OVERRIDE_ENV) if url_override: return '%s/projects/%s' % (url_override, project_id) localhost = os.getenv(_DATASTORE_EMULATOR_HOST_ENV) if localhost: return ('http://%s/%s/projects/%s' % (localhost, API_VERSION, project_id)) host = host or GOOGLEAPIS_HOST return 'https://%s/%s/projects/%s' % (host, API_VERSION, project_id)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_key_path(key_proto, *path_elements): """Add path elements to the given datastore.Key proto message. Args: key_proto: datastore.Key proto message. *path_elements: list of ancestors to add to the key. represent the entity key, if no terminating id/name: they key will be an incomplete key. Raises: TypeError: the given id or name has the wrong type. Returns: the same datastore.Key. Usage: """
for i in range(0, len(path_elements), 2): pair = path_elements[i:i+2] elem = key_proto.path.add() elem.kind = pair[0] if len(pair) == 1: return # incomplete key id_or_name = pair[1] if isinstance(id_or_name, (int, long)): elem.id = id_or_name elif isinstance(id_or_name, basestring): elem.name = id_or_name else: raise TypeError( 'Expected an integer id or string name as argument %d; ' 'received %r (a %s).' % (i + 2, id_or_name, type(id_or_name))) return key_proto
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_properties(entity_proto, property_dict, exclude_from_indexes=None): """Add values to the given datastore.Entity proto message. Args: entity_proto: datastore.Entity proto message. property_dict: a dictionary from property name to either a python object or datastore.Value. exclude_from_indexes: if the value should be exclude from indexes. None leaves indexing as is (defaults to False if value is not a Value message). Usage: Raises: TypeError: if a given property value type is not supported. """
for name, value in property_dict.iteritems(): set_property(entity_proto.properties, name, value, exclude_from_indexes)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_property(property_map, name, value, exclude_from_indexes=None): """Set property value in the given datastore.Property proto message. Args: property_map: a string->datastore.Value protobuf map. name: name of the property. value: python object or datastore.Value. exclude_from_indexes: if the value should be exclude from indexes. None leaves indexing as is (defaults to False if value is not a Value message). Usage: Raises: TypeError: if the given value type is not supported. """
set_value(property_map[name], value, exclude_from_indexes)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_value(value_proto, value, exclude_from_indexes=None): """Set the corresponding datastore.Value _value field for the given arg. Args: value_proto: datastore.Value proto message. value: python object or datastore.Value. (unicode value will set a datastore string value, str value will set a blob string value). Undefined behavior if value is/contains value_proto. exclude_from_indexes: if the value should be exclude from indexes. None leaves indexing as is (defaults to False if value is not a Value message). Raises: TypeError: if the given value type is not supported. """
value_proto.Clear() if isinstance(value, (list, tuple)): for sub_value in value: set_value(value_proto.array_value.values.add(), sub_value, exclude_from_indexes) return # do not set indexed for a list property. if isinstance(value, entity_pb2.Value): value_proto.MergeFrom(value) elif isinstance(value, unicode): value_proto.string_value = value elif isinstance(value, str): value_proto.blob_value = value elif isinstance(value, bool): value_proto.boolean_value = value elif isinstance(value, (int, long)): value_proto.integer_value = value elif isinstance(value, float): value_proto.double_value = value elif isinstance(value, datetime.datetime): to_timestamp(value, value_proto.timestamp_value) elif isinstance(value, entity_pb2.Key): value_proto.key_value.CopyFrom(value) elif isinstance(value, entity_pb2.Entity): value_proto.entity_value.CopyFrom(value) else: raise TypeError('value type: %r not supported' % (value,)) if exclude_from_indexes is not None: value_proto.exclude_from_indexes = exclude_from_indexes
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_value(value_proto): """Gets the python object equivalent for the given value proto. Args: value_proto: datastore.Value proto message. Returns: the corresponding python object value. timestamps are converted to datetime, and datastore.Value is returned for blob_key_value. """
field = value_proto.WhichOneof('value_type') if field in __native_value_types: return getattr(value_proto, field) if field == 'timestamp_value': return from_timestamp(value_proto.timestamp_value) if field == 'array_value': return [get_value(sub_value) for sub_value in value_proto.array_value.values] return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_property_dict(entity_proto): """Convert datastore.Entity to a dict of property name -> datastore.Value. Args: entity_proto: datastore.Entity proto message. Usage: {'foo': {string_value='a'}, 'bar': {integer_value=2}} Returns: dict of entity properties. """
return dict((p.key, p.value) for p in entity_proto.property)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_kind(query_proto, kind): """Set the kind constraint for the given datastore.Query proto message."""
del query_proto.kind[:] query_proto.kind.add().name = kind
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_property_orders(query_proto, *orders): """Add ordering constraint for the given datastore.Query proto message. Args: query_proto: datastore.Query proto message. orders: list of propertype name string, default to ascending order and set descending if prefixed by '-'. Usage: """
for order in orders: proto = query_proto.order.add() if order[0] == '-': order = order[1:] proto.direction = query_pb2.PropertyOrder.DESCENDING else: proto.direction = query_pb2.PropertyOrder.ASCENDING proto.property.name = order
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_projection(query_proto, *projection): """Add projection properties to the given datatstore.Query proto message."""
for p in projection: proto = query_proto.projection.add() proto.property.name = p
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_property_filter(filter_proto, name, op, value): """Set property filter contraint in the given datastore.Filter proto message. Args: filter_proto: datastore.Filter proto message name: property name op: datastore.PropertyFilter.Operation value: property value Returns: the same datastore.Filter. Usage: """
filter_proto.Clear() pf = filter_proto.property_filter pf.property.name = name pf.op = op set_value(pf.value, value) return filter_proto
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_composite_filter(filter_proto, op, *filters): """Set composite filter contraint in the given datastore.Filter proto message. Args: filter_proto: datastore.Filter proto message op: datastore.CompositeFilter.Operation filters: vararg list of datastore.Filter Returns: the same datastore.Filter. Usage: """
filter_proto.Clear() cf = filter_proto.composite_filter cf.op = op for f in filters: cf.filters.add().CopyFrom(f) return filter_proto
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def micros_to_timestamp(micros, timestamp): """Convert microseconds from utc epoch to google.protobuf.timestamp. Args: micros: a long, number of microseconds since utc epoch. timestamp: a google.protobuf.timestamp.Timestamp to populate. """
seconds = long(micros / _MICROS_PER_SECOND) micro_remainder = micros % _MICROS_PER_SECOND timestamp.seconds = seconds timestamp.nanos = micro_remainder * _NANOS_PER_MICRO
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_timestamp(dt, timestamp): """Convert datetime to google.protobuf.Timestamp. Args: dt: a timezone naive datetime. timestamp: a google.protobuf.Timestamp to populate. Raises: TypeError: if a timezone aware datetime was provided. """
if dt.tzinfo: # this is an "aware" datetime with an explicit timezone. Throw an error. raise TypeError('Cannot store a timezone aware datetime. ' 'Convert to UTC and store the naive datetime.') timestamp.seconds = calendar.timegm(dt.timetuple()) timestamp.nanos = dt.microsecond * _NANOS_PER_MICRO
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _extract_params(self, kwargs, hyperparameters): """Extract init, fit and produce params from kwargs. The `init_params`, `fit_params` and `produce_params` are extracted from the passed `kwargs` taking the metadata hyperparameters as a reference. During this extraction, make sure that all the required hyperparameters have been given and that nothing unexpected exists in the input. Args: kwargs (dict): dict containing the Keyword arguments that have been passed to the `__init__` method upon initialization. hyperparameters (dict): hyperparameters dictionary, as found in the JSON annotation. Raises: TypeError: A `TypeError` is raised if a required argument is not found in the `kwargs` dict, or if an unexpected argument has been given. """
init_params = dict() fit_params = dict() produce_params = dict() for name, param in hyperparameters.get('fixed', dict()).items(): if name in kwargs: value = kwargs.pop(name) elif 'default' in param: value = param['default'] else: raise TypeError("{} required argument '{}' not found".format(self.name, name)) init_params[name] = value for name, param in hyperparameters.get('tunable', dict()).items(): if name in kwargs: init_params[name] = kwargs.pop(name) fit_args = [arg['name'] for arg in self.fit_args] produce_args = [arg['name'] for arg in self.produce_args] for name in list(kwargs.keys()): if name in fit_args: fit_params[name] = kwargs.pop(name) elif name in produce_args: produce_params[name] = kwargs.pop(name) if kwargs: error = "Unexpected hyperparameters '{}'".format(', '.join(kwargs.keys())) raise TypeError(error) return init_params, fit_params, produce_params
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_hyperparameters(self, hyperparameters): """Set new hyperparameters. Only the specified hyperparameters are modified, so any other hyperparameter keeps the value that had been previously given. If necessary, a new instance of the primitive is created. Args: hyperparameters (dict): Dictionary containing as keys the name of the hyperparameters and as values the values to be used. """
self._hyperparameters.update(hyperparameters) if self._class: LOGGER.debug('Creating a new primitive instance for %s', self.name) self.instance = self.primitive(**self._hyperparameters)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fit(self, **kwargs): """Call the fit method of the primitive. The given keyword arguments will be passed directly to the `fit` method of the primitive instance specified in the JSON annotation. If any of the arguments expected by the produce method had been given during the MLBlock initialization, they will be passed as well. If the fit method was not specified in the JSON annotation, or if the primitive is a simple function, this will be a noop. Args: **kwargs: Any given keyword argument will be directly passed to the primitive fit method. Raises: TypeError: A `TypeError` might be raised if any argument not expected by the primitive fit method is given. """
if self.fit_method is not None: fit_args = self._fit_params.copy() fit_args.update(kwargs) getattr(self.instance, self.fit_method)(**fit_args)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def produce(self, **kwargs): """Call the primitive function, or the predict method of the primitive. The given keyword arguments will be passed directly to the primitive, if it is a simple function, or to the `produce` method of the primitive instance specified in the JSON annotation, if it is a class. If any of the arguments expected by the fit method had been given during the MLBlock initialization, they will be passed as well. Returns: The output of the call to the primitive function or primitive produce method. """
produce_args = self._produce_params.copy() produce_args.update(kwargs) if self._class: return getattr(self.instance, self.produce_method)(**produce_args) produce_args.update(self._hyperparameters) return self.primitive(**produce_args)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_hyperparameters(self): """Get the current hyperparamters of each block. Returns: dict: A dictionary containing the block names as keys and the current block hyperparameters dictionary as values. """
hyperparameters = {} for block_name, block in self.blocks.items(): hyperparameters[block_name] = block.get_hyperparameters() return hyperparameters
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_hyperparameters(self, hyperparameters): """Set new hyperparameter values for some blocks. Args: hyperparameters (dict): A dictionary containing the block names as keys and the new hyperparameters dictionary as values. """
for block_name, block_hyperparams in hyperparameters.items(): self.blocks[block_name].set_hyperparameters(block_hyperparams)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fit(self, X=None, y=None, **kwargs): """Fit the blocks of this pipeline. Sequentially call the `fit` and the `produce` methods of each block, capturing the outputs each `produce` method before calling the `fit` method of the next one. During the whole process a context dictionary is built, where both the passed arguments and the captured outputs of the `produce` methods are stored, and from which the arguments for the next `fit` and `produce` calls will be taken. Args: X: Fit Data, which the pipeline will learn from. y: Fit Data labels, which the pipeline will use to learn how to behave. **kwargs: Any additional keyword arguments will be directly added to the context dictionary and available for the blocks. """
context = { 'X': X, 'y': y } context.update(kwargs) last_block_name = list(self.blocks.keys())[-1] for block_name, block in self.blocks.items(): LOGGER.debug("Fitting block %s", block_name) try: fit_args = self._get_block_args(block_name, block.fit_args, context) block.fit(**fit_args) except Exception: LOGGER.exception("Exception caught fitting MLBlock %s", block_name) raise if block_name != last_block_name: LOGGER.debug("Producing block %s", block_name) try: produce_args = self._get_block_args(block_name, block.produce_args, context) outputs = block.produce(**produce_args) output_dict = self._get_outputs(block_name, outputs, block.produce_output) context.update(output_dict) except Exception: LOGGER.exception("Exception caught producing MLBlock %s", block_name) raise
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def predict(self, X=None, **kwargs): """Produce predictions using the blocks of this pipeline. Sequentially call the `produce` method of each block, capturing the outputs before calling the next one. During the whole process a context dictionary is built, where both the passed arguments and the captured outputs of the `produce` methods are stored, and from which the arguments for the next `produce` calls will be taken. Args: X: Data which the pipeline will use to make predictions. **kwargs: Any additional keyword arguments will be directly added to the context dictionary and available for the blocks. """
context = { 'X': X } context.update(kwargs) last_block_name = list(self.blocks.keys())[-1] for block_name, block in self.blocks.items(): LOGGER.debug("Producing block %s", block_name) try: produce_args = self._get_block_args(block_name, block.produce_args, context) outputs = block.produce(**produce_args) if block_name != last_block_name: output_dict = self._get_outputs(block_name, outputs, block.produce_output) context.update(output_dict) except Exception: LOGGER.exception("Exception caught producing MLBlock %s", block_name) raise return outputs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_dict(self): """Return all the details of this MLPipeline in a dict. The dict structure contains all the `__init__` arguments of the MLPipeline, as well as the current hyperparameter values and the specification of the tunable_hyperparameters:: { "primitives": [ "a_primitive", "another_primitive" ], "init_params": { "a_primitive": { "an_argument": "a_value" } }, "hyperparameters": { "a_primitive#1": { "an_argument": "a_value", "another_argument": "another_value", }, "another_primitive#1": { "yet_another_argument": "yet_another_value" } }, "tunable_hyperparameters": { "another_primitive#1": { "yet_another_argument": { "type": "str", "default": "a_default_value", "values": [ "a_default_value", "yet_another_value" ] } } } } """
return { 'primitives': self.primitives, 'init_params': self.init_params, 'input_names': self.input_names, 'output_names': self.output_names, 'hyperparameters': self.get_hyperparameters(), 'tunable_hyperparameters': self._tunable_hyperparameters }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self, path): """Save the specification of this MLPipeline in a JSON file. The content of the JSON file is the dict returned by the `to_dict` method. Args: path (str): Path to the JSON file to write. """
with open(path, 'w') as out_file: json.dump(self.to_dict(), out_file, indent=4)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_dict(cls, metadata): """Create a new MLPipeline from a dict specification. The dict structure is the same as the one created by the `to_dict` method. Args: metadata (dict): Dictionary containing the pipeline specification. Returns: MLPipeline: A new MLPipeline instance with the details found in the given specification dictionary. """
hyperparameters = metadata.get('hyperparameters') tunable = metadata.get('tunable_hyperparameters') pipeline = cls( metadata['primitives'], metadata.get('init_params'), metadata.get('input_names'), metadata.get('output_names'), ) if hyperparameters: pipeline.set_hyperparameters(hyperparameters) if tunable is not None: pipeline._tunable_hyperparameters = tunable return pipeline
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(cls, path): """Create a new MLPipeline from a JSON specification. The JSON file format is the same as the one created by the `to_dict` method. Args: path (str): Path of the JSON file to load. Returns: MLPipeline: A new MLPipeline instance with the specification found in the JSON file. """
with open(path, 'r') as in_file: metadata = json.load(in_file) return cls.from_dict(metadata)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_primitives_path(path): """Add a new path to look for primitives. The new path will be inserted in the first place of the list, so any primitive found in this new folder will take precedence over any other primitive with the same name that existed in the system before. Args: path (str): path to add Raises: ValueError: A `ValueError` will be raised if the path is not valid. """
if path not in _PRIMITIVES_PATHS: if not os.path.isdir(path): raise ValueError('Invalid path: {}'.format(path)) LOGGER.debug('Adding new primitives path %s', path) _PRIMITIVES_PATHS.insert(0, os.path.abspath(path))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_primitives_paths(): """Get the list of folders where the primitives will be looked for. This list will include the value of any `entry_point` named `jsons_path` published under the name `mlprimitives`. An example of such an entry point would be:: entry_points = { 'mlprimitives': [ 'jsons_path=some_module:SOME_VARIABLE' ] } where the module `some_module` contains a variable such as:: SOME_VARIABLE = os.path.join(os.path.dirname(__file__), 'jsons') Returns: list: The list of folders. """
primitives_paths = list() entry_points = pkg_resources.iter_entry_points('mlprimitives') for entry_point in entry_points: if entry_point.name == 'jsons_path': path = entry_point.load() primitives_paths.append(path) return _PRIMITIVES_PATHS + primitives_paths
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_primitive(name): """Locate and load the JSON annotation of the given primitive. All the paths found in PRIMTIVE_PATHS will be scanned to find a JSON file with the given name, and as soon as a JSON with the given name is found it is returned. Args: name (str): name of the primitive to look for. The name should correspond to the primitive, not to the filename, as the `.json` extension will be added dynamically. Returns: dict: The content of the JSON annotation file loaded into a dict. Raises: ValueError: A `ValueError` will be raised if the primitive cannot be found. """
for base_path in get_primitives_paths(): parts = name.split('.') number_of_parts = len(parts) for folder_parts in range(number_of_parts): folder = os.path.join(base_path, *parts[:folder_parts]) filename = '.'.join(parts[folder_parts:]) + '.json' json_path = os.path.join(folder, filename) if os.path.isfile(json_path): with open(json_path, 'r') as json_file: LOGGER.debug('Loading primitive %s from %s', name, json_path) return json.load(json_file) raise ValueError("Unknown primitive: {}".format(name))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_usps(): """USPs Digits Dataset. The data of this dataset is a 3d numpy array vector with shape (224, 224, 3) containing 9298 224x224 RGB photos of handwritten digits, and the target is a 1d numpy integer array containing the label of the digit represented in the image. """
dataset_path = _load('usps') df = _load_csv(dataset_path, 'data') X = _load_images(os.path.join(dataset_path, 'images'), df.image) y = df.label.values return Dataset(load_usps.__doc__, X, y, accuracy_score, stratify=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_handgeometry(): """Hand Geometry Dataset. The data of this dataset is a 3d numpy array vector with shape (224, 224, 3) containing 112 224x224 RGB photos of hands, and the target is a 1d numpy float array containing the width of the wrist in centimeters. """
dataset_path = _load('handgeometry') df = _load_csv(dataset_path, 'data') X = _load_images(os.path.join(dataset_path, 'images'), df.image) y = df.target.values return Dataset(load_handgeometry.__doc__, X, y, r2_score)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_personae(): """Personae Dataset. The data of this dataset is a 2d numpy array vector containing 145 entries that include texts written by Dutch users in Twitter, with some additional information about the author, and the target is a 1d numpy binary integer array indicating whether the author was extrovert or not. """
dataset_path = _load('personae') X = _load_csv(dataset_path, 'data') y = X.pop('label').values return Dataset(load_personae.__doc__, X, y, accuracy_score, stratify=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_umls(): """UMLs Dataset. The data consists of information about a 135 Graph and the relations between their nodes given as a DataFrame with three columns, source, target and type, indicating which nodes are related and with which type of link. The target is a 1d numpy binary integer array indicating whether the indicated link exists or not. """
dataset_path = _load('umls') X = _load_csv(dataset_path, 'data') y = X.pop('label').values graph = nx.Graph(nx.read_gml(os.path.join(dataset_path, 'graph.gml'))) return Dataset(load_umls.__doc__, X, y, accuracy_score, stratify=True, graph=graph)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_dic28(): """DIC28 Dataset from Pajek. This network represents connections among English words in a dictionary. It was generated from Knuth's dictionary. Two words are connected by an edge if we can reach one from the other by - changing a single character (e. g., work - word) - adding / removing a single character (e. g., ever - fever). There exist 52,652 words (vertices in a network) having 2 up to 8 characters in the dictionary. The obtained network has 89038 edges. """
dataset_path = _load('dic28') X = _load_csv(dataset_path, 'data') y = X.pop('label').values graph1 = nx.Graph(nx.read_gml(os.path.join(dataset_path, 'graph1.gml'))) graph2 = nx.Graph(nx.read_gml(os.path.join(dataset_path, 'graph2.gml'))) graph = graph1.copy() graph.add_nodes_from(graph2.nodes(data=True)) graph.add_edges_from(graph2.edges) graph.add_edges_from(X[['graph1', 'graph2']].values) graphs = { 'graph1': graph1, 'graph2': graph2, } return Dataset(load_dic28.__doc__, X, y, accuracy_score, stratify=True, graph=graph, graphs=graphs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_amazon(): """Amazon product co-purchasing network and ground-truth communities. Network was collected by crawling Amazon website. It is based on Customers Who Bought This Item Also Bought feature of the Amazon website. If a product i is frequently co-purchased with product j, the graph contains an undirected edge from i to j. Each product category provided by Amazon defines each ground-truth community. """
dataset_path = _load('amazon') X = _load_csv(dataset_path, 'data') y = X.pop('label').values graph = nx.Graph(nx.read_gml(os.path.join(dataset_path, 'graph.gml'))) return Dataset(load_amazon.__doc__, X, y, normalized_mutual_info_score, graph=graph)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_jester(): """Ratings from the Jester Online Joke Recommender System. This dataset consists of over 1.7 million instances of (user_id, item_id, rating) triples, which is split 50-50 into train and test data. source: "University of California Berkeley, CA" sourceURI: "http://eigentaste.berkeley.edu/dataset/" """
dataset_path = _load('jester') X = _load_csv(dataset_path, 'data') y = X.pop('rating').values return Dataset(load_jester.__doc__, X, y, r2_score)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_wikiqa(): """A Challenge Dataset for Open-Domain Question Answering. WikiQA dataset is a publicly available set of question and sentence (QS) pairs, collected and annotated for research on open-domain question answering. source: "Microsoft" sourceURI: "https://www.microsoft.com/en-us/research/publication/wikiqa-a-challenge-dataset-for-open-domain-question-answering/#" """
# noqa dataset_path = _load('wikiqa') data = _load_csv(dataset_path, 'data', set_index=True) questions = _load_csv(dataset_path, 'questions', set_index=True) sentences = _load_csv(dataset_path, 'sentences', set_index=True) vocabulary = _load_csv(dataset_path, 'vocabulary', set_index=True) entities = { 'data': (data, 'd3mIndex', None), 'questions': (questions, 'qIndex', None), 'sentences': (sentences, 'sIndex', None), 'vocabulary': (vocabulary, 'index', None) } relationships = [ ('questions', 'qIndex', 'data', 'qIndex'), ('sentences', 'sIndex', 'data', 'sIndex') ] target = data.pop('isAnswer').values return Dataset(load_wikiqa.__doc__, data, target, accuracy_score, startify=True, entities=entities, relationships=relationships)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_newsgroups(): """20 News Groups Dataset. The data of this dataset is a 1d numpy array vector containing the texts from 11314 newsgroups posts, and the target is a 1d numpy integer array containing the label of one of the 20 topics that they are about. """
dataset = datasets.fetch_20newsgroups() return Dataset(load_newsgroups.__doc__, np.array(dataset.data), dataset.target, accuracy_score, stratify=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_iris(): """Iris Dataset."""
dataset = datasets.load_iris() return Dataset(load_iris.__doc__, dataset.data, dataset.target, accuracy_score, stratify=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_boston(): """Boston House Prices Dataset."""
dataset = datasets.load_boston() return Dataset(load_boston.__doc__, dataset.data, dataset.target, r2_score)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_splits(self, n_splits=1): """Return splits of this dataset ready for Cross Validation. If n_splits is 1, a tuple containing the X for train and test and the y for train and test is returned. Otherwise, if n_splits is bigger than 1, a list of such tuples is returned, one for each split. Args: n_splits (int): Number of times that the data needs to be splitted. Returns: tuple or list: if n_splits is 1, a tuple containing the X for train and test and the y for train and test is returned. Otherwise, if n_splits is bigger than 1, a list of such tuples is returned, one for each split. """
if n_splits == 1: stratify = self.target if self._stratify else None return train_test_split( self.data, self.target, shuffle=self._shuffle, stratify=stratify ) else: cv_class = StratifiedKFold if self._stratify else KFold cv = cv_class(n_splits=n_splits, shuffle=self._shuffle) splits = list() for train, test in cv.split(self.data, self.target): X_train = self._get_split(self.data, train) y_train = self._get_split(self.target, train) X_test = self._get_split(self.data, test) y_test = self._get_split(self.target, test) splits.append((X_train, X_test, y_train, y_test)) return splits
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add(key, value): """Adds context to the currently executing request. :key: Any String identifying the request context. Example: "user_ip", "plan", "alert_count" :value: Any json-serializable type. Example: "1.1.1.1", "free", 100 :returns: nothing. """
tr = TrackedRequest.instance() tr.tag(key, value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def libc(cls): """ Alpine linux uses a non glibc version of the standard library, it uses the stripped down musl instead. The core agent can be built against it, but which one is running must be detected. Shelling out to `ldd` appears to be the most reliable way to do this. """
try: output = subprocess.check_output( ["ldd", "--version"], stderr=subprocess.STDOUT ) except (OSError, subprocess.CalledProcessError): return "gnu" else: if b"musl" in output: return "musl" else: return "gnu"
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self): """ Called by the threading system """
try: self._connect() self._register() while True: try: body = self.command_queue.get(block=True, timeout=1 * SECOND) except queue.Empty: body = None if body is not None: result = self._send(body) if result: self.command_queue.task_done() else: # Something was wrong with the socket. self._disconnect() self._connect() self._register() # Check for stop event after a read from the queue. This is to # allow you to open a socket, immediately send to it, and then # stop it. We do this in the Metadata send at application start # time if self._stop_event.is_set(): logger.debug("CoreAgentSocket thread stopping.") break except Exception: logger.debug("CoreAgentSocket thread exception.") finally: self._started_event.clear() self._stop_event.clear() self._stopped_event.set() logger.debug("CoreAgentSocket thread stopped.")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set(cls, **kwargs): """ Sets a configuration value for the Scout agent. Values set here will not override values set in ENV. """
global SCOUT_PYTHON_VALUES for key, value in kwargs.items(): SCOUT_PYTHON_VALUES[key] = value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def text(value, encoding="utf-8", errors="strict"): """Convert a value to str on Python 3 and unicode on Python 2."""
if isinstance(value, text_type): return value elif isinstance(value, bytes): return text_type(value, encoding, errors) else: return text_type(value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def install(): """ Installs ScoutApm SQL Instrumentation by monkeypatching the `cursor` method of BaseDatabaseWrapper, to return a wrapper that instruments any calls going through it. """
@monkeypatch_method(BaseDatabaseWrapper) def cursor(original, self, *args, **kwargs): result = original(*args, **kwargs) return _DetailedTracingCursorWrapper(result, self) logger.debug("Monkey patched SQL")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dispatch_request(self): """Modified version of Flask.dispatch_request to call process_view."""
req = _request_ctx_stack.top.request app = current_app # Return flask's default options response. See issue #40 if req.method == "OPTIONS": return app.make_default_options_response() if req.routing_exception is not None: app.raise_routing_exception(req) # The routing rule has some handy attributes to extract how Flask found # this endpoint rule = req.url_rule # Wrap the real view_func view_func = self.wrap_view_func( app, rule, req, app.view_functions[rule.endpoint], req.view_args ) return view_func(**req.view_args)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def wrap_view_func(self, app, rule, req, view_func, view_kwargs): """ This method is called just before the flask view is called. This is done by the dispatch_request method. """
operation = view_func.__module__ + "." + view_func.__name__ return self.trace_view_function( view_func, ("Controller", {"path": req.path, "name": operation}) )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process_view(self, request, view_func, view_args, view_kwargs): """ Capture details about the view_func that is about to execute """
try: if ignore_path(request.path): TrackedRequest.instance().tag("ignore_transaction", True) view_name = request.resolver_match._func_path span = TrackedRequest.instance().current_span() if span is not None: span.operation = "Controller/" + view_name Context.add("path", request.path) Context.add("user_ip", RemoteIp.lookup_from_headers(request.META)) if getattr(request, "user", None) is not None: Context.add("username", request.user.get_username()) except Exception: pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create(hypervisor, identifier, configuration): """Creates a virtual network according to the given configuration. @param hypervisor: (libvirt.virConnect) connection to libvirt hypervisor. @param identifier: (str) UUID for the virtual network. @param configuration: (dict) network configuration. @return: (libvirt.virNetwork) virtual network. """
counter = count() xml_config = DEFAULT_NETWORK_XML if not {'configuration', 'dynamic_address'} & set(configuration.keys()): raise RuntimeError( "Either configuration or dynamic_address must be specified") if 'configuration' in configuration: with open(configuration['configuration']) as xml_file: xml_config = xml_file.read() while True: if 'dynamic_address' in configuration: address = generate_address(hypervisor, configuration['dynamic_address']) xml_string = network_xml(identifier, xml_config, address=address) else: xml_string = network_xml(identifier, xml_config) try: return hypervisor.networkCreateXML(xml_string) except libvirt.libvirtError as error: if next(counter) > MAX_ATTEMPTS: raise RuntimeError( "Exceeded failed attempts ({}) to get IP address.".format( MAX_ATTEMPTS), "Last error: {}".format(error))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lookup(domain): """Find the virNetwork object associated to the domain. If the domain has more than one network interface, the first one is returned. None is returned if the domain is not attached to any network. """
xml = domain.XMLDesc(0) element = etree.fromstring(xml) subelm = element.find('.//interface[@type="network"]') if subelm is not None: network = subelm.find('.//source').get('network') hypervisor = domain.connect() return hypervisor.networkLookupByName(network) return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete(network): """libvirt network cleanup. @raise: libvirt.libvirtError. """
try: network.destroy() except libvirt.libvirtError as error: raise RuntimeError("Unable to destroy network: {}".format(error))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_address(network, address): """Sets the given address to the network XML element. Libvirt bridge will have address and DHCP server configured according to the subnet prefix length. """
if network.find('.//ip') is not None: raise RuntimeError("Address already specified in XML configuration.") netmask = str(address.netmask) ipv4 = str(address[1]) dhcp_start = str(address[2]) dhcp_end = str(address[-2]) ip = etree.SubElement(network, 'ip', address=ipv4, netmask=netmask) dhcp = etree.SubElement(ip, 'dhcp') etree.SubElement(dhcp, 'range', start=dhcp_start, end=dhcp_end)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate_address(hypervisor, configuration): """Generate a valid IP address according to the configuration."""
ipv4 = configuration['ipv4'] prefix = configuration['prefix'] subnet_prefix = configuration['subnet_prefix'] subnet_address = ipaddress.IPv4Network(u'/'.join((str(ipv4), str(prefix)))) net_address_pool = subnet_address.subnets(new_prefix=subnet_prefix) return address_lookup(hypervisor, net_address_pool)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def address_lookup(hypervisor, address_pool): """Retrieves a valid and available network IP address."""
address_pool = set(address_pool) active_addresses = set(active_network_addresses(hypervisor)) try: return random.choice(tuple(address_pool - active_addresses)) except IndexError: raise RuntimeError("All IP addresses are in use")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def active_network_addresses(hypervisor): """Query libvirt for the already reserved addresses."""
active = [] for network in hypervisor.listNetworks(): try: xml = hypervisor.networkLookupByName(network).XMLDesc(0) except libvirt.libvirtError: # network has been destroyed meanwhile continue else: ip_element = etree.fromstring(xml).find('.//ip') address = ip_element.get('address') netmask = ip_element.get('netmask') active.append(ipaddress.IPv4Network(u'/'.join((address, netmask)), strict=False)) return active
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def interface_lookup(interfaces, hwaddr, address_type): """Search the address within the interface list."""
for interface in interfaces.values(): if interface.get('hwaddr') == hwaddr: for address in interface.get('addrs'): if address.get('type') == address_type: return address.get('addr')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def mac_address(self): """Returns the MAC address of the network interface. If multiple interfaces are provided, the address of the first found is returned. """
if self._mac_address is None: self._mac_address = self._get_mac_address() return self._mac_address
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ip4_address(self): """Returns the IPv4 address of the network interface. If multiple interfaces are provided, the address of the first found is returned. """
if self._ip4_address is None and self.network is not None: self._ip4_address = self._get_ip_address( libvirt.VIR_IP_ADDR_TYPE_IPV4) return self._ip4_address
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ip6_address(self): """Returns the IPv6 address of the network interface. If multiple interfaces are provided, the address of the first found is returned. """
if self._ip6_address is None and self.network is not None: self._ip6_address = self._get_ip_address( libvirt.VIR_IP_ADDR_TYPE_IPV6) return self._ip6_address
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def shutdown(self, timeout=None, **kwargs): """ Shuts down the Context. Sends an ACPI request to the OS for a clean shutdown. Triggered events:: * pre_poweroff * post_poweroff .. note:: The Guest OS needs to support ACPI requests sent from the host, the completion of the operation is not ensured by the platform. If the Guest OS is still running after the given timeout, a RuntimeError will be raised. @param timeout: (int) amout of seconds to wait for the machine shutdown. @param kwargs: keyword arguments to pass altogether with the events. """
self._assert_transition('shutdown') self.trigger('pre_shutdown', **kwargs) self._execute_command(self.domain.shutdown) self._wait_for_shutdown(timeout) self.trigger('post_shutdown', **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _command(self, event, command, *args, **kwargs): """ Context state controller. Check whether the transition is possible or not, it executes it and triggers the Hooks with the pre_* and post_* events. @param event: (str) event generated by the command. @param command: (virDomain.method) state transition to impose. @raise: RuntimeError. """
self._assert_transition(event) self.trigger('pre_%s' % event, **kwargs) self._execute_command(command, *args) self.trigger('post_%s' % event, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _assert_transition(self, event): """Asserts the state transition validity."""
state = self.domain.state()[0] if event not in STATES_MAP[state]: raise RuntimeError("State transition %s not allowed" % event)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _execute_command(self, command, *args): """Execute the state transition command."""
try: command(*args) except libvirt.libvirtError as error: raise RuntimeError("Unable to execute command. %s" % error)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def snapshot_to_checkpoint(volume, snapshot, folder_path): """Turns a QEMU internal snapshot into a QCOW file."""
create_folder(folder_path) name = snapshot.getName() path = os.path.join(folder_path, '%s.qcow2' % name) process = launch_process(QEMU_IMG, "convert", "-f", "qcow2", "-o", "backing_file=%s" % volume_backing_path(volume), "-O", "qcow2", "-s", name, volume_path(volume), path) collect_process_output(process) return path
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compare_disks(disk0, disk1, configuration): """Compares two disks according to the given configuration."""
with DiskComparator(disk0, disk1) as comparator: results = comparator.compare( size=configuration.get('get_file_size', False), identify=configuration.get('identify_files', False), concurrent=configuration.get('use_concurrency', False)) if configuration.get('extract_files', False): extract = results['created_files'] + results['modified_files'] files = comparator.extract(1, extract, path=configuration['results_folder']) results.update(files) if configuration.get('compare_registries', False): results['registry'] = comparator.compare_registry( concurrent=configuration.get('use_concurrency', False)) return results
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start_processing_handler(self, event): """Asynchronous handler starting the disk analysis process."""
results_path = os.path.join(self.configuration['results_folder'], "filesystem.json") self.logger.debug("Event %s: start comparing %s with %s.", event, self.checkpoints[0], self.checkpoints[1]) results = compare_disks(self.checkpoints[0], self.checkpoints[1], self.configuration) with open(results_path, 'w') as results_file: json.dump(results, results_file) self.processing_done.set()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lookup_class(fully_qualified_name): """ Given its fully qualified name, finds the desired class and imports it. Returns the Class object if found. """
module_name, class_name = str(fully_qualified_name).rsplit(".", 1) module = __import__(module_name, globals(), locals(), [class_name], 0) Class = getattr(module, class_name) if not inspect.isclass(Class): raise TypeError( "%s is not of type class: %s" % (class_name, type(Class))) return Class
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def prime_event(event, source, **kwargs): """ Returns the event ready to be triggered. If the given event is a string an Event instance is generated from it. """
if not isinstance(event, Event): event = Event(event, source=source, **kwargs) return event
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def asynchronous(function, event): """ Runs the function asynchronously taking care of exceptions. """
thread = Thread(target=synchronous, args=(function, event)) thread.daemon = True thread.start()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def synchronous(function, event): """ Runs the function synchronously taking care of exceptions. """
try: function(event) except Exception as error: logger = get_function_logger(function) logger.exception(error)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def subscribe(self, event, handler): """ Subscribes a Handler for the given Event. @param event: (str|see.Event) event to react to. @param handler: (callable) function or method to subscribe. """
self._handlers.sync_handlers[event].append(handler)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def subscribe_async(self, event, handler): """ Subscribes an asynchronous Handler for the given Event. An asynchronous handler is executed concurrently to the others without blocking the Events flow. @param event: (str|see.Event) event to react to. @param handler: (callable) function or method to subscribe. """
self._handlers.async_handlers[event].append(handler)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def unsubscribe(self, event, handler): """ Unsubscribes the Handler from the given Event. Both synchronous and asynchronous handlers are removed. @param event: (str|see.Event) event to which the handler is subscribed. @param handler: (callable) function or method to unsubscribe. """
try: self._handlers.sync_handlers[event].remove(handler) except ValueError: self._handlers.async_handlers[event].remove(handler) else: try: self._handlers.async_handlers[event].remove(handler) except ValueError: pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def trigger(self, event, **kwargs): """ Triggers an event. All subscribed handlers will be executed, asynchronous ones won't block this call. @param event: (str|see.Event) event intended to be raised. """
with self._handlers.trigger_mutex: event = prime_event(event, self.__class__.__name__, **kwargs) for handler in self._handlers.async_handlers[event]: asynchronous(handler, event) for handler in self._handlers.sync_handlers[event]: synchronous(handler, event)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def provider_image(self): """Image path getter. This method uses a pluggable image provider to retrieve an image's path. """
if self._image is None: if isinstance(self.configuration['disk']['image'], dict): ProviderClass = lookup_provider_class( self.configuration['disk']['image']['provider']) self._image = ProviderClass( self.configuration['disk']['image']).image else: # If image is not a dictionary, return it as is for backwards # compatibility self._image = self.configuration['disk']['image'] return self._image
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_command(args, asynchronous=False): """Executes a command returning its exit code and output."""
logging.info("Executing %s command %s.", asynchronous and 'asynchronous' or 'synchronous', args) process = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) try: timeout = asynchronous and 1 or None output = process.communicate(timeout=timeout)[0].decode('utf8') except subprocess.TimeoutExpired: pass if asynchronous: return PopenOutput(None, 'Asynchronous call.') else: return PopenOutput(process.returncode, output)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def respond(self, output): """Generates server response."""
response = {'exit_code': output.code, 'command_output': output.log} self.send_response(200) self.send_header('Content-type', 'application/json') self.end_headers() self.wfile.write(bytes(json.dumps(response), "utf8"))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def store_file(self, folder, name): """Stores the uploaded file in the given path."""
path = os.path.join(folder, name) length = self.headers['content-length'] with open(path, 'wb') as sample: sample.write(self.rfile.read(int(length))) return path
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start_processing_handler(self, event): """Asynchronous handler starting the Volatility processes."""
self.logger.debug("Event %s: starting Volatility process(es).", event) for snapshot in self.snapshots: self.process_snapshot(snapshot) self.processing_done.set()