code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def get_logger(name=None): """ Get virtualchain's logger """ level = logging.CRITICAL if DEBUG: logging.disable(logging.NOTSET) level = logging.DEBUG if name is None: name = "<unknown>" log = logging.getLogger(name=name) log.setLevel( level ) console = logging.StreamHandler() console.setLevel( level ) log_format = ('[%(asctime)s] [%(levelname)s] [%(module)s:%(lineno)d] (' + str(os.getpid()) + '.%(thread)d) %(message)s' if DEBUG else '%(message)s') formatter = logging.Formatter( log_format ) console.setFormatter(formatter) log.propagate = False if len(log.handlers) > 0: for i in xrange(0, len(log.handlers)): log.handlers.pop(0) log.addHandler(console) return log
Get virtualchain's logger
Below is the the instruction that describes the task: ### Input: Get virtualchain's logger ### Response: def get_logger(name=None): """ Get virtualchain's logger """ level = logging.CRITICAL if DEBUG: logging.disable(logging.NOTSET) level = logging.DEBUG if name is None: name = "<unknown>" log = logging.getLogger(name=name) log.setLevel( level ) console = logging.StreamHandler() console.setLevel( level ) log_format = ('[%(asctime)s] [%(levelname)s] [%(module)s:%(lineno)d] (' + str(os.getpid()) + '.%(thread)d) %(message)s' if DEBUG else '%(message)s') formatter = logging.Formatter( log_format ) console.setFormatter(formatter) log.propagate = False if len(log.handlers) > 0: for i in xrange(0, len(log.handlers)): log.handlers.pop(0) log.addHandler(console) return log
def wait_condition_spec(self): """Spec for a wait_condition block""" from harpoon.option_spec import image_objs formatted_string = formatted(string_spec(), formatter=MergedOptionStringFormatter) return create_spec(image_objs.WaitCondition , harpoon = formatted(overridden("{harpoon}"), formatter=MergedOptionStringFormatter) , timeout = defaulted(integer_spec(), 300) , wait_between_attempts = defaulted(float_spec(), 5) , greps = optional_spec(dictof(formatted_string, formatted_string)) , command = optional_spec(listof(formatted_string)) , port_open = optional_spec(listof(integer_spec())) , file_value = optional_spec(dictof(formatted_string, formatted_string)) , curl_result = optional_spec(dictof(formatted_string, formatted_string)) , file_exists = optional_spec(listof(formatted_string)) )
Spec for a wait_condition block
Below is the the instruction that describes the task: ### Input: Spec for a wait_condition block ### Response: def wait_condition_spec(self): """Spec for a wait_condition block""" from harpoon.option_spec import image_objs formatted_string = formatted(string_spec(), formatter=MergedOptionStringFormatter) return create_spec(image_objs.WaitCondition , harpoon = formatted(overridden("{harpoon}"), formatter=MergedOptionStringFormatter) , timeout = defaulted(integer_spec(), 300) , wait_between_attempts = defaulted(float_spec(), 5) , greps = optional_spec(dictof(formatted_string, formatted_string)) , command = optional_spec(listof(formatted_string)) , port_open = optional_spec(listof(integer_spec())) , file_value = optional_spec(dictof(formatted_string, formatted_string)) , curl_result = optional_spec(dictof(formatted_string, formatted_string)) , file_exists = optional_spec(listof(formatted_string)) )
def set_remote_config(experiment_config, port, config_file_name): '''Call setClusterMetadata to pass trial''' #set machine_list request_data = dict() request_data['machine_list'] = experiment_config['machineList'] if request_data['machine_list']: for i in range(len(request_data['machine_list'])): if isinstance(request_data['machine_list'][i].get('gpuIndices'), int): request_data['machine_list'][i]['gpuIndices'] = str(request_data['machine_list'][i].get('gpuIndices')) response = rest_put(cluster_metadata_url(port), json.dumps(request_data), REST_TIME_OUT) err_message = '' if not response or not check_response(response): if response is not None: err_message = response.text _, stderr_full_path = get_log_path(config_file_name) with open(stderr_full_path, 'a+') as fout: fout.write(json.dumps(json.loads(err_message), indent=4, sort_keys=True, separators=(',', ':'))) return False, err_message result, message = setNNIManagerIp(experiment_config, port, config_file_name) if not result: return result, message #set trial_config return set_trial_config(experiment_config, port, config_file_name), err_message
Call setClusterMetadata to pass trial
Below is the the instruction that describes the task: ### Input: Call setClusterMetadata to pass trial ### Response: def set_remote_config(experiment_config, port, config_file_name): '''Call setClusterMetadata to pass trial''' #set machine_list request_data = dict() request_data['machine_list'] = experiment_config['machineList'] if request_data['machine_list']: for i in range(len(request_data['machine_list'])): if isinstance(request_data['machine_list'][i].get('gpuIndices'), int): request_data['machine_list'][i]['gpuIndices'] = str(request_data['machine_list'][i].get('gpuIndices')) response = rest_put(cluster_metadata_url(port), json.dumps(request_data), REST_TIME_OUT) err_message = '' if not response or not check_response(response): if response is not None: err_message = response.text _, stderr_full_path = get_log_path(config_file_name) with open(stderr_full_path, 'a+') as fout: fout.write(json.dumps(json.loads(err_message), indent=4, sort_keys=True, separators=(',', ':'))) return False, err_message result, message = setNNIManagerIp(experiment_config, port, config_file_name) if not result: return result, message #set trial_config return set_trial_config(experiment_config, port, config_file_name), err_message
def key_values_cache_key_name(cls, *key_fields): """ Key for fetching unique key values from the cache """ key_fields = key_fields or cls.KEY_FIELDS return 'configuration/{}/key_values/{}'.format(cls.__name__, ','.join(key_fields))
Key for fetching unique key values from the cache
Below is the the instruction that describes the task: ### Input: Key for fetching unique key values from the cache ### Response: def key_values_cache_key_name(cls, *key_fields): """ Key for fetching unique key values from the cache """ key_fields = key_fields or cls.KEY_FIELDS return 'configuration/{}/key_values/{}'.format(cls.__name__, ','.join(key_fields))
def get_productivity_stats(self): """Return the user's productivity stats. :return: A JSON-encoded representation of the user's productivity stats. :rtype: A JSON-encoded object. >>> from pytodoist import todoist >>> user = todoist.login('john.doe@gmail.com', 'password') >>> stats = user.get_productivity_stats() >>> print(stats) {"karma_last_update": 50.0, "karma_trend": "up", ... } """ response = API.get_productivity_stats(self.api_token) _fail_if_contains_errors(response) return response.json()
Return the user's productivity stats. :return: A JSON-encoded representation of the user's productivity stats. :rtype: A JSON-encoded object. >>> from pytodoist import todoist >>> user = todoist.login('john.doe@gmail.com', 'password') >>> stats = user.get_productivity_stats() >>> print(stats) {"karma_last_update": 50.0, "karma_trend": "up", ... }
Below is the the instruction that describes the task: ### Input: Return the user's productivity stats. :return: A JSON-encoded representation of the user's productivity stats. :rtype: A JSON-encoded object. >>> from pytodoist import todoist >>> user = todoist.login('john.doe@gmail.com', 'password') >>> stats = user.get_productivity_stats() >>> print(stats) {"karma_last_update": 50.0, "karma_trend": "up", ... } ### Response: def get_productivity_stats(self): """Return the user's productivity stats. :return: A JSON-encoded representation of the user's productivity stats. :rtype: A JSON-encoded object. >>> from pytodoist import todoist >>> user = todoist.login('john.doe@gmail.com', 'password') >>> stats = user.get_productivity_stats() >>> print(stats) {"karma_last_update": 50.0, "karma_trend": "up", ... } """ response = API.get_productivity_stats(self.api_token) _fail_if_contains_errors(response) return response.json()
def execute(self, input_data): ''' Execute method ''' # Spin up the rekall adapter adapter = RekallAdapter() adapter.set_plugin_name(self.plugin_name) rekall_output = adapter.execute(input_data) # Process the output data for line in rekall_output: if line['type'] == 'm': # Meta self.output['meta'] = line['data'] elif line['type'] == 's': # New Session (Table) if line['data']['name']: self.current_table_name = str(line['data']['name'][1].v()) elif line['type'] == 't': # New Table Headers (column names) self.column_map = {item['cname']: item['name'] if 'name' in item else item['cname'] for item in line['data']} elif line['type'] == 'r': # Row # Add the row to our current table row = RekallAdapter.process_row(line['data'], self.column_map) self.output['tables'][self.current_table_name].append(row) # Process Base entries if 'Base' in row: base_info = self.parse_base(row) row.update(base_info) else: print 'Got unknown line %s: %s' % (line['type'], line['data']) # All done return self.output
Execute method
Below is the the instruction that describes the task: ### Input: Execute method ### Response: def execute(self, input_data): ''' Execute method ''' # Spin up the rekall adapter adapter = RekallAdapter() adapter.set_plugin_name(self.plugin_name) rekall_output = adapter.execute(input_data) # Process the output data for line in rekall_output: if line['type'] == 'm': # Meta self.output['meta'] = line['data'] elif line['type'] == 's': # New Session (Table) if line['data']['name']: self.current_table_name = str(line['data']['name'][1].v()) elif line['type'] == 't': # New Table Headers (column names) self.column_map = {item['cname']: item['name'] if 'name' in item else item['cname'] for item in line['data']} elif line['type'] == 'r': # Row # Add the row to our current table row = RekallAdapter.process_row(line['data'], self.column_map) self.output['tables'][self.current_table_name].append(row) # Process Base entries if 'Base' in row: base_info = self.parse_base(row) row.update(base_info) else: print 'Got unknown line %s: %s' % (line['type'], line['data']) # All done return self.output
def p_propertyDeclaration_7(p): """propertyDeclaration_7 : qualifierList dataType propertyName array ';'""" quals = OrderedDict([(x.name, x) for x in p[1]]) p[0] = CIMProperty(p[3], None, type=p[2], qualifiers=quals, is_array=True, array_size=p[4])
propertyDeclaration_7 : qualifierList dataType propertyName array ';
Below is the the instruction that describes the task: ### Input: propertyDeclaration_7 : qualifierList dataType propertyName array '; ### Response: def p_propertyDeclaration_7(p): """propertyDeclaration_7 : qualifierList dataType propertyName array ';'""" quals = OrderedDict([(x.name, x) for x in p[1]]) p[0] = CIMProperty(p[3], None, type=p[2], qualifiers=quals, is_array=True, array_size=p[4])
def _contains(self, hit): '''Returns True iff (the query contig is contained in the reference contig and the query contig is not flagged to be kept)''' return ( hit.qry_name not in self.contigs_to_keep and hit.qry_name != hit.ref_name and (100 * hit.hit_length_qry / hit.qry_length >= self.min_contig_percent_match) and hit.percent_identity >= self.nucmer_min_id )
Returns True iff (the query contig is contained in the reference contig and the query contig is not flagged to be kept)
Below is the the instruction that describes the task: ### Input: Returns True iff (the query contig is contained in the reference contig and the query contig is not flagged to be kept) ### Response: def _contains(self, hit): '''Returns True iff (the query contig is contained in the reference contig and the query contig is not flagged to be kept)''' return ( hit.qry_name not in self.contigs_to_keep and hit.qry_name != hit.ref_name and (100 * hit.hit_length_qry / hit.qry_length >= self.min_contig_percent_match) and hit.percent_identity >= self.nucmer_min_id )
def hasApplication(self, app): """Returns True if app is among the loaded modules. @param app: Module name. @return: Boolean """ if self._applications is None: self._initApplicationList() return app in self._applications
Returns True if app is among the loaded modules. @param app: Module name. @return: Boolean
Below is the the instruction that describes the task: ### Input: Returns True if app is among the loaded modules. @param app: Module name. @return: Boolean ### Response: def hasApplication(self, app): """Returns True if app is among the loaded modules. @param app: Module name. @return: Boolean """ if self._applications is None: self._initApplicationList() return app in self._applications
def irip(ip, rc=None, r=None, fl=None, fs=None, ot=None, coe=None, moc=DEFAULT_ITER_MAXOBJECTCOUNT): # pylint: disable=too-many-arguments, redefined-outer-name, invalid-name """ *New in pywbem 0.10 as experimental and finalized in 0.12.* This function is a wrapper for :meth:`~pywbem.WBEMConnection.IterReferenceInstancePaths`. Retrieve the instance paths of the association instances that reference a source instance, using the corresponding pull operations if supported by the WBEM server or otherwise the corresponding traditional operation, and using the Python :term:`py:generator` idiom to return the result. This method is an alternative to using the pull operations directly, that frees the user of having to know whether the WBEM server supports pull operations. Parameters: ip (:class:`~pywbem.CIMInstanceName`): Source instance path. rc (:term:`string`): ResultClass filter: Include only traversals across this association (result) class. `None` means this filter is not applied. r (:term:`string`): Role filter: Include only traversals from this role (= reference name) in source object. `None` means this filter is not applied. fl (:term:`string`): Filter query language to be used for the filter defined in the `fs` parameter. The DMTF-defined Filter Query Language (see :term:`DSP0212`) is specified as "DMTF:FQL". `None` means that no such filtering is peformed. fs (:term:`string`): Filter to apply to objects to be returned. Based on filter query language defined by `fl` parameter. `None` means that no such filtering is peformed. ot (:class:`~pywbem.Uint32`): Operation timeout in seconds. This is the minimum time the WBEM server must keep the enumeration session open between requests on that session. A value of 0 indicates that the server should never time out. The server may reject the proposed value. `None` will cause the server to use its default timeout. coe (:class:`py:bool`): Continue on error flag. `None` will cause the server to use its default of `False`. 
moc (:class:`~pywbem.Uint32`): Maximum number of instances the WBEM server may return for each of the open and pull requests issued during the iterations over the returned generator object. Zero and `None` are not allowed. Returns: :term:`py:generator` iterating :class:`~pywbem.CIMInstanceName`: A generator object that iterates the resulting CIM instance paths. These instance paths have their host and namespace components set. """ return CONN.IterReferenceInstancePaths(ip, ResultClass=rc, Role=r, FilterQueryLanguage=fl, FilterQuery=fs, OperationTimeout=ot, ContinueOnError=coe, MaxObjectCount=moc)
*New in pywbem 0.10 as experimental and finalized in 0.12.* This function is a wrapper for :meth:`~pywbem.WBEMConnection.IterReferenceInstancePaths`. Retrieve the instance paths of the association instances that reference a source instance, using the corresponding pull operations if supported by the WBEM server or otherwise the corresponding traditional operation, and using the Python :term:`py:generator` idiom to return the result. This method is an alternative to using the pull operations directly, that frees the user of having to know whether the WBEM server supports pull operations. Parameters: ip (:class:`~pywbem.CIMInstanceName`): Source instance path. rc (:term:`string`): ResultClass filter: Include only traversals across this association (result) class. `None` means this filter is not applied. r (:term:`string`): Role filter: Include only traversals from this role (= reference name) in source object. `None` means this filter is not applied. fl (:term:`string`): Filter query language to be used for the filter defined in the `fs` parameter. The DMTF-defined Filter Query Language (see :term:`DSP0212`) is specified as "DMTF:FQL". `None` means that no such filtering is peformed. fs (:term:`string`): Filter to apply to objects to be returned. Based on filter query language defined by `fl` parameter. `None` means that no such filtering is peformed. ot (:class:`~pywbem.Uint32`): Operation timeout in seconds. This is the minimum time the WBEM server must keep the enumeration session open between requests on that session. A value of 0 indicates that the server should never time out. The server may reject the proposed value. `None` will cause the server to use its default timeout. coe (:class:`py:bool`): Continue on error flag. `None` will cause the server to use its default of `False`. 
moc (:class:`~pywbem.Uint32`): Maximum number of instances the WBEM server may return for each of the open and pull requests issued during the iterations over the returned generator object. Zero and `None` are not allowed. Returns: :term:`py:generator` iterating :class:`~pywbem.CIMInstanceName`: A generator object that iterates the resulting CIM instance paths. These instance paths have their host and namespace components set.
Below is the the instruction that describes the task: ### Input: *New in pywbem 0.10 as experimental and finalized in 0.12.* This function is a wrapper for :meth:`~pywbem.WBEMConnection.IterReferenceInstancePaths`. Retrieve the instance paths of the association instances that reference a source instance, using the corresponding pull operations if supported by the WBEM server or otherwise the corresponding traditional operation, and using the Python :term:`py:generator` idiom to return the result. This method is an alternative to using the pull operations directly, that frees the user of having to know whether the WBEM server supports pull operations. Parameters: ip (:class:`~pywbem.CIMInstanceName`): Source instance path. rc (:term:`string`): ResultClass filter: Include only traversals across this association (result) class. `None` means this filter is not applied. r (:term:`string`): Role filter: Include only traversals from this role (= reference name) in source object. `None` means this filter is not applied. fl (:term:`string`): Filter query language to be used for the filter defined in the `fs` parameter. The DMTF-defined Filter Query Language (see :term:`DSP0212`) is specified as "DMTF:FQL". `None` means that no such filtering is peformed. fs (:term:`string`): Filter to apply to objects to be returned. Based on filter query language defined by `fl` parameter. `None` means that no such filtering is peformed. ot (:class:`~pywbem.Uint32`): Operation timeout in seconds. This is the minimum time the WBEM server must keep the enumeration session open between requests on that session. A value of 0 indicates that the server should never time out. The server may reject the proposed value. `None` will cause the server to use its default timeout. coe (:class:`py:bool`): Continue on error flag. `None` will cause the server to use its default of `False`. 
moc (:class:`~pywbem.Uint32`): Maximum number of instances the WBEM server may return for each of the open and pull requests issued during the iterations over the returned generator object. Zero and `None` are not allowed. Returns: :term:`py:generator` iterating :class:`~pywbem.CIMInstanceName`: A generator object that iterates the resulting CIM instance paths. These instance paths have their host and namespace components set. ### Response: def irip(ip, rc=None, r=None, fl=None, fs=None, ot=None, coe=None, moc=DEFAULT_ITER_MAXOBJECTCOUNT): # pylint: disable=too-many-arguments, redefined-outer-name, invalid-name """ *New in pywbem 0.10 as experimental and finalized in 0.12.* This function is a wrapper for :meth:`~pywbem.WBEMConnection.IterReferenceInstancePaths`. Retrieve the instance paths of the association instances that reference a source instance, using the corresponding pull operations if supported by the WBEM server or otherwise the corresponding traditional operation, and using the Python :term:`py:generator` idiom to return the result. This method is an alternative to using the pull operations directly, that frees the user of having to know whether the WBEM server supports pull operations. Parameters: ip (:class:`~pywbem.CIMInstanceName`): Source instance path. rc (:term:`string`): ResultClass filter: Include only traversals across this association (result) class. `None` means this filter is not applied. r (:term:`string`): Role filter: Include only traversals from this role (= reference name) in source object. `None` means this filter is not applied. fl (:term:`string`): Filter query language to be used for the filter defined in the `fs` parameter. The DMTF-defined Filter Query Language (see :term:`DSP0212`) is specified as "DMTF:FQL". `None` means that no such filtering is peformed. fs (:term:`string`): Filter to apply to objects to be returned. Based on filter query language defined by `fl` parameter. `None` means that no such filtering is peformed. 
ot (:class:`~pywbem.Uint32`): Operation timeout in seconds. This is the minimum time the WBEM server must keep the enumeration session open between requests on that session. A value of 0 indicates that the server should never time out. The server may reject the proposed value. `None` will cause the server to use its default timeout. coe (:class:`py:bool`): Continue on error flag. `None` will cause the server to use its default of `False`. moc (:class:`~pywbem.Uint32`): Maximum number of instances the WBEM server may return for each of the open and pull requests issued during the iterations over the returned generator object. Zero and `None` are not allowed. Returns: :term:`py:generator` iterating :class:`~pywbem.CIMInstanceName`: A generator object that iterates the resulting CIM instance paths. These instance paths have their host and namespace components set. """ return CONN.IterReferenceInstancePaths(ip, ResultClass=rc, Role=r, FilterQueryLanguage=fl, FilterQuery=fs, OperationTimeout=ot, ContinueOnError=coe, MaxObjectCount=moc)
def tag_builder(parser, token, cls, flow_type): """Helper function handling flow form tags.""" tokens = token.split_contents() tokens_num = len(tokens) if tokens_num == 1 or (tokens_num == 3 and tokens[1] == 'for'): flow_name = None if tokens_num == 3: flow_name = tokens[2] return cls(flow_name) else: raise template.TemplateSyntaxError( '"sitegate_%(type)s_form" tag requires zero or two arguments. ' 'E.g. {%% sitegate_%(type)s_form %%} or ' '{%% sitegate_%(type)s_form for ClassicSignup %%}.' % {'type': flow_type})
Helper function handling flow form tags.
Below is the the instruction that describes the task: ### Input: Helper function handling flow form tags. ### Response: def tag_builder(parser, token, cls, flow_type): """Helper function handling flow form tags.""" tokens = token.split_contents() tokens_num = len(tokens) if tokens_num == 1 or (tokens_num == 3 and tokens[1] == 'for'): flow_name = None if tokens_num == 3: flow_name = tokens[2] return cls(flow_name) else: raise template.TemplateSyntaxError( '"sitegate_%(type)s_form" tag requires zero or two arguments. ' 'E.g. {%% sitegate_%(type)s_form %%} or ' '{%% sitegate_%(type)s_form for ClassicSignup %%}.' % {'type': flow_type})
def run_iterations(cls, the_callable, iterations=1, label=None, schedule='* * * * * *', userdata = None, run_immediately=False, delay_until=None): """Class method to run a callable with a specified number of iterations""" task = task_with_callable(the_callable, label=label, schedule=schedule, userdata=userdata) task.iterations = iterations if delay_until is not None: if isinstance(delay_until, datetime): if delay_until > timezone.now(): task.start_running = delay_until else: raise ValueError("Task cannot start running in the past") else: raise ValueError("delay_until must be a datetime.datetime instance") if run_immediately: task.next_run = timezone.now() else: task.calc_next_run() task.save()
Class method to run a callable with a specified number of iterations
Below is the the instruction that describes the task: ### Input: Class method to run a callable with a specified number of iterations ### Response: def run_iterations(cls, the_callable, iterations=1, label=None, schedule='* * * * * *', userdata = None, run_immediately=False, delay_until=None): """Class method to run a callable with a specified number of iterations""" task = task_with_callable(the_callable, label=label, schedule=schedule, userdata=userdata) task.iterations = iterations if delay_until is not None: if isinstance(delay_until, datetime): if delay_until > timezone.now(): task.start_running = delay_until else: raise ValueError("Task cannot start running in the past") else: raise ValueError("delay_until must be a datetime.datetime instance") if run_immediately: task.next_run = timezone.now() else: task.calc_next_run() task.save()
def begin_date(self, value): """ A datetime.datetime object of when the certificate becomes valid. """ if not isinstance(value, datetime): raise TypeError(_pretty_message( ''' begin_date must be an instance of datetime.datetime, not %s ''', _type_name(value) )) self._begin_date = value
A datetime.datetime object of when the certificate becomes valid.
Below is the the instruction that describes the task: ### Input: A datetime.datetime object of when the certificate becomes valid. ### Response: def begin_date(self, value): """ A datetime.datetime object of when the certificate becomes valid. """ if not isinstance(value, datetime): raise TypeError(_pretty_message( ''' begin_date must be an instance of datetime.datetime, not %s ''', _type_name(value) )) self._begin_date = value
def pickle_load(name, extension='.pkl'): """Load data with pickle. Parameters ---------- name: str Path to save to (includes dir, excludes extension). extension: str, optional File extension. Returns ------- Contents of file path. """ filename = name + extension infile = open(filename, 'rb') data = pickle.load(infile) infile.close() return data
Load data with pickle. Parameters ---------- name: str Path to save to (includes dir, excludes extension). extension: str, optional File extension. Returns ------- Contents of file path.
Below is the the instruction that describes the task: ### Input: Load data with pickle. Parameters ---------- name: str Path to save to (includes dir, excludes extension). extension: str, optional File extension. Returns ------- Contents of file path. ### Response: def pickle_load(name, extension='.pkl'): """Load data with pickle. Parameters ---------- name: str Path to save to (includes dir, excludes extension). extension: str, optional File extension. Returns ------- Contents of file path. """ filename = name + extension infile = open(filename, 'rb') data = pickle.load(infile) infile.close() return data
def jinja_loader(self): """Search templates in custom app templates dir (default Flask behaviour), fallback on abilian templates.""" loaders = self._jinja_loaders del self._jinja_loaders loaders.append(Flask.jinja_loader.func(self)) loaders.reverse() return jinja2.ChoiceLoader(loaders)
Search templates in custom app templates dir (default Flask behaviour), fallback on abilian templates.
Below is the the instruction that describes the task: ### Input: Search templates in custom app templates dir (default Flask behaviour), fallback on abilian templates. ### Response: def jinja_loader(self): """Search templates in custom app templates dir (default Flask behaviour), fallback on abilian templates.""" loaders = self._jinja_loaders del self._jinja_loaders loaders.append(Flask.jinja_loader.func(self)) loaders.reverse() return jinja2.ChoiceLoader(loaders)
def ping_pong(connection, command, strexp, timeout=10): ''' Enter a command and wait for something to happen (enter + expect combined) @param connection: connection to the host @type connection: L{Connection} @param command: command to execute @type command: str @param strexp: string to convert to expression (.*string.*) @type strexp: str @param timeout: timeout for performing expect operation @type timeout: int @return: True if succeeded @rtype: bool @raises ExpectFailed ''' Expect.enter(connection, command) return Expect.expect(connection, strexp, timeout)
Enter a command and wait for something to happen (enter + expect combined) @param connection: connection to the host @type connection: L{Connection} @param command: command to execute @type command: str @param strexp: string to convert to expression (.*string.*) @type strexp: str @param timeout: timeout for performing expect operation @type timeout: int @return: True if succeeded @rtype: bool @raises ExpectFailed
Below is the the instruction that describes the task: ### Input: Enter a command and wait for something to happen (enter + expect combined) @param connection: connection to the host @type connection: L{Connection} @param command: command to execute @type command: str @param strexp: string to convert to expression (.*string.*) @type strexp: str @param timeout: timeout for performing expect operation @type timeout: int @return: True if succeeded @rtype: bool @raises ExpectFailed ### Response: def ping_pong(connection, command, strexp, timeout=10): ''' Enter a command and wait for something to happen (enter + expect combined) @param connection: connection to the host @type connection: L{Connection} @param command: command to execute @type command: str @param strexp: string to convert to expression (.*string.*) @type strexp: str @param timeout: timeout for performing expect operation @type timeout: int @return: True if succeeded @rtype: bool @raises ExpectFailed ''' Expect.enter(connection, command) return Expect.expect(connection, strexp, timeout)
def process(self, value, tag=None): """ Process (marshal) the tag with the specified value using the optional type information. @param value: The value (content) of the XML node. @type value: (L{Object}|any) @param tag: The (optional) tag name for the value. The default is value.__class__.__name__ @type tag: str @return: An xml node. @rtype: L{Element} """ content = Content(tag=tag, value=value) result = Core.process(self, content) return result
Process (marshal) the tag with the specified value using the optional type information. @param value: The value (content) of the XML node. @type value: (L{Object}|any) @param tag: The (optional) tag name for the value. The default is value.__class__.__name__ @type tag: str @return: An xml node. @rtype: L{Element}
Below is the the instruction that describes the task: ### Input: Process (marshal) the tag with the specified value using the optional type information. @param value: The value (content) of the XML node. @type value: (L{Object}|any) @param tag: The (optional) tag name for the value. The default is value.__class__.__name__ @type tag: str @return: An xml node. @rtype: L{Element} ### Response: def process(self, value, tag=None): """ Process (marshal) the tag with the specified value using the optional type information. @param value: The value (content) of the XML node. @type value: (L{Object}|any) @param tag: The (optional) tag name for the value. The default is value.__class__.__name__ @type tag: str @return: An xml node. @rtype: L{Element} """ content = Content(tag=tag, value=value) result = Core.process(self, content) return result
def entities(self, subject_id): """ Returns all the entities of assertions for a subject, disregarding whether the assertion still is valid or not. :param subject_id: The identifier of the subject :return: A possibly empty list of entity identifiers """ try: return [i["entity_id"] for i in self._cache.find({"subject_id": subject_id})] except ValueError: return []
Returns all the entities of assertions for a subject, disregarding whether the assertion still is valid or not. :param subject_id: The identifier of the subject :return: A possibly empty list of entity identifiers
Below is the instruction that describes the task: ### Input: Returns all the entities of assertions for a subject, disregarding whether the assertion still is valid or not. :param subject_id: The identifier of the subject :return: A possibly empty list of entity identifiers ### Response: def entities(self, subject_id): """ Returns all the entities of assertions for a subject, disregarding whether the assertion still is valid or not. :param subject_id: The identifier of the subject :return: A possibly empty list of entity identifiers """ try: return [i["entity_id"] for i in self._cache.find({"subject_id": subject_id})] except ValueError: return []
def reset_everything(self, payload): """Kill all processes, delete the queue and clean everything up.""" kill_signal = signals['9'] self.process_handler.kill_all(kill_signal, True) self.process_handler.wait_for_finish() self.reset = True answer = {'message': 'Resetting current queue', 'status': 'success'} return answer
Kill all processes, delete the queue and clean everything up.
Below is the instruction that describes the task: ### Input: Kill all processes, delete the queue and clean everything up. ### Response: def reset_everything(self, payload): """Kill all processes, delete the queue and clean everything up.""" kill_signal = signals['9'] self.process_handler.kill_all(kill_signal, True) self.process_handler.wait_for_finish() self.reset = True answer = {'message': 'Resetting current queue', 'status': 'success'} return answer
def send_mass_sms(datatuple, fail_silently=False, auth_user=None, auth_password=None, connection=None): """ Given a datatuple of (message, from_phone, to, flash), sends each message to each recipient list. :returns: the number of SMSs sent. """ from sendsms.message import SmsMessage connection = connection or get_connection( username = auth_user, password = auth_password, fail_silently = fail_silently ) messages = [SmsMessage(message=message, from_phone=from_phone, to=to, flash=flash) for message, from_phone, to, flash in datatuple] connection.send_messages(messages)
Given a datatuple of (message, from_phone, to, flash), sends each message to each recipient list. :returns: the number of SMSs sent.
Below is the instruction that describes the task: ### Input: Given a datatuple of (message, from_phone, to, flash), sends each message to each recipient list. :returns: the number of SMSs sent. ### Response: def send_mass_sms(datatuple, fail_silently=False, auth_user=None, auth_password=None, connection=None): """ Given a datatuple of (message, from_phone, to, flash), sends each message to each recipient list. :returns: the number of SMSs sent. """ from sendsms.message import SmsMessage connection = connection or get_connection( username = auth_user, password = auth_password, fail_silently = fail_silently ) messages = [SmsMessage(message=message, from_phone=from_phone, to=to, flash=flash) for message, from_phone, to, flash in datatuple] connection.send_messages(messages)
def _isLastCodeColumn(self, block, column): """Return true if the given column is at least equal to the column that contains the last non-whitespace character at the given line, or if the rest of the line is a comment. """ return column >= self._lastColumn(block) or \ self._isComment(block, self._nextNonSpaceColumn(block, column + 1))
Return true if the given column is at least equal to the column that contains the last non-whitespace character at the given line, or if the rest of the line is a comment.
Below is the instruction that describes the task: ### Input: Return true if the given column is at least equal to the column that contains the last non-whitespace character at the given line, or if the rest of the line is a comment. ### Response: def _isLastCodeColumn(self, block, column): """Return true if the given column is at least equal to the column that contains the last non-whitespace character at the given line, or if the rest of the line is a comment. """ return column >= self._lastColumn(block) or \ self._isComment(block, self._nextNonSpaceColumn(block, column + 1))
def retrieve(self, id) : """ Retrieve a single tag Returns a single tag available to the user according to the unique ID provided If the specified tag does not exist, this query will return an error :calls: ``get /tags/{id}`` :param int id: Unique identifier of a Tag. :return: Dictionary that support attriubte-style access and represent Tag resource. :rtype: dict """ _, _, tag = self.http_client.get("/tags/{id}".format(id=id)) return tag
Retrieve a single tag Returns a single tag available to the user according to the unique ID provided If the specified tag does not exist, this query will return an error :calls: ``get /tags/{id}`` :param int id: Unique identifier of a Tag. :return: Dictionary that support attriubte-style access and represent Tag resource. :rtype: dict
Below is the instruction that describes the task: ### Input: Retrieve a single tag Returns a single tag available to the user according to the unique ID provided If the specified tag does not exist, this query will return an error :calls: ``get /tags/{id}`` :param int id: Unique identifier of a Tag. :return: Dictionary that support attriubte-style access and represent Tag resource. :rtype: dict ### Response: def retrieve(self, id) : """ Retrieve a single tag Returns a single tag available to the user according to the unique ID provided If the specified tag does not exist, this query will return an error :calls: ``get /tags/{id}`` :param int id: Unique identifier of a Tag. :return: Dictionary that support attriubte-style access and represent Tag resource. :rtype: dict """ _, _, tag = self.http_client.get("/tags/{id}".format(id=id)) return tag
def transact_with_contract_function( address, web3, function_name=None, transaction=None, contract_abi=None, fn_abi=None, *args, **kwargs): """ Helper function for interacting with a contract function by sending a transaction. """ transact_transaction = prepare_transaction( address, web3, fn_identifier=function_name, contract_abi=contract_abi, transaction=transaction, fn_abi=fn_abi, fn_args=args, fn_kwargs=kwargs, ) txn_hash = web3.eth.sendTransaction(transact_transaction) return txn_hash
Helper function for interacting with a contract function by sending a transaction.
Below is the instruction that describes the task: ### Input: Helper function for interacting with a contract function by sending a transaction. ### Response: def transact_with_contract_function( address, web3, function_name=None, transaction=None, contract_abi=None, fn_abi=None, *args, **kwargs): """ Helper function for interacting with a contract function by sending a transaction. """ transact_transaction = prepare_transaction( address, web3, fn_identifier=function_name, contract_abi=contract_abi, transaction=transaction, fn_abi=fn_abi, fn_args=args, fn_kwargs=kwargs, ) txn_hash = web3.eth.sendTransaction(transact_transaction) return txn_hash
def get_word_level_vocab(self): """Provides word level vocabulary Returns ------- Vocab Word level vocabulary """ def simple_tokenize(source_str, token_delim=' ', seq_delim='\n'): return list(filter(None, re.split(token_delim + '|' + seq_delim, source_str))) return VocabProvider._create_squad_vocab(simple_tokenize, self._dataset)
Provides word level vocabulary Returns ------- Vocab Word level vocabulary
Below is the instruction that describes the task: ### Input: Provides word level vocabulary Returns ------- Vocab Word level vocabulary ### Response: def get_word_level_vocab(self): """Provides word level vocabulary Returns ------- Vocab Word level vocabulary """ def simple_tokenize(source_str, token_delim=' ', seq_delim='\n'): return list(filter(None, re.split(token_delim + '|' + seq_delim, source_str))) return VocabProvider._create_squad_vocab(simple_tokenize, self._dataset)
def corpus(self): '''Command to add a corpus to the dsrt library''' # Initialize the addcorpus subcommand's argparser description = '''The corpus subcommand has a number of subcommands of its own, including: list\t-\tlists all available corpora in dsrt's library add\t-\tadds a corpus to dsrt's library''' parser = argparse.ArgumentParser(description=description) self.init_corpus_args(parser) # parse the args we got args = parser.parse_args(sys.argv[2:3]) corpus_command = 'corpus_' + args.corpus_command if not hasattr(self, corpus_command): print('Unrecognized corpus command.') parser.print_help() exit(1) getattr(self, corpus_command)()
Command to add a corpus to the dsrt library
Below is the instruction that describes the task: ### Input: Command to add a corpus to the dsrt library ### Response: def corpus(self): '''Command to add a corpus to the dsrt library''' # Initialize the addcorpus subcommand's argparser description = '''The corpus subcommand has a number of subcommands of its own, including: list\t-\tlists all available corpora in dsrt's library add\t-\tadds a corpus to dsrt's library''' parser = argparse.ArgumentParser(description=description) self.init_corpus_args(parser) # parse the args we got args = parser.parse_args(sys.argv[2:3]) corpus_command = 'corpus_' + args.corpus_command if not hasattr(self, corpus_command): print('Unrecognized corpus command.') parser.print_help() exit(1) getattr(self, corpus_command)()
def process_input(self, stream, value, rpc_executor): """Process an input through this sensor graph. The tick information in value should be correct and is transfered to all results produced by nodes acting on this tick. Args: stream (DataStream): The stream the input is part of value (IOTileReading): The value to process rpc_executor (RPCExecutor): An object capable of executing RPCs in case we need to do that. """ self.sensor_log.push(stream, value) # FIXME: This should be specified in our device model if stream.important: associated_output = stream.associated_stream() self.sensor_log.push(associated_output, value) to_check = deque([x for x in self.roots]) while len(to_check) > 0: node = to_check.popleft() if node.triggered(): try: results = node.process(rpc_executor, self.mark_streamer) for result in results: result.raw_time = value.raw_time self.sensor_log.push(node.stream, result) except: self._logger.exception("Unhandled exception in graph node processing function for node %s", str(node)) # If we generated any outputs, notify our downstream nodes # so that they are also checked to see if they should run. if len(results) > 0: to_check.extend(node.outputs)
Process an input through this sensor graph. The tick information in value should be correct and is transfered to all results produced by nodes acting on this tick. Args: stream (DataStream): The stream the input is part of value (IOTileReading): The value to process rpc_executor (RPCExecutor): An object capable of executing RPCs in case we need to do that.
Below is the instruction that describes the task: ### Input: Process an input through this sensor graph. The tick information in value should be correct and is transfered to all results produced by nodes acting on this tick. Args: stream (DataStream): The stream the input is part of value (IOTileReading): The value to process rpc_executor (RPCExecutor): An object capable of executing RPCs in case we need to do that. ### Response: def process_input(self, stream, value, rpc_executor): """Process an input through this sensor graph. The tick information in value should be correct and is transfered to all results produced by nodes acting on this tick. Args: stream (DataStream): The stream the input is part of value (IOTileReading): The value to process rpc_executor (RPCExecutor): An object capable of executing RPCs in case we need to do that. """ self.sensor_log.push(stream, value) # FIXME: This should be specified in our device model if stream.important: associated_output = stream.associated_stream() self.sensor_log.push(associated_output, value) to_check = deque([x for x in self.roots]) while len(to_check) > 0: node = to_check.popleft() if node.triggered(): try: results = node.process(rpc_executor, self.mark_streamer) for result in results: result.raw_time = value.raw_time self.sensor_log.push(node.stream, result) except: self._logger.exception("Unhandled exception in graph node processing function for node %s", str(node)) # If we generated any outputs, notify our downstream nodes # so that they are also checked to see if they should run. if len(results) > 0: to_check.extend(node.outputs)
def is_labial(c,lang): """ Is the character a labial """ o=get_offset(c,lang) return (o>=LABIAL_RANGE[0] and o<=LABIAL_RANGE[1])
Is the character a labial
Below is the instruction that describes the task: ### Input: Is the character a labial ### Response: def is_labial(c,lang): """ Is the character a labial """ o=get_offset(c,lang) return (o>=LABIAL_RANGE[0] and o<=LABIAL_RANGE[1])
def edit_user(self, id, user_avatar_token=None, user_avatar_url=None, user_email=None, user_locale=None, user_name=None, user_short_name=None, user_sortable_name=None, user_time_zone=None): """ Edit a user. Modify an existing user. To modify a user's login, see the documentation for logins. """ path = {} data = {} params = {} # REQUIRED - PATH - id """ID""" path["id"] = id # OPTIONAL - user[name] """The full name of the user. This name will be used by teacher for grading.""" if user_name is not None: data["user[name]"] = user_name # OPTIONAL - user[short_name] """User's name as it will be displayed in discussions, messages, and comments.""" if user_short_name is not None: data["user[short_name]"] = user_short_name # OPTIONAL - user[sortable_name] """User's name as used to sort alphabetically in lists.""" if user_sortable_name is not None: data["user[sortable_name]"] = user_sortable_name # OPTIONAL - user[time_zone] """The time zone for the user. Allowed time zones are {http://www.iana.org/time-zones IANA time zones} or friendlier {http://api.rubyonrails.org/classes/ActiveSupport/TimeZone.html Ruby on Rails time zones}.""" if user_time_zone is not None: data["user[time_zone]"] = user_time_zone # OPTIONAL - user[email] """The default email address of the user.""" if user_email is not None: data["user[email]"] = user_email # OPTIONAL - user[locale] """The user's preferred language, from the list of languages Canvas supports. This is in RFC-5646 format.""" if user_locale is not None: data["user[locale]"] = user_locale # OPTIONAL - user[avatar][token] """A unique representation of the avatar record to assign as the user's current avatar. This token can be obtained from the user avatars endpoint. This supersedes the user [avatar] [url] argument, and if both are included the url will be ignored. Note: this is an internal representation and is subject to change without notice. 
It should be consumed with this api endpoint and used in the user update endpoint, and should not be constructed by the client.""" if user_avatar_token is not None: data["user[avatar][token]"] = user_avatar_token # OPTIONAL - user[avatar][url] """To set the user's avatar to point to an external url, do not include a token and instead pass the url here. Warning: For maximum compatibility, please use 128 px square images.""" if user_avatar_url is not None: data["user[avatar][url]"] = user_avatar_url self.logger.debug("PUT /api/v1/users/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/users/{id}".format(**path), data=data, params=params, single_item=True)
Edit a user. Modify an existing user. To modify a user's login, see the documentation for logins.
Below is the the instruction that describes the task: ### Input: Edit a user. Modify an existing user. To modify a user's login, see the documentation for logins. ### Response: def edit_user(self, id, user_avatar_token=None, user_avatar_url=None, user_email=None, user_locale=None, user_name=None, user_short_name=None, user_sortable_name=None, user_time_zone=None): """ Edit a user. Modify an existing user. To modify a user's login, see the documentation for logins. """ path = {} data = {} params = {} # REQUIRED - PATH - id """ID""" path["id"] = id # OPTIONAL - user[name] """The full name of the user. This name will be used by teacher for grading.""" if user_name is not None: data["user[name]"] = user_name # OPTIONAL - user[short_name] """User's name as it will be displayed in discussions, messages, and comments.""" if user_short_name is not None: data["user[short_name]"] = user_short_name # OPTIONAL - user[sortable_name] """User's name as used to sort alphabetically in lists.""" if user_sortable_name is not None: data["user[sortable_name]"] = user_sortable_name # OPTIONAL - user[time_zone] """The time zone for the user. Allowed time zones are {http://www.iana.org/time-zones IANA time zones} or friendlier {http://api.rubyonrails.org/classes/ActiveSupport/TimeZone.html Ruby on Rails time zones}.""" if user_time_zone is not None: data["user[time_zone]"] = user_time_zone # OPTIONAL - user[email] """The default email address of the user.""" if user_email is not None: data["user[email]"] = user_email # OPTIONAL - user[locale] """The user's preferred language, from the list of languages Canvas supports. This is in RFC-5646 format.""" if user_locale is not None: data["user[locale]"] = user_locale # OPTIONAL - user[avatar][token] """A unique representation of the avatar record to assign as the user's current avatar. This token can be obtained from the user avatars endpoint. This supersedes the user [avatar] [url] argument, and if both are included the url will be ignored. 
Note: this is an internal representation and is subject to change without notice. It should be consumed with this api endpoint and used in the user update endpoint, and should not be constructed by the client.""" if user_avatar_token is not None: data["user[avatar][token]"] = user_avatar_token # OPTIONAL - user[avatar][url] """To set the user's avatar to point to an external url, do not include a token and instead pass the url here. Warning: For maximum compatibility, please use 128 px square images.""" if user_avatar_url is not None: data["user[avatar][url]"] = user_avatar_url self.logger.debug("PUT /api/v1/users/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/users/{id}".format(**path), data=data, params=params, single_item=True)
def upload(self, login, package_name, release, basename, fd, distribution_type, description='', md5=None, size=None, dependencies=None, attrs=None, channels=('main',), callback=None): ''' Upload a new distribution to a package release. :param login: the login of the package owner :param package_name: the name of the package :param version: the version string of the release :param basename: the basename of the distribution to download :param fd: a file like object to upload :param distribution_type: pypi or conda or ipynb, etc :param description: (optional) a short description about the file :param attrs: any extra attributes about the file (eg. build=1, pyversion='2.7', os='osx') ''' url = '%s/stage/%s/%s/%s/%s' % (self.domain, login, package_name, release, quote(basename)) if attrs is None: attrs = {} if not isinstance(attrs, dict): raise TypeError('argument attrs must be a dictionary') payload = dict(distribution_type=distribution_type, description=description, attrs=attrs, dependencies=dependencies, channels=channels) data, headers = jencode(payload) res = self.session.post(url, data=data, headers=headers) self._check_response(res) obj = res.json() s3url = obj['post_url'] s3data = obj['form_data'] if md5 is None: _hexmd5, b64md5, size = compute_hash(fd, size=size) elif size is None: spos = fd.tell() fd.seek(0, os.SEEK_END) size = fd.tell() - spos fd.seek(spos) s3data['Content-Length'] = size s3data['Content-MD5'] = b64md5 data_stream, headers = stream_multipart(s3data, files={'file':(basename, fd)}, callback=callback) request_method = self.session if s3url.startswith(self.domain) else requests s3res = request_method.post( s3url, data=data_stream, verify=self.session.verify, timeout=10 * 60 * 60, headers=headers ) if s3res.status_code != 201: logger.info(s3res.text) logger.info('') logger.info('') raise errors.BinstarError('Error uploading package', s3res.status_code) url = '%s/commit/%s/%s/%s/%s' % (self.domain, login, package_name, release, quote(basename)) 
payload = dict(dist_id=obj['dist_id']) data, headers = jencode(payload) res = self.session.post(url, data=data, headers=headers) self._check_response(res) return res.json()
Upload a new distribution to a package release. :param login: the login of the package owner :param package_name: the name of the package :param version: the version string of the release :param basename: the basename of the distribution to download :param fd: a file like object to upload :param distribution_type: pypi or conda or ipynb, etc :param description: (optional) a short description about the file :param attrs: any extra attributes about the file (eg. build=1, pyversion='2.7', os='osx')
Below is the the instruction that describes the task: ### Input: Upload a new distribution to a package release. :param login: the login of the package owner :param package_name: the name of the package :param version: the version string of the release :param basename: the basename of the distribution to download :param fd: a file like object to upload :param distribution_type: pypi or conda or ipynb, etc :param description: (optional) a short description about the file :param attrs: any extra attributes about the file (eg. build=1, pyversion='2.7', os='osx') ### Response: def upload(self, login, package_name, release, basename, fd, distribution_type, description='', md5=None, size=None, dependencies=None, attrs=None, channels=('main',), callback=None): ''' Upload a new distribution to a package release. :param login: the login of the package owner :param package_name: the name of the package :param version: the version string of the release :param basename: the basename of the distribution to download :param fd: a file like object to upload :param distribution_type: pypi or conda or ipynb, etc :param description: (optional) a short description about the file :param attrs: any extra attributes about the file (eg. 
build=1, pyversion='2.7', os='osx') ''' url = '%s/stage/%s/%s/%s/%s' % (self.domain, login, package_name, release, quote(basename)) if attrs is None: attrs = {} if not isinstance(attrs, dict): raise TypeError('argument attrs must be a dictionary') payload = dict(distribution_type=distribution_type, description=description, attrs=attrs, dependencies=dependencies, channels=channels) data, headers = jencode(payload) res = self.session.post(url, data=data, headers=headers) self._check_response(res) obj = res.json() s3url = obj['post_url'] s3data = obj['form_data'] if md5 is None: _hexmd5, b64md5, size = compute_hash(fd, size=size) elif size is None: spos = fd.tell() fd.seek(0, os.SEEK_END) size = fd.tell() - spos fd.seek(spos) s3data['Content-Length'] = size s3data['Content-MD5'] = b64md5 data_stream, headers = stream_multipart(s3data, files={'file':(basename, fd)}, callback=callback) request_method = self.session if s3url.startswith(self.domain) else requests s3res = request_method.post( s3url, data=data_stream, verify=self.session.verify, timeout=10 * 60 * 60, headers=headers ) if s3res.status_code != 201: logger.info(s3res.text) logger.info('') logger.info('') raise errors.BinstarError('Error uploading package', s3res.status_code) url = '%s/commit/%s/%s/%s/%s' % (self.domain, login, package_name, release, quote(basename)) payload = dict(dist_id=obj['dist_id']) data, headers = jencode(payload) res = self.session.post(url, data=data, headers=headers) self._check_response(res) return res.json()
def getWindows(input): """Get a source's windows""" with rasterio.open(input) as src: return [[window, ij] for ij, window in src.block_windows()]
Get a source's windows
Below is the instruction that describes the task: ### Input: Get a source's windows ### Response: def getWindows(input): """Get a source's windows""" with rasterio.open(input) as src: return [[window, ij] for ij, window in src.block_windows()]
def to_numpy(self, dtype=None, copy=False): """ Convert the DataFrame to a NumPy array. .. versionadded:: 0.24.0 By default, the dtype of the returned array will be the common NumPy dtype of all types in the DataFrame. For example, if the dtypes are ``float16`` and ``float32``, the results dtype will be ``float32``. This may require copying data and coercing values, which may be expensive. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray` copy : bool, default False Whether to ensure that the returned value is a not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that a copy is made, even if not strictly necessary. Returns ------- numpy.ndarray See Also -------- Series.to_numpy : Similar method for Series. Examples -------- >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy() array([[1, 3], [2, 4]]) With heterogenous data, the lowest common type will have to be used. >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}) >>> df.to_numpy() array([[1. , 3. ], [2. , 4.5]]) For a mix of numeric and non-numeric types, the output array will have object dtype. >>> df['C'] = pd.date_range('2000', periods=2) >>> df.to_numpy() array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object) """ result = np.array(self.values, dtype=dtype, copy=copy) return result
Convert the DataFrame to a NumPy array. .. versionadded:: 0.24.0 By default, the dtype of the returned array will be the common NumPy dtype of all types in the DataFrame. For example, if the dtypes are ``float16`` and ``float32``, the results dtype will be ``float32``. This may require copying data and coercing values, which may be expensive. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray` copy : bool, default False Whether to ensure that the returned value is a not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that a copy is made, even if not strictly necessary. Returns ------- numpy.ndarray See Also -------- Series.to_numpy : Similar method for Series. Examples -------- >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy() array([[1, 3], [2, 4]]) With heterogenous data, the lowest common type will have to be used. >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}) >>> df.to_numpy() array([[1. , 3. ], [2. , 4.5]]) For a mix of numeric and non-numeric types, the output array will have object dtype. >>> df['C'] = pd.date_range('2000', periods=2) >>> df.to_numpy() array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
Below is the the instruction that describes the task: ### Input: Convert the DataFrame to a NumPy array. .. versionadded:: 0.24.0 By default, the dtype of the returned array will be the common NumPy dtype of all types in the DataFrame. For example, if the dtypes are ``float16`` and ``float32``, the results dtype will be ``float32``. This may require copying data and coercing values, which may be expensive. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray` copy : bool, default False Whether to ensure that the returned value is a not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that a copy is made, even if not strictly necessary. Returns ------- numpy.ndarray See Also -------- Series.to_numpy : Similar method for Series. Examples -------- >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy() array([[1, 3], [2, 4]]) With heterogenous data, the lowest common type will have to be used. >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}) >>> df.to_numpy() array([[1. , 3. ], [2. , 4.5]]) For a mix of numeric and non-numeric types, the output array will have object dtype. >>> df['C'] = pd.date_range('2000', periods=2) >>> df.to_numpy() array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object) ### Response: def to_numpy(self, dtype=None, copy=False): """ Convert the DataFrame to a NumPy array. .. versionadded:: 0.24.0 By default, the dtype of the returned array will be the common NumPy dtype of all types in the DataFrame. For example, if the dtypes are ``float16`` and ``float32``, the results dtype will be ``float32``. This may require copying data and coercing values, which may be expensive. 
Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray` copy : bool, default False Whether to ensure that the returned value is a not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that a copy is made, even if not strictly necessary. Returns ------- numpy.ndarray See Also -------- Series.to_numpy : Similar method for Series. Examples -------- >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy() array([[1, 3], [2, 4]]) With heterogenous data, the lowest common type will have to be used. >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}) >>> df.to_numpy() array([[1. , 3. ], [2. , 4.5]]) For a mix of numeric and non-numeric types, the output array will have object dtype. >>> df['C'] = pd.date_range('2000', periods=2) >>> df.to_numpy() array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object) """ result = np.array(self.values, dtype=dtype, copy=copy) return result
def request_view(request, request_pk): ''' The view of a single request. ''' if request.is_ajax(): if not request.user.is_authenticated(): return HttpResponse(json.dumps(dict()), content_type="application/json") try: relevant_request = Request.objects.get(pk=request_pk) except Request.DoesNotExist: return HttpResponse(json.dumps(dict()), content_type="application/json") try: user_profile = UserProfile.objects.get(user=request.user) except UserProfile.DoesNotExist: return HttpResponse(json.dumps(dict()), content_type="application/json") upvote = user_profile in relevant_request.upvotes.all() vote_form = VoteForm( request.POST if "upvote" in request.POST else None, profile=user_profile, request=relevant_request, ) if vote_form.is_valid(): vote_form.save() response = dict() response['vote_count_{pk}'.format(pk=request_pk)] = \ relevant_request.upvotes.all().count() list_string = 'vote_list_{pk}'.format(pk=request_pk) vote_string = 'in_votes_{pk}'.format(pk=request_pk) count_string = 'vote_count_{pk}'.format(pk=request_pk) response[list_string], response[vote_string], \ response[count_string] = build_ajax_votes( relevant_request, user_profile ) return HttpResponse(json.dumps(response), content_type="application/json") return HttpResponse(json.dumps(dict()), content_type="application/json") relevant_request = get_object_or_404(Request, pk=request_pk) if relevant_request.private: if relevant_request.owner.user != request.user or \ relevant_request.request_type.managers.filter(incumbent__user=request.user): return HttpResponseRedirect( reverse("managers:requests", kwargs={"requestType": relevant_request.request_type.url_name})) userProfile = UserProfile.objects.get(user=request.user) request_responses = Response.objects.filter(request=relevant_request) relevant_managers = relevant_request.request_type.managers.filter(active=True) manager = any(i.incumbent == userProfile for i in relevant_managers) if manager: response_form = ManagerResponseForm( request.POST if 
"add_response" in request.POST else None, initial={'action': Response.NONE}, profile=userProfile, request=relevant_request, ) else: response_form = ResponseForm( request.POST if "add_response" in request.POST else None, profile=userProfile, request=relevant_request, prefix="response", ) upvote = userProfile in relevant_request.upvotes.all() vote_form = VoteForm( request.POST if "upvote" in request.POST else None, profile=userProfile, request=relevant_request, ) if response_form.is_valid(): response_form.save() return HttpResponseRedirect(reverse('managers:view_request', kwargs={ 'request_pk': relevant_request.pk, })) if vote_form.is_valid(): vote_form.save() return HttpResponseRedirect(reverse('managers:view_request', kwargs={ 'request_pk': relevant_request.pk, })) upvote = userProfile in relevant_request.upvotes.all() return render_to_response('view_request.html', { 'page_name': "View Request", 'relevant_request': relevant_request, 'request_responses': request_responses, 'upvote': upvote, 'vote_form': vote_form, 'response_form': response_form, 'relevant_managers': relevant_managers, }, context_instance=RequestContext(request))
The view of a single request.
Below is the the instruction that describes the task: ### Input: The view of a single request. ### Response: def request_view(request, request_pk): ''' The view of a single request. ''' if request.is_ajax(): if not request.user.is_authenticated(): return HttpResponse(json.dumps(dict()), content_type="application/json") try: relevant_request = Request.objects.get(pk=request_pk) except Request.DoesNotExist: return HttpResponse(json.dumps(dict()), content_type="application/json") try: user_profile = UserProfile.objects.get(user=request.user) except UserProfile.DoesNotExist: return HttpResponse(json.dumps(dict()), content_type="application/json") upvote = user_profile in relevant_request.upvotes.all() vote_form = VoteForm( request.POST if "upvote" in request.POST else None, profile=user_profile, request=relevant_request, ) if vote_form.is_valid(): vote_form.save() response = dict() response['vote_count_{pk}'.format(pk=request_pk)] = \ relevant_request.upvotes.all().count() list_string = 'vote_list_{pk}'.format(pk=request_pk) vote_string = 'in_votes_{pk}'.format(pk=request_pk) count_string = 'vote_count_{pk}'.format(pk=request_pk) response[list_string], response[vote_string], \ response[count_string] = build_ajax_votes( relevant_request, user_profile ) return HttpResponse(json.dumps(response), content_type="application/json") return HttpResponse(json.dumps(dict()), content_type="application/json") relevant_request = get_object_or_404(Request, pk=request_pk) if relevant_request.private: if relevant_request.owner.user != request.user or \ relevant_request.request_type.managers.filter(incumbent__user=request.user): return HttpResponseRedirect( reverse("managers:requests", kwargs={"requestType": relevant_request.request_type.url_name})) userProfile = UserProfile.objects.get(user=request.user) request_responses = Response.objects.filter(request=relevant_request) relevant_managers = relevant_request.request_type.managers.filter(active=True) manager = any(i.incumbent == 
userProfile for i in relevant_managers) if manager: response_form = ManagerResponseForm( request.POST if "add_response" in request.POST else None, initial={'action': Response.NONE}, profile=userProfile, request=relevant_request, ) else: response_form = ResponseForm( request.POST if "add_response" in request.POST else None, profile=userProfile, request=relevant_request, prefix="response", ) upvote = userProfile in relevant_request.upvotes.all() vote_form = VoteForm( request.POST if "upvote" in request.POST else None, profile=userProfile, request=relevant_request, ) if response_form.is_valid(): response_form.save() return HttpResponseRedirect(reverse('managers:view_request', kwargs={ 'request_pk': relevant_request.pk, })) if vote_form.is_valid(): vote_form.save() return HttpResponseRedirect(reverse('managers:view_request', kwargs={ 'request_pk': relevant_request.pk, })) upvote = userProfile in relevant_request.upvotes.all() return render_to_response('view_request.html', { 'page_name': "View Request", 'relevant_request': relevant_request, 'request_responses': request_responses, 'upvote': upvote, 'vote_form': vote_form, 'response_form': response_form, 'relevant_managers': relevant_managers, }, context_instance=RequestContext(request))
def extract(archive_file, path=".", delete_on_success=False, enable_rar=False): """ Automatically detect archive type and extract all files to specified path. .. code:: python import os os.listdir(".") # ['test_structure.zip'] reusables.extract("test_structure.zip") os.listdir(".") # [ 'test_structure', 'test_structure.zip'] :param archive_file: path to file to extract :param path: location to extract to :param delete_on_success: Will delete the original archive if set to True :param enable_rar: include the rarfile import and extract :return: path to extracted files """ if not os.path.exists(archive_file) or not os.path.getsize(archive_file): logger.error("File {0} unextractable".format(archive_file)) raise OSError("File does not exist or has zero size") arch = None if zipfile.is_zipfile(archive_file): logger.debug("File {0} detected as a zip file".format(archive_file)) arch = zipfile.ZipFile(archive_file) elif tarfile.is_tarfile(archive_file): logger.debug("File {0} detected as a tar file".format(archive_file)) arch = tarfile.open(archive_file) elif enable_rar: import rarfile if rarfile.is_rarfile(archive_file): logger.debug("File {0} detected as " "a rar file".format(archive_file)) arch = rarfile.RarFile(archive_file) if not arch: raise TypeError("File is not a known archive") logger.debug("Extracting files to {0}".format(path)) try: arch.extractall(path=path) finally: arch.close() if delete_on_success: logger.debug("Archive {0} will now be deleted".format(archive_file)) os.unlink(archive_file) return os.path.abspath(path)
Automatically detect archive type and extract all files to specified path. .. code:: python import os os.listdir(".") # ['test_structure.zip'] reusables.extract("test_structure.zip") os.listdir(".") # [ 'test_structure', 'test_structure.zip'] :param archive_file: path to file to extract :param path: location to extract to :param delete_on_success: Will delete the original archive if set to True :param enable_rar: include the rarfile import and extract :return: path to extracted files
Below is the the instruction that describes the task: ### Input: Automatically detect archive type and extract all files to specified path. .. code:: python import os os.listdir(".") # ['test_structure.zip'] reusables.extract("test_structure.zip") os.listdir(".") # [ 'test_structure', 'test_structure.zip'] :param archive_file: path to file to extract :param path: location to extract to :param delete_on_success: Will delete the original archive if set to True :param enable_rar: include the rarfile import and extract :return: path to extracted files ### Response: def extract(archive_file, path=".", delete_on_success=False, enable_rar=False): """ Automatically detect archive type and extract all files to specified path. .. code:: python import os os.listdir(".") # ['test_structure.zip'] reusables.extract("test_structure.zip") os.listdir(".") # [ 'test_structure', 'test_structure.zip'] :param archive_file: path to file to extract :param path: location to extract to :param delete_on_success: Will delete the original archive if set to True :param enable_rar: include the rarfile import and extract :return: path to extracted files """ if not os.path.exists(archive_file) or not os.path.getsize(archive_file): logger.error("File {0} unextractable".format(archive_file)) raise OSError("File does not exist or has zero size") arch = None if zipfile.is_zipfile(archive_file): logger.debug("File {0} detected as a zip file".format(archive_file)) arch = zipfile.ZipFile(archive_file) elif tarfile.is_tarfile(archive_file): logger.debug("File {0} detected as a tar file".format(archive_file)) arch = tarfile.open(archive_file) elif enable_rar: import rarfile if rarfile.is_rarfile(archive_file): logger.debug("File {0} detected as " "a rar file".format(archive_file)) arch = rarfile.RarFile(archive_file) if not arch: raise TypeError("File is not a known archive") logger.debug("Extracting files to {0}".format(path)) try: arch.extractall(path=path) finally: arch.close() if delete_on_success: 
logger.debug("Archive {0} will now be deleted".format(archive_file)) os.unlink(archive_file) return os.path.abspath(path)
def publish(self, load): ''' Publish "load" to minions ''' payload = {'enc': 'aes'} crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value) payload['load'] = crypticle.dumps(load) if self.opts['sign_pub_messages']: master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem') log.debug("Signing data packet") payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load']) # Use the Salt IPC server if self.opts.get('ipc_mode', '') == 'tcp': pull_uri = int(self.opts.get('tcp_master_publish_pull', 4514)) else: pull_uri = os.path.join(self.opts['sock_dir'], 'publish_pull.ipc') # TODO: switch to the actual asynchronous interface #pub_sock = salt.transport.ipc.IPCMessageClient(self.opts, io_loop=self.io_loop) pub_sock = salt.utils.asynchronous.SyncWrapper( salt.transport.ipc.IPCMessageClient, (pull_uri,) ) pub_sock.connect() int_payload = {'payload': self.serial.dumps(payload)} # add some targeting stuff for lists only (for now) if load['tgt_type'] == 'list': if isinstance(load['tgt'], six.string_types): # Fetch a list of minions that match _res = self.ckminions.check_minions(load['tgt'], tgt_type=load['tgt_type']) match_ids = _res['minions'] log.debug("Publish Side Match: %s", match_ids) # Send list of miions thru so zmq can target them int_payload['topic_lst'] = match_ids else: int_payload['topic_lst'] = load['tgt'] # Send it over IPC! pub_sock.send(int_payload)
Publish "load" to minions
Below is the the instruction that describes the task: ### Input: Publish "load" to minions ### Response: def publish(self, load): ''' Publish "load" to minions ''' payload = {'enc': 'aes'} crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value) payload['load'] = crypticle.dumps(load) if self.opts['sign_pub_messages']: master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem') log.debug("Signing data packet") payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load']) # Use the Salt IPC server if self.opts.get('ipc_mode', '') == 'tcp': pull_uri = int(self.opts.get('tcp_master_publish_pull', 4514)) else: pull_uri = os.path.join(self.opts['sock_dir'], 'publish_pull.ipc') # TODO: switch to the actual asynchronous interface #pub_sock = salt.transport.ipc.IPCMessageClient(self.opts, io_loop=self.io_loop) pub_sock = salt.utils.asynchronous.SyncWrapper( salt.transport.ipc.IPCMessageClient, (pull_uri,) ) pub_sock.connect() int_payload = {'payload': self.serial.dumps(payload)} # add some targeting stuff for lists only (for now) if load['tgt_type'] == 'list': if isinstance(load['tgt'], six.string_types): # Fetch a list of minions that match _res = self.ckminions.check_minions(load['tgt'], tgt_type=load['tgt_type']) match_ids = _res['minions'] log.debug("Publish Side Match: %s", match_ids) # Send list of miions thru so zmq can target them int_payload['topic_lst'] = match_ids else: int_payload['topic_lst'] = load['tgt'] # Send it over IPC! pub_sock.send(int_payload)
def datetime( self, year, month, day, hour=0, minute=0, second=0, microsecond=0 ): # type: (int, int, int, int, int, int, int) -> datetime """ Return a normalized datetime for the current timezone. """ if _HAS_FOLD: return self.convert( datetime(year, month, day, hour, minute, second, microsecond, fold=1) ) return self.convert( datetime(year, month, day, hour, minute, second, microsecond), dst_rule=POST_TRANSITION, )
Return a normalized datetime for the current timezone.
Below is the the instruction that describes the task: ### Input: Return a normalized datetime for the current timezone. ### Response: def datetime( self, year, month, day, hour=0, minute=0, second=0, microsecond=0 ): # type: (int, int, int, int, int, int, int) -> datetime """ Return a normalized datetime for the current timezone. """ if _HAS_FOLD: return self.convert( datetime(year, month, day, hour, minute, second, microsecond, fold=1) ) return self.convert( datetime(year, month, day, hour, minute, second, microsecond), dst_rule=POST_TRANSITION, )
def create_ants_transform(transform_type='AffineTransform', precision='float', dimension=3, matrix=None, offset=None, center=None, translation=None, parameters=None, fixed_parameters=None, displacement_field=None, supported_types=False): """ Create and initialize an ANTsTransform ANTsR function: `createAntsrTransform` Arguments --------- transform_type : string type of transform(s) precision : string numerical precision dimension : integer spatial dimension of transform matrix : ndarray matrix for linear transforms offset : tuple/list offset for linear transforms center : tuple/list center for linear transforms translation : tuple/list translation for linear transforms parameters : ndarray/list array of parameters fixed_parameters : ndarray/list array of fixed parameters displacement_field : ANTsImage multichannel ANTsImage for non-linear transform supported_types : boolean flag that returns array of possible transforms types Returns ------- ANTsTransform or list of ANTsTransform types Example ------- >>> import ants >>> translation = (3,4,5) >>> tx = ants.create_ants_transform( type='Euler3DTransform', translation=translation ) """ def _check_arg(arg, dim=1): if arg is None: if dim == 1: return [] elif dim == 2: return [[]] elif isinstance(arg, np.ndarray): return arg.tolist() elif isinstance(arg, (tuple, list)): return list(arg) else: raise ValueError('Incompatible input argument') matrix = _check_arg(matrix, dim=2) offset = _check_arg(offset) center = _check_arg(center) translation = _check_arg(translation) parameters = _check_arg(parameters) fixed_parameters = _check_arg(fixed_parameters) matrix_offset_types = {'AffineTransform', 'CenteredAffineTransform', 'Euler2DTransform', 'Euler3DTransform', 'Rigid3DTransform', 'Rigid2DTransform', 'QuaternionRigidTransform', 'Similarity2DTransform', 'CenteredSimilarity2DTransform', 'Similarity3DTransform', 'CenteredRigid2DTransform', 'CenteredEuler3DTransform'} #user_matrix_types = {'Affine','CenteredAffine', # 'Euler', 
'CenteredEuler', # 'Rigid', 'CenteredRigid', 'QuaternionRigid', # 'Similarity', 'CenteredSimilarity'} if supported_types: return set(list(matrix_offset_types) + ['DisplacementFieldTransform']) # Check for valid dimension if (dimension < 2) or (dimension > 4): raise ValueError('Unsupported dimension: %i' % dimension) # Check for valid precision precision_types = ('float', 'double') if precision not in precision_types: raise ValueError('Unsupported Precision %s' % str(precision)) # Check for supported transform type if (transform_type not in matrix_offset_types) and (transform_type != 'DisplacementFieldTransform'): raise ValueError('Unsupported type %s' % str(transform_type)) # Check parameters with type if (transform_type=='Euler3DTransform'): dimension = 3 elif (transform_type=='Euler2DTransform'): dimension = 2 elif (transform_type=='Rigid3DTransform'): dimension = 3 elif (transform_type=='QuaternionRigidTransform'): dimension = 3 elif (transform_type=='Rigid2DTransform'): dimension = 2 elif (transform_type=='CenteredRigid2DTransform'): dimension = 2 elif (transform_type=='CenteredEuler3DTransform'): dimension = 3 elif (transform_type=='Similarity3DTransform'): dimension = 3 elif (transform_type=='Similarity2DTransform'): dimension = 2 elif (transform_type=='CenteredSimilarity2DTransform'): dimension = 2 # If displacement field if displacement_field is not None: raise ValueError('Displacement field transform not currently supported') # itk_tx = transform_from_displacement_field(displacement_field) # return tio.ants_transform(itk_tx) # Transforms that derive from itk::MatrixOffsetTransformBase libfn = utils.get_lib_fn('matrixOffset%s%i' % (utils.short_ptype(precision), dimension)) itk_tx = libfn(transform_type, precision, dimension, matrix, offset, center, translation, parameters, fixed_parameters) return tio.ANTsTransform(precision=precision, dimension=dimension, transform_type=transform_type, pointer=itk_tx)
Create and initialize an ANTsTransform ANTsR function: `createAntsrTransform` Arguments --------- transform_type : string type of transform(s) precision : string numerical precision dimension : integer spatial dimension of transform matrix : ndarray matrix for linear transforms offset : tuple/list offset for linear transforms center : tuple/list center for linear transforms translation : tuple/list translation for linear transforms parameters : ndarray/list array of parameters fixed_parameters : ndarray/list array of fixed parameters displacement_field : ANTsImage multichannel ANTsImage for non-linear transform supported_types : boolean flag that returns array of possible transforms types Returns ------- ANTsTransform or list of ANTsTransform types Example ------- >>> import ants >>> translation = (3,4,5) >>> tx = ants.create_ants_transform( type='Euler3DTransform', translation=translation )
Below is the the instruction that describes the task: ### Input: Create and initialize an ANTsTransform ANTsR function: `createAntsrTransform` Arguments --------- transform_type : string type of transform(s) precision : string numerical precision dimension : integer spatial dimension of transform matrix : ndarray matrix for linear transforms offset : tuple/list offset for linear transforms center : tuple/list center for linear transforms translation : tuple/list translation for linear transforms parameters : ndarray/list array of parameters fixed_parameters : ndarray/list array of fixed parameters displacement_field : ANTsImage multichannel ANTsImage for non-linear transform supported_types : boolean flag that returns array of possible transforms types Returns ------- ANTsTransform or list of ANTsTransform types Example ------- >>> import ants >>> translation = (3,4,5) >>> tx = ants.create_ants_transform( type='Euler3DTransform', translation=translation ) ### Response: def create_ants_transform(transform_type='AffineTransform', precision='float', dimension=3, matrix=None, offset=None, center=None, translation=None, parameters=None, fixed_parameters=None, displacement_field=None, supported_types=False): """ Create and initialize an ANTsTransform ANTsR function: `createAntsrTransform` Arguments --------- transform_type : string type of transform(s) precision : string numerical precision dimension : integer spatial dimension of transform matrix : ndarray matrix for linear transforms offset : tuple/list offset for linear transforms center : tuple/list center for linear transforms translation : tuple/list translation for linear transforms parameters : ndarray/list array of parameters fixed_parameters : ndarray/list array of fixed parameters displacement_field : ANTsImage multichannel ANTsImage for non-linear transform supported_types : boolean flag that returns array of possible transforms types Returns ------- ANTsTransform or list of ANTsTransform types Example 
------- >>> import ants >>> translation = (3,4,5) >>> tx = ants.create_ants_transform( type='Euler3DTransform', translation=translation ) """ def _check_arg(arg, dim=1): if arg is None: if dim == 1: return [] elif dim == 2: return [[]] elif isinstance(arg, np.ndarray): return arg.tolist() elif isinstance(arg, (tuple, list)): return list(arg) else: raise ValueError('Incompatible input argument') matrix = _check_arg(matrix, dim=2) offset = _check_arg(offset) center = _check_arg(center) translation = _check_arg(translation) parameters = _check_arg(parameters) fixed_parameters = _check_arg(fixed_parameters) matrix_offset_types = {'AffineTransform', 'CenteredAffineTransform', 'Euler2DTransform', 'Euler3DTransform', 'Rigid3DTransform', 'Rigid2DTransform', 'QuaternionRigidTransform', 'Similarity2DTransform', 'CenteredSimilarity2DTransform', 'Similarity3DTransform', 'CenteredRigid2DTransform', 'CenteredEuler3DTransform'} #user_matrix_types = {'Affine','CenteredAffine', # 'Euler', 'CenteredEuler', # 'Rigid', 'CenteredRigid', 'QuaternionRigid', # 'Similarity', 'CenteredSimilarity'} if supported_types: return set(list(matrix_offset_types) + ['DisplacementFieldTransform']) # Check for valid dimension if (dimension < 2) or (dimension > 4): raise ValueError('Unsupported dimension: %i' % dimension) # Check for valid precision precision_types = ('float', 'double') if precision not in precision_types: raise ValueError('Unsupported Precision %s' % str(precision)) # Check for supported transform type if (transform_type not in matrix_offset_types) and (transform_type != 'DisplacementFieldTransform'): raise ValueError('Unsupported type %s' % str(transform_type)) # Check parameters with type if (transform_type=='Euler3DTransform'): dimension = 3 elif (transform_type=='Euler2DTransform'): dimension = 2 elif (transform_type=='Rigid3DTransform'): dimension = 3 elif (transform_type=='QuaternionRigidTransform'): dimension = 3 elif (transform_type=='Rigid2DTransform'): dimension = 2 elif 
(transform_type=='CenteredRigid2DTransform'): dimension = 2 elif (transform_type=='CenteredEuler3DTransform'): dimension = 3 elif (transform_type=='Similarity3DTransform'): dimension = 3 elif (transform_type=='Similarity2DTransform'): dimension = 2 elif (transform_type=='CenteredSimilarity2DTransform'): dimension = 2 # If displacement field if displacement_field is not None: raise ValueError('Displacement field transform not currently supported') # itk_tx = transform_from_displacement_field(displacement_field) # return tio.ants_transform(itk_tx) # Transforms that derive from itk::MatrixOffsetTransformBase libfn = utils.get_lib_fn('matrixOffset%s%i' % (utils.short_ptype(precision), dimension)) itk_tx = libfn(transform_type, precision, dimension, matrix, offset, center, translation, parameters, fixed_parameters) return tio.ANTsTransform(precision=precision, dimension=dimension, transform_type=transform_type, pointer=itk_tx)
def acquire(self,blocking=True,timeout=None): """Attempt to acquire this lock. If the optional argument "blocking" is True and "timeout" is None, this methods blocks until is successfully acquires the lock. If "blocking" is False, it returns immediately if the lock could not be acquired. Otherwise, it blocks for at most "timeout" seconds trying to acquire the lock. In all cases, this methods returns True if the lock was successfully acquired and False otherwise. """ if timeout is None: return self.__lock.acquire(blocking) else: # Simulated timeout using progressively longer sleeps. # This is the same timeout scheme used in the stdlib Condition # class. If there's lots of contention on the lock then there's # a good chance you won't get it; but then again, Python doesn't # guarantee fairness anyway. We hope that platform-specific # extensions can provide a better mechanism. endtime = _time() + timeout delay = 0.0005 while not self.__lock.acquire(False): remaining = endtime - _time() if remaining <= 0: return False delay = min(delay*2,remaining,0.05) _sleep(delay) return True
Attempt to acquire this lock. If the optional argument "blocking" is True and "timeout" is None, this methods blocks until is successfully acquires the lock. If "blocking" is False, it returns immediately if the lock could not be acquired. Otherwise, it blocks for at most "timeout" seconds trying to acquire the lock. In all cases, this methods returns True if the lock was successfully acquired and False otherwise.
Below is the the instruction that describes the task: ### Input: Attempt to acquire this lock. If the optional argument "blocking" is True and "timeout" is None, this methods blocks until is successfully acquires the lock. If "blocking" is False, it returns immediately if the lock could not be acquired. Otherwise, it blocks for at most "timeout" seconds trying to acquire the lock. In all cases, this methods returns True if the lock was successfully acquired and False otherwise. ### Response: def acquire(self,blocking=True,timeout=None): """Attempt to acquire this lock. If the optional argument "blocking" is True and "timeout" is None, this methods blocks until is successfully acquires the lock. If "blocking" is False, it returns immediately if the lock could not be acquired. Otherwise, it blocks for at most "timeout" seconds trying to acquire the lock. In all cases, this methods returns True if the lock was successfully acquired and False otherwise. """ if timeout is None: return self.__lock.acquire(blocking) else: # Simulated timeout using progressively longer sleeps. # This is the same timeout scheme used in the stdlib Condition # class. If there's lots of contention on the lock then there's # a good chance you won't get it; but then again, Python doesn't # guarantee fairness anyway. We hope that platform-specific # extensions can provide a better mechanism. endtime = _time() + timeout delay = 0.0005 while not self.__lock.acquire(False): remaining = endtime - _time() if remaining <= 0: return False delay = min(delay*2,remaining,0.05) _sleep(delay) return True
def get_data(self): """ Retrieve the relation data for each unit involved in a relation and, if complete, store it in a list under `self[self.name]`. This is automatically called when the RelationContext is instantiated. The units are sorted lexographically first by the service ID, then by the unit ID. Thus, if an interface has two other services, 'db:1' and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', and 'db:2' having one unit, 'mediawiki/0', all of which have a complete set of data, the relation data for the units will be stored in the order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. If you only care about a single unit on the relation, you can just access it as `{{ interface[0]['key'] }}`. However, if you can at all support multiple units on a relation, you should iterate over the list, like:: {% for unit in interface -%} {{ unit['key'] }}{% if not loop.last %},{% endif %} {%- endfor %} Note that since all sets of relation data from all related services and units are in a single list, if you need to know which service or unit a set of data came from, you'll need to extend this class to preserve that information. """ if not hookenv.relation_ids(self.name): return ns = self.setdefault(self.name, []) for rid in sorted(hookenv.relation_ids(self.name)): for unit in sorted(hookenv.related_units(rid)): reldata = hookenv.relation_get(rid=rid, unit=unit) if self._is_ready(reldata): ns.append(reldata)
Retrieve the relation data for each unit involved in a relation and, if complete, store it in a list under `self[self.name]`. This is automatically called when the RelationContext is instantiated. The units are sorted lexographically first by the service ID, then by the unit ID. Thus, if an interface has two other services, 'db:1' and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', and 'db:2' having one unit, 'mediawiki/0', all of which have a complete set of data, the relation data for the units will be stored in the order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. If you only care about a single unit on the relation, you can just access it as `{{ interface[0]['key'] }}`. However, if you can at all support multiple units on a relation, you should iterate over the list, like:: {% for unit in interface -%} {{ unit['key'] }}{% if not loop.last %},{% endif %} {%- endfor %} Note that since all sets of relation data from all related services and units are in a single list, if you need to know which service or unit a set of data came from, you'll need to extend this class to preserve that information.
Below is the the instruction that describes the task: ### Input: Retrieve the relation data for each unit involved in a relation and, if complete, store it in a list under `self[self.name]`. This is automatically called when the RelationContext is instantiated. The units are sorted lexographically first by the service ID, then by the unit ID. Thus, if an interface has two other services, 'db:1' and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', and 'db:2' having one unit, 'mediawiki/0', all of which have a complete set of data, the relation data for the units will be stored in the order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. If you only care about a single unit on the relation, you can just access it as `{{ interface[0]['key'] }}`. However, if you can at all support multiple units on a relation, you should iterate over the list, like:: {% for unit in interface -%} {{ unit['key'] }}{% if not loop.last %},{% endif %} {%- endfor %} Note that since all sets of relation data from all related services and units are in a single list, if you need to know which service or unit a set of data came from, you'll need to extend this class to preserve that information. ### Response: def get_data(self): """ Retrieve the relation data for each unit involved in a relation and, if complete, store it in a list under `self[self.name]`. This is automatically called when the RelationContext is instantiated. The units are sorted lexographically first by the service ID, then by the unit ID. Thus, if an interface has two other services, 'db:1' and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', and 'db:2' having one unit, 'mediawiki/0', all of which have a complete set of data, the relation data for the units will be stored in the order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. If you only care about a single unit on the relation, you can just access it as `{{ interface[0]['key'] }}`. 
However, if you can at all support multiple units on a relation, you should iterate over the list, like:: {% for unit in interface -%} {{ unit['key'] }}{% if not loop.last %},{% endif %} {%- endfor %} Note that since all sets of relation data from all related services and units are in a single list, if you need to know which service or unit a set of data came from, you'll need to extend this class to preserve that information. """ if not hookenv.relation_ids(self.name): return ns = self.setdefault(self.name, []) for rid in sorted(hookenv.relation_ids(self.name)): for unit in sorted(hookenv.related_units(rid)): reldata = hookenv.relation_get(rid=rid, unit=unit) if self._is_ready(reldata): ns.append(reldata)
def BorueErukhimovich_Powerlaw(q, C, r0, s, t, nu): """Borue-Erukhimovich model ending in a power-law. Inputs: ------- ``q``: independent variable ``C``: scaling factor ``r0``: typical el.stat. screening length ``s``: dimensionless charge concentration ``t``: dimensionless temperature ``nu``: excluded volume parameter Formula: -------- ``C*(x^2+s)/((x^2+s)(x^2+t)+1)`` where ``x=q*r0`` if ``q<qsep`` ``A*q^(-1/nu)``if ``q>qsep`` ``A`` and ``qsep`` are determined from conditions of smoothness at the cross-over. """ def get_xsep(alpha, s, t): A = alpha + 2 B = 2 * s * alpha + t * alpha + 4 * s C = s * t * alpha + alpha + alpha * s ** 2 + alpha * s * t - 2 + 2 * s ** 2 D = alpha * s ** 2 * t + alpha * s r = np.roots([A, B, C, D]) #print "get_xsep: ", alpha, s, t, r return r[r > 0][0] ** 0.5 get_B = lambda C, xsep, s, t, nu:C * (xsep ** 2 + s) / ((xsep ** 2 + s) * (xsep ** 2 + t) + 1) * xsep ** (1.0 / nu) x = q * r0 xsep = np.real_if_close(get_xsep(-1.0 / nu, s, t)) A = get_B(C, xsep, s, t, nu) return np.piecewise(q, (x < xsep, x >= xsep), (lambda a:BorueErukhimovich(a, C, r0, s, t), lambda a:A * (a * r0) ** (-1.0 / nu)))
Borue-Erukhimovich model ending in a power-law. Inputs: ------- ``q``: independent variable ``C``: scaling factor ``r0``: typical el.stat. screening length ``s``: dimensionless charge concentration ``t``: dimensionless temperature ``nu``: excluded volume parameter Formula: -------- ``C*(x^2+s)/((x^2+s)(x^2+t)+1)`` where ``x=q*r0`` if ``q<qsep`` ``A*q^(-1/nu)``if ``q>qsep`` ``A`` and ``qsep`` are determined from conditions of smoothness at the cross-over.
Below is the the instruction that describes the task: ### Input: Borue-Erukhimovich model ending in a power-law. Inputs: ------- ``q``: independent variable ``C``: scaling factor ``r0``: typical el.stat. screening length ``s``: dimensionless charge concentration ``t``: dimensionless temperature ``nu``: excluded volume parameter Formula: -------- ``C*(x^2+s)/((x^2+s)(x^2+t)+1)`` where ``x=q*r0`` if ``q<qsep`` ``A*q^(-1/nu)``if ``q>qsep`` ``A`` and ``qsep`` are determined from conditions of smoothness at the cross-over. ### Response: def BorueErukhimovich_Powerlaw(q, C, r0, s, t, nu): """Borue-Erukhimovich model ending in a power-law. Inputs: ------- ``q``: independent variable ``C``: scaling factor ``r0``: typical el.stat. screening length ``s``: dimensionless charge concentration ``t``: dimensionless temperature ``nu``: excluded volume parameter Formula: -------- ``C*(x^2+s)/((x^2+s)(x^2+t)+1)`` where ``x=q*r0`` if ``q<qsep`` ``A*q^(-1/nu)``if ``q>qsep`` ``A`` and ``qsep`` are determined from conditions of smoothness at the cross-over. """ def get_xsep(alpha, s, t): A = alpha + 2 B = 2 * s * alpha + t * alpha + 4 * s C = s * t * alpha + alpha + alpha * s ** 2 + alpha * s * t - 2 + 2 * s ** 2 D = alpha * s ** 2 * t + alpha * s r = np.roots([A, B, C, D]) #print "get_xsep: ", alpha, s, t, r return r[r > 0][0] ** 0.5 get_B = lambda C, xsep, s, t, nu:C * (xsep ** 2 + s) / ((xsep ** 2 + s) * (xsep ** 2 + t) + 1) * xsep ** (1.0 / nu) x = q * r0 xsep = np.real_if_close(get_xsep(-1.0 / nu, s, t)) A = get_B(C, xsep, s, t, nu) return np.piecewise(q, (x < xsep, x >= xsep), (lambda a:BorueErukhimovich(a, C, r0, s, t), lambda a:A * (a * r0) ** (-1.0 / nu)))
def dashboard_absent( name, hosts=None, profile='grafana'): ''' Ensure the named grafana dashboard is deleted. name Name of the grafana dashboard. profile A pillar key or dict that contains a list of hosts and an elasticsearch index to use. ''' ret = {'name': name, 'result': None, 'comment': '', 'changes': {}} hosts, index = _parse_profile(profile) if not index: raise SaltInvocationError('index is a required key in the profile.') exists = __salt__['elasticsearch.exists']( index=index, id=name, doc_type='dashboard', hosts=hosts ) if exists: if __opts__['test']: ret['comment'] = 'Dashboard {0} is set to be removed.'.format( name ) return ret deleted = __salt__['elasticsearch.delete']( index=index, doc_type='dashboard', id=name, hosts=hosts ) if deleted: ret['result'] = True ret['changes']['old'] = name ret['changes']['new'] = None else: ret['result'] = False ret['comment'] = 'Failed to delete {0} dashboard.'.format(name) else: ret['result'] = True ret['comment'] = 'Dashboard {0} does not exist.'.format(name) return ret
Ensure the named grafana dashboard is deleted. name Name of the grafana dashboard. profile A pillar key or dict that contains a list of hosts and an elasticsearch index to use.
Below is the the instruction that describes the task: ### Input: Ensure the named grafana dashboard is deleted. name Name of the grafana dashboard. profile A pillar key or dict that contains a list of hosts and an elasticsearch index to use. ### Response: def dashboard_absent( name, hosts=None, profile='grafana'): ''' Ensure the named grafana dashboard is deleted. name Name of the grafana dashboard. profile A pillar key or dict that contains a list of hosts and an elasticsearch index to use. ''' ret = {'name': name, 'result': None, 'comment': '', 'changes': {}} hosts, index = _parse_profile(profile) if not index: raise SaltInvocationError('index is a required key in the profile.') exists = __salt__['elasticsearch.exists']( index=index, id=name, doc_type='dashboard', hosts=hosts ) if exists: if __opts__['test']: ret['comment'] = 'Dashboard {0} is set to be removed.'.format( name ) return ret deleted = __salt__['elasticsearch.delete']( index=index, doc_type='dashboard', id=name, hosts=hosts ) if deleted: ret['result'] = True ret['changes']['old'] = name ret['changes']['new'] = None else: ret['result'] = False ret['comment'] = 'Failed to delete {0} dashboard.'.format(name) else: ret['result'] = True ret['comment'] = 'Dashboard {0} does not exist.'.format(name) return ret
def ReplaceItem(self, document_link, new_document, options=None): """Replaces a document and returns it. :param str document_link: The link to the document. :param dict new_document: :param dict options: The request options for the request. :return: The new Document. :rtype: dict """ CosmosClient.__ValidateResource(new_document) path = base.GetPathFromLink(document_link) document_id = base.GetResourceIdOrFullNameFromLink(document_link) # Python's default arguments are evaluated once when the function is defined, not each time the function is called (like it is in say, Ruby). # This means that if you use a mutable default argument and mutate it, you will and have mutated that object for all future calls to the function as well. # So, using a non-mutable deafult in this case(None) and assigning an empty dict(mutable) inside the function so that it remains local # For more details on this gotcha, please refer http://docs.python-guide.org/en/latest/writing/gotchas/ if options is None: options = {} # Extract the document collection link and add the partition key to options collection_link = base.GetItemContainerLink(document_link) options = self._AddPartitionKey(collection_link, new_document, options) return self.Replace(new_document, path, 'docs', document_id, None, options)
Replaces a document and returns it. :param str document_link: The link to the document. :param dict new_document: :param dict options: The request options for the request. :return: The new Document. :rtype: dict
Below is the the instruction that describes the task: ### Input: Replaces a document and returns it. :param str document_link: The link to the document. :param dict new_document: :param dict options: The request options for the request. :return: The new Document. :rtype: dict ### Response: def ReplaceItem(self, document_link, new_document, options=None): """Replaces a document and returns it. :param str document_link: The link to the document. :param dict new_document: :param dict options: The request options for the request. :return: The new Document. :rtype: dict """ CosmosClient.__ValidateResource(new_document) path = base.GetPathFromLink(document_link) document_id = base.GetResourceIdOrFullNameFromLink(document_link) # Python's default arguments are evaluated once when the function is defined, not each time the function is called (like it is in say, Ruby). # This means that if you use a mutable default argument and mutate it, you will and have mutated that object for all future calls to the function as well. # So, using a non-mutable deafult in this case(None) and assigning an empty dict(mutable) inside the function so that it remains local # For more details on this gotcha, please refer http://docs.python-guide.org/en/latest/writing/gotchas/ if options is None: options = {} # Extract the document collection link and add the partition key to options collection_link = base.GetItemContainerLink(document_link) options = self._AddPartitionKey(collection_link, new_document, options) return self.Replace(new_document, path, 'docs', document_id, None, options)
def filter(args): """ %prog filter gffile > filtered.gff Filter the gff file based on criteria below: (1) feature attribute values: [Identity, Coverage]. You can get this type of gff by using gmap $ gmap -f 2 .... (2) Total bp length of child features """ p = OptionParser(filter.__doc__) p.add_option("--type", default="mRNA", help="The feature to scan for the attributes [default: %default]") g1 = OptionGroup(p, "Filter by identity/coverage attribute values") g1.add_option("--id", default=95, type="float", help="Minimum identity [default: %default]") g1.add_option("--coverage", default=90, type="float", help="Minimum coverage [default: %default]") g1.add_option("--nocase", default=False, action="store_true", help="Case insensitive lookup of attribute names [default: %default]") p.add_option_group(g1) g2 = OptionGroup(p, "Filter by child feature bp length") g2.add_option("--child_ftype", default=None, type="str", help="Child featuretype to consider") g2.add_option("--child_bp", default=None, type="int", help="Filter by total bp of children of chosen ftype") p.add_option_group(g2) p.set_outfile() opts, args = p.parse_args(args) otype, oid, ocov = opts.type, opts.id, opts.coverage cftype, clenbp = opts.child_ftype, opts.child_bp id_attr, cov_attr = "Identity", "Coverage" if opts.nocase: id_attr, cov_attr = id_attr.lower(), cov_attr.lower() if len(args) != 1: sys.exit(not p.print_help()) gffile, = args gffdb = make_index(gffile) bad = set() ptype = None for g in gffdb.features_of_type(otype, order_by=('seqid', 'start')): if not ptype: parent = list(gffdb.parents(g)) ptype = parent[0].featuretype \ if len(parent) > 0 else otype if cftype and clenbp: if gffdb.children_bp(g, child_featuretype=cftype) < clenbp: bad.add(g.id) elif oid and ocov: identity = float(g.attributes[id_attr][0]) coverage = float(g.attributes[cov_attr][0]) if identity < oid or coverage < ocov: bad.add(g.id) logging.debug("{0} bad accns marked.".format(len(bad))) fw = must_open(opts.outfile, "w") for g 
in gffdb.features_of_type(ptype, order_by=('seqid', 'start')): if ptype != otype: feats = list(gffdb.children(g, featuretype=otype, order_by=('start'))) ok_feats = [f for f in feats if f.id not in bad] if len(ok_feats) > 0: print(g, file=fw) for feat in ok_feats: print(feat, file=fw) for child in gffdb.children(feat, order_by=('start')): print(child, file=fw) else: if g.id not in bad: print(g, file=fw) for child in gffdb.children(g, order_by=('start')): print(child, file=fw) fw.close()
%prog filter gffile > filtered.gff Filter the gff file based on criteria below: (1) feature attribute values: [Identity, Coverage]. You can get this type of gff by using gmap $ gmap -f 2 .... (2) Total bp length of child features
Below is the the instruction that describes the task: ### Input: %prog filter gffile > filtered.gff Filter the gff file based on criteria below: (1) feature attribute values: [Identity, Coverage]. You can get this type of gff by using gmap $ gmap -f 2 .... (2) Total bp length of child features ### Response: def filter(args): """ %prog filter gffile > filtered.gff Filter the gff file based on criteria below: (1) feature attribute values: [Identity, Coverage]. You can get this type of gff by using gmap $ gmap -f 2 .... (2) Total bp length of child features """ p = OptionParser(filter.__doc__) p.add_option("--type", default="mRNA", help="The feature to scan for the attributes [default: %default]") g1 = OptionGroup(p, "Filter by identity/coverage attribute values") g1.add_option("--id", default=95, type="float", help="Minimum identity [default: %default]") g1.add_option("--coverage", default=90, type="float", help="Minimum coverage [default: %default]") g1.add_option("--nocase", default=False, action="store_true", help="Case insensitive lookup of attribute names [default: %default]") p.add_option_group(g1) g2 = OptionGroup(p, "Filter by child feature bp length") g2.add_option("--child_ftype", default=None, type="str", help="Child featuretype to consider") g2.add_option("--child_bp", default=None, type="int", help="Filter by total bp of children of chosen ftype") p.add_option_group(g2) p.set_outfile() opts, args = p.parse_args(args) otype, oid, ocov = opts.type, opts.id, opts.coverage cftype, clenbp = opts.child_ftype, opts.child_bp id_attr, cov_attr = "Identity", "Coverage" if opts.nocase: id_attr, cov_attr = id_attr.lower(), cov_attr.lower() if len(args) != 1: sys.exit(not p.print_help()) gffile, = args gffdb = make_index(gffile) bad = set() ptype = None for g in gffdb.features_of_type(otype, order_by=('seqid', 'start')): if not ptype: parent = list(gffdb.parents(g)) ptype = parent[0].featuretype \ if len(parent) > 0 else otype if cftype and clenbp: if 
gffdb.children_bp(g, child_featuretype=cftype) < clenbp: bad.add(g.id) elif oid and ocov: identity = float(g.attributes[id_attr][0]) coverage = float(g.attributes[cov_attr][0]) if identity < oid or coverage < ocov: bad.add(g.id) logging.debug("{0} bad accns marked.".format(len(bad))) fw = must_open(opts.outfile, "w") for g in gffdb.features_of_type(ptype, order_by=('seqid', 'start')): if ptype != otype: feats = list(gffdb.children(g, featuretype=otype, order_by=('start'))) ok_feats = [f for f in feats if f.id not in bad] if len(ok_feats) > 0: print(g, file=fw) for feat in ok_feats: print(feat, file=fw) for child in gffdb.children(feat, order_by=('start')): print(child, file=fw) else: if g.id not in bad: print(g, file=fw) for child in gffdb.children(g, order_by=('start')): print(child, file=fw) fw.close()
def _ratelimit(self, http_method, url, **kwargs): """ Ensure we do not hit the rate limit. """ def time_since_last_call(): if self.callsafety['lastcalltime'] is not None: return int(time() - self.callsafety['lastcalltime']) else: return None lastlimitremaining = self.callsafety['lastlimitremaining'] if time_since_last_call() is None or time_since_last_call() >= self.ratelimit_request_interval or \ lastlimitremaining >= self.ratelimit: response = http_method(url, **kwargs) else: # We hit our limit floor and aren't quite at ratelimit_request_interval value in seconds yet.. log.warning( "Safety Limit Reached of %s remaining calls and time since last call is under %s seconds" % (self.ratelimit, self.ratelimit_request_interval) ) while time_since_last_call() < self.ratelimit_request_interval: remaining_sleep = int(self.ratelimit_request_interval - time_since_last_call()) log.debug(" -> sleeping: %s more seconds" % remaining_sleep) self.check_ratelimit_budget(1) sleep(1) response = http_method(url, **kwargs) self.callsafety['lastcalltime'] = time() self.callsafety['lastlimitremaining'] = int(response.headers.get('X-Rate-Limit-Remaining', 0)) return response
Ensure we do not hit the rate limit.
Below is the the instruction that describes the task: ### Input: Ensure we do not hit the rate limit. ### Response: def _ratelimit(self, http_method, url, **kwargs): """ Ensure we do not hit the rate limit. """ def time_since_last_call(): if self.callsafety['lastcalltime'] is not None: return int(time() - self.callsafety['lastcalltime']) else: return None lastlimitremaining = self.callsafety['lastlimitremaining'] if time_since_last_call() is None or time_since_last_call() >= self.ratelimit_request_interval or \ lastlimitremaining >= self.ratelimit: response = http_method(url, **kwargs) else: # We hit our limit floor and aren't quite at ratelimit_request_interval value in seconds yet.. log.warning( "Safety Limit Reached of %s remaining calls and time since last call is under %s seconds" % (self.ratelimit, self.ratelimit_request_interval) ) while time_since_last_call() < self.ratelimit_request_interval: remaining_sleep = int(self.ratelimit_request_interval - time_since_last_call()) log.debug(" -> sleeping: %s more seconds" % remaining_sleep) self.check_ratelimit_budget(1) sleep(1) response = http_method(url, **kwargs) self.callsafety['lastcalltime'] = time() self.callsafety['lastlimitremaining'] = int(response.headers.get('X-Rate-Limit-Remaining', 0)) return response
def pad_for_tpu(shapes_dict, hparams, max_length): """Pads unknown features' dimensions for TPU.""" padded_shapes = {} def get_filler(specified_max_length): if not specified_max_length: return max_length return min(specified_max_length, max_length) inputs_none_filler = get_filler(hparams.max_input_seq_length) targets_none_filler = get_filler(hparams.max_target_seq_length) def pad_one_shape(shape, none_filler): return [ (dim if dim is not None else none_filler) for dim in shape.as_list() ] for key, shape in six.iteritems(shapes_dict): if key == "inputs": padded_shapes[key] = pad_one_shape(shape, inputs_none_filler) elif key == "targets": padded_shapes[key] = pad_one_shape(shape, targets_none_filler) else: padded_shapes[key] = pad_one_shape(shape, max_length) return padded_shapes
Pads unknown features' dimensions for TPU.
Below is the the instruction that describes the task: ### Input: Pads unknown features' dimensions for TPU. ### Response: def pad_for_tpu(shapes_dict, hparams, max_length): """Pads unknown features' dimensions for TPU.""" padded_shapes = {} def get_filler(specified_max_length): if not specified_max_length: return max_length return min(specified_max_length, max_length) inputs_none_filler = get_filler(hparams.max_input_seq_length) targets_none_filler = get_filler(hparams.max_target_seq_length) def pad_one_shape(shape, none_filler): return [ (dim if dim is not None else none_filler) for dim in shape.as_list() ] for key, shape in six.iteritems(shapes_dict): if key == "inputs": padded_shapes[key] = pad_one_shape(shape, inputs_none_filler) elif key == "targets": padded_shapes[key] = pad_one_shape(shape, targets_none_filler) else: padded_shapes[key] = pad_one_shape(shape, max_length) return padded_shapes
def accept_freeware_license(): '''different Eagle versions need differnt TAB count. 6.5 -> 2 6.6 -> 3 7.4 -> 2 ''' ntab = 3 if version().startswith('6.6.') else 2 for _ in range(ntab): EasyProcess('xdotool key KP_Tab').call() time.sleep(0.5) EasyProcess('xdotool key KP_Space').call() time.sleep(0.5) # say OK to any more question EasyProcess('xdotool key KP_Space').call()
different Eagle versions need differnt TAB count. 6.5 -> 2 6.6 -> 3 7.4 -> 2
Below is the the instruction that describes the task: ### Input: different Eagle versions need differnt TAB count. 6.5 -> 2 6.6 -> 3 7.4 -> 2 ### Response: def accept_freeware_license(): '''different Eagle versions need differnt TAB count. 6.5 -> 2 6.6 -> 3 7.4 -> 2 ''' ntab = 3 if version().startswith('6.6.') else 2 for _ in range(ntab): EasyProcess('xdotool key KP_Tab').call() time.sleep(0.5) EasyProcess('xdotool key KP_Space').call() time.sleep(0.5) # say OK to any more question EasyProcess('xdotool key KP_Space').call()
def path(self, *paths, **kwargs): """Create new Path based on self.root and provided paths. :param paths: List of sub paths :param kwargs: required=False :rtype: Path """ return self.__class__(self.__root__, *paths, **kwargs)
Create new Path based on self.root and provided paths. :param paths: List of sub paths :param kwargs: required=False :rtype: Path
Below is the the instruction that describes the task: ### Input: Create new Path based on self.root and provided paths. :param paths: List of sub paths :param kwargs: required=False :rtype: Path ### Response: def path(self, *paths, **kwargs): """Create new Path based on self.root and provided paths. :param paths: List of sub paths :param kwargs: required=False :rtype: Path """ return self.__class__(self.__root__, *paths, **kwargs)
def get_user_group(user_group): """ Formats a user and group in the format ``user:group``, as needed for `chown`. If user_group is a tuple, this is used for the fomatting. If a string or integer is given, it will be formatted as ``user:user``. Otherwise the input is returned - this method does not perform any more checks. :param user_group: User name, user id, user and group in format ``user:group``, ``user_id:group_id``, or tuple of ``(user, group)``. :type user_group: unicode | str | int | tuple :return: Formatted string with in the format ``user:group``. :rtype: unicode | str """ if isinstance(user_group, tuple): return '{0}:{1}'.format(*user_group) elif isinstance(user_group, six.integer_types) or ':' not in user_group: return '{0}:{0}'.format(user_group) return user_group
Formats a user and group in the format ``user:group``, as needed for `chown`. If user_group is a tuple, this is used for the fomatting. If a string or integer is given, it will be formatted as ``user:user``. Otherwise the input is returned - this method does not perform any more checks. :param user_group: User name, user id, user and group in format ``user:group``, ``user_id:group_id``, or tuple of ``(user, group)``. :type user_group: unicode | str | int | tuple :return: Formatted string with in the format ``user:group``. :rtype: unicode | str
Below is the the instruction that describes the task: ### Input: Formats a user and group in the format ``user:group``, as needed for `chown`. If user_group is a tuple, this is used for the fomatting. If a string or integer is given, it will be formatted as ``user:user``. Otherwise the input is returned - this method does not perform any more checks. :param user_group: User name, user id, user and group in format ``user:group``, ``user_id:group_id``, or tuple of ``(user, group)``. :type user_group: unicode | str | int | tuple :return: Formatted string with in the format ``user:group``. :rtype: unicode | str ### Response: def get_user_group(user_group): """ Formats a user and group in the format ``user:group``, as needed for `chown`. If user_group is a tuple, this is used for the fomatting. If a string or integer is given, it will be formatted as ``user:user``. Otherwise the input is returned - this method does not perform any more checks. :param user_group: User name, user id, user and group in format ``user:group``, ``user_id:group_id``, or tuple of ``(user, group)``. :type user_group: unicode | str | int | tuple :return: Formatted string with in the format ``user:group``. :rtype: unicode | str """ if isinstance(user_group, tuple): return '{0}:{1}'.format(*user_group) elif isinstance(user_group, six.integer_types) or ':' not in user_group: return '{0}:{0}'.format(user_group) return user_group
def capakey_rest_gateway_request(url, headers={}, params={}): ''' Utility function that helps making requests to the CAPAKEY REST service. :param string url: URL to request. :param dict headers: Headers to send with the URL. :param dict params: Parameters to send with the URL. :returns: Result of the call. ''' try: res = requests.get(url, headers=headers, params=params) res.raise_for_status() return res except requests.ConnectionError as ce: raise GatewayRuntimeException( 'Could not execute request due to connection problems:\n%s' % repr(ce), ce ) except requests.HTTPError as he: raise GatewayResourceNotFoundException() except requests.RequestException as re: raise GatewayRuntimeException( 'Could not execute request due to:\n%s' % repr(re), re )
Utility function that helps making requests to the CAPAKEY REST service. :param string url: URL to request. :param dict headers: Headers to send with the URL. :param dict params: Parameters to send with the URL. :returns: Result of the call.
Below is the the instruction that describes the task: ### Input: Utility function that helps making requests to the CAPAKEY REST service. :param string url: URL to request. :param dict headers: Headers to send with the URL. :param dict params: Parameters to send with the URL. :returns: Result of the call. ### Response: def capakey_rest_gateway_request(url, headers={}, params={}): ''' Utility function that helps making requests to the CAPAKEY REST service. :param string url: URL to request. :param dict headers: Headers to send with the URL. :param dict params: Parameters to send with the URL. :returns: Result of the call. ''' try: res = requests.get(url, headers=headers, params=params) res.raise_for_status() return res except requests.ConnectionError as ce: raise GatewayRuntimeException( 'Could not execute request due to connection problems:\n%s' % repr(ce), ce ) except requests.HTTPError as he: raise GatewayResourceNotFoundException() except requests.RequestException as re: raise GatewayRuntimeException( 'Could not execute request due to:\n%s' % repr(re), re )
def get_job_image_info(project: 'Project', job: Any) -> Tuple[str, str]: """Return the image name and image tag for a job""" project_name = project.name repo_name = project_name image_name = '{}/{}'.format(conf.get('REGISTRY_URI'), repo_name) try: last_commit = project.repo.last_commit except ValueError: raise ValueError('Repo was not found for project `{}`.'.format(project)) return image_name, last_commit[0]
Return the image name and image tag for a job
Below is the the instruction that describes the task: ### Input: Return the image name and image tag for a job ### Response: def get_job_image_info(project: 'Project', job: Any) -> Tuple[str, str]: """Return the image name and image tag for a job""" project_name = project.name repo_name = project_name image_name = '{}/{}'.format(conf.get('REGISTRY_URI'), repo_name) try: last_commit = project.repo.last_commit except ValueError: raise ValueError('Repo was not found for project `{}`.'.format(project)) return image_name, last_commit[0]
def process_resource(self, req, resp, resource): """ Process the request after routing. Deserializer selection needs a resource to determine which deserializers are allowed. If a deserializer is required then it will be initialized & added to the request object for further processing. """ if req.content_required and resource: allowed = resource.deserializer_mimetypes if req.content_length in (None, 0): abort(EmptyRequestBody) elif req.content_type not in allowed: abort(ContentTypeUnsupported(allowed)) else: deserializer = self._get_deserializer(req.content_type) req.deserializer = deserializer(req, resp)
Process the request after routing. Deserializer selection needs a resource to determine which deserializers are allowed. If a deserializer is required then it will be initialized & added to the request object for further processing.
Below is the the instruction that describes the task: ### Input: Process the request after routing. Deserializer selection needs a resource to determine which deserializers are allowed. If a deserializer is required then it will be initialized & added to the request object for further processing. ### Response: def process_resource(self, req, resp, resource): """ Process the request after routing. Deserializer selection needs a resource to determine which deserializers are allowed. If a deserializer is required then it will be initialized & added to the request object for further processing. """ if req.content_required and resource: allowed = resource.deserializer_mimetypes if req.content_length in (None, 0): abort(EmptyRequestBody) elif req.content_type not in allowed: abort(ContentTypeUnsupported(allowed)) else: deserializer = self._get_deserializer(req.content_type) req.deserializer = deserializer(req, resp)
def _retrieve_and_validate_certificate_chain(self, cert_url): # type: (str) -> Certificate """Retrieve and validate certificate chain. This method validates if the URL is valid and loads and validates the certificate chain, before returning it. :param cert_url: URL for retrieving certificate chain :type cert_url: str :return The certificate chain loaded from the URL :rtype cryptography.x509.Certificate :raises: :py:class:`VerificationException` if the URL is invalid, if the loaded certificate chain is invalid """ self._validate_certificate_url(cert_url) cert_chain = self._load_cert_chain(cert_url) self._validate_cert_chain(cert_chain) return cert_chain
Retrieve and validate certificate chain. This method validates if the URL is valid and loads and validates the certificate chain, before returning it. :param cert_url: URL for retrieving certificate chain :type cert_url: str :return The certificate chain loaded from the URL :rtype cryptography.x509.Certificate :raises: :py:class:`VerificationException` if the URL is invalid, if the loaded certificate chain is invalid
Below is the the instruction that describes the task: ### Input: Retrieve and validate certificate chain. This method validates if the URL is valid and loads and validates the certificate chain, before returning it. :param cert_url: URL for retrieving certificate chain :type cert_url: str :return The certificate chain loaded from the URL :rtype cryptography.x509.Certificate :raises: :py:class:`VerificationException` if the URL is invalid, if the loaded certificate chain is invalid ### Response: def _retrieve_and_validate_certificate_chain(self, cert_url): # type: (str) -> Certificate """Retrieve and validate certificate chain. This method validates if the URL is valid and loads and validates the certificate chain, before returning it. :param cert_url: URL for retrieving certificate chain :type cert_url: str :return The certificate chain loaded from the URL :rtype cryptography.x509.Certificate :raises: :py:class:`VerificationException` if the URL is invalid, if the loaded certificate chain is invalid """ self._validate_certificate_url(cert_url) cert_chain = self._load_cert_chain(cert_url) self._validate_cert_chain(cert_chain) return cert_chain
def _get_c_valid_arr(self, x): """ Warning! Interpretation of axes is different for C code. """ orig_shape = x.shape x = np.ascontiguousarray(x.reshape(orig_shape[0], -1).T) return orig_shape, x
Warning! Interpretation of axes is different for C code.
Below is the the instruction that describes the task: ### Input: Warning! Interpretation of axes is different for C code. ### Response: def _get_c_valid_arr(self, x): """ Warning! Interpretation of axes is different for C code. """ orig_shape = x.shape x = np.ascontiguousarray(x.reshape(orig_shape[0], -1).T) return orig_shape, x
def barh(d, plt, title=None): """A convenience function for plotting a horizontal bar plot from a Counter""" labels = sorted(d, key=d.get) index = range(len(labels)) plt.yticks(index, labels) plt.barh(index, [d[v] for v in labels]) if title is not None: plt.title(title)
A convenience function for plotting a horizontal bar plot from a Counter
Below is the the instruction that describes the task: ### Input: A convenience function for plotting a horizontal bar plot from a Counter ### Response: def barh(d, plt, title=None): """A convenience function for plotting a horizontal bar plot from a Counter""" labels = sorted(d, key=d.get) index = range(len(labels)) plt.yticks(index, labels) plt.barh(index, [d[v] for v in labels]) if title is not None: plt.title(title)
def tracking_event_post(node_id): """Enqueue a TrackingEvent worker for the specified Node. """ details = request_parameter(parameter="details", optional=True) if details: details = loads(details) # check the node exists node = models.Node.query.get(node_id) if node is None: return error_response(error_type="/info POST, node does not exist") db.logger.debug( "rq: Queueing %s with for node: %s for worker_function", "TrackingEvent", node_id, ) q.enqueue( worker_function, "TrackingEvent", None, None, node_id=node_id, details=details ) return success_response(details=details)
Enqueue a TrackingEvent worker for the specified Node.
Below is the the instruction that describes the task: ### Input: Enqueue a TrackingEvent worker for the specified Node. ### Response: def tracking_event_post(node_id): """Enqueue a TrackingEvent worker for the specified Node. """ details = request_parameter(parameter="details", optional=True) if details: details = loads(details) # check the node exists node = models.Node.query.get(node_id) if node is None: return error_response(error_type="/info POST, node does not exist") db.logger.debug( "rq: Queueing %s with for node: %s for worker_function", "TrackingEvent", node_id, ) q.enqueue( worker_function, "TrackingEvent", None, None, node_id=node_id, details=details ) return success_response(details=details)
def _setup_incorporate(self): """Helper to setup incorporate functions :return (transcript sequence, cds start [1-based], cds stop [1-based], cds start index in seq [inc, 0-based], cds end index in seq [excl, 0-based]) :rtype (list, int, int, int, int) """ seq = list(self._transcript_data.transcript_sequence) # get initial start/end points; will modify these based on the variant length cds_start = self._transcript_data.cds_start cds_stop = self._transcript_data.cds_stop start_end = [] for pos in (self._var_c.posedit.pos.start, self._var_c.posedit.pos.end): # list is zero-based; seq pos is 1-based if pos.datum == Datum.CDS_START: if pos.base < 0: # 5' UTR result = cds_start - 1 else: # cds/intron if pos.offset <= 0: result = (cds_start - 1) + pos.base - 1 else: result = (cds_start - 1) + pos.base elif pos.datum == Datum.CDS_END: # 3' UTR result = cds_stop + pos.base - 1 else: raise NotImplementedError("Unsupported/unexpected location") start_end.append(result) # unpack; increment end by 1 (0-based exclusive) (start, end) = start_end end += 1 if DBG: print("len seq:{} cds_start:{} cds_stop:{} start:{} end:{}".format( len(seq), cds_start, cds_stop, start, end)) return seq, cds_start, cds_stop, start, end
Helper to setup incorporate functions :return (transcript sequence, cds start [1-based], cds stop [1-based], cds start index in seq [inc, 0-based], cds end index in seq [excl, 0-based]) :rtype (list, int, int, int, int)
Below is the the instruction that describes the task: ### Input: Helper to setup incorporate functions :return (transcript sequence, cds start [1-based], cds stop [1-based], cds start index in seq [inc, 0-based], cds end index in seq [excl, 0-based]) :rtype (list, int, int, int, int) ### Response: def _setup_incorporate(self): """Helper to setup incorporate functions :return (transcript sequence, cds start [1-based], cds stop [1-based], cds start index in seq [inc, 0-based], cds end index in seq [excl, 0-based]) :rtype (list, int, int, int, int) """ seq = list(self._transcript_data.transcript_sequence) # get initial start/end points; will modify these based on the variant length cds_start = self._transcript_data.cds_start cds_stop = self._transcript_data.cds_stop start_end = [] for pos in (self._var_c.posedit.pos.start, self._var_c.posedit.pos.end): # list is zero-based; seq pos is 1-based if pos.datum == Datum.CDS_START: if pos.base < 0: # 5' UTR result = cds_start - 1 else: # cds/intron if pos.offset <= 0: result = (cds_start - 1) + pos.base - 1 else: result = (cds_start - 1) + pos.base elif pos.datum == Datum.CDS_END: # 3' UTR result = cds_stop + pos.base - 1 else: raise NotImplementedError("Unsupported/unexpected location") start_end.append(result) # unpack; increment end by 1 (0-based exclusive) (start, end) = start_end end += 1 if DBG: print("len seq:{} cds_start:{} cds_stop:{} start:{} end:{}".format( len(seq), cds_start, cds_stop, start, end)) return seq, cds_start, cds_stop, start, end
def dock(self, other): ''' Subtract another conc from this one. This is the opposite of concatenation. For example, if ABC + DEF = ABCDEF, then logically ABCDEF - DEF = ABC. ''' # e.g. self has mults at indices [0, 1, 2, 3, 4, 5, 6] len=7 # e.g. other has mults at indices [0, 1, 2] len=3 new = list(self.mults) for i in reversed(range(len(other.mults))): # [2, 1, 0] # e.g. i = 1, j = 7 - 3 + 1 = 5 j = len(self.mults) - len(other.mults) + i new[j] = new[j].dock(other.mults[i]) if new[j].multiplier == zero: # omit that mult entirely since it has been factored out del new[j] # If the subtraction is incomplete but there is more to # other.mults, then we have a problem. For example, "ABC{2} - BC" # subtracts the C successfully but leaves something behind, # then tries to subtract the B too, which isn't possible else: if i != 0: raise Exception("Can't subtract " + repr(other) + " from " + repr(self)) return conc(*new)
Subtract another conc from this one. This is the opposite of concatenation. For example, if ABC + DEF = ABCDEF, then logically ABCDEF - DEF = ABC.
Below is the the instruction that describes the task: ### Input: Subtract another conc from this one. This is the opposite of concatenation. For example, if ABC + DEF = ABCDEF, then logically ABCDEF - DEF = ABC. ### Response: def dock(self, other): ''' Subtract another conc from this one. This is the opposite of concatenation. For example, if ABC + DEF = ABCDEF, then logically ABCDEF - DEF = ABC. ''' # e.g. self has mults at indices [0, 1, 2, 3, 4, 5, 6] len=7 # e.g. other has mults at indices [0, 1, 2] len=3 new = list(self.mults) for i in reversed(range(len(other.mults))): # [2, 1, 0] # e.g. i = 1, j = 7 - 3 + 1 = 5 j = len(self.mults) - len(other.mults) + i new[j] = new[j].dock(other.mults[i]) if new[j].multiplier == zero: # omit that mult entirely since it has been factored out del new[j] # If the subtraction is incomplete but there is more to # other.mults, then we have a problem. For example, "ABC{2} - BC" # subtracts the C successfully but leaves something behind, # then tries to subtract the B too, which isn't possible else: if i != 0: raise Exception("Can't subtract " + repr(other) + " from " + repr(self)) return conc(*new)
def get_entries(self, start=0, end=0, data_request=None, steam_ids=None): """Get leaderboard entries. :param start: start entry, not index (e.g. rank 1 is ``start=1``) :type start: :class:`int` :param end: end entry, not index (e.g. only one entry then ``start=1,end=1``) :type end: :class:`int` :param data_request: data being requested :type data_request: :class:`steam.enums.common.ELeaderboardDataRequest` :param steam_ids: list of steam ids when using :prop:`.ELeaderboardDataRequest.Users` :type steamids: :class:`list` :return: a list of entries, see ``CMsgClientLBSGetLBEntriesResponse`` :rtype: :class:`list` :raises: :class:`LookupError` on message timeout or error """ message = MsgProto(EMsg.ClientLBSGetLBEntries) message.body.app_id = self.app_id message.body.leaderboard_id = self.id message.body.range_start = start message.body.range_end = end message.body.leaderboard_data_request = self.data_request if data_request is None else data_request if steam_ids: message.body.steamids.extend(steam_ids) resp = self._steam.send_job_and_wait(message, timeout=15) if not resp: raise LookupError("Didn't receive response within 15seconds :(") if resp.eresult != EResult.OK: raise LookupError(EResult(resp.eresult)) if resp.HasField('leaderboard_entry_count'): self.entry_count = resp.leaderboard_entry_count return resp.entries
Get leaderboard entries. :param start: start entry, not index (e.g. rank 1 is ``start=1``) :type start: :class:`int` :param end: end entry, not index (e.g. only one entry then ``start=1,end=1``) :type end: :class:`int` :param data_request: data being requested :type data_request: :class:`steam.enums.common.ELeaderboardDataRequest` :param steam_ids: list of steam ids when using :prop:`.ELeaderboardDataRequest.Users` :type steamids: :class:`list` :return: a list of entries, see ``CMsgClientLBSGetLBEntriesResponse`` :rtype: :class:`list` :raises: :class:`LookupError` on message timeout or error
Below is the the instruction that describes the task: ### Input: Get leaderboard entries. :param start: start entry, not index (e.g. rank 1 is ``start=1``) :type start: :class:`int` :param end: end entry, not index (e.g. only one entry then ``start=1,end=1``) :type end: :class:`int` :param data_request: data being requested :type data_request: :class:`steam.enums.common.ELeaderboardDataRequest` :param steam_ids: list of steam ids when using :prop:`.ELeaderboardDataRequest.Users` :type steamids: :class:`list` :return: a list of entries, see ``CMsgClientLBSGetLBEntriesResponse`` :rtype: :class:`list` :raises: :class:`LookupError` on message timeout or error ### Response: def get_entries(self, start=0, end=0, data_request=None, steam_ids=None): """Get leaderboard entries. :param start: start entry, not index (e.g. rank 1 is ``start=1``) :type start: :class:`int` :param end: end entry, not index (e.g. only one entry then ``start=1,end=1``) :type end: :class:`int` :param data_request: data being requested :type data_request: :class:`steam.enums.common.ELeaderboardDataRequest` :param steam_ids: list of steam ids when using :prop:`.ELeaderboardDataRequest.Users` :type steamids: :class:`list` :return: a list of entries, see ``CMsgClientLBSGetLBEntriesResponse`` :rtype: :class:`list` :raises: :class:`LookupError` on message timeout or error """ message = MsgProto(EMsg.ClientLBSGetLBEntries) message.body.app_id = self.app_id message.body.leaderboard_id = self.id message.body.range_start = start message.body.range_end = end message.body.leaderboard_data_request = self.data_request if data_request is None else data_request if steam_ids: message.body.steamids.extend(steam_ids) resp = self._steam.send_job_and_wait(message, timeout=15) if not resp: raise LookupError("Didn't receive response within 15seconds :(") if resp.eresult != EResult.OK: raise LookupError(EResult(resp.eresult)) if resp.HasField('leaderboard_entry_count'): self.entry_count = resp.leaderboard_entry_count 
return resp.entries
def add_mandates(self, representative, rep_json): ''' Create mandates from rep data based on variant configuration ''' # Mandate in country group for party constituency if rep_json.get('parti_ratt_financier'): constituency, _ = Constituency.objects.get_or_create( name=rep_json.get('parti_ratt_financier'), country=self.france) group, _ = self.touch_model(model=Group, abbreviation=self.france.code, kind='country', name=self.france.name) _create_mandate(representative, group, constituency, 'membre') # Configurable mandates for mdef in self.variant['mandates']: if mdef.get('chamber', False): chamber = self.chamber else: chamber = None if 'from' in mdef: elems = mdef['from'](rep_json) else: elems = [rep_json] for elem in elems: name = _get_mdef_item(mdef, 'name', elem, '') abbr = _get_mdef_item(mdef, 'abbr', elem, '') group, _ = self.touch_model(model=Group, abbreviation=abbr, kind=mdef['kind'], chamber=chamber, name=name) role = _get_mdef_item(mdef, 'role', elem, 'membre') start = _get_mdef_item(mdef, 'start', elem, None) if start is not None: start = _parse_date(start) end = _get_mdef_item(mdef, 'end', elem, None) if end is not None: end = _parse_date(end) _create_mandate(representative, group, self.ch_constituency, role, start, end) logger.debug( '%s => %s: %s of "%s" (%s) %s-%s' % (rep_json['slug'], mdef['kind'], role, name, abbr, start, end))
Create mandates from rep data based on variant configuration
Below is the the instruction that describes the task: ### Input: Create mandates from rep data based on variant configuration ### Response: def add_mandates(self, representative, rep_json): ''' Create mandates from rep data based on variant configuration ''' # Mandate in country group for party constituency if rep_json.get('parti_ratt_financier'): constituency, _ = Constituency.objects.get_or_create( name=rep_json.get('parti_ratt_financier'), country=self.france) group, _ = self.touch_model(model=Group, abbreviation=self.france.code, kind='country', name=self.france.name) _create_mandate(representative, group, constituency, 'membre') # Configurable mandates for mdef in self.variant['mandates']: if mdef.get('chamber', False): chamber = self.chamber else: chamber = None if 'from' in mdef: elems = mdef['from'](rep_json) else: elems = [rep_json] for elem in elems: name = _get_mdef_item(mdef, 'name', elem, '') abbr = _get_mdef_item(mdef, 'abbr', elem, '') group, _ = self.touch_model(model=Group, abbreviation=abbr, kind=mdef['kind'], chamber=chamber, name=name) role = _get_mdef_item(mdef, 'role', elem, 'membre') start = _get_mdef_item(mdef, 'start', elem, None) if start is not None: start = _parse_date(start) end = _get_mdef_item(mdef, 'end', elem, None) if end is not None: end = _parse_date(end) _create_mandate(representative, group, self.ch_constituency, role, start, end) logger.debug( '%s => %s: %s of "%s" (%s) %s-%s' % (rep_json['slug'], mdef['kind'], role, name, abbr, start, end))
def updated(bank, key, cachedir): ''' Return the epoch of the mtime for this cache file ''' key_file = os.path.join(cachedir, os.path.normpath(bank), '{0}.p'.format(key)) if not os.path.isfile(key_file): log.warning('Cache file "%s" does not exist', key_file) return None try: return int(os.path.getmtime(key_file)) except IOError as exc: raise SaltCacheError( 'There was an error reading the mtime for "{0}": {1}'.format( key_file, exc ) )
Return the epoch of the mtime for this cache file
Below is the the instruction that describes the task: ### Input: Return the epoch of the mtime for this cache file ### Response: def updated(bank, key, cachedir): ''' Return the epoch of the mtime for this cache file ''' key_file = os.path.join(cachedir, os.path.normpath(bank), '{0}.p'.format(key)) if not os.path.isfile(key_file): log.warning('Cache file "%s" does not exist', key_file) return None try: return int(os.path.getmtime(key_file)) except IOError as exc: raise SaltCacheError( 'There was an error reading the mtime for "{0}": {1}'.format( key_file, exc ) )
def upload(self, path, docs, **params): """ A convenience method for uploading a set of dictionaries representing documents. You still need to specify the URL to upload to, which will look like ROOT_URL/projects/myaccount/project_id/docs. """ json_data = json.dumps(list(docs)) return self.post_data(path, json_data, 'application/json', **params)
A convenience method for uploading a set of dictionaries representing documents. You still need to specify the URL to upload to, which will look like ROOT_URL/projects/myaccount/project_id/docs.
Below is the the instruction that describes the task: ### Input: A convenience method for uploading a set of dictionaries representing documents. You still need to specify the URL to upload to, which will look like ROOT_URL/projects/myaccount/project_id/docs. ### Response: def upload(self, path, docs, **params): """ A convenience method for uploading a set of dictionaries representing documents. You still need to specify the URL to upload to, which will look like ROOT_URL/projects/myaccount/project_id/docs. """ json_data = json.dumps(list(docs)) return self.post_data(path, json_data, 'application/json', **params)
def generate_checks(fact): """Given a fact, generate a list of Check objects for checking it.""" yield TypeCheck(type(fact)) fact_captured = False for key, value in fact.items(): if (isinstance(key, str) and key.startswith('__') and key.endswith('__')): # Special fact feature if key == '__bind__': yield FactCapture(value) fact_captured = True else: # pragma: no cover yield FeatureCheck(key, value) else: yield FeatureCheck(key, value) # Assign the matching fact to the context if not fact_captured: yield FactCapture("__pattern_%s__" % id(fact))
Given a fact, generate a list of Check objects for checking it.
Below is the the instruction that describes the task: ### Input: Given a fact, generate a list of Check objects for checking it. ### Response: def generate_checks(fact): """Given a fact, generate a list of Check objects for checking it.""" yield TypeCheck(type(fact)) fact_captured = False for key, value in fact.items(): if (isinstance(key, str) and key.startswith('__') and key.endswith('__')): # Special fact feature if key == '__bind__': yield FactCapture(value) fact_captured = True else: # pragma: no cover yield FeatureCheck(key, value) else: yield FeatureCheck(key, value) # Assign the matching fact to the context if not fact_captured: yield FactCapture("__pattern_%s__" % id(fact))
def extract(text, default=UNKNOWN): """extracts calling convention from the text. If the calling convention could not be found, the "default"is used""" if not text: return default found = CALLING_CONVENTION_TYPES.pattern.match(text) if found: return found.group('cc') return default
extracts calling convention from the text. If the calling convention could not be found, the "default"is used
Below is the the instruction that describes the task: ### Input: extracts calling convention from the text. If the calling convention could not be found, the "default"is used ### Response: def extract(text, default=UNKNOWN): """extracts calling convention from the text. If the calling convention could not be found, the "default"is used""" if not text: return default found = CALLING_CONVENTION_TYPES.pattern.match(text) if found: return found.group('cc') return default
def create_decl_string( return_type, arguments_types, with_defaults=True): """ Returns free function type :param return_type: function return type :type return_type: :class:`type_t` :param arguments_types: list of argument :class:`type <type_t>` :rtype: :class:`free_function_type_t` """ return free_function_type_t.NAME_TEMPLATE % { 'return_type': return_type.build_decl_string(with_defaults), 'arguments': ','.join( [_f(x, with_defaults) for x in arguments_types])}
Returns free function type :param return_type: function return type :type return_type: :class:`type_t` :param arguments_types: list of argument :class:`type <type_t>` :rtype: :class:`free_function_type_t`
Below is the the instruction that describes the task: ### Input: Returns free function type :param return_type: function return type :type return_type: :class:`type_t` :param arguments_types: list of argument :class:`type <type_t>` :rtype: :class:`free_function_type_t` ### Response: def create_decl_string( return_type, arguments_types, with_defaults=True): """ Returns free function type :param return_type: function return type :type return_type: :class:`type_t` :param arguments_types: list of argument :class:`type <type_t>` :rtype: :class:`free_function_type_t` """ return free_function_type_t.NAME_TEMPLATE % { 'return_type': return_type.build_decl_string(with_defaults), 'arguments': ','.join( [_f(x, with_defaults) for x in arguments_types])}
def nx_to_ontology(graph, source_node, output_path, base_iri): """Graph nodes are ID's, and have a 'label' in the node data with the right label :param graph: :param source_node: :param str output_path: :param base_iri: """ ontology = owlready.Ontology(base_iri) parent_lookup = { source_node: types.new_class(source_node, (owlready.Thing,), kwds={"ontology": ontology}) } def recur(pnode): for neighbor in graph.neighbors(pnode): data = graph.node[neighbor] neighbor_class = types.new_class(neighbor, (parent_lookup[pnode],), kwds={"ontology": ontology}) owlready.ANNOTATIONS[neighbor_class].add_annotation(owlready.rdfs.label, data['label']) recur(neighbor) recur(source_node) ontology.save(filename=output_path)
Graph nodes are ID's, and have a 'label' in the node data with the right label :param graph: :param source_node: :param str output_path: :param base_iri:
Below is the the instruction that describes the task: ### Input: Graph nodes are ID's, and have a 'label' in the node data with the right label :param graph: :param source_node: :param str output_path: :param base_iri: ### Response: def nx_to_ontology(graph, source_node, output_path, base_iri): """Graph nodes are ID's, and have a 'label' in the node data with the right label :param graph: :param source_node: :param str output_path: :param base_iri: """ ontology = owlready.Ontology(base_iri) parent_lookup = { source_node: types.new_class(source_node, (owlready.Thing,), kwds={"ontology": ontology}) } def recur(pnode): for neighbor in graph.neighbors(pnode): data = graph.node[neighbor] neighbor_class = types.new_class(neighbor, (parent_lookup[pnode],), kwds={"ontology": ontology}) owlready.ANNOTATIONS[neighbor_class].add_annotation(owlready.rdfs.label, data['label']) recur(neighbor) recur(source_node) ontology.save(filename=output_path)
def conjugate(self): """Complex conjugate of of the product""" return self.__class__.create( *[arg.conjugate() for arg in reversed(self.args)])
Complex conjugate of of the product
Below is the the instruction that describes the task: ### Input: Complex conjugate of of the product ### Response: def conjugate(self): """Complex conjugate of of the product""" return self.__class__.create( *[arg.conjugate() for arg in reversed(self.args)])
def make_publication_epub(binders, publisher, publication_message, file): """Creates an epub file from a binder(s). Also requires publication information, meant to be used in a EPUB publication request. """ if not isinstance(binders, (list, set, tuple,)): binders = [binders] packages = [] for binder in binders: metadata = binder.metadata binder.metadata = deepcopy(metadata) binder.metadata.update({'publisher': publisher, 'publication_message': publication_message}) packages.append(_make_package(binder)) binder.metadata = metadata epub = EPUB(packages) epub.to_file(epub, file)
Creates an epub file from a binder(s). Also requires publication information, meant to be used in a EPUB publication request.
Below is the the instruction that describes the task: ### Input: Creates an epub file from a binder(s). Also requires publication information, meant to be used in a EPUB publication request. ### Response: def make_publication_epub(binders, publisher, publication_message, file): """Creates an epub file from a binder(s). Also requires publication information, meant to be used in a EPUB publication request. """ if not isinstance(binders, (list, set, tuple,)): binders = [binders] packages = [] for binder in binders: metadata = binder.metadata binder.metadata = deepcopy(metadata) binder.metadata.update({'publisher': publisher, 'publication_message': publication_message}) packages.append(_make_package(binder)) binder.metadata = metadata epub = EPUB(packages) epub.to_file(epub, file)
def Copy(self, name=None): """Returns a copy. Make a shallow copy of d. If you want a deep copy of d, use copy.deepcopy on the whole object. Args: name: string name for the new Hist """ new = copy.copy(self) new.d = copy.copy(self.d) new.name = name if name is not None else self.name return new
Returns a copy. Make a shallow copy of d. If you want a deep copy of d, use copy.deepcopy on the whole object. Args: name: string name for the new Hist
Below is the the instruction that describes the task: ### Input: Returns a copy. Make a shallow copy of d. If you want a deep copy of d, use copy.deepcopy on the whole object. Args: name: string name for the new Hist ### Response: def Copy(self, name=None): """Returns a copy. Make a shallow copy of d. If you want a deep copy of d, use copy.deepcopy on the whole object. Args: name: string name for the new Hist """ new = copy.copy(self) new.d = copy.copy(self.d) new.name = name if name is not None else self.name return new
async def multi_get(self, keys, loads_fn=None, namespace=None, _conn=None): """ Get multiple values from the cache, values not found are Nones. :param keys: list of str :param loads_fn: callable alternative to use as loads function :param namespace: str alternative namespace to use :param timeout: int or float in seconds specifying maximum timeout for the operations to last :returns: list of objs :raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout """ start = time.monotonic() loads = loads_fn or self._serializer.loads ns_keys = [self.build_key(key, namespace=namespace) for key in keys] values = [ loads(value) for value in await self._multi_get( ns_keys, encoding=self.serializer.encoding, _conn=_conn ) ] logger.debug( "MULTI_GET %s %d (%.4f)s", ns_keys, len([value for value in values if value is not None]), time.monotonic() - start, ) return values
Get multiple values from the cache, values not found are Nones. :param keys: list of str :param loads_fn: callable alternative to use as loads function :param namespace: str alternative namespace to use :param timeout: int or float in seconds specifying maximum timeout for the operations to last :returns: list of objs :raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout
Below is the the instruction that describes the task: ### Input: Get multiple values from the cache, values not found are Nones. :param keys: list of str :param loads_fn: callable alternative to use as loads function :param namespace: str alternative namespace to use :param timeout: int or float in seconds specifying maximum timeout for the operations to last :returns: list of objs :raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout ### Response: async def multi_get(self, keys, loads_fn=None, namespace=None, _conn=None): """ Get multiple values from the cache, values not found are Nones. :param keys: list of str :param loads_fn: callable alternative to use as loads function :param namespace: str alternative namespace to use :param timeout: int or float in seconds specifying maximum timeout for the operations to last :returns: list of objs :raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout """ start = time.monotonic() loads = loads_fn or self._serializer.loads ns_keys = [self.build_key(key, namespace=namespace) for key in keys] values = [ loads(value) for value in await self._multi_get( ns_keys, encoding=self.serializer.encoding, _conn=_conn ) ] logger.debug( "MULTI_GET %s %d (%.4f)s", ns_keys, len([value for value in values if value is not None]), time.monotonic() - start, ) return values
def get_port_def(port_num, proto='tcp'): ''' Given a port number and protocol, returns the port definition expected by docker-py. For TCP ports this is simply an integer, for UDP ports this is (port_num, 'udp'). port_num can also be a string in the format 'port_num/udp'. If so, the "proto" argument will be ignored. The reason we need to be able to pass in the protocol separately is because this function is sometimes invoked on data derived from a port range (e.g. '2222-2223/udp'). In these cases the protocol has already been stripped off and the port range resolved into the start and end of the range, and get_port_def() is invoked once for each port number in that range. So, rather than munge udp ports back into strings before passing them to this function, the function will see if it has a string and use the protocol from it if present. This function does not catch the TypeError or ValueError which would be raised if the port number is non-numeric. This function either needs to be run on known good input, or should be run within a try/except that catches these two exceptions. ''' try: port_num, _, port_num_proto = port_num.partition('/') except AttributeError: pass else: if port_num_proto: proto = port_num_proto try: if proto.lower() == 'udp': return int(port_num), 'udp' except AttributeError: pass return int(port_num)
Given a port number and protocol, returns the port definition expected by docker-py. For TCP ports this is simply an integer, for UDP ports this is (port_num, 'udp'). port_num can also be a string in the format 'port_num/udp'. If so, the "proto" argument will be ignored. The reason we need to be able to pass in the protocol separately is because this function is sometimes invoked on data derived from a port range (e.g. '2222-2223/udp'). In these cases the protocol has already been stripped off and the port range resolved into the start and end of the range, and get_port_def() is invoked once for each port number in that range. So, rather than munge udp ports back into strings before passing them to this function, the function will see if it has a string and use the protocol from it if present. This function does not catch the TypeError or ValueError which would be raised if the port number is non-numeric. This function either needs to be run on known good input, or should be run within a try/except that catches these two exceptions.
Below is the the instruction that describes the task: ### Input: Given a port number and protocol, returns the port definition expected by docker-py. For TCP ports this is simply an integer, for UDP ports this is (port_num, 'udp'). port_num can also be a string in the format 'port_num/udp'. If so, the "proto" argument will be ignored. The reason we need to be able to pass in the protocol separately is because this function is sometimes invoked on data derived from a port range (e.g. '2222-2223/udp'). In these cases the protocol has already been stripped off and the port range resolved into the start and end of the range, and get_port_def() is invoked once for each port number in that range. So, rather than munge udp ports back into strings before passing them to this function, the function will see if it has a string and use the protocol from it if present. This function does not catch the TypeError or ValueError which would be raised if the port number is non-numeric. This function either needs to be run on known good input, or should be run within a try/except that catches these two exceptions. ### Response: def get_port_def(port_num, proto='tcp'): ''' Given a port number and protocol, returns the port definition expected by docker-py. For TCP ports this is simply an integer, for UDP ports this is (port_num, 'udp'). port_num can also be a string in the format 'port_num/udp'. If so, the "proto" argument will be ignored. The reason we need to be able to pass in the protocol separately is because this function is sometimes invoked on data derived from a port range (e.g. '2222-2223/udp'). In these cases the protocol has already been stripped off and the port range resolved into the start and end of the range, and get_port_def() is invoked once for each port number in that range. So, rather than munge udp ports back into strings before passing them to this function, the function will see if it has a string and use the protocol from it if present. 
This function does not catch the TypeError or ValueError which would be raised if the port number is non-numeric. This function either needs to be run on known good input, or should be run within a try/except that catches these two exceptions. ''' try: port_num, _, port_num_proto = port_num.partition('/') except AttributeError: pass else: if port_num_proto: proto = port_num_proto try: if proto.lower() == 'udp': return int(port_num), 'udp' except AttributeError: pass return int(port_num)
def filter_query(self, query, filter_info, model): """Filter query according to jsonapi 1.0 :param Query query: sqlalchemy query to sort :param filter_info: filter information :type filter_info: dict or None :param DeclarativeMeta model: an sqlalchemy model :return Query: the sorted query """ if filter_info: filters = create_filters(model, filter_info, self.resource) query = query.filter(*filters) return query
Filter query according to jsonapi 1.0 :param Query query: sqlalchemy query to sort :param filter_info: filter information :type filter_info: dict or None :param DeclarativeMeta model: an sqlalchemy model :return Query: the sorted query
Below is the the instruction that describes the task: ### Input: Filter query according to jsonapi 1.0 :param Query query: sqlalchemy query to sort :param filter_info: filter information :type filter_info: dict or None :param DeclarativeMeta model: an sqlalchemy model :return Query: the sorted query ### Response: def filter_query(self, query, filter_info, model): """Filter query according to jsonapi 1.0 :param Query query: sqlalchemy query to sort :param filter_info: filter information :type filter_info: dict or None :param DeclarativeMeta model: an sqlalchemy model :return Query: the sorted query """ if filter_info: filters = create_filters(model, filter_info, self.resource) query = query.filter(*filters) return query
def run(self, dag:DAGCircuit) -> DAGCircuit: """ Run one pass of optimisation on the circuit and route for the given backend. :param dag: The circuit to optimise and route :return: The modified circuit """ circ = dagcircuit_to_tk(dag, _DROP_CONDS=self.DROP_CONDS,_BOX_UNKNOWN=self.BOX_UNKNOWN) circ, circlay = self.process_circ(circ) newdag = tk_to_dagcircuit(circ) newdag.name = dag.name finlay = dict() for i, qi in enumerate(circlay): finlay[('q', i)] = ('q', qi) newdag.final_layout = finlay return newdag
Run one pass of optimisation on the circuit and route for the given backend. :param dag: The circuit to optimise and route :return: The modified circuit
Below is the the instruction that describes the task: ### Input: Run one pass of optimisation on the circuit and route for the given backend. :param dag: The circuit to optimise and route :return: The modified circuit ### Response: def run(self, dag:DAGCircuit) -> DAGCircuit: """ Run one pass of optimisation on the circuit and route for the given backend. :param dag: The circuit to optimise and route :return: The modified circuit """ circ = dagcircuit_to_tk(dag, _DROP_CONDS=self.DROP_CONDS,_BOX_UNKNOWN=self.BOX_UNKNOWN) circ, circlay = self.process_circ(circ) newdag = tk_to_dagcircuit(circ) newdag.name = dag.name finlay = dict() for i, qi in enumerate(circlay): finlay[('q', i)] = ('q', qi) newdag.final_layout = finlay return newdag
def login(self, username, password): """ Log in. AuthenticationFailedException exception is raised if authentication fails. """ url = get_url("/login.action", scheme="https") params = { 'username': username, 'password': password, 'submit-login': '', } with contextlib.closing(self.opener.open(url, urllib.parse.urlencode(params))) as response: if not "loginAction.jsp" in response.url: raise AuthenticationError("Login failed. Unexpected redirection: %s" % response.url) if not "resultMessage=success" in response.url: raise AuthenticationError("Login failed.") self.authenticated = True
Log in. AuthenticationFailedException exception is raised if authentication fails.
Below is the the instruction that describes the task: ### Input: Log in. AuthenticationFailedException exception is raised if authentication fails. ### Response: def login(self, username, password): """ Log in. AuthenticationFailedException exception is raised if authentication fails. """ url = get_url("/login.action", scheme="https") params = { 'username': username, 'password': password, 'submit-login': '', } with contextlib.closing(self.opener.open(url, urllib.parse.urlencode(params))) as response: if not "loginAction.jsp" in response.url: raise AuthenticationError("Login failed. Unexpected redirection: %s" % response.url) if not "resultMessage=success" in response.url: raise AuthenticationError("Login failed.") self.authenticated = True
def parse(cls, request: web.Request) -> AuthWidgetData: """ Parse request as Telegram auth widget data. :param request: :return: :obj:`AuthWidgetData` :raise: :obj:`aiohttp.web.HTTPBadRequest` """ try: query = dict(request.query) query['id'] = int(query['id']) query['auth_date'] = int(query['auth_date']) widget = AuthWidgetData(**query) except (ValueError, KeyError): raise web.HTTPBadRequest(text='Invalid auth data') else: return widget
Parse request as Telegram auth widget data. :param request: :return: :obj:`AuthWidgetData` :raise: :obj:`aiohttp.web.HTTPBadRequest`
Below is the the instruction that describes the task: ### Input: Parse request as Telegram auth widget data. :param request: :return: :obj:`AuthWidgetData` :raise: :obj:`aiohttp.web.HTTPBadRequest` ### Response: def parse(cls, request: web.Request) -> AuthWidgetData: """ Parse request as Telegram auth widget data. :param request: :return: :obj:`AuthWidgetData` :raise: :obj:`aiohttp.web.HTTPBadRequest` """ try: query = dict(request.query) query['id'] = int(query['id']) query['auth_date'] = int(query['auth_date']) widget = AuthWidgetData(**query) except (ValueError, KeyError): raise web.HTTPBadRequest(text='Invalid auth data') else: return widget
def create_node(self, *args, **kwargs): """ Creates a new IOU VM. :returns: IOUVM instance """ with (yield from self._iou_id_lock): # wait for a node to be completely created before adding a new one # this is important otherwise we allocate the same application ID # when creating multiple IOU node at the same time application_id = get_next_application_id(self.nodes) node = yield from super().create_node(*args, application_id=application_id, **kwargs) return node
Creates a new IOU VM. :returns: IOUVM instance
Below is the the instruction that describes the task: ### Input: Creates a new IOU VM. :returns: IOUVM instance ### Response: def create_node(self, *args, **kwargs): """ Creates a new IOU VM. :returns: IOUVM instance """ with (yield from self._iou_id_lock): # wait for a node to be completely created before adding a new one # this is important otherwise we allocate the same application ID # when creating multiple IOU node at the same time application_id = get_next_application_id(self.nodes) node = yield from super().create_node(*args, application_id=application_id, **kwargs) return node
def type_id(self): """ A short string representing the provider implementation id used for serialization of :class:`.Credentials` and to identify the type of provider in JavaScript. The part before hyphen denotes the type of the provider, the part after hyphen denotes the class id e.g. ``oauth2.Facebook.type_id = '2-5'``, ``oauth1.Twitter.type_id = '1-5'``. """ cls = self.__class__ mod = sys.modules.get(cls.__module__) return str(self.PROVIDER_TYPE_ID) + '-' + \ str(mod.PROVIDER_ID_MAP.index(cls))
A short string representing the provider implementation id used for serialization of :class:`.Credentials` and to identify the type of provider in JavaScript. The part before hyphen denotes the type of the provider, the part after hyphen denotes the class id e.g. ``oauth2.Facebook.type_id = '2-5'``, ``oauth1.Twitter.type_id = '1-5'``.
Below is the the instruction that describes the task: ### Input: A short string representing the provider implementation id used for serialization of :class:`.Credentials` and to identify the type of provider in JavaScript. The part before hyphen denotes the type of the provider, the part after hyphen denotes the class id e.g. ``oauth2.Facebook.type_id = '2-5'``, ``oauth1.Twitter.type_id = '1-5'``. ### Response: def type_id(self): """ A short string representing the provider implementation id used for serialization of :class:`.Credentials` and to identify the type of provider in JavaScript. The part before hyphen denotes the type of the provider, the part after hyphen denotes the class id e.g. ``oauth2.Facebook.type_id = '2-5'``, ``oauth1.Twitter.type_id = '1-5'``. """ cls = self.__class__ mod = sys.modules.get(cls.__module__) return str(self.PROVIDER_TYPE_ID) + '-' + \ str(mod.PROVIDER_ID_MAP.index(cls))
def p_optional_value(t): """optional_value : value | empty""" # return value or None. t[0] = t[1] # Note this must be unsigned value = t[0] if value is None or value[0].isdigit(): return msg = '' if value[0] == '-': msg = "Can't use negative index %s" % value elif value not in name_dict: msg = "Can't derefence index %s" % value else: data = name_dict[value] if data.type != 'const': msg = "Can't use non-constant %s %s as index" % (data.type, value) elif not data.positive: msg = "Can't use negative index %s" % value if msg: global error_occurred error_occurred = True print(u"ERROR - {0:s} near line {1:d}".format(msg, t.lineno(1)))
optional_value : value | empty
Below is the the instruction that describes the task: ### Input: optional_value : value | empty ### Response: def p_optional_value(t): """optional_value : value | empty""" # return value or None. t[0] = t[1] # Note this must be unsigned value = t[0] if value is None or value[0].isdigit(): return msg = '' if value[0] == '-': msg = "Can't use negative index %s" % value elif value not in name_dict: msg = "Can't derefence index %s" % value else: data = name_dict[value] if data.type != 'const': msg = "Can't use non-constant %s %s as index" % (data.type, value) elif not data.positive: msg = "Can't use negative index %s" % value if msg: global error_occurred error_occurred = True print(u"ERROR - {0:s} near line {1:d}".format(msg, t.lineno(1)))
def get_time_interval(start_time, end_time): """ 获取两个unix时间戳之间的时间间隔 :param: * start_time: (int) 开始时间,unix 时间戳 * end_time: (int) 结束时间,unix 时间戳 :return: * interval_dict: (dict) 时间间隔字典 举例如下:: print('--- get_time_interval demo ---') import time start = int(time.time()) end = start - 98908 print(get_time_interval(end, start)) print('---') 执行结果:: --- get_time_interval demo --- {'days': 1, 'hours': 3, 'minutes': 28, 'seconds': 28} --- """ if not isinstance(start_time, int) or not isinstance(end_time, int): raise TypeError('start_time and end_time should be int, bu we got {0} and {1}'. format(type(start_time), type(end_time))) # 计算天数 time_diff = abs(end_time - start_time) days = (time_diff // (60*60*24)) # 计算小时数 remain = time_diff % (60*60*24) hours = (remain // (60*60)) # 计算分钟数 remain = remain % (60*60) minutes = (remain // 60) # 计算秒数 seconds = remain % 60 interval_dict = {"days": days, "hours": hours, "minutes": minutes, "seconds": seconds} return interval_dict
获取两个unix时间戳之间的时间间隔 :param: * start_time: (int) 开始时间,unix 时间戳 * end_time: (int) 结束时间,unix 时间戳 :return: * interval_dict: (dict) 时间间隔字典 举例如下:: print('--- get_time_interval demo ---') import time start = int(time.time()) end = start - 98908 print(get_time_interval(end, start)) print('---') 执行结果:: --- get_time_interval demo --- {'days': 1, 'hours': 3, 'minutes': 28, 'seconds': 28} ---
Below is the the instruction that describes the task: ### Input: 获取两个unix时间戳之间的时间间隔 :param: * start_time: (int) 开始时间,unix 时间戳 * end_time: (int) 结束时间,unix 时间戳 :return: * interval_dict: (dict) 时间间隔字典 举例如下:: print('--- get_time_interval demo ---') import time start = int(time.time()) end = start - 98908 print(get_time_interval(end, start)) print('---') 执行结果:: --- get_time_interval demo --- {'days': 1, 'hours': 3, 'minutes': 28, 'seconds': 28} --- ### Response: def get_time_interval(start_time, end_time): """ 获取两个unix时间戳之间的时间间隔 :param: * start_time: (int) 开始时间,unix 时间戳 * end_time: (int) 结束时间,unix 时间戳 :return: * interval_dict: (dict) 时间间隔字典 举例如下:: print('--- get_time_interval demo ---') import time start = int(time.time()) end = start - 98908 print(get_time_interval(end, start)) print('---') 执行结果:: --- get_time_interval demo --- {'days': 1, 'hours': 3, 'minutes': 28, 'seconds': 28} --- """ if not isinstance(start_time, int) or not isinstance(end_time, int): raise TypeError('start_time and end_time should be int, bu we got {0} and {1}'. format(type(start_time), type(end_time))) # 计算天数 time_diff = abs(end_time - start_time) days = (time_diff // (60*60*24)) # 计算小时数 remain = time_diff % (60*60*24) hours = (remain // (60*60)) # 计算分钟数 remain = remain % (60*60) minutes = (remain // 60) # 计算秒数 seconds = remain % 60 interval_dict = {"days": days, "hours": hours, "minutes": minutes, "seconds": seconds} return interval_dict
def find_bounds(model): """ Return the median upper and lower bound of the metabolic model. Bounds can vary from model to model. Cobrapy defaults to (-1000, 1000) but this may not be the case for merged or autogenerated models. In these cases, this function is used to iterate over all the bounds of all the reactions and find the median bound values in the model, which are then used as the 'most common' bounds. Parameters ---------- model : cobra.Model The metabolic model under investigation. """ lower_bounds = np.asarray([rxn.lower_bound for rxn in model.reactions], dtype=float) upper_bounds = np.asarray([rxn.upper_bound for rxn in model.reactions], dtype=float) lower_bound = np.nanmedian(lower_bounds[lower_bounds != 0.0]) upper_bound = np.nanmedian(upper_bounds[upper_bounds != 0.0]) if np.isnan(lower_bound): LOGGER.warning("Could not identify a median lower bound.") lower_bound = -1000.0 if np.isnan(upper_bound): LOGGER.warning("Could not identify a median upper bound.") upper_bound = 1000.0 return lower_bound, upper_bound
Return the median upper and lower bound of the metabolic model. Bounds can vary from model to model. Cobrapy defaults to (-1000, 1000) but this may not be the case for merged or autogenerated models. In these cases, this function is used to iterate over all the bounds of all the reactions and find the median bound values in the model, which are then used as the 'most common' bounds. Parameters ---------- model : cobra.Model The metabolic model under investigation.
Below is the the instruction that describes the task: ### Input: Return the median upper and lower bound of the metabolic model. Bounds can vary from model to model. Cobrapy defaults to (-1000, 1000) but this may not be the case for merged or autogenerated models. In these cases, this function is used to iterate over all the bounds of all the reactions and find the median bound values in the model, which are then used as the 'most common' bounds. Parameters ---------- model : cobra.Model The metabolic model under investigation. ### Response: def find_bounds(model): """ Return the median upper and lower bound of the metabolic model. Bounds can vary from model to model. Cobrapy defaults to (-1000, 1000) but this may not be the case for merged or autogenerated models. In these cases, this function is used to iterate over all the bounds of all the reactions and find the median bound values in the model, which are then used as the 'most common' bounds. Parameters ---------- model : cobra.Model The metabolic model under investigation. """ lower_bounds = np.asarray([rxn.lower_bound for rxn in model.reactions], dtype=float) upper_bounds = np.asarray([rxn.upper_bound for rxn in model.reactions], dtype=float) lower_bound = np.nanmedian(lower_bounds[lower_bounds != 0.0]) upper_bound = np.nanmedian(upper_bounds[upper_bounds != 0.0]) if np.isnan(lower_bound): LOGGER.warning("Could not identify a median lower bound.") lower_bound = -1000.0 if np.isnan(upper_bound): LOGGER.warning("Could not identify a median upper bound.") upper_bound = 1000.0 return lower_bound, upper_bound
def insert(self, index, *grids): """Return a copy with ``grids`` inserted before ``index``. The given grids are inserted (as a block) into ``self``, yielding a new grid whose number of dimensions is the sum of the numbers of dimensions of all involved grids. Note that no changes are made in-place. Parameters ---------- index : int The index of the dimension before which ``grids`` are to be inserted. Negative indices count backwards from ``self.ndim``. grid1, ..., gridN : `RectGrid` The grids to be inserted into ``self``. Returns ------- newgrid : `RectGrid` The enlarged grid. Examples -------- >>> g1 = RectGrid([0, 1], [-1, 0, 2]) >>> g2 = RectGrid([1], [-6, 15]) >>> g1.insert(1, g2) RectGrid( [ 0., 1.], [ 1.], [ -6., 15.], [-1., 0., 2.] ) >>> g1.insert(1, g2, g2) RectGrid( [ 0., 1.], [ 1.], [ -6., 15.], [ 1.], [ -6., 15.], [-1., 0., 2.] ) See Also -------- append """ index, index_in = safe_int_conv(index), index if not -self.ndim <= index <= self.ndim: raise IndexError('index {0} outside the valid range -{1} ... {1}' ''.format(index_in, self.ndim)) if index < 0: index += self.ndim if len(grids) == 0: # Copy of `self` return RectGrid(*self.coord_vectors) elif len(grids) == 1: # Insert single grid grid = grids[0] if not isinstance(grid, RectGrid): raise TypeError('{!r} is not a `RectGrid` instance' ''.format(grid)) new_vecs = (self.coord_vectors[:index] + grid.coord_vectors + self.coord_vectors[index:]) return RectGrid(*new_vecs) else: # Recursively insert first grid and the remaining into the result return self.insert(index, grids[0]).insert( index + grids[0].ndim, *(grids[1:]))
Return a copy with ``grids`` inserted before ``index``. The given grids are inserted (as a block) into ``self``, yielding a new grid whose number of dimensions is the sum of the numbers of dimensions of all involved grids. Note that no changes are made in-place. Parameters ---------- index : int The index of the dimension before which ``grids`` are to be inserted. Negative indices count backwards from ``self.ndim``. grid1, ..., gridN : `RectGrid` The grids to be inserted into ``self``. Returns ------- newgrid : `RectGrid` The enlarged grid. Examples -------- >>> g1 = RectGrid([0, 1], [-1, 0, 2]) >>> g2 = RectGrid([1], [-6, 15]) >>> g1.insert(1, g2) RectGrid( [ 0., 1.], [ 1.], [ -6., 15.], [-1., 0., 2.] ) >>> g1.insert(1, g2, g2) RectGrid( [ 0., 1.], [ 1.], [ -6., 15.], [ 1.], [ -6., 15.], [-1., 0., 2.] ) See Also -------- append
Below is the the instruction that describes the task: ### Input: Return a copy with ``grids`` inserted before ``index``. The given grids are inserted (as a block) into ``self``, yielding a new grid whose number of dimensions is the sum of the numbers of dimensions of all involved grids. Note that no changes are made in-place. Parameters ---------- index : int The index of the dimension before which ``grids`` are to be inserted. Negative indices count backwards from ``self.ndim``. grid1, ..., gridN : `RectGrid` The grids to be inserted into ``self``. Returns ------- newgrid : `RectGrid` The enlarged grid. Examples -------- >>> g1 = RectGrid([0, 1], [-1, 0, 2]) >>> g2 = RectGrid([1], [-6, 15]) >>> g1.insert(1, g2) RectGrid( [ 0., 1.], [ 1.], [ -6., 15.], [-1., 0., 2.] ) >>> g1.insert(1, g2, g2) RectGrid( [ 0., 1.], [ 1.], [ -6., 15.], [ 1.], [ -6., 15.], [-1., 0., 2.] ) See Also -------- append ### Response: def insert(self, index, *grids): """Return a copy with ``grids`` inserted before ``index``. The given grids are inserted (as a block) into ``self``, yielding a new grid whose number of dimensions is the sum of the numbers of dimensions of all involved grids. Note that no changes are made in-place. Parameters ---------- index : int The index of the dimension before which ``grids`` are to be inserted. Negative indices count backwards from ``self.ndim``. grid1, ..., gridN : `RectGrid` The grids to be inserted into ``self``. Returns ------- newgrid : `RectGrid` The enlarged grid. Examples -------- >>> g1 = RectGrid([0, 1], [-1, 0, 2]) >>> g2 = RectGrid([1], [-6, 15]) >>> g1.insert(1, g2) RectGrid( [ 0., 1.], [ 1.], [ -6., 15.], [-1., 0., 2.] ) >>> g1.insert(1, g2, g2) RectGrid( [ 0., 1.], [ 1.], [ -6., 15.], [ 1.], [ -6., 15.], [-1., 0., 2.] ) See Also -------- append """ index, index_in = safe_int_conv(index), index if not -self.ndim <= index <= self.ndim: raise IndexError('index {0} outside the valid range -{1} ... 
{1}' ''.format(index_in, self.ndim)) if index < 0: index += self.ndim if len(grids) == 0: # Copy of `self` return RectGrid(*self.coord_vectors) elif len(grids) == 1: # Insert single grid grid = grids[0] if not isinstance(grid, RectGrid): raise TypeError('{!r} is not a `RectGrid` instance' ''.format(grid)) new_vecs = (self.coord_vectors[:index] + grid.coord_vectors + self.coord_vectors[index:]) return RectGrid(*new_vecs) else: # Recursively insert first grid and the remaining into the result return self.insert(index, grids[0]).insert( index + grids[0].ndim, *(grids[1:]))
def next_url(request): """ Returns URL to redirect to from the ``next`` param in the request. """ next = request.GET.get("next", request.POST.get("next", "")) host = request.get_host() return next if next and is_safe_url(next, host=host) else None
Returns URL to redirect to from the ``next`` param in the request.
Below is the the instruction that describes the task: ### Input: Returns URL to redirect to from the ``next`` param in the request. ### Response: def next_url(request): """ Returns URL to redirect to from the ``next`` param in the request. """ next = request.GET.get("next", request.POST.get("next", "")) host = request.get_host() return next if next and is_safe_url(next, host=host) else None
def register_deliver_command(self, deliver_func): """ Add 'deliver' command for transferring a project to another user., :param deliver_func: function to run when user choses this option """ description = "Initiate delivery of a project to another user. Removes other user's current permissions. " \ "Send message to D4S2 service to send email and allow access to the project once user " \ "acknowledges receiving the data." deliver_parser = self.subparsers.add_parser('deliver', description=description) add_project_name_or_id_arg(deliver_parser) user_or_email = deliver_parser.add_mutually_exclusive_group(required=True) add_user_arg(user_or_email) add_email_arg(user_or_email) add_share_usernames_arg(deliver_parser) add_share_emails_arg(deliver_parser) _add_copy_project_arg(deliver_parser) _add_resend_arg(deliver_parser, "Resend delivery") include_or_exclude = deliver_parser.add_mutually_exclusive_group(required=False) _add_include_arg(include_or_exclude) _add_exclude_arg(include_or_exclude) _add_message_file(deliver_parser, "Filename containing a message to be sent with the delivery. " "Pass - to read from stdin.") deliver_parser.set_defaults(func=deliver_func)
Add 'deliver' command for transferring a project to another user., :param deliver_func: function to run when user choses this option
Below is the the instruction that describes the task: ### Input: Add 'deliver' command for transferring a project to another user., :param deliver_func: function to run when user choses this option ### Response: def register_deliver_command(self, deliver_func): """ Add 'deliver' command for transferring a project to another user., :param deliver_func: function to run when user choses this option """ description = "Initiate delivery of a project to another user. Removes other user's current permissions. " \ "Send message to D4S2 service to send email and allow access to the project once user " \ "acknowledges receiving the data." deliver_parser = self.subparsers.add_parser('deliver', description=description) add_project_name_or_id_arg(deliver_parser) user_or_email = deliver_parser.add_mutually_exclusive_group(required=True) add_user_arg(user_or_email) add_email_arg(user_or_email) add_share_usernames_arg(deliver_parser) add_share_emails_arg(deliver_parser) _add_copy_project_arg(deliver_parser) _add_resend_arg(deliver_parser, "Resend delivery") include_or_exclude = deliver_parser.add_mutually_exclusive_group(required=False) _add_include_arg(include_or_exclude) _add_exclude_arg(include_or_exclude) _add_message_file(deliver_parser, "Filename containing a message to be sent with the delivery. " "Pass - to read from stdin.") deliver_parser.set_defaults(func=deliver_func)
def import_from(self, from_loc, module_name, import_loc, names): """ (2.6, 2.7) import_from: ('from' ('.'* dotted_name | '.'+) 'import' ('*' | '(' import_as_names ')' | import_as_names)) (3.0-) # note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+) 'import' ('*' | '(' import_as_names ')' | import_as_names)) """ (dots_loc, dots_count), dotted_name_opt = module_name module_loc = module = None if dotted_name_opt: module_loc, module = dotted_name_opt lparen_loc, names, rparen_loc = names loc = from_loc.join(names[-1].loc) if rparen_loc: loc = loc.join(rparen_loc) if module == "__future__": self.add_flags([x.name for x in names]) return ast.ImportFrom(names=names, module=module, level=dots_count, keyword_loc=from_loc, dots_loc=dots_loc, module_loc=module_loc, import_loc=import_loc, lparen_loc=lparen_loc, rparen_loc=rparen_loc, loc=loc)
(2.6, 2.7) import_from: ('from' ('.'* dotted_name | '.'+) 'import' ('*' | '(' import_as_names ')' | import_as_names)) (3.0-) # note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+) 'import' ('*' | '(' import_as_names ')' | import_as_names))
Below is the the instruction that describes the task: ### Input: (2.6, 2.7) import_from: ('from' ('.'* dotted_name | '.'+) 'import' ('*' | '(' import_as_names ')' | import_as_names)) (3.0-) # note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+) 'import' ('*' | '(' import_as_names ')' | import_as_names)) ### Response: def import_from(self, from_loc, module_name, import_loc, names): """ (2.6, 2.7) import_from: ('from' ('.'* dotted_name | '.'+) 'import' ('*' | '(' import_as_names ')' | import_as_names)) (3.0-) # note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+) 'import' ('*' | '(' import_as_names ')' | import_as_names)) """ (dots_loc, dots_count), dotted_name_opt = module_name module_loc = module = None if dotted_name_opt: module_loc, module = dotted_name_opt lparen_loc, names, rparen_loc = names loc = from_loc.join(names[-1].loc) if rparen_loc: loc = loc.join(rparen_loc) if module == "__future__": self.add_flags([x.name for x in names]) return ast.ImportFrom(names=names, module=module, level=dots_count, keyword_loc=from_loc, dots_loc=dots_loc, module_loc=module_loc, import_loc=import_loc, lparen_loc=lparen_loc, rparen_loc=rparen_loc, loc=loc)
def apply(self, s, active=None): """ Apply the REPP's rewrite rules to the input string *s*. Args: s (str): the input string to process active (optional): a collection of external module names that may be applied if called Returns: a :class:`REPPResult` object containing the processed string and characterization maps """ if active is None: active = self.active return self.group.apply(s, active=active)
Apply the REPP's rewrite rules to the input string *s*. Args: s (str): the input string to process active (optional): a collection of external module names that may be applied if called Returns: a :class:`REPPResult` object containing the processed string and characterization maps
Below is the the instruction that describes the task: ### Input: Apply the REPP's rewrite rules to the input string *s*. Args: s (str): the input string to process active (optional): a collection of external module names that may be applied if called Returns: a :class:`REPPResult` object containing the processed string and characterization maps ### Response: def apply(self, s, active=None): """ Apply the REPP's rewrite rules to the input string *s*. Args: s (str): the input string to process active (optional): a collection of external module names that may be applied if called Returns: a :class:`REPPResult` object containing the processed string and characterization maps """ if active is None: active = self.active return self.group.apply(s, active=active)
def build_import_pattern(mapping1, mapping2): u""" mapping1: A dict mapping py3k modules to all possible py2k replacements mapping2: A dict mapping py2k modules to the things they do This builds a HUGE pattern to match all ways that things can be imported """ # py3k: urllib.request, py2k: ('urllib2', 'urllib') yield from_import % (all_modules_subpattern()) for py3k, py2k in mapping1.items(): name, attr = py3k.split(u'.') s_name = simple_name % (name) s_attr = simple_attr % (attr) d_name = dotted_name % (s_name, s_attr) yield name_import % (d_name) yield power_twoname % (s_name, s_attr) if attr == u'__init__': yield name_import % (s_name) yield power_onename % (s_name) yield name_import_rename % (d_name) yield from_import_rename % (s_name, s_attr, s_attr, s_attr, s_attr)
u""" mapping1: A dict mapping py3k modules to all possible py2k replacements mapping2: A dict mapping py2k modules to the things they do This builds a HUGE pattern to match all ways that things can be imported
Below is the the instruction that describes the task: ### Input: u""" mapping1: A dict mapping py3k modules to all possible py2k replacements mapping2: A dict mapping py2k modules to the things they do This builds a HUGE pattern to match all ways that things can be imported ### Response: def build_import_pattern(mapping1, mapping2): u""" mapping1: A dict mapping py3k modules to all possible py2k replacements mapping2: A dict mapping py2k modules to the things they do This builds a HUGE pattern to match all ways that things can be imported """ # py3k: urllib.request, py2k: ('urllib2', 'urllib') yield from_import % (all_modules_subpattern()) for py3k, py2k in mapping1.items(): name, attr = py3k.split(u'.') s_name = simple_name % (name) s_attr = simple_attr % (attr) d_name = dotted_name % (s_name, s_attr) yield name_import % (d_name) yield power_twoname % (s_name, s_attr) if attr == u'__init__': yield name_import % (s_name) yield power_onename % (s_name) yield name_import_rename % (d_name) yield from_import_rename % (s_name, s_attr, s_attr, s_attr, s_attr)
def sg_get_context(): r"""Get current context information Returns: tf.sg_opt class object which contains all context information """ global _context # merge current context res = tf.sg_opt() for c in _context: res += c return res
r"""Get current context information Returns: tf.sg_opt class object which contains all context information
Below is the the instruction that describes the task: ### Input: r"""Get current context information Returns: tf.sg_opt class object which contains all context information ### Response: def sg_get_context(): r"""Get current context information Returns: tf.sg_opt class object which contains all context information """ global _context # merge current context res = tf.sg_opt() for c in _context: res += c return res
def classification_tikhonov(G, y, M, tau=0):
    r"""Solve a classification problem on graph via Tikhonov minimization.

    The function first transforms :math:`y` in logits :math:`Y`, then solves

    .. math:: \operatorname*{arg min}_X \| M X - Y \|_2^2 + \tau \ tr(X^T L X)

    if :math:`\tau > 0`, and

    .. math:: \operatorname*{arg min}_X tr(X^T L X) \ \text{ s. t. } \ Y = M X

    otherwise, where :math:`X` and :math:`Y` are logits.
    The function returns the maximum of the logits.

    Parameters
    ----------
    G : :class:`pygsp.graphs.Graph`
    y : array, length G.n_vertices
        Measurements.
    M : array of boolean, length G.n_vertices
        Masking vector.
    tau : float
        Regularization parameter.

    Returns
    -------
    logits : array, length G.n_vertices
        The logits :math:`X`.

    Examples
    --------
    >>> from pygsp import graphs, learning
    >>> G = graphs.Logo()
    >>> signal = np.zeros(G.n_vertices)
    >>> signal[G.info['idx_s']] = 1
    >>> signal[G.info['idx_p']] = 2
    >>> rs = np.random.RandomState(42)
    >>> mask = rs.uniform(0, 1, G.n_vertices) > 0.5
    >>> measures = signal.copy()
    >>> measures[~mask] = np.nan
    >>> recovery = learning.classification_tikhonov(G, measures, mask, tau=0)

    Recover the class with ``np.argmax(recovery, axis=1)``.

    """
    # Work on a copy: the previous implementation zeroed the caller's array
    # in place, silently destroying the NaN markers in ``y`` (its own doctest
    # plotted ``measures`` after the call and got the clobbered values).
    y = np.asarray(y).copy()
    mask = np.asarray(M, dtype=bool)
    y[~mask] = 0
    # ``np.int`` was deprecated and removed in NumPy 1.24; the builtin
    # ``int`` is the documented replacement.
    Y = _to_logits(y.astype(int))
    # Pass the caller's M through unchanged so regression_tikhonov sees the
    # exact mask object it was given.
    return regression_tikhonov(G, Y, M, tau)
r"""Solve a classification problem on graph via Tikhonov minimization. The function first transforms :math:`y` in logits :math:`Y`, then solves .. math:: \operatorname*{arg min}_X \| M X - Y \|_2^2 + \tau \ tr(X^T L X) if :math:`\tau > 0`, and .. math:: \operatorname*{arg min}_X tr(X^T L X) \ \text{ s. t. } \ Y = M X otherwise, where :math:`X` and :math:`Y` are logits. The function returns the maximum of the logits. Parameters ---------- G : :class:`pygsp.graphs.Graph` y : array, length G.n_vertices Measurements. M : array of boolean, length G.n_vertices Masking vector. tau : float Regularization parameter. Returns ------- logits : array, length G.n_vertices The logits :math:`X`. Examples -------- >>> from pygsp import graphs, learning >>> import matplotlib.pyplot as plt >>> >>> G = graphs.Logo() Create a ground truth signal: >>> signal = np.zeros(G.n_vertices) >>> signal[G.info['idx_s']] = 1 >>> signal[G.info['idx_p']] = 2 Construct a measurement signal from a binary mask: >>> rs = np.random.RandomState(42) >>> mask = rs.uniform(0, 1, G.n_vertices) > 0.5 >>> measures = signal.copy() >>> measures[~mask] = np.nan Solve the classification problem by reconstructing the signal: >>> recovery = learning.classification_tikhonov(G, measures, mask, tau=0) Plot the results. Note that we recover the class with ``np.argmax(recovery, axis=1)``. >>> prediction = np.argmax(recovery, axis=1) >>> fig, ax = plt.subplots(2, 3, sharey=True, figsize=(10, 6)) >>> _ = G.plot_signal(signal, ax=ax[0, 0], title='Ground truth') >>> _ = G.plot_signal(measures, ax=ax[0, 1], title='Measurements') >>> _ = G.plot_signal(prediction, ax=ax[0, 2], title='Recovered class') >>> _ = G.plot_signal(recovery[:, 0], ax=ax[1, 0], title='Logit 0') >>> _ = G.plot_signal(recovery[:, 1], ax=ax[1, 1], title='Logit 1') >>> _ = G.plot_signal(recovery[:, 2], ax=ax[1, 2], title='Logit 2') >>> _ = fig.tight_layout()
Below is the the instruction that describes the task: ### Input: r"""Solve a classification problem on graph via Tikhonov minimization. The function first transforms :math:`y` in logits :math:`Y`, then solves .. math:: \operatorname*{arg min}_X \| M X - Y \|_2^2 + \tau \ tr(X^T L X) if :math:`\tau > 0`, and .. math:: \operatorname*{arg min}_X tr(X^T L X) \ \text{ s. t. } \ Y = M X otherwise, where :math:`X` and :math:`Y` are logits. The function returns the maximum of the logits. Parameters ---------- G : :class:`pygsp.graphs.Graph` y : array, length G.n_vertices Measurements. M : array of boolean, length G.n_vertices Masking vector. tau : float Regularization parameter. Returns ------- logits : array, length G.n_vertices The logits :math:`X`. Examples -------- >>> from pygsp import graphs, learning >>> import matplotlib.pyplot as plt >>> >>> G = graphs.Logo() Create a ground truth signal: >>> signal = np.zeros(G.n_vertices) >>> signal[G.info['idx_s']] = 1 >>> signal[G.info['idx_p']] = 2 Construct a measurement signal from a binary mask: >>> rs = np.random.RandomState(42) >>> mask = rs.uniform(0, 1, G.n_vertices) > 0.5 >>> measures = signal.copy() >>> measures[~mask] = np.nan Solve the classification problem by reconstructing the signal: >>> recovery = learning.classification_tikhonov(G, measures, mask, tau=0) Plot the results. Note that we recover the class with ``np.argmax(recovery, axis=1)``. 
>>> prediction = np.argmax(recovery, axis=1) >>> fig, ax = plt.subplots(2, 3, sharey=True, figsize=(10, 6)) >>> _ = G.plot_signal(signal, ax=ax[0, 0], title='Ground truth') >>> _ = G.plot_signal(measures, ax=ax[0, 1], title='Measurements') >>> _ = G.plot_signal(prediction, ax=ax[0, 2], title='Recovered class') >>> _ = G.plot_signal(recovery[:, 0], ax=ax[1, 0], title='Logit 0') >>> _ = G.plot_signal(recovery[:, 1], ax=ax[1, 1], title='Logit 1') >>> _ = G.plot_signal(recovery[:, 2], ax=ax[1, 2], title='Logit 2') >>> _ = fig.tight_layout() ### Response: def classification_tikhonov(G, y, M, tau=0): r"""Solve a classification problem on graph via Tikhonov minimization. The function first transforms :math:`y` in logits :math:`Y`, then solves .. math:: \operatorname*{arg min}_X \| M X - Y \|_2^2 + \tau \ tr(X^T L X) if :math:`\tau > 0`, and .. math:: \operatorname*{arg min}_X tr(X^T L X) \ \text{ s. t. } \ Y = M X otherwise, where :math:`X` and :math:`Y` are logits. The function returns the maximum of the logits. Parameters ---------- G : :class:`pygsp.graphs.Graph` y : array, length G.n_vertices Measurements. M : array of boolean, length G.n_vertices Masking vector. tau : float Regularization parameter. Returns ------- logits : array, length G.n_vertices The logits :math:`X`. Examples -------- >>> from pygsp import graphs, learning >>> import matplotlib.pyplot as plt >>> >>> G = graphs.Logo() Create a ground truth signal: >>> signal = np.zeros(G.n_vertices) >>> signal[G.info['idx_s']] = 1 >>> signal[G.info['idx_p']] = 2 Construct a measurement signal from a binary mask: >>> rs = np.random.RandomState(42) >>> mask = rs.uniform(0, 1, G.n_vertices) > 0.5 >>> measures = signal.copy() >>> measures[~mask] = np.nan Solve the classification problem by reconstructing the signal: >>> recovery = learning.classification_tikhonov(G, measures, mask, tau=0) Plot the results. Note that we recover the class with ``np.argmax(recovery, axis=1)``. 
>>> prediction = np.argmax(recovery, axis=1) >>> fig, ax = plt.subplots(2, 3, sharey=True, figsize=(10, 6)) >>> _ = G.plot_signal(signal, ax=ax[0, 0], title='Ground truth') >>> _ = G.plot_signal(measures, ax=ax[0, 1], title='Measurements') >>> _ = G.plot_signal(prediction, ax=ax[0, 2], title='Recovered class') >>> _ = G.plot_signal(recovery[:, 0], ax=ax[1, 0], title='Logit 0') >>> _ = G.plot_signal(recovery[:, 1], ax=ax[1, 1], title='Logit 1') >>> _ = G.plot_signal(recovery[:, 2], ax=ax[1, 2], title='Logit 2') >>> _ = fig.tight_layout() """ y[M == False] = 0 Y = _to_logits(y.astype(np.int)) return regression_tikhonov(G, Y, M, tau)
def GetDWORD(self, buff, idx=0):
    '''Internal method.  Reads a double word (4 bytes) from a buffer.

    Bytes are assembled little-endian; the sentinel value 0xFFFFFFFF is
    normalised to 0.
    '''
    b0 = buff[idx]
    b1 = buff[idx + 1]
    b2 = buff[idx + 2]
    b3 = buff[idx + 3]
    value = b0 + (b1 << 8) + (b2 << 16) + (b3 << 24)
    return 0 if value == 0xFFFFFFFF else value
Internal method. Reads a double word (4 bytes) from a buffer.
Below is the the instruction that describes the task: ### Input: Internal method. Reads a double word (4 bytes) from a buffer. ### Response: def GetDWORD(self, buff, idx=0): '''Internal method. Reads a double word (4 bytes) from a buffer. ''' result = buff[idx] + (buff[idx+1] << 8) + (buff[idx+2] << 16) + \ (buff[idx+3] << 24) if result == 0xFFFFFFFF: result = 0 return result
def project_list(self, limit=None):
    """
    Provide the project list
    :param limit: OPTIONAL 25 is default
    :return:
    """
    # Only send the limit parameter when the caller supplied a truthy value;
    # otherwise the server-side default (25) applies.
    params = {'limit': limit} if limit else {}
    response = self.get('rest/api/1.0/projects', params=params) or {}
    return response.get('values')
Provide the project list :param limit: OPTIONAL 25 is default :return:
Below is the the instruction that describes the task: ### Input: Provide the project list :param limit: OPTIONAL 25 is default :return: ### Response: def project_list(self, limit=None): """ Provide the project list :param limit: OPTIONAL 25 is default :return: """ params = {} if limit: params['limit'] = limit return (self.get('rest/api/1.0/projects', params=params) or {}).get('values')
def save(url, destination):
    """
    This is just the thread target. It's actually responsible for downloading
    and saving.

    :param str url: which dump to download
    :param str destination: a file path to save to
    """
    response = requests.get(url, stream=True)
    # Stream the body to disk in small pieces so the whole dump is never
    # held in memory at once.
    with open(destination, 'wb') as out_file:
        for block in response.iter_content(chunk_size=128):
            out_file.write(block)
This is just the thread target. It's actually responsible for downloading and saving. :param str url: which dump to download :param str destination: a file path to save to
Below is the instruction that describes the task: ### Input: This is just the thread target. It's actually responsible for downloading and saving. :param str url: which dump to download :param str destination: a file path to save to ### Response: def save(url, destination): """ This is just the thread target. It's actually responsible for downloading and saving. :param str url: which dump to download :param str destination: a file path to save to """ r = requests.get(url, stream=True) with open(destination, 'wb') as fd: for chunk in r.iter_content(chunk_size=128): fd.write(chunk)
def unload_plug_in(self, name):
    """Unloads a DBGF plug-in.

    in name of type str
        The plug-in name or DLL. Special name 'all' unloads all plug-ins.
    """
    if isinstance(name, basestring):
        self._call("unloadPlugIn", in_p=[name])
    else:
        raise TypeError("name can only be an instance of type basestring")
Unloads a DBGF plug-in. in name of type str The plug-in name or DLL. Special name 'all' unloads all plug-ins.
Below is the the instruction that describes the task: ### Input: Unloads a DBGF plug-in. in name of type str The plug-in name or DLL. Special name 'all' unloads all plug-ins. ### Response: def unload_plug_in(self, name): """Unloads a DBGF plug-in. in name of type str The plug-in name or DLL. Special name 'all' unloads all plug-ins. """ if not isinstance(name, basestring): raise TypeError("name can only be an instance of type basestring") self._call("unloadPlugIn", in_p=[name])
def get_image_output(self):
    """
    Create the output for the image

    This is the Koji Content Generator metadata, along with the
    'docker save' output to upload.

    :return: tuple, (metadata dict, Output instance)
    """
    exported = self.workflow.exported_image_sequence[-1]
    saved_path = exported.get('path')
    upload_name = get_image_upload_filename(exported,
                                            self.workflow.builder.image_id,
                                            self.platform)
    metadata = self.get_output_metadata(saved_path, upload_name)
    # NOTE(review): the file handle is handed to Output and presumably
    # closed by whoever consumes the output, not here — confirm.
    output = Output(file=open(saved_path), metadata=metadata)
    return metadata, output
Create the output for the image This is the Koji Content Generator metadata, along with the 'docker save' output to upload. :return: tuple, (metadata dict, Output instance)
Below is the the instruction that describes the task: ### Input: Create the output for the image This is the Koji Content Generator metadata, along with the 'docker save' output to upload. :return: tuple, (metadata dict, Output instance) ### Response: def get_image_output(self): """ Create the output for the image This is the Koji Content Generator metadata, along with the 'docker save' output to upload. :return: tuple, (metadata dict, Output instance) """ saved_image = self.workflow.exported_image_sequence[-1].get('path') image_name = get_image_upload_filename(self.workflow.exported_image_sequence[-1], self.workflow.builder.image_id, self.platform) metadata = self.get_output_metadata(saved_image, image_name) output = Output(file=open(saved_image), metadata=metadata) return metadata, output
def evaluate(self, x):
    """TODO: will become _evaluate once polynomial filtering is merged.

    Evaluate the filter bank at the graph frequencies given in ``x``.

    The first call computes and caches ``self._coefficients``; subsequent
    calls reuse the cache.  Values of ``x`` that do not match any graph
    eigenvalue produce NaN columns in the result.
    """
    if not hasattr(self, '_coefficients'):
        # Graph Fourier transform -> modulation -> inverse GFT.
        c = self.G.igft(self._kernels.evaluate(self.G.e).squeeze())
        c = np.sqrt(self.G.n_vertices) * self.G.U * c[:, np.newaxis]
        self._coefficients = self.G.gft(c)
    # Flatten so any input shape is accepted; restored on return.
    shape = x.shape
    x = x.flatten()
    # One column per query point, NaN where no eigenvalue matches.
    y = np.full((self.n_features_out, x.size), np.nan)
    for i in range(len(x)):
        # Exact float comparison against the eigenvalues — presumably x is
        # expected to contain values taken from self.G.e itself; verify.
        query = self._coefficients[x[i] == self.G.e]
        if len(query) != 0:
            y[:, i] = query[0]
    return y.reshape((self.n_features_out,) + shape)
TODO: will become _evaluate once polynomial filtering is merged.
Below is the the instruction that describes the task: ### Input: TODO: will become _evaluate once polynomial filtering is merged. ### Response: def evaluate(self, x): """TODO: will become _evaluate once polynomial filtering is merged.""" if not hasattr(self, '_coefficients'): # Graph Fourier transform -> modulation -> inverse GFT. c = self.G.igft(self._kernels.evaluate(self.G.e).squeeze()) c = np.sqrt(self.G.n_vertices) * self.G.U * c[:, np.newaxis] self._coefficients = self.G.gft(c) shape = x.shape x = x.flatten() y = np.full((self.n_features_out, x.size), np.nan) for i in range(len(x)): query = self._coefficients[x[i] == self.G.e] if len(query) != 0: y[:, i] = query[0] return y.reshape((self.n_features_out,) + shape)
def size(self, *args):
    """
    Set the size of the chart, args are width,height and can be tuple
    APIPARAM: chs
    """
    # Accept either size(w, h) or size((w, h)).
    if len(args) == 2:
        width, height = (int(v) for v in args)
    else:
        width, height = (int(v) for v in args[0])
    self.check_size(width, height)
    self['chs'] = '%dx%d' % (width, height)
    return self
Set the size of the chart, args are width,height and can be tuple APIPARAM: chs
Below is the the instruction that describes the task: ### Input: Set the size of the chart, args are width,height and can be tuple APIPARAM: chs ### Response: def size(self,*args): """ Set the size of the chart, args are width,height and can be tuple APIPARAM: chs """ if len(args) == 2: x,y = map(int,args) else: x,y = map(int,args[0]) self.check_size(x,y) self['chs'] = '%dx%d'%(x,y) return self
def make_qadapter(**kwargs):
    """
    Return the concrete :class:`QueueAdapter` class from a string.
    Note that one can register a customized version with:

    .. example::

        from qadapters import SlurmAdapter

        class MyAdapter(SlurmAdapter):
            QTYPE = "myslurm"
            # Add your customized code here

        # Register your class.
        SlurmAdapter.register(MyAdapter)

        make_qadapter(qtype="myslurm", **kwargs)

    .. warning::

        MyAdapter should be pickleable, hence one should declare it at
        the module level so that pickle can import it at run-time.
    """
    # Map every known QTYPE string to its QueueAdapter subclass.
    registry = {cls.QTYPE: cls for cls in all_subclasses(QueueAdapter)}

    # Deep-copy first so the pop below does not mutate the caller's dict.
    kwargs = copy.deepcopy(kwargs)
    qtype = kwargs["queue"].pop("qtype")
    return registry[qtype](**kwargs)
Return the concrete :class:`QueueAdapter` class from a string. Note that one can register a customized version with: .. example:: from qadapters import SlurmAdapter class MyAdapter(SlurmAdapter): QTYPE = "myslurm" # Add your customized code here # Register your class. SlurmAdapter.register(MyAdapter) make_qadapter(qtype="myslurm", **kwargs) .. warning:: MyAdapter should be pickleable, hence one should declare it at the module level so that pickle can import it at run-time.
Below is the the instruction that describes the task: ### Input: Return the concrete :class:`QueueAdapter` class from a string. Note that one can register a customized version with: .. example:: from qadapters import SlurmAdapter class MyAdapter(SlurmAdapter): QTYPE = "myslurm" # Add your customized code here # Register your class. SlurmAdapter.register(MyAdapter) make_qadapter(qtype="myslurm", **kwargs) .. warning:: MyAdapter should be pickleable, hence one should declare it at the module level so that pickle can import it at run-time. ### Response: def make_qadapter(**kwargs): """ Return the concrete :class:`QueueAdapter` class from a string. Note that one can register a customized version with: .. example:: from qadapters import SlurmAdapter class MyAdapter(SlurmAdapter): QTYPE = "myslurm" # Add your customized code here # Register your class. SlurmAdapter.register(MyAdapter) make_qadapter(qtype="myslurm", **kwargs) .. warning:: MyAdapter should be pickleable, hence one should declare it at the module level so that pickle can import it at run-time. """ # Get all known subclasses of QueueAdapter. d = {c.QTYPE: c for c in all_subclasses(QueueAdapter)} # Preventive copy before pop kwargs = copy.deepcopy(kwargs) qtype = kwargs["queue"].pop("qtype") return d[qtype](**kwargs)
def _update_resource_view(self, log=False):
    # type: () -> bool
    """Check if resource view exists in HDX and if so, update resource view

    Returns:
        bool: True if updated and False if not
    """
    update = False
    if 'id' in self.data and self._load_from_hdx('resource view', self.data['id']):
        # Found directly by id.
        update = True
    elif 'resource_id' in self.data:
        # No id match: fall back to matching by title among the views of
        # the resource.
        for resource_view in self.get_all_for_resource(self.data['resource_id']):
            if self.data['title'] == resource_view['title']:
                self.old_data = self.data
                self.data = resource_view.data
                update = True
                break
    if update:
        if log:
            logger.warning('resource view exists. Updating %s' % self.data['id'])
        self._merge_hdx_update('resource view', 'id')
    return update
Check if resource view exists in HDX and if so, update resource view Returns: bool: True if updated and False if not
Below is the the instruction that describes the task: ### Input: Check if resource view exists in HDX and if so, update resource view Returns: bool: True if updated and False if not ### Response: def _update_resource_view(self, log=False): # type: () -> bool """Check if resource view exists in HDX and if so, update resource view Returns: bool: True if updated and False if not """ update = False if 'id' in self.data and self._load_from_hdx('resource view', self.data['id']): update = True else: if 'resource_id' in self.data: resource_views = self.get_all_for_resource(self.data['resource_id']) for resource_view in resource_views: if self.data['title'] == resource_view['title']: self.old_data = self.data self.data = resource_view.data update = True break if update: if log: logger.warning('resource view exists. Updating %s' % self.data['id']) self._merge_hdx_update('resource view', 'id') return update
def ParsePartitionsTable(
        self, parser_mediator, database=None, table=None, **unused_kwargs):
    """Parses the Partitions table.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        database (Optional[pyesedb.file]): ESE database.
        table (Optional[pyesedb.table]): table.

    Raises:
        ValueError: if the database or table value is missing.
    """
    if database is None:
        raise ValueError('Missing database value.')
    if table is None:
        raise ValueError('Missing table value.')

    for record in table.records:
        if parser_mediator.abort:
            break

        values = self._GetRecordValues(parser_mediator, table.name, record)

        event_data = MsieWebCachePartitionsEventData()
        event_data.directory = values.get('Directory', None)
        event_data.partition_identifier = values.get('PartitionId', None)
        event_data.partition_type = values.get('PartitionType', None)
        event_data.table_identifier = values.get('TableId', None)

        # Only emit an event when a scavenge timestamp is present.
        timestamp = values.get('LastScavengeTime', None)
        if not timestamp:
            continue

        date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
        event = time_events.DateTimeValuesEvent(
            date_time, 'Last Scavenge Time')
        parser_mediator.ProduceEventWithEventData(event, event_data)
Parses the Partitions table. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. database (Optional[pyesedb.file]): ESE database. table (Optional[pyesedb.table]): table. Raises: ValueError: if the database or table value is missing.
Below is the the instruction that describes the task: ### Input: Parses the Partitions table. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. database (Optional[pyesedb.file]): ESE database. table (Optional[pyesedb.table]): table. Raises: ValueError: if the database or table value is missing. ### Response: def ParsePartitionsTable( self, parser_mediator, database=None, table=None, **unused_kwargs): """Parses the Partitions table. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. database (Optional[pyesedb.file]): ESE database. table (Optional[pyesedb.table]): table. Raises: ValueError: if the database or table value is missing. """ if database is None: raise ValueError('Missing database value.') if table is None: raise ValueError('Missing table value.') for esedb_record in table.records: if parser_mediator.abort: break record_values = self._GetRecordValues( parser_mediator, table.name, esedb_record) event_data = MsieWebCachePartitionsEventData() event_data.directory = record_values.get('Directory', None) event_data.partition_identifier = record_values.get('PartitionId', None) event_data.partition_type = record_values.get('PartitionType', None) event_data.table_identifier = record_values.get('TableId', None) timestamp = record_values.get('LastScavengeTime', None) if timestamp: date_time = dfdatetime_filetime.Filetime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, 'Last Scavenge Time') parser_mediator.ProduceEventWithEventData(event, event_data)
def explode(self, contactgroups, notificationways): """Explode all contact for each contactsgroup :param contactgroups: contactgroups to explode :type contactgroups: alignak.objects.contactgroup.Contactgroups :param notificationways: notificationways to explode :type notificationways: alignak.objects.notificationway.Notificationways :return: None """ # Contactgroups property need to be fulfill for got the information self.apply_partial_inheritance('contactgroups') # _special properties maybe came from a template, so # import them before grok ourselves for prop in Contact.special_properties: if prop == 'contact_name': continue self.apply_partial_inheritance(prop) # Register ourselves into the contactsgroups we are in for contact in self: if not (hasattr(contact, 'contact_name') and hasattr(contact, 'contactgroups')): continue for contactgroup in contact.contactgroups: contactgroups.add_member(contact.contact_name, contactgroup.strip()) # Now create a notification way with the simple parameter of the # contacts for contact in self: need_notificationway = False params = {} for param in Contact.simple_way_parameters: if hasattr(contact, param): need_notificationway = True params[param] = getattr(contact, param) elif contact.properties[param].has_default: # put a default text value # Remove the value and put a default value setattr(contact, param, contact.properties[param].default) if need_notificationway: cname = getattr(contact, 'contact_name', getattr(contact, 'alias', '')) nw_name = cname + '_inner_nw' notificationways.new_inner_member(nw_name, params) if not hasattr(contact, 'notificationways'): contact.notificationways = [nw_name] else: contact.notificationways = list(contact.notificationways) contact.notificationways.append(nw_name)
Explode all contact for each contactsgroup :param contactgroups: contactgroups to explode :type contactgroups: alignak.objects.contactgroup.Contactgroups :param notificationways: notificationways to explode :type notificationways: alignak.objects.notificationway.Notificationways :return: None
Below is the the instruction that describes the task: ### Input: Explode all contact for each contactsgroup :param contactgroups: contactgroups to explode :type contactgroups: alignak.objects.contactgroup.Contactgroups :param notificationways: notificationways to explode :type notificationways: alignak.objects.notificationway.Notificationways :return: None ### Response: def explode(self, contactgroups, notificationways): """Explode all contact for each contactsgroup :param contactgroups: contactgroups to explode :type contactgroups: alignak.objects.contactgroup.Contactgroups :param notificationways: notificationways to explode :type notificationways: alignak.objects.notificationway.Notificationways :return: None """ # Contactgroups property need to be fulfill for got the information self.apply_partial_inheritance('contactgroups') # _special properties maybe came from a template, so # import them before grok ourselves for prop in Contact.special_properties: if prop == 'contact_name': continue self.apply_partial_inheritance(prop) # Register ourselves into the contactsgroups we are in for contact in self: if not (hasattr(contact, 'contact_name') and hasattr(contact, 'contactgroups')): continue for contactgroup in contact.contactgroups: contactgroups.add_member(contact.contact_name, contactgroup.strip()) # Now create a notification way with the simple parameter of the # contacts for contact in self: need_notificationway = False params = {} for param in Contact.simple_way_parameters: if hasattr(contact, param): need_notificationway = True params[param] = getattr(contact, param) elif contact.properties[param].has_default: # put a default text value # Remove the value and put a default value setattr(contact, param, contact.properties[param].default) if need_notificationway: cname = getattr(contact, 'contact_name', getattr(contact, 'alias', '')) nw_name = cname + '_inner_nw' notificationways.new_inner_member(nw_name, params) if not hasattr(contact, 'notificationways'): 
contact.notificationways = [nw_name] else: contact.notificationways = list(contact.notificationways) contact.notificationways.append(nw_name)
def _get_client_secret(client, service_id):
    """
    Get client secret for service
    :param client: Accounts Service API Client
    :param service_id: Service ID
    :return: Client secret (if available)
    """
    try:
        response = client.accounts.services[service_id].secrets.get()
    except httpclient.HTTPError as exc:
        if exc.code != 404:
            raise exc
        # A 404 means the service_id is not recognised — surface a
        # friendly CLI error immediately.
        msg = ('Service {} cannot be found.'.format(service_id))
        raise click.ClickException(click.style(msg, fg='red'))
    client_secrets = response['data']
    return client_secrets[0] if client_secrets else None
def __find_new(self, hueobjecttype):
    '''
    Starts a search for new Hue objects
    '''
    # Only lights and sensors support the "find new" scan.
    assert hueobjecttype in ['lights', 'sensors'], \
        'Unsupported object type {}'.format(hueobjecttype)
    return self._request(
        method='POST',
        url='{}/{}'.format(self.API, hueobjecttype),
    )
def base_show_parser():
    """Creates a parser with arguments specific to formatting a single
    resource.

    Returns:
        {ArgumentParser}: Base parser with default show args
    """
    # add_help=False because this parser is meant to be composed into
    # other parsers as a parent.
    parser = ArgumentParser(add_help=False)
    parser.add_argument(
        '-k', '--key',
        type=str,
        help='show a single property from the block or header')
    parser.add_argument(
        '-F', '--format',
        action='store',
        default='yaml',
        choices=['yaml', 'json'],
        help='choose the output format (default: yaml)')
    return parser