def compute_displays(
    self,
    program: Union[circuits.Circuit, schedules.Schedule],
    param_resolver: study.ParamResolver = study.ParamResolver({}),
    qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
    initial_state: Union[int, np.ndarray] = 0,
) -> study.ComputeDisplaysResult:
    """Computes displays in the supplied Circuit or Schedule.

    Args:
        program: The circuit or schedule to simulate.
        param_resolver: Parameters to run with the program.
        qubit_order: Determines the canonical ordering of the qubits used
            to define the order of amplitudes in the wave function.
        initial_state: If an int, the state is set to the computational
            basis state corresponding to this state. Otherwise if this is
            a np.ndarray it is the full initial state. In this case it must
            be the correct size, be normalized (an L2 norm of 1), and be
            safely castable to an appropriate dtype for the simulator.

    Returns:
        ComputeDisplaysResult for the simulation.
    """
    return self.compute_displays_sweep(
        program, [param_resolver], qubit_order, initial_state)[0]

def get_interval_timedelta(self):
    """
    Spits out the timedelta in days.
    """
    now_datetime = timezone.now()
    current_month_days = monthrange(now_datetime.year, now_datetime.month)[1]

    # Two weeks
    if self.interval == reminders_choices.INTERVAL_2_WEEKS:
        interval_timedelta = datetime.timedelta(days=14)
    # One month
    elif self.interval == reminders_choices.INTERVAL_ONE_MONTH:
        interval_timedelta = datetime.timedelta(days=current_month_days)
    # Three months
    elif self.interval == reminders_choices.INTERVAL_THREE_MONTHS:
        three_months = now_datetime + relativedelta(months=+3)
        interval_timedelta = three_months - now_datetime
    # Six months
    elif self.interval == reminders_choices.INTERVAL_SIX_MONTHS:
        six_months = now_datetime + relativedelta(months=+6)
        interval_timedelta = six_months - now_datetime
    # One year
    elif self.interval == reminders_choices.INTERVAL_ONE_YEAR:
        one_year = now_datetime + relativedelta(years=+1)
        interval_timedelta = one_year - now_datetime

    return interval_timedelta

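A standalone sketch of the month-aware arithmetic used above, relying only on datetime and dateutil rather than the surrounding model (the concrete dates are illustrative):

    from datetime import datetime
    from dateutil.relativedelta import relativedelta

    # Adding relativedelta(months=+3) lands on the same calendar day three
    # months later (clamped to the month end when needed), so subtracting the
    # original datetime yields a timedelta that tracks the actual month lengths.
    now = datetime(2024, 1, 31, 12, 0)
    three_months = now + relativedelta(months=+3)  # 2024-04-30 (April has no 31st)
    print(three_months - now)                      # 90 days, 0:00:00
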
def document_did_save_notification(self, params):
    """
    Handle the textDocument/didSave message received from an LSP server.
    """
    text = None
    if 'text' in params:
        text = params['text']
    params = {
        'textDocument': {
            'uri': path_as_uri(params['file'])
        }
    }
    if text is not None:
        params['text'] = text
    return params

def max_speed(self):
    """
    Returns the maximum value that is accepted by the `speed_sp` attribute.
    This may be slightly different than the maximum speed that a particular
    motor can reach - it's the maximum theoretical speed.
    """
    (self._max_speed, value) = self.get_cached_attr_int(self._max_speed, 'max_speed')
    return value

def remove_all(self, token):
    """Removes all occurrences of token

    :param token: string to remove
    :return: input without token
    """
    out = self.string.replace(" ", token)  # replace tokens
    while out.find(token + token) >= 0:  # while there are tokens
        out = out.replace(token + token, token)
    return out

def no_company_with_insufficient_companies_house_data(value):
    """
    Confirms that the company number is not for a company that
    Companies House does not hold information on.

    Args:
        value (string): The company number to check.

    Raises:
        django.forms.ValidationError

    """
    for prefix, name in company_types_with_insufficient_companies_house_data:
        if value.upper().startswith(prefix):
            raise ValidationError(
                MESSAGE_INSUFFICIENT_DATA, params={'name': name}
            )

def _ctype_key_value(keys, vals):
    """
    Returns ctype arrays for the key-value args, and whether string keys
    are used. For internal use only.
    """
    if isinstance(keys, (tuple, list)):
        assert(len(keys) == len(vals))
        c_keys = []
        c_vals = []
        use_str_keys = None
        for key, val in zip(keys, vals):
            c_key_i, c_val_i, str_keys_i = _ctype_key_value(key, val)
            c_keys += c_key_i
            c_vals += c_val_i
            use_str_keys = str_keys_i if use_str_keys is None else use_str_keys
            assert(use_str_keys == str_keys_i), "inconsistent types of keys detected."
        c_keys_arr = c_array(ctypes.c_char_p, c_keys) if use_str_keys \
                     else c_array(ctypes.c_int, c_keys)
        c_vals_arr = c_array(ctypes.c_void_p, c_vals)
        return (c_keys_arr, c_vals_arr, use_str_keys)

    assert(isinstance(keys, (int,) + string_types)), \
           "unexpected type for keys: " + str(type(keys))
    use_str_keys = isinstance(keys, string_types)
    if isinstance(vals, NDArray):
        c_keys = c_str_array([keys]) if use_str_keys \
                 else c_array_buf(ctypes.c_int, array('i', [keys]))
        return (c_keys, c_handle_array([vals]), use_str_keys)
    else:
        for value in vals:
            assert(isinstance(value, NDArray))
        c_keys = c_str_array([keys] * len(vals)) if use_str_keys \
                 else c_array_buf(ctypes.c_int, array('i', [keys] * len(vals)))
        return (c_keys, c_handle_array(vals), use_str_keys)

def generate_query_batches(
    self,
    sql,
    params=None,
    param_types=None,
    partition_size_bytes=None,
    max_partitions=None,
):
    """Start a partitioned query operation.

    Uses the ``PartitionQuery`` API request to start a partitioned
    query operation. Returns a list of batch information needed to
    perform the actual queries.

    :type sql: str
    :param sql: SQL query statement

    :type params: dict, {str -> column value}
    :param params: values for parameter replacement. Keys must match
                   the names used in ``sql``.

    :type param_types: dict[str -> Union[dict, .types.Type]]
    :param param_types:
        (Optional) maps explicit types for one or more param values;
        required if parameters are passed.

    :type partition_size_bytes: int
    :param partition_size_bytes:
        (Optional) desired size for each partition generated. The service
        uses this as a hint, the actual partition size may differ.

    :type max_partitions: int
    :param max_partitions:
        (Optional) desired maximum number of partitions generated. The
        service uses this as a hint, the actual number of partitions may
        differ.

    :rtype: iterable of dict
    :returns:
        mappings of information used to perform actual partitioned reads via
        :meth:`process_read_batch`.
    """
    partitions = self._get_snapshot().partition_query(
        sql=sql,
        params=params,
        param_types=param_types,
        partition_size_bytes=partition_size_bytes,
        max_partitions=max_partitions,
    )

    query_info = {"sql": sql}
    if params:
        query_info["params"] = params
        query_info["param_types"] = param_types

    for partition in partitions:
        yield {"partition": partition, "query": query_info}

def parse(self, xml_data):
    """ Parse XML data """

    # parse tree
    if isinstance(xml_data, string_types):
        # Presumably, this is textual xml data.
        try:
            root = ET.fromstring(xml_data)
        except StdlibParseError as e:
            raise ParseError(str(e))
    else:
        # Otherwise, assume it has already been parsed into a tree
        root = xml_data

    # get type
    if 'type' in root.attrib:
        self.kind = root.attrib['type']

    # parse component
    for c1 in root:
        # <id>
        if c1.tag == 'id':
            self.id = c1.text
        # <updatecontact>
        elif c1.tag == 'updatecontact' or c1.tag == 'update_contact':
            self.update_contact = c1.text
        # <metadata_license>
        elif c1.tag == 'metadata_license':
            self.metadata_license = c1.text
        # <releases>
        elif c1.tag == 'releases':
            for c2 in c1:
                if c2.tag == 'release':
                    rel = Release()
                    rel._parse_tree(c2)
                    self.add_release(rel)
        # <reviews>
        elif c1.tag == 'reviews':
            for c2 in c1:
                if c2.tag == 'review':
                    rev = Review()
                    rev._parse_tree(c2)
                    self.add_review(rev)
        # <screenshots>
        elif c1.tag == 'screenshots':
            for c2 in c1:
                if c2.tag == 'screenshot':
                    ss = Screenshot()
                    ss._parse_tree(c2)
                    self.add_screenshot(ss)
        # <provides>
        elif c1.tag == 'provides':
            for c2 in c1:
                prov = Provide()
                prov._parse_tree(c2)
                self.add_provide(prov)
        # <requires>
        elif c1.tag == 'requires':
            for c2 in c1:
                req = Require()
                req._parse_tree(c2)
                self.add_require(req)
        # <kudos>
        elif c1.tag == 'kudos':
            for c2 in c1:
                if not c2.tag == 'kudo':
                    continue
                self.kudos.append(c2.text)
        # <keywords>
        elif c1.tag == 'keywords':
            for c2 in c1:
                if not c2.tag == 'keyword':
                    continue
                self.keywords.append(c2.text)
        # <categories>
        elif c1.tag == 'categories':
            for c2 in c1:
                if not c2.tag == 'category':
                    continue
                self.categories.append(c2.text)
        # <custom>
        elif c1.tag == 'custom':
            for c2 in c1:
                if not c2.tag == 'value':
                    continue
                if 'key' not in c2.attrib:
                    continue
                self.custom[c2.attrib['key']] = c2.text
        # <project_license>
        elif c1.tag == 'project_license' or c1.tag == 'licence':
            self.project_license = c1.text
        # <developer_name>
        elif c1.tag == 'developer_name':
            self.developer_name = _join_lines(c1.text)
        # <name>
        elif c1.tag == 'name' and not self.name:
            self.name = _join_lines(c1.text)
        # <pkgname>
        elif c1.tag == 'pkgname' and not self.pkgname:
            self.pkgname = _join_lines(c1.text)
        # <summary>
        elif c1.tag == 'summary' and not self.summary:
            self.summary = _join_lines(c1.text)
        # <description>
        elif c1.tag == 'description' and not self.description:
            self.description = _parse_desc(c1)
        # <url>
        elif c1.tag == 'url':
            key = 'homepage'
            if 'type' in c1.attrib:
                key = c1.attrib['type']
            self.urls[key] = c1.text
        elif c1.tag == 'icon':
            key = c1.attrib.pop('type', 'unknown')
            c1.attrib['value'] = c1.text
            self.icons[key] = self.icons.get(key, []) + [c1.attrib]

def wrap_get_channel(cls, response):
    """Wrap the response from getting a channel into an instance
    and return it

    :param response: The response from getting a channel
    :type response: :class:`requests.Response`
    :returns: the new channel instance
    :rtype: :class:`list` of :class:`channel`
    :raises: None
    """
    json = response.json()
    c = cls.wrap_json(json)
    return c

def get_segment_token_offsets(segment_token_list, token_map):
    """
    given a list of token node IDs, returns the index of its first and last
    elements. this actually calculates the int indices, as there are weird
    formats like RS3, which use unordered / wrongly ordered IDs.

    Parameters
    ----------
    segment_token_list : list of str
        sorted list of token IDs (i.e. the tokens that this segment spans)
    token_map : dict of (str, int)
        a map from token IDs to token indices

    Returns
    -------
    first_token_index : int
        index of the first token of the segment
    last_token_index : int
        index of the last token of the segment
    """
    token_indices = [token_map[token_id] for token_id in segment_token_list]
    # we need to foolproof this for nasty RS3 files or other input formats
    # with unordered or wrongly ordered IDs
    return min(token_indices), max(token_indices)

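A quick usage sketch with hypothetical token IDs and an invented token_map, showing how an unordered ID list still yields the correct span:

    segment_tokens = ['tok_7', 'tok_2', 'tok_5']          # unordered, as in some RS3 exports
    token_map = {'tok_1': 0, 'tok_2': 1, 'tok_5': 4, 'tok_7': 6}

    first, last = get_segment_token_offsets(segment_tokens, token_map)
    print(first, last)  # 1 6
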
def register(self, type, parser, composer, **meta):
    """Registers a parser and composer of a format.

    You can use this method to overwrite existing formats.

    :param type: The unique name of the format
    :param parser: The method to parse data as the format
    :param composer: The method to compose data as the format
    :param meta: The extra information associated with the format
    """
    self.registered_formats[type] = {
        'parser': parser,
        'composer': composer,
        'meta': meta,
    }

def computeSmartIndent(self, block, ch):
    """special rules: ;;; -> indent 0
                      ;; -> align with next line, if possible
                      ; -> usually on the same line as code -> ignore
    """
    if re.search(r'^\s*;;;', block.text()):
        return ''
    elif re.search(r'^\s*;;', block.text()):
        # try to align with the next line
        nextBlock = self._nextNonEmptyBlock(block)
        if nextBlock.isValid():
            return self._blockIndent(nextBlock)

    try:
        foundBlock, foundColumn = self.findBracketBackward(block, 0, '(')
    except ValueError:
        return ''
    else:
        return self._increaseIndent(self._blockIndent(foundBlock))

def next(self):
    """ Sends a "next" command to the player. """
    msg = cr.Message()
    msg.type = cr.NEXT
    self.send_message(msg)

def phantomjs_get(url):
    """ Perform the request via PhantomJS. """
    from selenium import webdriver
    from selenium.webdriver.common.desired_capabilities import DesiredCapabilities

    dcap = dict(DesiredCapabilities.PHANTOMJS)
    dcap["phantomjs.page.settings.userAgent"] = config.USER_AGENT
    dcap["phantomjs.page.settings.loadImages"] = False
    driver = webdriver.PhantomJS(desired_capabilities=dcap,
                                 executable_path=config.PHANTOMJS_BIN)
    logger.debug("PhantomJS get: %s", url)
    driver.get(url)
    time.sleep(10)  # to follow redirects
    response = driver.page_source
    driver.quit()
    return response

def create_many(self, statements):
    """
    Creates multiple statement entries.
    """
    Statement = self.get_model('statement')
    Tag = self.get_model('tag')

    tag_cache = {}

    for statement in statements:
        statement_data = statement.serialize()
        tag_data = statement_data.pop('tags', [])

        statement_model_object = Statement(**statement_data)

        if not statement.search_text:
            statement_model_object.search_text = self.tagger.get_bigram_pair_string(statement.text)

        if not statement.search_in_response_to and statement.in_response_to:
            statement_model_object.search_in_response_to = self.tagger.get_bigram_pair_string(statement.in_response_to)

        statement_model_object.save()

        tags_to_add = []

        for tag_name in tag_data:
            if tag_name in tag_cache:
                tag = tag_cache[tag_name]
            else:
                tag, _ = Tag.objects.get_or_create(name=tag_name)
                tag_cache[tag_name] = tag
            tags_to_add.append(tag)

        statement_model_object.tags.add(*tags_to_add)

def previous_sibling(self):
    """The previous sibling statement.

    :returns: The previous sibling statement node.
    :rtype: NodeNG or None
    """
    stmts = self.parent.child_sequence(self)
    index = stmts.index(self)
    if index >= 1:
        return stmts[index - 1]
    return None

def get_forecast(self) -> List[SmhiForecast]:
    """
    Returns a list of forecasts. The first one in the list is the current one.
    """
    json_data = self._api.get_forecast_api(self._longitude, self._latitude)
    return _get_forecast(json_data)

def getNextEyeLocation(self, currentEyeLoc):
    """
    Generate next eye location based on current eye location.

    @param currentEyeLoc (numpy.array) Current coordinate describing the eye
                                       location in the world.

    @return (tuple) Contains:
            nextEyeLoc (numpy.array) Coordinate of the next eye location.
            eyeDiff (numpy.array) Vector describing change from
                                  currentEyeLoc to nextEyeLoc.
    """
    possibleEyeLocs = []
    for loc in self.spatialConfig:
        shift = abs(max(loc - currentEyeLoc))
        if self.minDisplacement <= shift <= self.maxDisplacement:
            possibleEyeLocs.append(loc)

    nextEyeLoc = self.nupicRandomChoice(possibleEyeLocs)

    eyeDiff = nextEyeLoc - currentEyeLoc

    return nextEyeLoc, eyeDiff

def comment(self, format, *args):
    """
    Add a comment to hash table before saving to disk. You can add as many
    comment lines as you like. These comment lines are discarded when loading
    the file. If you use a null format, all comments are deleted.
    """
    return lib.zhash_comment(self._as_parameter_, format, *args)

def load_schema(name):
    """
    Load a schema from ./schemas/``name``.json and return it.
    """
    data = pkgutil.get_data('jsonschema', "schemas/{0}.json".format(name))
    return json.loads(data.decode("utf-8"))

def find_output_with_ifo(self, ifo):
    """
    Find all files who have ifo = ifo
    """
    # Enforce upper case
    ifo = ifo.upper()
    return FileList([i for i in self if ifo in i.ifo_list])

def union(self, second_iterable, selector=identity):
    '''Returns those elements which are either in the source sequence or in
    the second_iterable, or in both.

    Note: This method uses deferred execution.

    Args:
        second_iterable: Elements from this sequence are returned if they
            are not also in the source sequence.

        selector: An optional single argument function which is used to
            project the elements in the source and second_iterables prior
            to comparing them. If omitted the identity function will be
            used.

    Returns:
        A sequence containing all elements in the source sequence and
        second sequence.

    Raises:
        ValueError: If the Queryable has been closed.
        TypeError: If the second_iterable is not in fact iterable.
        TypeError: If the selector is not callable.
    '''
    if self.closed():
        raise ValueError("Attempt to call union() on a closed Queryable.")

    if not is_iterable(second_iterable):
        raise TypeError("Cannot compute union() with second_iterable of "
                        "non-iterable {0}".format(str(type(second_iterable))[7: -1]))

    return self._create(itertools.chain(self, second_iterable)).distinct(selector)

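A minimal usage sketch; the query() initiator and the trailing to_list() call are assumptions about the surrounding Queryable API, not part of the method above:

    numbers = query([1, 2, 2, 3])                # hypothetical initiator returning a Queryable
    result = numbers.union([3, 4, 5]).to_list()
    print(result)  # [1, 2, 3, 4, 5] -- duplicates removed by the trailing distinct()
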
def devices(self):
    """ Returns a list of all :class:`~plexapi.myplex.MyPlexDevice` objects
    connected to the server. """
    data = self.query(MyPlexDevice.key)
    return [MyPlexDevice(self, elem) for elem in data]

def CaptureNamedVariable(self, name, value, depth, limits):
    """Appends name to the product of CaptureVariable.

    Args:
      name: name of the variable.
      value: data to capture
      depth: nested depth of dictionaries and vectors so far.
      limits: Per-object limits for capturing variable data.

    Returns:
      Formatted captured data as per Variable proto with name.
    """
    if not hasattr(name, '__dict__'):
        name = str(name)
    else:
        # TODO(vlif): call str(name) with immutability verifier here.
        name = str(id(name))
    self._total_size += len(name)

    v = (self.CheckDataVisiblity(value) or
         self.CaptureVariable(value, depth, limits))
    v['name'] = name
    return v

def ctypes2numpy(cptr, length, dtype):
    """Convert a ctypes pointer array to a numpy array."""
    NUMPY_TO_CTYPES_MAPPING = {
        np.float32: ctypes.c_float,
        np.uint32: ctypes.c_uint,
    }
    if dtype not in NUMPY_TO_CTYPES_MAPPING:
        raise RuntimeError('Supported types: {}'.format(NUMPY_TO_CTYPES_MAPPING.keys()))
    ctype = NUMPY_TO_CTYPES_MAPPING[dtype]
    if not isinstance(cptr, ctypes.POINTER(ctype)):
        raise RuntimeError('expected {} pointer'.format(ctype))
    res = np.zeros(length, dtype=dtype)
    if not ctypes.memmove(res.ctypes.data, cptr, length * res.strides[0]):
        raise RuntimeError('memmove failed')
    return res

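A small self-contained check of the converter, building a ctypes float buffer and viewing it through ctypes2numpy as defined above:

    import ctypes
    import numpy as np

    buf = (ctypes.c_float * 4)(1.0, 2.0, 3.0, 4.0)
    cptr = ctypes.cast(buf, ctypes.POINTER(ctypes.c_float))  # satisfies the pointer-type check
    arr = ctypes2numpy(cptr, 4, np.float32)
    print(arr)  # [1. 2. 3. 4.]
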
def _match_error_to_data_set(x, ex):
    """
    Inflates ex to match the dimensionality of x, "intelligently". x is
    assumed to be a 2D array.
    """
    # Simplest case, ex is None or a number
    if not _fun.is_iterable(ex):

        # Just make a matched list of Nones
        if ex is None:
            ex = [ex]*len(x)

        # Make arrays of numbers
        if _fun.is_a_number(ex):
            value = ex  # temporary storage
            ex = []
            for n in range(len(x)):
                ex.append([value]*len(x[n]))

    # Otherwise, ex is iterable

    # Default behavior: If the elements are all numbers and the length matches
    # that of the first x-array, assume this is meant to match all the x
    # data sets
    if _fun.elements_are_numbers(ex) and len(ex) == len(x[0]):
        ex = [ex]*len(x)

    # The user may specify a list of some iterable and some not. Assume
    # in this case that at least the lists are the same length
    for n in range(len(x)):

        # do nothing to the None's

        # Inflate single numbers to match
        if _fun.is_a_number(ex[n]):
            ex[n] = [ex[n]]*len(x[n])

    return ex

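An illustrative call, assuming the _fun helpers behave as their names suggest: a single scalar error bar is inflated to match a ragged 2D data set:

    x  = [[1, 2, 3], [4, 5]]   # two data sets of different lengths
    ex = 0.1                   # one error bar for everything
    print(_match_error_to_data_set(x, ex))
    # [[0.1, 0.1, 0.1], [0.1, 0.1]]
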
def _estimate_strains(self):
    """Compute an estimate of the strains."""
    # Estimate the strain based on the PGV and shear-wave velocity
    for l in self._profile:
        l.reset()
        l.strain = self._motion.pgv / l.initial_shear_vel

def get_resource_type_from_included_serializer(self):
    """
    Check to see if this resource has a different resource_name when
    included and return that name, or None
    """
    field_name = self.field_name or self.parent.field_name
    parent = self.get_parent_serializer()

    if parent is not None:
        # accept both singular and plural versions of field_name
        field_names = [
            inflection.singularize(field_name),
            inflection.pluralize(field_name)
        ]
        includes = get_included_serializers(parent)
        for field in field_names:
            if field in includes.keys():
                return get_resource_type_from_serializer(includes[field])

    return None

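The singular/plural matching above leans on the inflection package; a quick illustration of the two calls it uses:

    import inflection

    print(inflection.singularize('orders'))  # 'order'
    print(inflection.pluralize('order'))     # 'orders'
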
def pmll(self, *args, **kwargs):
    """
    NAME:

       pmll

    PURPOSE:

       return proper motion in Galactic longitude (in mas/yr)

    INPUT:

       t - (optional) time at which to get pmll (can be Quantity)

       obs=[X,Y,Z,vx,vy,vz] - (optional) position and velocity of observer
                              in the Galactocentric frame
                              (in kpc and km/s)
                              (default=[8.0,0.,0.,0.,220.,0.];
                              entries can be Quantities)
                              OR Orbit object that corresponds to the
                              orbit of the observer
                              Y is ignored and always assumed to be zero

       ro= (Object-wide default) physical scale for distances to use to
           convert (can be Quantity)

       vo= (Object-wide default) physical scale for velocities to use to
           convert (can be Quantity)

    OUTPUT:

       pm_l(t) in mas/yr

    HISTORY:

       2011-02-24 - Written - Bovy (NYU)
    """
    out = self._orb.pmll(*args, **kwargs)
    if len(out) == 1:
        return out[0]
    else:
        return out

def Register(self, a, b, migrated_entity):
    """Registers a merge mapping.

    If a and b are both not None, this means that entities a and b were
    merged to produce migrated_entity. If one of a or b are not None, then
    it means it was not merged but simply migrated.

    The effect of a call to register is to update a_merge_map and
    b_merge_map according to the merge. Also the private attributes
    _migrated_entity of a and b are set to migrated_entity.

    Args:
      a: The entity from the old feed or None.
      b: The entity from the new feed or None.
      migrated_entity: The migrated entity.
    """
    # There are a few places where code needs to find the corresponding
    # migrated entity of an object without knowing in which original schedule
    # the entity started. With a_merge_map and b_merge_map both have to be
    # checked. Use of the _migrated_entity attribute allows the migrated
    # entity to be directly found without the schedule. The merge maps also
    # require that all objects be hashable. GenericGTFSObject is at the
    # moment, but this is a bug. See comment in transitfeed.GenericGTFSObject.
    if a is not None:
        self.a_merge_map[a] = migrated_entity
        a._migrated_entity = migrated_entity
    if b is not None:
        self.b_merge_map[b] = migrated_entity
        b._migrated_entity = migrated_entity

def hill_climbing(problem, iterations_limit=0, viewer=None): ''' Hill climbing search. If iterations_limit is specified, the algorithm will end after that number of iterations. Else, it will continue until it can't find a better node than the current one. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.value. ''' return _local_search(problem, _first_expander, iterations_limit=iterations_limit, fringe_size=1, stop_when_no_better=True, viewer=viewer)
Hill climbing search. If iterations_limit is specified, the algorithm will end after that number of iterations. Else, it will continue until it can't find a better node than the current one. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.value.
Below is the instruction that describes the task: ### Input: Hill climbing search. If iterations_limit is specified, the algorithm will end after that number of iterations. Else, it will continue until it can't find a better node than the current one. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.value. ### Response: def hill_climbing(problem, iterations_limit=0, viewer=None): ''' Hill climbing search. If iterations_limit is specified, the algorithm will end after that number of iterations. Else, it will continue until it can't find a better node than the current one. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.value. ''' return _local_search(problem, _first_expander, iterations_limit=iterations_limit, fringe_size=1, stop_when_no_better=True, viewer=viewer)
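A minimal usage sketch for the entry above. hill_climbing appears to come from the simpleai library, so this assumes that package is installed and that its SearchProblem base class (with the actions/result/value interface named in the docstring) behaves as described; the toy problem itself is made up.

from simpleai.search import SearchProblem, hill_climbing

class MaximizeX(SearchProblem):
    # Toy problem: walk an integer toward 10 in steps of +/-1.
    def actions(self, state):
        return [-1, +1]

    def result(self, state, action):
        return state + action

    def value(self, state):
        return -abs(state - 10)

best = hill_climbing(MaximizeX(initial_state=0), iterations_limit=100)
print(best.state)  # expected to stop at 10, where no neighbour is better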
def Buscar(self, nro_doc, tipo_doc=80): "Devuelve True si fue encontrado y establece atributos con datos" # cuit: codigo único de identificación tributaria del contribuyente # (sin guiones) self.cursor.execute("SELECT * FROM padron WHERE " " tipo_doc=? AND nro_doc=?", [tipo_doc, nro_doc]) row = self.cursor.fetchone() for key in [k for k, l, t, d in FORMATO]: if row: val = row[key] if not isinstance(val, basestring): val = str(row[key]) setattr(self, key, val) else: setattr(self, key, '') if self.tipo_doc == 80: self.cuit = self.nro_doc elif self.tipo_doc == 96: self.dni = self.nro_doc # determinar categoría de IVA (tentativa) try: cat_iva = int(self.cat_iva) except ValueError: cat_iva = None if cat_iva: pass elif self.imp_iva in ('AC', 'S'): self.cat_iva = 1 # RI elif self.imp_iva == 'EX': self.cat_iva = 4 # EX elif self.monotributo: self.cat_iva = 6 # MT else: self.cat_iva = 5 # CF return True if row else False
Devuelve True si fue encontrado y establece atributos con datos
Below is the the instruction that describes the task: ### Input: Devuelve True si fue encontrado y establece atributos con datos ### Response: def Buscar(self, nro_doc, tipo_doc=80): "Devuelve True si fue encontrado y establece atributos con datos" # cuit: codigo único de identificación tributaria del contribuyente # (sin guiones) self.cursor.execute("SELECT * FROM padron WHERE " " tipo_doc=? AND nro_doc=?", [tipo_doc, nro_doc]) row = self.cursor.fetchone() for key in [k for k, l, t, d in FORMATO]: if row: val = row[key] if not isinstance(val, basestring): val = str(row[key]) setattr(self, key, val) else: setattr(self, key, '') if self.tipo_doc == 80: self.cuit = self.nro_doc elif self.tipo_doc == 96: self.dni = self.nro_doc # determinar categoría de IVA (tentativa) try: cat_iva = int(self.cat_iva) except ValueError: cat_iva = None if cat_iva: pass elif self.imp_iva in ('AC', 'S'): self.cat_iva = 1 # RI elif self.imp_iva == 'EX': self.cat_iva = 4 # EX elif self.monotributo: self.cat_iva = 6 # MT else: self.cat_iva = 5 # CF return True if row else False
def update_port_ip_address(self): """Find the ip address that assinged to a port via DHCP The port database will be updated with the ip address. """ leases = None req = dict(ip='0.0.0.0') instances = self.get_vms_for_this_req(**req) if instances is None: return for vm in instances: if not leases: # For the first time finding the leases file. leases = self._get_ip_leases() if not leases: # File does not exist. return for line in leases: if line.startswith('lease') and line.endswith('{\n'): ip_addr = line.split()[1] if 'hardware ethernet' in line: if vm.mac == line.replace(';', '').split()[2]: LOG.info('Find IP address %(ip)s for %(mac)s', {'ip': ip_addr, 'mac': vm.mac}) try: rule_info = dict(ip=ip_addr, mac=vm.mac, port=vm.port_id, status='up') self.neutron_event.update_ip_rule(str(vm.host), str(rule_info)) except (rpc.MessagingTimeout, rpc.RPCException, rpc.RemoteError): LOG.error("RPC error: Failed to update" "rules.") else: params = dict(columns=dict(ip=ip_addr)) self.update_vm_db(vm.port_id, **params) # Send update to the agent. vm_info = dict(status=vm.status, vm_mac=vm.mac, segmentation_id=vm.segmentation_id, host=vm.host, port_uuid=vm.port_id, net_uuid=vm.network_id, oui=dict(ip_addr=ip_addr, vm_name=vm.name, vm_uuid=vm.instance_id, gw_mac=vm.gw_mac, fwd_mod=vm.fwd_mod, oui_id='cisco')) try: self.neutron_event.send_vm_info(vm.host, str(vm_info)) except (rpc.MessagingTimeout, rpc.RPCException, rpc.RemoteError): LOG.error('Failed to send VM info to ' 'agent.')
Find the IP address that is assigned to a port via DHCP. The port database will be updated with the IP address.
Below is the the instruction that describes the task: ### Input: Find the ip address that assinged to a port via DHCP The port database will be updated with the ip address. ### Response: def update_port_ip_address(self): """Find the ip address that assinged to a port via DHCP The port database will be updated with the ip address. """ leases = None req = dict(ip='0.0.0.0') instances = self.get_vms_for_this_req(**req) if instances is None: return for vm in instances: if not leases: # For the first time finding the leases file. leases = self._get_ip_leases() if not leases: # File does not exist. return for line in leases: if line.startswith('lease') and line.endswith('{\n'): ip_addr = line.split()[1] if 'hardware ethernet' in line: if vm.mac == line.replace(';', '').split()[2]: LOG.info('Find IP address %(ip)s for %(mac)s', {'ip': ip_addr, 'mac': vm.mac}) try: rule_info = dict(ip=ip_addr, mac=vm.mac, port=vm.port_id, status='up') self.neutron_event.update_ip_rule(str(vm.host), str(rule_info)) except (rpc.MessagingTimeout, rpc.RPCException, rpc.RemoteError): LOG.error("RPC error: Failed to update" "rules.") else: params = dict(columns=dict(ip=ip_addr)) self.update_vm_db(vm.port_id, **params) # Send update to the agent. vm_info = dict(status=vm.status, vm_mac=vm.mac, segmentation_id=vm.segmentation_id, host=vm.host, port_uuid=vm.port_id, net_uuid=vm.network_id, oui=dict(ip_addr=ip_addr, vm_name=vm.name, vm_uuid=vm.instance_id, gw_mac=vm.gw_mac, fwd_mod=vm.fwd_mod, oui_id='cisco')) try: self.neutron_event.send_vm_info(vm.host, str(vm_info)) except (rpc.MessagingTimeout, rpc.RPCException, rpc.RemoteError): LOG.error('Failed to send VM info to ' 'agent.')
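To make the lease-file scanning above easier to follow in isolation, here is a hedged standalone sketch of the same parsing idea; the dhcpd.leases-style line format is inferred from the string tests in the method, and the sample data is invented.

def parse_dhcp_leases(lines):
    """Map MAC address -> most recently seen leased IP from dhcpd.leases-style text."""
    leases = {}
    ip_addr = None
    for line in lines:
        line = line.strip()
        if line.startswith('lease') and line.endswith('{'):
            ip_addr = line.split()[1]
        elif 'hardware ethernet' in line and ip_addr:
            mac = line.replace(';', '').split()[2]
            leases[mac] = ip_addr
    return leases

sample = [
    'lease 10.0.0.5 {',
    '  hardware ethernet fa:16:3e:aa:bb:cc;',
    '}',
]
print(parse_dhcp_leases(sample))  # {'fa:16:3e:aa:bb:cc': '10.0.0.5'}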
def set_scanner_alert_threshold(self, scanner_ids, alert_threshold): """Set the alert threshold for the given policies.""" for scanner_id in scanner_ids: self.logger.debug('Setting alert threshold for scanner {0} to {1}'.format(scanner_id, alert_threshold)) result = self.zap.ascan.set_scanner_alert_threshold(scanner_id, alert_threshold) if result != 'OK': raise ZAPError('Error setting alert threshold for scanner with ID {0}: {1}'.format(scanner_id, result))
Set the alert threshold for the given policies.
Below is the instruction that describes the task: ### Input: Set the alert threshold for the given policies. ### Response: def set_scanner_alert_threshold(self, scanner_ids, alert_threshold): """Set the alert threshold for the given policies.""" for scanner_id in scanner_ids: self.logger.debug('Setting alert threshold for scanner {0} to {1}'.format(scanner_id, alert_threshold)) result = self.zap.ascan.set_scanner_alert_threshold(scanner_id, alert_threshold) if result != 'OK': raise ZAPError('Error setting alert threshold for scanner with ID {0}: {1}'.format(scanner_id, result))
def connections(self): """Get list of connections.""" self._check_session() status, data = self._rest.get_request('connections') return data
Get list of connections.
Below is the instruction that describes the task: ### Input: Get list of connections. ### Response: def connections(self): """Get list of connections.""" self._check_session() status, data = self._rest.get_request('connections') return data
def merge(left, right): """ Merge two mappings objects together, combining overlapping Mappings, and favoring right-values left: The left Mapping object. right: The right (favored) Mapping object. NOTE: This is not commutative (merge(a,b) != merge(b,a)). """ merged = {} left_keys = frozenset(left) right_keys = frozenset(right) # Items only in the left Mapping for key in left_keys - right_keys: merged[key] = left[key] # Items only in the right Mapping for key in right_keys - left_keys: merged[key] = right[key] # in both for key in left_keys & right_keys: left_value = left[key] right_value = right[key] if (isinstance(left_value, Mapping) and isinstance(right_value, Mapping)): # recursive merge merged[key] = merge(left_value, right_value) else: # overwrite with right value merged[key] = right_value return merged
Merge two mapping objects together, combining overlapping Mappings and favoring right-hand values. left: The left Mapping object. right: The right (favored) Mapping object. NOTE: This is not commutative (merge(a,b) != merge(b,a)).
Below is the instruction that describes the task: ### Input: Merge two mapping objects together, combining overlapping Mappings and favoring right-hand values. left: The left Mapping object. right: The right (favored) Mapping object. NOTE: This is not commutative (merge(a,b) != merge(b,a)). ### Response: def merge(left, right): """ Merge two mapping objects together, combining overlapping Mappings and favoring right-hand values. left: The left Mapping object. right: The right (favored) Mapping object. NOTE: This is not commutative (merge(a,b) != merge(b,a)). """ merged = {} left_keys = frozenset(left) right_keys = frozenset(right) # Items only in the left Mapping for key in left_keys - right_keys: merged[key] = left[key] # Items only in the right Mapping for key in right_keys - left_keys: merged[key] = right[key] # in both for key in left_keys & right_keys: left_value = left[key] right_value = right[key] if (isinstance(left_value, Mapping) and isinstance(right_value, Mapping)): # recursive merge merged[key] = merge(left_value, right_value) else: # overwrite with right value merged[key] = right_value return merged
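A short doctest-style illustration of the recursive, right-favoring behaviour described above (assuming Mapping is collections.abc.Mapping, as the isinstance checks suggest):

>>> left = {'a': 1, 'nested': {'x': 1, 'y': 2}}
>>> right = {'b': 3, 'nested': {'y': 20}}
>>> merge(left, right) == {'a': 1, 'b': 3, 'nested': {'x': 1, 'y': 20}}
True
>>> merge(right, left)['nested']['y']  # not commutative: now the left-hand 2 wins
2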
def serialize_formula(formula): r'''Basic formula serializer to construct a consistently-formatted formula. This is necessary for handling user-supplied formulas, which are not always well formatted. Performs no sanity checking that elements are actually elements. Parameters ---------- formula : str Formula string as parseable by the method nested_formula_parser, [-] Returns ------- formula : str A consistently formatted formula to describe a molecular formula, [-] Notes ----- Examples -------- >>> serialize_formula('Pd(NH3)4+3') 'H12N4Pd+3' ''' charge = charge_from_formula(formula) element_dict = nested_formula_parser(formula) base = atoms_to_Hill(element_dict) if charge == 0: pass elif charge > 0: if charge == 1: base += '+' else: base += '+' + str(charge) elif charge < 0: if charge == -1: base += '-' else: base += str(charge) return base
r'''Basic formula serializer to construct a consistently-formatted formula. This is necessary for handling user-supplied formulas, which are not always well formatted. Performs no sanity checking that elements are actually elements. Parameters ---------- formula : str Formula string as parseable by the method nested_formula_parser, [-] Returns ------- formula : str A consistently formatted formula to describe a molecular formula, [-] Notes ----- Examples -------- >>> serialize_formula('Pd(NH3)4+3') 'H12N4Pd+3'
Below is the the instruction that describes the task: ### Input: r'''Basic formula serializer to construct a consistently-formatted formula. This is necessary for handling user-supplied formulas, which are not always well formatted. Performs no sanity checking that elements are actually elements. Parameters ---------- formula : str Formula string as parseable by the method nested_formula_parser, [-] Returns ------- formula : str A consistently formatted formula to describe a molecular formula, [-] Notes ----- Examples -------- >>> serialize_formula('Pd(NH3)4+3') 'H12N4Pd+3' ### Response: def serialize_formula(formula): r'''Basic formula serializer to construct a consistently-formatted formula. This is necessary for handling user-supplied formulas, which are not always well formatted. Performs no sanity checking that elements are actually elements. Parameters ---------- formula : str Formula string as parseable by the method nested_formula_parser, [-] Returns ------- formula : str A consistently formatted formula to describe a molecular formula, [-] Notes ----- Examples -------- >>> serialize_formula('Pd(NH3)4+3') 'H12N4Pd+3' ''' charge = charge_from_formula(formula) element_dict = nested_formula_parser(formula) base = atoms_to_Hill(element_dict) if charge == 0: pass elif charge > 0: if charge == 1: base += '+' else: base += '+' + str(charge) elif charge < 0: if charge == -1: base += '-' else: base += str(charge) return base
def append_processor(self, proc, source_proc=None): "Append a new processor to the pipe" if source_proc is None and len(self.processors): source_proc = self.processors[0] if source_proc and not isinstance(source_proc, Processor): raise TypeError('source_proc must be a Processor or None') if not isinstance(proc, Processor): raise TypeError('proc must be a Processor or None') if proc.type == 'decoder' and len(self.processors): raise ValueError('Only the first processor in a pipe could be a Decoder') # TODO : check if the processor is already in the pipe if source_proc: for child in self._graph.neighbors_iter(source_proc.uuid()): child_proc = self._graph.node[child]['processor'] if proc == child_proc: proc._uuid = child_proc.uuid() proc.process_pipe = self break if not self._graph.has_node(proc.uuid()): self.processors.append(proc) # Add processor to the pipe self._graph.add_node(proc.uuid(), processor=proc, id=proc.id()) if source_proc: self._graph.add_edge(self.processors[0].uuid(), proc.uuid(), type='audio_source') proc.process_pipe = self # Add an edge between each parent and proc for parent in proc.parents.values(): self._graph.add_edge(parent.uuid(), proc.uuid(), type='data_source')
Append a new processor to the pipe
Below is the the instruction that describes the task: ### Input: Append a new processor to the pipe ### Response: def append_processor(self, proc, source_proc=None): "Append a new processor to the pipe" if source_proc is None and len(self.processors): source_proc = self.processors[0] if source_proc and not isinstance(source_proc, Processor): raise TypeError('source_proc must be a Processor or None') if not isinstance(proc, Processor): raise TypeError('proc must be a Processor or None') if proc.type == 'decoder' and len(self.processors): raise ValueError('Only the first processor in a pipe could be a Decoder') # TODO : check if the processor is already in the pipe if source_proc: for child in self._graph.neighbors_iter(source_proc.uuid()): child_proc = self._graph.node[child]['processor'] if proc == child_proc: proc._uuid = child_proc.uuid() proc.process_pipe = self break if not self._graph.has_node(proc.uuid()): self.processors.append(proc) # Add processor to the pipe self._graph.add_node(proc.uuid(), processor=proc, id=proc.id()) if source_proc: self._graph.add_edge(self.processors[0].uuid(), proc.uuid(), type='audio_source') proc.process_pipe = self # Add an edge between each parent and proc for parent in proc.parents.values(): self._graph.add_edge(parent.uuid(), proc.uuid(), type='data_source')
def run_thermal_displacements(self, t_min=0, t_max=1000, t_step=10, temperatures=None, direction=None, freq_min=None, freq_max=None): """Prepare thermal displacements calculation Parameters ---------- t_min, t_max, t_step : float, optional Minimum and maximum temperatures and the interval in this temperature range. Default valuues are 0, 1000, and 10. temperatures : array_like, optional Temperature points where thermal properties are calculated. When this is set, t_min, t_max, and t_step are ignored. direction : array_like, optional Projection direction in reduced coordinates. Default is None, i.e., no projection. dtype=float, shape=(3,) freq_min, freq_max : float, optional Phonon frequencies larger than freq_min and smaller than freq_max are included. Default is None, i.e., all phonons. """ if self._dynamical_matrix is None: msg = ("Dynamical matrix has not yet built.") raise RuntimeError(msg) if self._mesh is None: msg = ("run_mesh has to be done.") raise RuntimeError(msg) mesh_nums = self._mesh.mesh_numbers ir_grid_points = self._mesh.ir_grid_points if not self._mesh.with_eigenvectors: msg = ("run_mesh has to be done with with_eigenvectors=True.") raise RuntimeError(msg) if np.prod(mesh_nums) != len(ir_grid_points): msg = ("run_mesh has to be done with is_mesh_symmetry=False.") raise RuntimeError(msg) if direction is not None: projection_direction = np.dot(direction, self._primitive.get_cell()) td = ThermalDisplacements( self._mesh, projection_direction=projection_direction, freq_min=freq_min, freq_max=freq_max) else: td = ThermalDisplacements(self._mesh, freq_min=freq_min, freq_max=freq_max) if temperatures is None: td.set_temperature_range(t_min, t_max, t_step) else: td.set_temperatures(temperatures) td.run() self._thermal_displacements = td
Prepare thermal displacements calculation Parameters ---------- t_min, t_max, t_step : float, optional Minimum and maximum temperatures and the interval in this temperature range. Default values are 0, 1000, and 10. temperatures : array_like, optional Temperature points where thermal properties are calculated. When this is set, t_min, t_max, and t_step are ignored. direction : array_like, optional Projection direction in reduced coordinates. Default is None, i.e., no projection. dtype=float, shape=(3,) freq_min, freq_max : float, optional Phonon frequencies larger than freq_min and smaller than freq_max are included. Default is None, i.e., all phonons.
Below is the the instruction that describes the task: ### Input: Prepare thermal displacements calculation Parameters ---------- t_min, t_max, t_step : float, optional Minimum and maximum temperatures and the interval in this temperature range. Default valuues are 0, 1000, and 10. temperatures : array_like, optional Temperature points where thermal properties are calculated. When this is set, t_min, t_max, and t_step are ignored. direction : array_like, optional Projection direction in reduced coordinates. Default is None, i.e., no projection. dtype=float, shape=(3,) freq_min, freq_max : float, optional Phonon frequencies larger than freq_min and smaller than freq_max are included. Default is None, i.e., all phonons. ### Response: def run_thermal_displacements(self, t_min=0, t_max=1000, t_step=10, temperatures=None, direction=None, freq_min=None, freq_max=None): """Prepare thermal displacements calculation Parameters ---------- t_min, t_max, t_step : float, optional Minimum and maximum temperatures and the interval in this temperature range. Default valuues are 0, 1000, and 10. temperatures : array_like, optional Temperature points where thermal properties are calculated. When this is set, t_min, t_max, and t_step are ignored. direction : array_like, optional Projection direction in reduced coordinates. Default is None, i.e., no projection. dtype=float, shape=(3,) freq_min, freq_max : float, optional Phonon frequencies larger than freq_min and smaller than freq_max are included. Default is None, i.e., all phonons. """ if self._dynamical_matrix is None: msg = ("Dynamical matrix has not yet built.") raise RuntimeError(msg) if self._mesh is None: msg = ("run_mesh has to be done.") raise RuntimeError(msg) mesh_nums = self._mesh.mesh_numbers ir_grid_points = self._mesh.ir_grid_points if not self._mesh.with_eigenvectors: msg = ("run_mesh has to be done with with_eigenvectors=True.") raise RuntimeError(msg) if np.prod(mesh_nums) != len(ir_grid_points): msg = ("run_mesh has to be done with is_mesh_symmetry=False.") raise RuntimeError(msg) if direction is not None: projection_direction = np.dot(direction, self._primitive.get_cell()) td = ThermalDisplacements( self._mesh, projection_direction=projection_direction, freq_min=freq_min, freq_max=freq_max) else: td = ThermalDisplacements(self._mesh, freq_min=freq_min, freq_max=freq_max) if temperatures is None: td.set_temperature_range(t_min, t_max, t_step) else: td.set_temperatures(temperatures) td.run() self._thermal_displacements = td
async def get_edit(self, message=None, *, timeout=None): """ Awaits for an edit after the last message to arrive. The arguments are the same as those for `get_response`. """ start_time = time.time() target_id = self._get_message_id(message) target_date = self._edit_dates.get(target_id, 0) earliest_edit = min( (x for x in self._incoming if x.edit_date and x.id > target_id and x.edit_date.timestamp() > target_date ), key=lambda x: x.edit_date.timestamp(), default=None ) if earliest_edit and earliest_edit.edit_date.timestamp() > target_date: self._edit_dates[target_id] = earliest_edit.edit_date.timestamp() return earliest_edit # Otherwise the next incoming response will be the one to use future = asyncio.Future(loop=self._client.loop) self._pending_edits[target_id] = future return await self._get_result(future, start_time, timeout)
Waits for an edit after the last message to arrive. The arguments are the same as those for `get_response`.
Below is the instruction that describes the task: ### Input: Waits for an edit after the last message to arrive. The arguments are the same as those for `get_response`. ### Response: async def get_edit(self, message=None, *, timeout=None): """ Waits for an edit after the last message to arrive. The arguments are the same as those for `get_response`. """ start_time = time.time() target_id = self._get_message_id(message) target_date = self._edit_dates.get(target_id, 0) earliest_edit = min( (x for x in self._incoming if x.edit_date and x.id > target_id and x.edit_date.timestamp() > target_date ), key=lambda x: x.edit_date.timestamp(), default=None ) if earliest_edit and earliest_edit.edit_date.timestamp() > target_date: self._edit_dates[target_id] = earliest_edit.edit_date.timestamp() return earliest_edit # Otherwise the next incoming response will be the one to use future = asyncio.Future(loop=self._client.loop) self._pending_edits[target_id] = future return await self._get_result(future, start_time, timeout)
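The pending-future bookkeeping used above is a generic asyncio pattern; here is a self-contained sketch of it with no Telethon dependency (all names below are made up for illustration):

import asyncio

pending_edits = {}  # message id -> Future

async def wait_for_edit(msg_id, timeout=None):
    future = asyncio.get_running_loop().create_future()
    pending_edits[msg_id] = future
    return await asyncio.wait_for(future, timeout)

def on_edit_event(msg_id, edited_message):
    # Called by whatever receives incoming updates.
    future = pending_edits.pop(msg_id, None)
    if future is not None and not future.done():
        future.set_result(edited_message)

async def demo():
    waiter = asyncio.ensure_future(wait_for_edit(42, timeout=1))
    await asyncio.sleep(0)          # let the waiter register its future
    on_edit_event(42, 'edited!')    # simulate the incoming edit
    print(await waiter)             # -> edited!

asyncio.run(demo())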
def get_handler(self, handler_input, exception): # type: (Input, Exception) -> Union[AbstractExceptionHandler, None] """Get the exception handler that can handle the input and exception. :param handler_input: Generic input passed to the dispatcher. :type handler_input: Input :param exception: Exception thrown by :py:class:`ask_sdk_runtime.dispatch.GenericRequestDispatcher` dispatch method. :type exception: Exception :return: Exception Handler that can handle the input or None. :rtype: Union[None, ask_sdk_runtime.dispatch_components.exception_components.AbstractExceptionHandler] """ for handler in self.exception_handlers: if handler.can_handle( handler_input=handler_input, exception=exception): return handler return None
Get the exception handler that can handle the input and exception. :param handler_input: Generic input passed to the dispatcher. :type handler_input: Input :param exception: Exception thrown by :py:class:`ask_sdk_runtime.dispatch.GenericRequestDispatcher` dispatch method. :type exception: Exception :return: Exception Handler that can handle the input or None. :rtype: Union[None, ask_sdk_runtime.dispatch_components.exception_components.AbstractExceptionHandler]
Below is the the instruction that describes the task: ### Input: Get the exception handler that can handle the input and exception. :param handler_input: Generic input passed to the dispatcher. :type handler_input: Input :param exception: Exception thrown by :py:class:`ask_sdk_runtime.dispatch.GenericRequestDispatcher` dispatch method. :type exception: Exception :return: Exception Handler that can handle the input or None. :rtype: Union[None, ask_sdk_runtime.dispatch_components.exception_components.AbstractExceptionHandler] ### Response: def get_handler(self, handler_input, exception): # type: (Input, Exception) -> Union[AbstractExceptionHandler, None] """Get the exception handler that can handle the input and exception. :param handler_input: Generic input passed to the dispatcher. :type handler_input: Input :param exception: Exception thrown by :py:class:`ask_sdk_runtime.dispatch.GenericRequestDispatcher` dispatch method. :type exception: Exception :return: Exception Handler that can handle the input or None. :rtype: Union[None, ask_sdk_runtime.dispatch_components.exception_components.AbstractExceptionHandler] """ for handler in self.exception_handlers: if handler.can_handle( handler_input=handler_input, exception=exception): return handler return None
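The lookup loop above is a plain chain-of-responsibility; a hedged, SDK-independent sketch of the same idea (the handler classes here are invented and are not the ask_sdk ones):

class ValueErrorHandler:
    def can_handle(self, handler_input, exception):
        return isinstance(exception, ValueError)

    def handle(self, handler_input, exception):
        return "recovered from a ValueError: {}".format(exception)

class CatchAllHandler:
    def can_handle(self, handler_input, exception):
        return True

    def handle(self, handler_input, exception):
        return "something went wrong"

exception_handlers = [ValueErrorHandler(), CatchAllHandler()]

def get_handler(handler_input, exception):
    for handler in exception_handlers:
        if handler.can_handle(handler_input=handler_input, exception=exception):
            return handler
    return None

print(get_handler(None, ValueError("bad value")).handle(None, ValueError("bad value")))
print(get_handler(None, KeyError("x")).handle(None, KeyError("x")))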
def get_bucket(bucket_name, include_created=None, flags=FLAGS.ALL ^ FLAGS.CREATED_DATE, **conn): """ Orchestrates all the calls required to fully build out an S3 bucket in the following format: { "Arn": ..., "Name": ..., "Region": ..., "Owner": ..., "Grants": ..., "GrantReferences": ..., "LifecycleRules": ..., "Logging": ..., "Policy": ..., "Tags": ..., "Versioning": ..., "Website": ..., "Cors": ..., "Notifications": ..., "Acceleration": ..., "Replication": ..., "CreationDate": ..., "AnalyticsConfigurations": ..., "MetricsConfigurations": ..., "InventoryConfigurations": ..., "_version": 9 } NOTE: "GrantReferences" is an ephemeral field that is not guaranteed to be consistent -- do not base logic off of it :param include_created: legacy param moved to FLAGS. :param bucket_name: str bucket name :param flags: By default, set to ALL fields except for FLAGS.CREATED_DATE as obtaining that information is a slow and expensive process. :param conn: dict containing enough information to make a connection to the desired account. Must at least have 'assume_role' key. :return: dict containing a fully built out bucket. """ if type(include_created) is bool: # coerce the legacy param "include_created" into the flags param. if include_created: flags = flags | FLAGS.CREATED_DATE else: flags = flags & ~FLAGS.CREATED_DATE region = get_bucket_region(Bucket=bucket_name, **conn) if not region: return dict(Error='Unauthorized') conn['region'] = region return registry.build_out(flags, bucket_name, **conn)
Orchestrates all the calls required to fully build out an S3 bucket in the following format: { "Arn": ..., "Name": ..., "Region": ..., "Owner": ..., "Grants": ..., "GrantReferences": ..., "LifecycleRules": ..., "Logging": ..., "Policy": ..., "Tags": ..., "Versioning": ..., "Website": ..., "Cors": ..., "Notifications": ..., "Acceleration": ..., "Replication": ..., "CreationDate": ..., "AnalyticsConfigurations": ..., "MetricsConfigurations": ..., "InventoryConfigurations": ..., "_version": 9 } NOTE: "GrantReferences" is an ephemeral field that is not guaranteed to be consistent -- do not base logic off of it :param include_created: legacy param moved to FLAGS. :param bucket_name: str bucket name :param flags: By default, set to ALL fields except for FLAGS.CREATED_DATE as obtaining that information is a slow and expensive process. :param conn: dict containing enough information to make a connection to the desired account. Must at least have 'assume_role' key. :return: dict containing a fully built out bucket.
Below is the the instruction that describes the task: ### Input: Orchestrates all the calls required to fully build out an S3 bucket in the following format: { "Arn": ..., "Name": ..., "Region": ..., "Owner": ..., "Grants": ..., "GrantReferences": ..., "LifecycleRules": ..., "Logging": ..., "Policy": ..., "Tags": ..., "Versioning": ..., "Website": ..., "Cors": ..., "Notifications": ..., "Acceleration": ..., "Replication": ..., "CreationDate": ..., "AnalyticsConfigurations": ..., "MetricsConfigurations": ..., "InventoryConfigurations": ..., "_version": 9 } NOTE: "GrantReferences" is an ephemeral field that is not guaranteed to be consistent -- do not base logic off of it :param include_created: legacy param moved to FLAGS. :param bucket_name: str bucket name :param flags: By default, set to ALL fields except for FLAGS.CREATED_DATE as obtaining that information is a slow and expensive process. :param conn: dict containing enough information to make a connection to the desired account. Must at least have 'assume_role' key. :return: dict containing a fully built out bucket. ### Response: def get_bucket(bucket_name, include_created=None, flags=FLAGS.ALL ^ FLAGS.CREATED_DATE, **conn): """ Orchestrates all the calls required to fully build out an S3 bucket in the following format: { "Arn": ..., "Name": ..., "Region": ..., "Owner": ..., "Grants": ..., "GrantReferences": ..., "LifecycleRules": ..., "Logging": ..., "Policy": ..., "Tags": ..., "Versioning": ..., "Website": ..., "Cors": ..., "Notifications": ..., "Acceleration": ..., "Replication": ..., "CreationDate": ..., "AnalyticsConfigurations": ..., "MetricsConfigurations": ..., "InventoryConfigurations": ..., "_version": 9 } NOTE: "GrantReferences" is an ephemeral field that is not guaranteed to be consistent -- do not base logic off of it :param include_created: legacy param moved to FLAGS. :param bucket_name: str bucket name :param flags: By default, set to ALL fields except for FLAGS.CREATED_DATE as obtaining that information is a slow and expensive process. :param conn: dict containing enough information to make a connection to the desired account. Must at least have 'assume_role' key. :return: dict containing a fully built out bucket. """ if type(include_created) is bool: # coerce the legacy param "include_created" into the flags param. if include_created: flags = flags | FLAGS.CREATED_DATE else: flags = flags & ~FLAGS.CREATED_DATE region = get_bucket_region(Bucket=bucket_name, **conn) if not region: return dict(Error='Unauthorized') conn['region'] = region return registry.build_out(flags, bucket_name, **conn)
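The flags arithmetic above (FLAGS.ALL ^ FLAGS.CREATED_DATE, flags | ..., flags & ~...) is ordinary bitmask handling; a small sketch with a stand-in enum, since the real FLAGS object lives in the library and its member names are not reproduced here:

from enum import IntFlag

class FLAGS(IntFlag):
    BASE = 1
    GRANTS = 2
    CREATED_DATE = 4
    ALL = BASE | GRANTS | CREATED_DATE

flags = FLAGS.ALL ^ FLAGS.CREATED_DATE    # everything except the slow CreationDate lookup
print(bool(flags & FLAGS.CREATED_DATE))   # False -> not requested

flags = flags | FLAGS.CREATED_DATE        # legacy include_created=True
print(bool(flags & FLAGS.CREATED_DATE))   # True

flags = flags & ~FLAGS.CREATED_DATE       # legacy include_created=False
print(bool(flags & FLAGS.CREATED_DATE))   # False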
def to_bytes(self, frame, state): """ Convert a single frame into bytes that can be transmitted on the stream. :param frame: The frame to convert. Should be the same type of object returned by ``to_frame()``. :param state: An instance of ``FramerState``. This object may be used to track information across calls to the method. :returns: Bytes that may be transmitted on the stream. """ # Encode the frame and append the delimiter return six.binary_type(self.variant.encode( six.binary_type(frame))) + b'\0'
Convert a single frame into bytes that can be transmitted on the stream. :param frame: The frame to convert. Should be the same type of object returned by ``to_frame()``. :param state: An instance of ``FramerState``. This object may be used to track information across calls to the method. :returns: Bytes that may be transmitted on the stream.
Below is the instruction that describes the task: ### Input: Convert a single frame into bytes that can be transmitted on the stream. :param frame: The frame to convert. Should be the same type of object returned by ``to_frame()``. :param state: An instance of ``FramerState``. This object may be used to track information across calls to the method. :returns: Bytes that may be transmitted on the stream. ### Response: def to_bytes(self, frame, state): """ Convert a single frame into bytes that can be transmitted on the stream. :param frame: The frame to convert. Should be the same type of object returned by ``to_frame()``. :param state: An instance of ``FramerState``. This object may be used to track information across calls to the method. :returns: Bytes that may be transmitted on the stream. """ # Encode the frame and append the delimiter return six.binary_type(self.variant.encode( six.binary_type(frame))) + b'\0'
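A hedged sketch of the delimiter-based framing idea above, using base64 as a stand-in for the variant encoder (the real framer library's variant object is not reproduced here). Base64 output never contains a zero byte, which is what makes the b'\0' delimiter safe.

import base64

def frame_to_bytes(frame):
    # Encode so the payload cannot contain the delimiter, then append it.
    return base64.b64encode(frame) + b'\0'

def bytes_to_frames(buffer):
    # Split a received byte stream back into the original frames.
    return [base64.b64decode(chunk) for chunk in buffer.split(b'\0') if chunk]

wire = frame_to_bytes(b'hello') + frame_to_bytes(b'\x00world\x00')
print(bytes_to_frames(wire))  # [b'hello', b'\x00world\x00']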
def team_info(): """Returns a list of team information dictionaries""" teams = __get_league_object().find('teams').findall('team') output = [] for team in teams: info = {} for x in team.attrib: info[x] = team.attrib[x] output.append(info) return output
Returns a list of team information dictionaries
Below is the instruction that describes the task: ### Input: Returns a list of team information dictionaries ### Response: def team_info(): """Returns a list of team information dictionaries""" teams = __get_league_object().find('teams').findall('team') output = [] for team in teams: info = {} for x in team.attrib: info[x] = team.attrib[x] output.append(info) return output
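A self-contained sketch of the attribute-harvesting pattern above, using xml.etree.ElementTree on a tiny invented <teams> document (the real feed's schema and the __get_league_object helper are assumptions):

import xml.etree.ElementTree as ET

xml_doc = """
<league>
  <teams>
    <team id="1" name="Falcons" city="Springfield"/>
    <team id="2" name="Otters" city="Shelbyville"/>
  </teams>
</league>
"""

root = ET.fromstring(xml_doc)
teams = root.find('teams').findall('team')
output = [dict(team.attrib) for team in teams]
print(output)
# [{'id': '1', 'name': 'Falcons', 'city': 'Springfield'},
#  {'id': '2', 'name': 'Otters', 'city': 'Shelbyville'}]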
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types): """ See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values. """ sites.vs30 = 600 * np.ones(len(sites.vs30)) mean, stddevs = super(AkkarBommer2010SWISS01, self).\ get_mean_and_stddevs(sites, rup, dists, imt, stddev_types) tau_ss = 'tau' log_phi_ss = np.log(10) mean, stddevs = _apply_adjustments( AkkarBommer2010.COEFFS, self.COEFFS_FS_ROCK[imt], tau_ss, mean, stddevs, sites, rup, dists.rjb, imt, stddev_types, log_phi_ss) return mean, np.log(10 ** np.array(stddevs))
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
Below is the the instruction that describes the task: ### Input: See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values. ### Response: def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types): """ See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values. """ sites.vs30 = 600 * np.ones(len(sites.vs30)) mean, stddevs = super(AkkarBommer2010SWISS01, self).\ get_mean_and_stddevs(sites, rup, dists, imt, stddev_types) tau_ss = 'tau' log_phi_ss = np.log(10) mean, stddevs = _apply_adjustments( AkkarBommer2010.COEFFS, self.COEFFS_FS_ROCK[imt], tau_ss, mean, stddevs, sites, rup, dists.rjb, imt, stddev_types, log_phi_ss) return mean, np.log(10 ** np.array(stddevs))
def get(cls, uni_char): """Return the general category code (as Unicode string) for the given Unicode character""" uni_char = unicod(uni_char) # Force to Unicode return unicod(unicodedata.category(uni_char))
Return the general category code (as Unicode string) for the given Unicode character
Below is the instruction that describes the task: ### Input: Return the general category code (as Unicode string) for the given Unicode character ### Response: def get(cls, uni_char): """Return the general category code (as Unicode string) for the given Unicode character""" uni_char = unicod(uni_char) # Force to Unicode return unicod(unicodedata.category(uni_char))
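The wrapper above ultimately defers to unicodedata.category; a quick doctest-style reminder of the two-letter codes it returns:

>>> import unicodedata
>>> unicodedata.category('A')
'Lu'
>>> unicodedata.category('7')
'Nd'
>>> unicodedata.category('é')
'Ll'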
def predict(self, y, t=None, return_cov=True, return_var=False): """ Compute the conditional predictive distribution of the model You must call :func:`GP.compute` before this method. Args: y (array[n]): The observations at coordinates ``x`` from :func:`GP.compute`. t (Optional[array[ntest]]): The independent coordinates where the prediction should be made. If this is omitted the coordinates will be assumed to be ``x`` from :func:`GP.compute` and an efficient method will be used to compute the prediction. return_cov (Optional[bool]): If ``True``, the full covariance matrix is computed and returned. Otherwise, only the mean prediction is computed. (default: ``True``) return_var (Optional[bool]): If ``True``, only return the diagonal of the predictive covariance; this will be faster to compute than the full covariance matrix. This overrides ``return_cov`` so, if both are set to ``True``, only the diagonal is computed. (default: ``False``) Returns: ``mu``, ``(mu, cov)``, or ``(mu, var)`` depending on the values of ``return_cov`` and ``return_var``. These output values are: (a) **mu** ``(ntest,)``: mean of the predictive distribution, (b) **cov** ``(ntest, ntest)``: the predictive covariance matrix, and (c) **var** ``(ntest,)``: the diagonal elements of ``cov``. Raises: ValueError: For mismatched dimensions. """ y = self._process_input(y) if len(y.shape) > 1: raise ValueError("dimension mismatch") if t is None: xs = self._t else: xs = np.ascontiguousarray(t, dtype=float) if len(xs.shape) > 1: raise ValueError("dimension mismatch") # Make sure that the model is computed self._recompute() # Compute the predictive mean. resid = y - self.mean.get_value(self._t) if t is None: alpha = self.solver.solve(resid).flatten() alpha = resid - (self._yerr**2 + self.kernel.jitter) * alpha elif not len(self._A): alpha = self.solver.predict(resid, xs) else: Kxs = self.get_matrix(xs, self._t) alpha = np.dot(Kxs, alpha) mu = self.mean.get_value(xs) + alpha if not (return_var or return_cov): return mu # Predictive variance. Kxs = self.get_matrix(xs, self._t) KxsT = np.ascontiguousarray(Kxs.T, dtype=np.float64) if return_var: var = -np.sum(KxsT*self.apply_inverse(KxsT), axis=0) var += self.kernel.get_value(0.0) return mu, var # Predictive covariance cov = self.kernel.get_value(xs[:, None] - xs[None, :]) cov -= np.dot(Kxs, self.apply_inverse(KxsT)) return mu, cov
Compute the conditional predictive distribution of the model You must call :func:`GP.compute` before this method. Args: y (array[n]): The observations at coordinates ``x`` from :func:`GP.compute`. t (Optional[array[ntest]]): The independent coordinates where the prediction should be made. If this is omitted the coordinates will be assumed to be ``x`` from :func:`GP.compute` and an efficient method will be used to compute the prediction. return_cov (Optional[bool]): If ``True``, the full covariance matrix is computed and returned. Otherwise, only the mean prediction is computed. (default: ``True``) return_var (Optional[bool]): If ``True``, only return the diagonal of the predictive covariance; this will be faster to compute than the full covariance matrix. This overrides ``return_cov`` so, if both are set to ``True``, only the diagonal is computed. (default: ``False``) Returns: ``mu``, ``(mu, cov)``, or ``(mu, var)`` depending on the values of ``return_cov`` and ``return_var``. These output values are: (a) **mu** ``(ntest,)``: mean of the predictive distribution, (b) **cov** ``(ntest, ntest)``: the predictive covariance matrix, and (c) **var** ``(ntest,)``: the diagonal elements of ``cov``. Raises: ValueError: For mismatched dimensions.
Below is the the instruction that describes the task: ### Input: Compute the conditional predictive distribution of the model You must call :func:`GP.compute` before this method. Args: y (array[n]): The observations at coordinates ``x`` from :func:`GP.compute`. t (Optional[array[ntest]]): The independent coordinates where the prediction should be made. If this is omitted the coordinates will be assumed to be ``x`` from :func:`GP.compute` and an efficient method will be used to compute the prediction. return_cov (Optional[bool]): If ``True``, the full covariance matrix is computed and returned. Otherwise, only the mean prediction is computed. (default: ``True``) return_var (Optional[bool]): If ``True``, only return the diagonal of the predictive covariance; this will be faster to compute than the full covariance matrix. This overrides ``return_cov`` so, if both are set to ``True``, only the diagonal is computed. (default: ``False``) Returns: ``mu``, ``(mu, cov)``, or ``(mu, var)`` depending on the values of ``return_cov`` and ``return_var``. These output values are: (a) **mu** ``(ntest,)``: mean of the predictive distribution, (b) **cov** ``(ntest, ntest)``: the predictive covariance matrix, and (c) **var** ``(ntest,)``: the diagonal elements of ``cov``. Raises: ValueError: For mismatched dimensions. ### Response: def predict(self, y, t=None, return_cov=True, return_var=False): """ Compute the conditional predictive distribution of the model You must call :func:`GP.compute` before this method. Args: y (array[n]): The observations at coordinates ``x`` from :func:`GP.compute`. t (Optional[array[ntest]]): The independent coordinates where the prediction should be made. If this is omitted the coordinates will be assumed to be ``x`` from :func:`GP.compute` and an efficient method will be used to compute the prediction. return_cov (Optional[bool]): If ``True``, the full covariance matrix is computed and returned. Otherwise, only the mean prediction is computed. (default: ``True``) return_var (Optional[bool]): If ``True``, only return the diagonal of the predictive covariance; this will be faster to compute than the full covariance matrix. This overrides ``return_cov`` so, if both are set to ``True``, only the diagonal is computed. (default: ``False``) Returns: ``mu``, ``(mu, cov)``, or ``(mu, var)`` depending on the values of ``return_cov`` and ``return_var``. These output values are: (a) **mu** ``(ntest,)``: mean of the predictive distribution, (b) **cov** ``(ntest, ntest)``: the predictive covariance matrix, and (c) **var** ``(ntest,)``: the diagonal elements of ``cov``. Raises: ValueError: For mismatched dimensions. """ y = self._process_input(y) if len(y.shape) > 1: raise ValueError("dimension mismatch") if t is None: xs = self._t else: xs = np.ascontiguousarray(t, dtype=float) if len(xs.shape) > 1: raise ValueError("dimension mismatch") # Make sure that the model is computed self._recompute() # Compute the predictive mean. resid = y - self.mean.get_value(self._t) if t is None: alpha = self.solver.solve(resid).flatten() alpha = resid - (self._yerr**2 + self.kernel.jitter) * alpha elif not len(self._A): alpha = self.solver.predict(resid, xs) else: Kxs = self.get_matrix(xs, self._t) alpha = np.dot(Kxs, alpha) mu = self.mean.get_value(xs) + alpha if not (return_var or return_cov): return mu # Predictive variance. 
Kxs = self.get_matrix(xs, self._t) KxsT = np.ascontiguousarray(Kxs.T, dtype=np.float64) if return_var: var = -np.sum(KxsT*self.apply_inverse(KxsT), axis=0) var += self.kernel.get_value(0.0) return mu, var # Predictive covariance cov = self.kernel.get_value(xs[:, None] - xs[None, :]) cov -= np.dot(Kxs, self.apply_inverse(KxsT)) return mu, cov
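For reference, the quantities assembled above are the usual Gaussian-process conditionals. A dense NumPy sketch of the same algebra follows; it assumes a zero mean function, uses no celerite solver (so it is O(n^3) and small-n only), and the kernel and data are invented.

import numpy as np

def dense_gp_predict(x, y, yerr, xs, kernel):
    K = kernel(x[:, None] - x[None, :]) + np.diag(yerr ** 2)
    Kxs = kernel(xs[:, None] - x[None, :])
    mu = Kxs @ np.linalg.solve(K, y)                                  # predictive mean
    cov = kernel(xs[:, None] - xs[None, :]) - Kxs @ np.linalg.solve(K, Kxs.T)
    return mu, cov

kernel = lambda tau: np.exp(-0.5 * tau ** 2)   # toy stationary kernel k(tau)
x = np.linspace(0, 5, 20)
y = np.sin(x) + 0.1 * np.random.randn(20)
mu, cov = dense_gp_predict(x, y, 0.1 * np.ones(20), np.linspace(0, 5, 50), kernel)
print(mu.shape, cov.shape)                     # (50,) (50, 50)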
def _top_element(self): """Returns top XML element.""" top = etree.Element("testsuites") comment = etree.Comment("Generated for testrun {}".format(self.testrun_id)) top.append(comment) return top
Returns top XML element.
Below is the instruction that describes the task: ### Input: Returns top XML element. ### Response: def _top_element(self): """Returns top XML element.""" top = etree.Element("testsuites") comment = etree.Comment("Generated for testrun {}".format(self.testrun_id)) top.append(comment) return top
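The original may be using lxml.etree, but the stdlib ElementTree shows the same Element/Comment construction:

import xml.etree.ElementTree as ET

top = ET.Element("testsuites")
top.append(ET.Comment("Generated for testrun 1234"))
print(ET.tostring(top).decode())
# <testsuites><!--Generated for testrun 1234--></testsuites>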
def do_init(self, fs_settings, global_quota): fs_settings = deepcopy(fs_settings) # because we store some of the info, we need a deep copy ''' If the same restrictions are applied for many destinations, we use the same job to avoid processing files twice ''' for sender_spec in fs_settings.sender_specs: restrictions = sender_spec.restrictions if restrictions in self.restriction_to_job: self.restriction_to_job[restrictions].add_destinations(sender_spec.destinations) else: compressor = _CompressorJob( next_task=self.get_next_task(), sender_spec=sender_spec, tmp_file_parts_basepath=fs_settings.tmp_file_parts_basepath, should_split_small_files=fs_settings.should_split_small_files, global_quota=global_quota) self.restriction_to_job[restrictions] = compressor compressor.register(self)
If the same restrictions are applied for many destinations, we use the same job to avoid processing files twice
Below is the the instruction that describes the task: ### Input: If the same restrictions are applied for many destinations, we use the same job to avoid processing files twice ### Response: def do_init(self, fs_settings, global_quota): fs_settings = deepcopy(fs_settings) # because we store some of the info, we need a deep copy ''' If the same restrictions are applied for many destinations, we use the same job to avoid processing files twice ''' for sender_spec in fs_settings.sender_specs: restrictions = sender_spec.restrictions if restrictions in self.restriction_to_job: self.restriction_to_job[restrictions].add_destinations(sender_spec.destinations) else: compressor = _CompressorJob( next_task=self.get_next_task(), sender_spec=sender_spec, tmp_file_parts_basepath=fs_settings.tmp_file_parts_basepath, should_split_small_files=fs_settings.should_split_small_files, global_quota=global_quota) self.restriction_to_job[restrictions] = compressor compressor.register(self)
def task_list( limit, filter_task_id, filter_status, filter_type, filter_label, filter_not_label, inexact, filter_requested_after, filter_requested_before, filter_completed_after, filter_completed_before, ): """ Executor for `globus task-list` """ def _process_filterval(prefix, value, default=None): if value: if isinstance(value, six.string_types): return "{}:{}/".format(prefix, value) return "{}:{}/".format(prefix, ",".join(str(x) for x in value)) else: return default or "" # make filter string filter_string = "" filter_string += _process_filterval("task_id", filter_task_id) filter_string += _process_filterval("status", filter_status) filter_string += _process_filterval( "type", filter_type, default="type:TRANSFER,DELETE/" ) # combine data into one list for easier processing if inexact: label_data = ["~" + s for s in filter_label] + [ "!~" + s for s in filter_not_label ] else: label_data = ["=" + s for s in filter_label] + [ "!" + s for s in filter_not_label ] filter_string += _process_filterval("label", label_data) filter_string += _process_filterval( "request_time", [(filter_requested_after or ""), (filter_requested_before or "")], ) filter_string += _process_filterval( "completion_time", [(filter_completed_after or ""), (filter_completed_before or "")], ) client = get_client() task_iterator = client.task_list( num_results=limit, filter=filter_string[:-1] ) # ignore trailing / fields = [ ("Task ID", "task_id"), ("Status", "status"), ("Type", "type"), ("Source Display Name", "source_endpoint_display_name"), ("Dest Display Name", "destination_endpoint_display_name"), ("Label", "label"), ] formatted_print( task_iterator, fields=fields, json_converter=iterable_response_to_dict )
Executor for `globus task-list`
Below is the the instruction that describes the task: ### Input: Executor for `globus task-list` ### Response: def task_list( limit, filter_task_id, filter_status, filter_type, filter_label, filter_not_label, inexact, filter_requested_after, filter_requested_before, filter_completed_after, filter_completed_before, ): """ Executor for `globus task-list` """ def _process_filterval(prefix, value, default=None): if value: if isinstance(value, six.string_types): return "{}:{}/".format(prefix, value) return "{}:{}/".format(prefix, ",".join(str(x) for x in value)) else: return default or "" # make filter string filter_string = "" filter_string += _process_filterval("task_id", filter_task_id) filter_string += _process_filterval("status", filter_status) filter_string += _process_filterval( "type", filter_type, default="type:TRANSFER,DELETE/" ) # combine data into one list for easier processing if inexact: label_data = ["~" + s for s in filter_label] + [ "!~" + s for s in filter_not_label ] else: label_data = ["=" + s for s in filter_label] + [ "!" + s for s in filter_not_label ] filter_string += _process_filterval("label", label_data) filter_string += _process_filterval( "request_time", [(filter_requested_after or ""), (filter_requested_before or "")], ) filter_string += _process_filterval( "completion_time", [(filter_completed_after or ""), (filter_completed_before or "")], ) client = get_client() task_iterator = client.task_list( num_results=limit, filter=filter_string[:-1] ) # ignore trailing / fields = [ ("Task ID", "task_id"), ("Status", "status"), ("Type", "type"), ("Source Display Name", "source_endpoint_display_name"), ("Dest Display Name", "destination_endpoint_display_name"), ("Label", "label"), ] formatted_print( task_iterator, fields=fields, json_converter=iterable_response_to_dict )
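To make the filter-string assembly above concrete, a standalone run of the same _process_filterval logic with invented values (simplified to Python 3 str, dropping the six shim):

def process_filterval(prefix, value, default=None):
    if value:
        if isinstance(value, str):
            return "{}:{}/".format(prefix, value)
        return "{}:{}/".format(prefix, ",".join(str(x) for x in value))
    return default or ""

filter_string = ""
filter_string += process_filterval("status", ["ACTIVE", "FAILED"])
filter_string += process_filterval("type", None, default="type:TRANSFER,DELETE/")
filter_string += process_filterval("label", ["=nightly-sync"])
print(filter_string[:-1])  # status:ACTIVE,FAILED/type:TRANSFER,DELETE/label:=nightly-sync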
def run_powerflow(self, session, method='onthefly', export_pypsa=False, debug=False): """ Performs power flow calculation for all MV grids Args: session : sqlalchemy.orm.session.Session Database session method: str Specify export method If method='db' grid data will be exported to database If method='onthefly' grid data will be passed to PyPSA directly (default) export_pypsa: bool If True PyPSA networks will be exported as csv to output/debug/grid/<MV-GRID_NAME>/ debug: bool, defaults to False If True, information is printed during process """ if method == 'db': # Empty tables pypsa_io.delete_powerflow_tables(session) for grid_district in self.mv_grid_districts(): if export_pypsa: export_pypsa_dir = repr(grid_district.mv_grid) else: export_pypsa_dir = None grid_district.mv_grid.run_powerflow(session, method='db', export_pypsa_dir=export_pypsa_dir, debug=debug) elif method == 'onthefly': for grid_district in self.mv_grid_districts(): if export_pypsa: export_pypsa_dir = repr(grid_district.mv_grid) else: export_pypsa_dir = None grid_district.mv_grid.run_powerflow(session, method='onthefly', export_pypsa_dir=export_pypsa_dir, debug=debug)
Performs power flow calculation for all MV grids Args: session : sqlalchemy.orm.session.Session Database session method: str Specify export method If method='db' grid data will be exported to database If method='onthefly' grid data will be passed to PyPSA directly (default) export_pypsa: bool If True PyPSA networks will be exported as csv to output/debug/grid/<MV-GRID_NAME>/ debug: bool, defaults to False If True, information is printed during process
Below is the the instruction that describes the task: ### Input: Performs power flow calculation for all MV grids Args: session : sqlalchemy.orm.session.Session Database session method: str Specify export method If method='db' grid data will be exported to database If method='onthefly' grid data will be passed to PyPSA directly (default) export_pypsa: bool If True PyPSA networks will be exported as csv to output/debug/grid/<MV-GRID_NAME>/ debug: bool, defaults to False If True, information is printed during process ### Response: def run_powerflow(self, session, method='onthefly', export_pypsa=False, debug=False): """ Performs power flow calculation for all MV grids Args: session : sqlalchemy.orm.session.Session Database session method: str Specify export method If method='db' grid data will be exported to database If method='onthefly' grid data will be passed to PyPSA directly (default) export_pypsa: bool If True PyPSA networks will be exported as csv to output/debug/grid/<MV-GRID_NAME>/ debug: bool, defaults to False If True, information is printed during process """ if method == 'db': # Empty tables pypsa_io.delete_powerflow_tables(session) for grid_district in self.mv_grid_districts(): if export_pypsa: export_pypsa_dir = repr(grid_district.mv_grid) else: export_pypsa_dir = None grid_district.mv_grid.run_powerflow(session, method='db', export_pypsa_dir=export_pypsa_dir, debug=debug) elif method == 'onthefly': for grid_district in self.mv_grid_districts(): if export_pypsa: export_pypsa_dir = repr(grid_district.mv_grid) else: export_pypsa_dir = None grid_district.mv_grid.run_powerflow(session, method='onthefly', export_pypsa_dir=export_pypsa_dir, debug=debug)
def join(self): """ Waits until the state finished execution. """ if self.thread: self.thread.join() self.thread = None else: logger.debug("Cannot join {0}, as the state hasn't been started, yet or is already finished!".format(self))
Waits until the state finished execution.
Below is the instruction that describes the task: ### Input: Waits until the state finished execution. ### Response: def join(self): """ Waits until the state finished execution. """ if self.thread: self.thread.join() self.thread = None else: logger.debug("Cannot join {0}, as the state hasn't been started, yet or is already finished!".format(self))
def stop(ctx, **kwargs): """ stop a vaping process """ update_context(ctx, kwargs) daemon = mk_daemon(ctx) daemon.stop()
stop a vaping process
Below is the instruction that describes the task: ### Input: stop a vaping process ### Response: def stop(ctx, **kwargs): """ stop a vaping process """ update_context(ctx, kwargs) daemon = mk_daemon(ctx) daemon.stop()
def create_certificate(self, cert_info, request=False, valid_from=0, valid_to=315360000, sn=1, key_length=1024, hash_alg="sha256", write_to_file=False, cert_dir="", cipher_passphrase=None): """ Can create certificate requests, to be signed later by another certificate with the method create_cert_signed_certificate. If request is True. Can also create self signed root certificates if request is False. This is default behaviour. :param cert_info: Contains information about the certificate. Is a dictionary that must contain the keys: cn = Common name. This part must match the host being authenticated country_code = Two letter description of the country. state = State city = City organization = Organization, can be a company name. organization_unit = A unit at the organization, can be a department. Example: cert_info_ca = { "cn": "company.com", "country_code": "se", "state": "AC", "city": "Dorotea", "organization": "Company", "organization_unit": "Sales" } :param request: True if this is a request for certificate, that should be signed. False if this is a self signed certificate, root certificate. :param valid_from: When the certificate starts to be valid. Amount of seconds from when the certificate is generated. :param valid_to: How long the certificate will be valid from when it is generated. The value is in seconds. Default is 315360000 seconds, a.k.a 10 years. :param sn: Serial number for the certificate. Default is 1. :param key_length: Length of the key to be generated. Defaults to 1024. :param hash_alg: Hash algorithm to use for the key. Default is sha256. :param write_to_file: True if you want to write the certificate to a file. The method will then return a tuple with path to certificate file and path to key file. False if you want to get the result as strings. The method will then return a tuple with the certificate string and the key as string. WILL OVERWRITE ALL EXISTING FILES WITHOUT ASKING! :param cert_dir: Where to save the files if write_to_file is true. :param cipher_passphrase A dictionary with cipher and passphrase. 
Example:: {"cipher": "blowfish", "passphrase": "qwerty"} :return: string representation of certificate, string representation of private key if write_to_file parameter is False otherwise path to certificate file, path to private key file """ cn = cert_info["cn"] c_f = None k_f = None if write_to_file: cert_file = "%s.crt" % cn key_file = "%s.key" % cn try: remove(cert_file) except: pass try: remove(key_file) except: pass c_f = join(cert_dir, cert_file) k_f = join(cert_dir, key_file) # create a key pair k = crypto.PKey() k.generate_key(crypto.TYPE_RSA, key_length) # create a self-signed cert cert = crypto.X509() if request: cert = crypto.X509Req() if (len(cert_info["country_code"]) != 2): raise WrongInput("Country code must be two letters!") cert.get_subject().C = cert_info["country_code"] cert.get_subject().ST = cert_info["state"] cert.get_subject().L = cert_info["city"] cert.get_subject().O = cert_info["organization"] cert.get_subject().OU = cert_info["organization_unit"] cert.get_subject().CN = cn if not request: cert.set_serial_number(sn) cert.gmtime_adj_notBefore(valid_from) #Valid before present time cert.gmtime_adj_notAfter(valid_to) #3 650 days cert.set_issuer(cert.get_subject()) cert.set_pubkey(k) cert.sign(k, hash_alg) try: if request: tmp_cert = crypto.dump_certificate_request(crypto.FILETYPE_PEM, cert) else: tmp_cert = crypto.dump_certificate(crypto.FILETYPE_PEM, cert) tmp_key = None if cipher_passphrase is not None: passphrase = cipher_passphrase["passphrase"] if isinstance(cipher_passphrase["passphrase"], six.string_types): passphrase = passphrase.encode('utf-8') tmp_key = crypto.dump_privatekey(crypto.FILETYPE_PEM, k, cipher_passphrase["cipher"], passphrase) else: tmp_key = crypto.dump_privatekey(crypto.FILETYPE_PEM, k) if write_to_file: with open(c_f, 'wt') as fc: fc.write(tmp_cert.decode('utf-8')) with open(k_f, 'wt') as fk: fk.write(tmp_key.decode('utf-8')) return c_f, k_f return tmp_cert, tmp_key except Exception as ex: raise CertificateError("Certificate cannot be generated.", ex)
Can create certificate requests, to be signed later by another certificate with the method create_cert_signed_certificate. If request is True. Can also create self signed root certificates if request is False. This is default behaviour. :param cert_info: Contains information about the certificate. Is a dictionary that must contain the keys: cn = Common name. This part must match the host being authenticated country_code = Two letter description of the country. state = State city = City organization = Organization, can be a company name. organization_unit = A unit at the organization, can be a department. Example: cert_info_ca = { "cn": "company.com", "country_code": "se", "state": "AC", "city": "Dorotea", "organization": "Company", "organization_unit": "Sales" } :param request: True if this is a request for certificate, that should be signed. False if this is a self signed certificate, root certificate. :param valid_from: When the certificate starts to be valid. Amount of seconds from when the certificate is generated. :param valid_to: How long the certificate will be valid from when it is generated. The value is in seconds. Default is 315360000 seconds, a.k.a 10 years. :param sn: Serial number for the certificate. Default is 1. :param key_length: Length of the key to be generated. Defaults to 1024. :param hash_alg: Hash algorithm to use for the key. Default is sha256. :param write_to_file: True if you want to write the certificate to a file. The method will then return a tuple with path to certificate file and path to key file. False if you want to get the result as strings. The method will then return a tuple with the certificate string and the key as string. WILL OVERWRITE ALL EXISTING FILES WITHOUT ASKING! :param cert_dir: Where to save the files if write_to_file is true. :param cipher_passphrase A dictionary with cipher and passphrase. Example:: {"cipher": "blowfish", "passphrase": "qwerty"} :return: string representation of certificate, string representation of private key if write_to_file parameter is False otherwise path to certificate file, path to private key file
Below is the the instruction that describes the task: ### Input: Can create certificate requests, to be signed later by another certificate with the method create_cert_signed_certificate. If request is True. Can also create self signed root certificates if request is False. This is default behaviour. :param cert_info: Contains information about the certificate. Is a dictionary that must contain the keys: cn = Common name. This part must match the host being authenticated country_code = Two letter description of the country. state = State city = City organization = Organization, can be a company name. organization_unit = A unit at the organization, can be a department. Example: cert_info_ca = { "cn": "company.com", "country_code": "se", "state": "AC", "city": "Dorotea", "organization": "Company", "organization_unit": "Sales" } :param request: True if this is a request for certificate, that should be signed. False if this is a self signed certificate, root certificate. :param valid_from: When the certificate starts to be valid. Amount of seconds from when the certificate is generated. :param valid_to: How long the certificate will be valid from when it is generated. The value is in seconds. Default is 315360000 seconds, a.k.a 10 years. :param sn: Serial number for the certificate. Default is 1. :param key_length: Length of the key to be generated. Defaults to 1024. :param hash_alg: Hash algorithm to use for the key. Default is sha256. :param write_to_file: True if you want to write the certificate to a file. The method will then return a tuple with path to certificate file and path to key file. False if you want to get the result as strings. The method will then return a tuple with the certificate string and the key as string. WILL OVERWRITE ALL EXISTING FILES WITHOUT ASKING! :param cert_dir: Where to save the files if write_to_file is true. :param cipher_passphrase A dictionary with cipher and passphrase. Example:: {"cipher": "blowfish", "passphrase": "qwerty"} :return: string representation of certificate, string representation of private key if write_to_file parameter is False otherwise path to certificate file, path to private key file ### Response: def create_certificate(self, cert_info, request=False, valid_from=0, valid_to=315360000, sn=1, key_length=1024, hash_alg="sha256", write_to_file=False, cert_dir="", cipher_passphrase=None): """ Can create certificate requests, to be signed later by another certificate with the method create_cert_signed_certificate. If request is True. Can also create self signed root certificates if request is False. This is default behaviour. :param cert_info: Contains information about the certificate. Is a dictionary that must contain the keys: cn = Common name. This part must match the host being authenticated country_code = Two letter description of the country. state = State city = City organization = Organization, can be a company name. organization_unit = A unit at the organization, can be a department. Example: cert_info_ca = { "cn": "company.com", "country_code": "se", "state": "AC", "city": "Dorotea", "organization": "Company", "organization_unit": "Sales" } :param request: True if this is a request for certificate, that should be signed. False if this is a self signed certificate, root certificate. :param valid_from: When the certificate starts to be valid. Amount of seconds from when the certificate is generated. :param valid_to: How long the certificate will be valid from when it is generated. The value is in seconds. 
Default is 315360000 seconds, a.k.a 10 years. :param sn: Serial number for the certificate. Default is 1. :param key_length: Length of the key to be generated. Defaults to 1024. :param hash_alg: Hash algorithm to use for the key. Default is sha256. :param write_to_file: True if you want to write the certificate to a file. The method will then return a tuple with path to certificate file and path to key file. False if you want to get the result as strings. The method will then return a tuple with the certificate string and the key as string. WILL OVERWRITE ALL EXISTING FILES WITHOUT ASKING! :param cert_dir: Where to save the files if write_to_file is true. :param cipher_passphrase A dictionary with cipher and passphrase. Example:: {"cipher": "blowfish", "passphrase": "qwerty"} :return: string representation of certificate, string representation of private key if write_to_file parameter is False otherwise path to certificate file, path to private key file """ cn = cert_info["cn"] c_f = None k_f = None if write_to_file: cert_file = "%s.crt" % cn key_file = "%s.key" % cn try: remove(cert_file) except: pass try: remove(key_file) except: pass c_f = join(cert_dir, cert_file) k_f = join(cert_dir, key_file) # create a key pair k = crypto.PKey() k.generate_key(crypto.TYPE_RSA, key_length) # create a self-signed cert cert = crypto.X509() if request: cert = crypto.X509Req() if (len(cert_info["country_code"]) != 2): raise WrongInput("Country code must be two letters!") cert.get_subject().C = cert_info["country_code"] cert.get_subject().ST = cert_info["state"] cert.get_subject().L = cert_info["city"] cert.get_subject().O = cert_info["organization"] cert.get_subject().OU = cert_info["organization_unit"] cert.get_subject().CN = cn if not request: cert.set_serial_number(sn) cert.gmtime_adj_notBefore(valid_from) #Valid before present time cert.gmtime_adj_notAfter(valid_to) #3 650 days cert.set_issuer(cert.get_subject()) cert.set_pubkey(k) cert.sign(k, hash_alg) try: if request: tmp_cert = crypto.dump_certificate_request(crypto.FILETYPE_PEM, cert) else: tmp_cert = crypto.dump_certificate(crypto.FILETYPE_PEM, cert) tmp_key = None if cipher_passphrase is not None: passphrase = cipher_passphrase["passphrase"] if isinstance(cipher_passphrase["passphrase"], six.string_types): passphrase = passphrase.encode('utf-8') tmp_key = crypto.dump_privatekey(crypto.FILETYPE_PEM, k, cipher_passphrase["cipher"], passphrase) else: tmp_key = crypto.dump_privatekey(crypto.FILETYPE_PEM, k) if write_to_file: with open(c_f, 'wt') as fc: fc.write(tmp_cert.decode('utf-8')) with open(k_f, 'wt') as fk: fk.write(tmp_key.decode('utf-8')) return c_f, k_f return tmp_cert, tmp_key except Exception as ex: raise CertificateError("Certificate cannot be generated.", ex)
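A minimal usage sketch for the record above. It assumes `osc` is an instance of the certificate-handling class (not shown in this record) that defines create_certificate; the cert_info keys follow the docstring's own example and everything else uses the documented defaults.

# Hypothetical usage sketch: `osc` is assumed to be an instance of the class
# (not shown in this record) that defines create_certificate.
cert_info_ca = {
    "cn": "company.com",          # common name, must match the host
    "country_code": "se",         # exactly two letters, otherwise WrongInput is raised
    "state": "AC",
    "city": "Dorotea",
    "organization": "Company",
    "organization_unit": "Sales",
}

# Self-signed root certificate returned as PEM strings (write_to_file=False).
ca_cert_pem, ca_key_pem = osc.create_certificate(cert_info_ca, request=False,
                                                 key_length=2048)

# Certificate signing request written to disk instead (write_to_file=True).
csr_path, key_path = osc.create_certificate(cert_info_ca, request=True,
                                            write_to_file=True, cert_dir="/tmp")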
def _access(self):  # pragma: no cover
    """
    Get the HTTP code status.

    :return: The matched HTTP status code.
    :rtype: int|None
    """

    try:
        # We try to get the HTTP status code.

        if PyFunceble.INTERN["to_test_type"] == "url":
            # We are globally testing a URL.

            # We get the head of the URL.
            req = PyFunceble.requests.head(
                self.to_get,
                timeout=PyFunceble.CONFIGURATION["seconds_before_http_timeout"],
                headers=self.headers,
                verify=PyFunceble.CONFIGURATION["verify_ssl_certificate"],
            )
        else:
            # We are not globally testing a URL.

            # We get the head of the constructed URL.
            req = PyFunceble.requests.head(
                self.to_get,
                timeout=PyFunceble.CONFIGURATION["seconds_before_http_timeout"],
                headers=self.headers,
            )

        # And we try to get the status code.
        return req.status_code
    except (
        PyFunceble.requests.exceptions.InvalidURL,
        PyFunceble.socket.timeout,
        PyFunceble.requests.exceptions.Timeout,
        PyFunceble.requests.ConnectionError,
        urllib3_exceptions.InvalidHeader,
        UnicodeDecodeError,  # The probability that this happend in production is minimal.
    ):
        # If one of the listed exception is matched, that means that something
        # went wrong and we were unable to extract the status code.

        # We return None.
        return None
Get the HTTP code status. :return: The matched HTTP status code. :rtype: int|None
Below is the the instruction that describes the task: ### Input: Get the HTTP code status. :return: The matched HTTP status code. :rtype: int|None ### Response: def _access(self): # pragma: no cover """ Get the HTTP code status. :return: The matched HTTP status code. :rtype: int|None """ try: # We try to get the HTTP status code. if PyFunceble.INTERN["to_test_type"] == "url": # We are globally testing a URL. # We get the head of the URL. req = PyFunceble.requests.head( self.to_get, timeout=PyFunceble.CONFIGURATION["seconds_before_http_timeout"], headers=self.headers, verify=PyFunceble.CONFIGURATION["verify_ssl_certificate"], ) else: # We are not globally testing a URL. # We get the head of the constructed URL. req = PyFunceble.requests.head( self.to_get, timeout=PyFunceble.CONFIGURATION["seconds_before_http_timeout"], headers=self.headers, ) # And we try to get the status code. return req.status_code except ( PyFunceble.requests.exceptions.InvalidURL, PyFunceble.socket.timeout, PyFunceble.requests.exceptions.Timeout, PyFunceble.requests.ConnectionError, urllib3_exceptions.InvalidHeader, UnicodeDecodeError, # The probability that this happend in production is minimal. ): # If one of the listed exception is matched, that means that something # went wrong and we were unable to extract the status code. # We return None. return None
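The helper above is tied to PyFunceble's global configuration. The same status-code probe can be sketched standalone with plain requests; the timeout and verify arguments stand in for the configuration lookups, which is an assumption of this sketch rather than part of the record.

import requests

def http_status(url, timeout=3, verify=True):
    """Return the HTTP status code of a HEAD request, or None on failure.

    Rough standalone analogue of _access() above; PyFunceble's configuration
    lookups are replaced by plain arguments.
    """
    try:
        req = requests.head(url, timeout=timeout, verify=verify)
        return req.status_code
    except (requests.exceptions.InvalidURL,
            requests.exceptions.Timeout,
            requests.exceptions.ConnectionError):
        return None

print(http_status("https://example.org"))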
def filter_factory(global_conf, **local_conf):
    """Returns a WSGI filter app for use with paste.deploy."""
    conf = global_conf.copy()
    conf.update(local_conf)

    def auth_filter(app):
        return Swauth(app, conf)
    return auth_filter
Returns a WSGI filter app for use with paste.deploy.
Below is the the instruction that describes the task: ### Input: Returns a WSGI filter app for use with paste.deploy. ### Response: def filter_factory(global_conf, **local_conf): """Returns a WSGI filter app for use with paste.deploy.""" conf = global_conf.copy() conf.update(local_conf) def auth_filter(app): return Swauth(app, conf) return auth_filter
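A sketch of exercising such a paste.deploy factory directly against a trivial WSGI app. The import path and the super_admin_key option are assumptions for illustration, not something stated in the record above.

# Hypothetical wiring sketch; the import path and config key are assumptions.
from swauth.middleware import filter_factory

def simple_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello']

global_conf = {'log_level': 'info'}
make_filter = filter_factory(global_conf, super_admin_key='secretkey')
wrapped_app = make_filter(simple_app)   # WSGI app now wrapped by Swauth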
def create_authenticate_message(self, user_name, password, domain_name=None,
                                workstation=None, server_certificate_hash=None):
    """
    Create an NTLM AUTHENTICATE_MESSAGE based on the Ntlm context and the previous messages sent and received

    :param user_name: The user name of the user we are trying to authenticate with
    :param password: The password of the user we are trying to authenticate with
    :param domain_name: The domain name of the user account we are authenticated with, default is None
    :param workstation: The workstation we are using to authenticate with, default is None
    :param server_certificate_hash: The SHA256 hash string of the server certificate (DER encoded) NTLM is
        authenticating to. Used for Channel Binding Tokens. If nothing is supplied then the CBT hash will
        not be sent. See messages.py AuthenticateMessage for more details
    :return: A base64 encoded string of the AUTHENTICATE_MESSAGE
    """
    self.authenticate_message = AuthenticateMessage(user_name, password, domain_name, workstation,
                                                    self.challenge_message, self.ntlm_compatibility,
                                                    server_certificate_hash)
    self.authenticate_message.add_mic(self.negotiate_message, self.challenge_message)

    # Setups up the session_security context used to sign and seal messages if wanted
    if self.negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_SEAL or self.negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_SIGN:
        self.session_security = SessionSecurity(struct.unpack("<I", self.authenticate_message.negotiate_flags)[0],
                                                self.authenticate_message.exported_session_key)

    return base64.b64encode(self.authenticate_message.get_data())
Create an NTLM AUTHENTICATE_MESSAGE based on the Ntlm context and the previous messages sent and received :param user_name: The user name of the user we are trying to authenticate with :param password: The password of the user we are trying to authenticate with :param domain_name: The domain name of the user account we are authenticated with, default is None :param workstation: The workstation we are using to authenticate with, default is None :param server_certificate_hash: The SHA256 hash string of the server certificate (DER encoded) NTLM is authenticating to. Used for Channel Binding Tokens. If nothing is supplied then the CBT hash will not be sent. See messages.py AuthenticateMessage for more details :return: A base64 encoded string of the AUTHENTICATE_MESSAGE
Below is the the instruction that describes the task: ### Input: Create an NTLM AUTHENTICATE_MESSAGE based on the Ntlm context and the previous messages sent and received :param user_name: The user name of the user we are trying to authenticate with :param password: The password of the user we are trying to authenticate with :param domain_name: The domain name of the user account we are authenticated with, default is None :param workstation: The workstation we are using to authenticate with, default is None :param server_certificate_hash: The SHA256 hash string of the server certificate (DER encoded) NTLM is authenticating to. Used for Channel Binding Tokens. If nothing is supplied then the CBT hash will not be sent. See messages.py AuthenticateMessage for more details :return: A base64 encoded string of the AUTHENTICATE_MESSAGE ### Response: def create_authenticate_message(self, user_name, password, domain_name=None, workstation=None, server_certificate_hash=None): """ Create an NTLM AUTHENTICATE_MESSAGE based on the Ntlm context and the previous messages sent and received :param user_name: The user name of the user we are trying to authenticate with :param password: The password of the user we are trying to authenticate with :param domain_name: The domain name of the user account we are authenticated with, default is None :param workstation: The workstation we are using to authenticate with, default is None :param server_certificate_hash: The SHA256 hash string of the server certificate (DER encoded) NTLM is authenticating to. Used for Channel Binding Tokens. If nothing is supplied then the CBT hash will not be sent. See messages.py AuthenticateMessage for more details :return: A base64 encoded string of the AUTHENTICATE_MESSAGE """ self.authenticate_message = AuthenticateMessage(user_name, password, domain_name, workstation, self.challenge_message, self.ntlm_compatibility, server_certificate_hash) self.authenticate_message.add_mic(self.negotiate_message, self.challenge_message) # Setups up the session_security context used to sign and seal messages if wanted if self.negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_SEAL or self.negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_SIGN: self.session_security = SessionSecurity(struct.unpack("<I", self.authenticate_message.negotiate_flags)[0], self.authenticate_message.exported_session_key) return base64.b64encode(self.authenticate_message.get_data())
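A hedged sketch of the full negotiate / challenge / authenticate exchange this method completes. The record only shows the final step; create_negotiate_message() and parse_challenge_message() are assumed companion methods of the same Ntlm context (implied by the self.negotiate_message and self.challenge_message attributes it references), and send_to_server is a stand-in for whatever transport carries the tokens.

# Hypothetical handshake sketch; see assumptions above.
context = Ntlm(ntlm_compatibility=3)

negotiate_token = context.create_negotiate_message(domain_name="DOMAIN",
                                                   workstation="WORKSTATION")
challenge_token = send_to_server(negotiate_token)   # assumption: your transport
context.parse_challenge_message(challenge_token)

authenticate_token = context.create_authenticate_message(
    user_name="User", password="Password",
    domain_name="DOMAIN", workstation="WORKSTATION")
send_to_server(authenticate_token)

# If signing/sealing was negotiated, context.session_security is now available
# to wrap subsequent payloads.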
def fit_mle(self, init_vals, num_draws, seed=None, constrained_pos=None, print_res=True, method="BFGS", loss_tol=1e-06, gradient_tol=1e-06, maxiter=1000, ridge=None, just_point=False, **kwargs): """ Parameters ---------- init_vals : 1D ndarray. Should contain the initial values to start the optimization process with. There should be one value for each utility coefficient and shape parameter being estimated. num_draws : int. Should be greater than zero. Denotes the number of draws that we are making from each normal distribution. seed : int or None, optional. If an int is passed, it should be greater than zero. Denotes the value to be used in seeding the random generator used to generate the draws from the normal distribution. Default == None. constrained_pos : list or None, optional. Denotes the positions of the array of estimated parameters that are not to change from their initial values. If a list is passed, the elements are to be integers where no such integer is greater than `init_values.size.` Default == None. print_res : bool, optional. Determines whether the timing and initial and final log likelihood results will be printed as they they are determined. method : str, optional. Should be a valid string which can be passed to scipy.optimize.minimize. Determines the optimization algorithm that is used for this problem. loss_tol : float, optional. Determines the tolerance on the difference in objective function values from one iteration to the next which is needed to determine convergence. Default = 1e-06. gradient_tol : float, optional. Determines the tolerance on the difference in gradient values from one iteration to the next which is needed to determine convergence. Default = 1e-06. maxiter : int, optional. Denotes the maximum number of iterations of the algorithm specified by `method` that will be used to estimate the parameters of the given model. Default == 1000. ridge : int, float, long, or None, optional. Determines whether or not ridge regression is performed. If a float is passed, then that float determines the ridge penalty for the optimization. Default = None. just_point : bool, optional. Determines whether (True) or not (False) calculations that are non- critical for obtaining the maximum likelihood point estimate will be performed. If True, this function will return the results dictionary from scipy.optimize. Default == False. Returns ------- None. Estimation results are saved to the model instance. 
""" # Check integrity of passed arguments kwargs_to_be_ignored = ["init_shapes", "init_intercepts", "init_coefs"] if any([x in kwargs for x in kwargs_to_be_ignored]): msg = "MNL model does not use of any of the following kwargs:\n{}" msg_2 = "Remove such kwargs and pass a single init_vals argument" raise ValueError(msg.format(kwargs_to_be_ignored) + msg_2) # Store the optimization method self.optimization_method = method # Store the ridge parameter self.ridge_param = ridge if ridge is not None: warnings.warn(_ridge_warning_msg) # Construct the mappings from alternatives to observations and from # chosen alternatives to observations mapping_res = self.get_mappings_for_fit() rows_to_mixers = mapping_res["rows_to_mixers"] # Get the draws for each random coefficient num_mixing_units = rows_to_mixers.shape[1] draw_list = mlc.get_normal_draws(num_mixing_units, num_draws, len(self.mixing_pos), seed=seed) # Create the 3D design matrix self.design_3d = mlc.create_expanded_design_for_mixing(self.design, draw_list, self.mixing_pos, rows_to_mixers) # Create the estimation object zero_vector = np.zeros(init_vals.shape) mixl_estimator = MixedEstimator(self, mapping_res, ridge, zero_vector, split_param_vec, constrained_pos=constrained_pos) # Perform one final check on the length of the initial values mixl_estimator.check_length_of_initial_values(init_vals) # Get the estimation results estimation_res = estimate(init_vals, mixl_estimator, method, loss_tol, gradient_tol, maxiter, print_res, use_hessian=True, just_point=just_point) if not just_point: # Store the mixed logit specific estimation results args = [mixl_estimator, estimation_res] estimation_res = add_mixl_specific_results_to_estimation_res(*args) # Store the estimation results self.store_fit_results(estimation_res) return None else: return estimation_res
Parameters ---------- init_vals : 1D ndarray. Should contain the initial values to start the optimization process with. There should be one value for each utility coefficient and shape parameter being estimated. num_draws : int. Should be greater than zero. Denotes the number of draws that we are making from each normal distribution. seed : int or None, optional. If an int is passed, it should be greater than zero. Denotes the value to be used in seeding the random generator used to generate the draws from the normal distribution. Default == None. constrained_pos : list or None, optional. Denotes the positions of the array of estimated parameters that are not to change from their initial values. If a list is passed, the elements are to be integers where no such integer is greater than `init_values.size.` Default == None. print_res : bool, optional. Determines whether the timing and initial and final log likelihood results will be printed as they they are determined. method : str, optional. Should be a valid string which can be passed to scipy.optimize.minimize. Determines the optimization algorithm that is used for this problem. loss_tol : float, optional. Determines the tolerance on the difference in objective function values from one iteration to the next which is needed to determine convergence. Default = 1e-06. gradient_tol : float, optional. Determines the tolerance on the difference in gradient values from one iteration to the next which is needed to determine convergence. Default = 1e-06. maxiter : int, optional. Denotes the maximum number of iterations of the algorithm specified by `method` that will be used to estimate the parameters of the given model. Default == 1000. ridge : int, float, long, or None, optional. Determines whether or not ridge regression is performed. If a float is passed, then that float determines the ridge penalty for the optimization. Default = None. just_point : bool, optional. Determines whether (True) or not (False) calculations that are non- critical for obtaining the maximum likelihood point estimate will be performed. If True, this function will return the results dictionary from scipy.optimize. Default == False. Returns ------- None. Estimation results are saved to the model instance.
Below is the the instruction that describes the task: ### Input: Parameters ---------- init_vals : 1D ndarray. Should contain the initial values to start the optimization process with. There should be one value for each utility coefficient and shape parameter being estimated. num_draws : int. Should be greater than zero. Denotes the number of draws that we are making from each normal distribution. seed : int or None, optional. If an int is passed, it should be greater than zero. Denotes the value to be used in seeding the random generator used to generate the draws from the normal distribution. Default == None. constrained_pos : list or None, optional. Denotes the positions of the array of estimated parameters that are not to change from their initial values. If a list is passed, the elements are to be integers where no such integer is greater than `init_values.size.` Default == None. print_res : bool, optional. Determines whether the timing and initial and final log likelihood results will be printed as they they are determined. method : str, optional. Should be a valid string which can be passed to scipy.optimize.minimize. Determines the optimization algorithm that is used for this problem. loss_tol : float, optional. Determines the tolerance on the difference in objective function values from one iteration to the next which is needed to determine convergence. Default = 1e-06. gradient_tol : float, optional. Determines the tolerance on the difference in gradient values from one iteration to the next which is needed to determine convergence. Default = 1e-06. maxiter : int, optional. Denotes the maximum number of iterations of the algorithm specified by `method` that will be used to estimate the parameters of the given model. Default == 1000. ridge : int, float, long, or None, optional. Determines whether or not ridge regression is performed. If a float is passed, then that float determines the ridge penalty for the optimization. Default = None. just_point : bool, optional. Determines whether (True) or not (False) calculations that are non- critical for obtaining the maximum likelihood point estimate will be performed. If True, this function will return the results dictionary from scipy.optimize. Default == False. Returns ------- None. Estimation results are saved to the model instance. ### Response: def fit_mle(self, init_vals, num_draws, seed=None, constrained_pos=None, print_res=True, method="BFGS", loss_tol=1e-06, gradient_tol=1e-06, maxiter=1000, ridge=None, just_point=False, **kwargs): """ Parameters ---------- init_vals : 1D ndarray. Should contain the initial values to start the optimization process with. There should be one value for each utility coefficient and shape parameter being estimated. num_draws : int. Should be greater than zero. Denotes the number of draws that we are making from each normal distribution. seed : int or None, optional. If an int is passed, it should be greater than zero. Denotes the value to be used in seeding the random generator used to generate the draws from the normal distribution. Default == None. constrained_pos : list or None, optional. Denotes the positions of the array of estimated parameters that are not to change from their initial values. If a list is passed, the elements are to be integers where no such integer is greater than `init_values.size.` Default == None. print_res : bool, optional. Determines whether the timing and initial and final log likelihood results will be printed as they they are determined. method : str, optional. 
Should be a valid string which can be passed to scipy.optimize.minimize. Determines the optimization algorithm that is used for this problem. loss_tol : float, optional. Determines the tolerance on the difference in objective function values from one iteration to the next which is needed to determine convergence. Default = 1e-06. gradient_tol : float, optional. Determines the tolerance on the difference in gradient values from one iteration to the next which is needed to determine convergence. Default = 1e-06. maxiter : int, optional. Denotes the maximum number of iterations of the algorithm specified by `method` that will be used to estimate the parameters of the given model. Default == 1000. ridge : int, float, long, or None, optional. Determines whether or not ridge regression is performed. If a float is passed, then that float determines the ridge penalty for the optimization. Default = None. just_point : bool, optional. Determines whether (True) or not (False) calculations that are non- critical for obtaining the maximum likelihood point estimate will be performed. If True, this function will return the results dictionary from scipy.optimize. Default == False. Returns ------- None. Estimation results are saved to the model instance. """ # Check integrity of passed arguments kwargs_to_be_ignored = ["init_shapes", "init_intercepts", "init_coefs"] if any([x in kwargs for x in kwargs_to_be_ignored]): msg = "MNL model does not use of any of the following kwargs:\n{}" msg_2 = "Remove such kwargs and pass a single init_vals argument" raise ValueError(msg.format(kwargs_to_be_ignored) + msg_2) # Store the optimization method self.optimization_method = method # Store the ridge parameter self.ridge_param = ridge if ridge is not None: warnings.warn(_ridge_warning_msg) # Construct the mappings from alternatives to observations and from # chosen alternatives to observations mapping_res = self.get_mappings_for_fit() rows_to_mixers = mapping_res["rows_to_mixers"] # Get the draws for each random coefficient num_mixing_units = rows_to_mixers.shape[1] draw_list = mlc.get_normal_draws(num_mixing_units, num_draws, len(self.mixing_pos), seed=seed) # Create the 3D design matrix self.design_3d = mlc.create_expanded_design_for_mixing(self.design, draw_list, self.mixing_pos, rows_to_mixers) # Create the estimation object zero_vector = np.zeros(init_vals.shape) mixl_estimator = MixedEstimator(self, mapping_res, ridge, zero_vector, split_param_vec, constrained_pos=constrained_pos) # Perform one final check on the length of the initial values mixl_estimator.check_length_of_initial_values(init_vals) # Get the estimation results estimation_res = estimate(init_vals, mixl_estimator, method, loss_tol, gradient_tol, maxiter, print_res, use_hessian=True, just_point=just_point) if not just_point: # Store the mixed logit specific estimation results args = [mixl_estimator, estimation_res] estimation_res = add_mixl_specific_results_to_estimation_res(*args) # Store the estimation results self.store_fit_results(estimation_res) return None else: return estimation_res
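A minimal call sketch for the estimator above. It assumes `model` is an already-constructed mixed logit model object exposing fit_mle, and the length of init_vals is a placeholder for however many coefficients that model actually estimates.

import numpy as np

# Hypothetical call sketch; `model` and the parameter count are assumptions.
init_vals = np.zeros(8)

point_res = model.fit_mle(init_vals,
                          num_draws=200,    # draws per random coefficient
                          seed=1,
                          method="BFGS",
                          maxiter=1000,
                          just_point=True)  # skip post-estimation extras
print(point_res)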
def get_context_data(self, **kwargs): """ Injects variables necessary for rendering the calendar into the context. Variables added are: `calendar`, `weekdays`, `month`, `next_month` and `previous_month`. """ data = super(BaseCalendarMonthView, self).get_context_data(**kwargs) year = self.get_year() month = self.get_month() date = _date_from_string(year, self.get_year_format(), month, self.get_month_format()) cal = Calendar(self.get_first_of_week()) month_calendar = [] now = datetime.datetime.utcnow() date_lists = defaultdict(list) multidate_objs = [] for obj in data['object_list']: obj_date = self.get_start_date(obj) end_date_field = self.get_end_date_field() if end_date_field: end_date = self.get_end_date(obj) if end_date and end_date != obj_date: multidate_objs.append({ 'obj': obj, 'range': [x for x in daterange(obj_date, end_date)] }) continue # We don't put multi-day events in date_lists date_lists[obj_date].append(obj) for week in cal.monthdatescalendar(date.year, date.month): week_range = set(daterange(week[0], week[6])) week_events = [] for val in multidate_objs: intersect_length = len(week_range.intersection(val['range'])) if intersect_length: # Event happens during this week slot = 1 width = intersect_length # How many days is the event during this week? nowrap_previous = True # Does the event continue from the previous week? nowrap_next = True # Does the event continue to the next week? if val['range'][0] >= week[0]: slot = 1 + (val['range'][0] - week[0]).days else: nowrap_previous = False if val['range'][-1] > week[6]: nowrap_next = False week_events.append({ 'event': val['obj'], 'slot': slot, 'width': width, 'nowrap_previous': nowrap_previous, 'nowrap_next': nowrap_next, }) week_calendar = { 'events': week_events, 'date_list': [], } for day in week: week_calendar['date_list'].append({ 'day': day, 'events': date_lists[day], 'today': day == now.date(), 'is_current_month': day.month == date.month, }) month_calendar.append(week_calendar) data['calendar'] = month_calendar data['weekdays'] = [DAYS[x] for x in cal.iterweekdays()] data['month'] = date data['next_month'] = self.get_next_month(date) data['previous_month'] = self.get_previous_month(date) return data
Injects variables necessary for rendering the calendar into the context. Variables added are: `calendar`, `weekdays`, `month`, `next_month` and `previous_month`.
Below is the the instruction that describes the task: ### Input: Injects variables necessary for rendering the calendar into the context. Variables added are: `calendar`, `weekdays`, `month`, `next_month` and `previous_month`. ### Response: def get_context_data(self, **kwargs): """ Injects variables necessary for rendering the calendar into the context. Variables added are: `calendar`, `weekdays`, `month`, `next_month` and `previous_month`. """ data = super(BaseCalendarMonthView, self).get_context_data(**kwargs) year = self.get_year() month = self.get_month() date = _date_from_string(year, self.get_year_format(), month, self.get_month_format()) cal = Calendar(self.get_first_of_week()) month_calendar = [] now = datetime.datetime.utcnow() date_lists = defaultdict(list) multidate_objs = [] for obj in data['object_list']: obj_date = self.get_start_date(obj) end_date_field = self.get_end_date_field() if end_date_field: end_date = self.get_end_date(obj) if end_date and end_date != obj_date: multidate_objs.append({ 'obj': obj, 'range': [x for x in daterange(obj_date, end_date)] }) continue # We don't put multi-day events in date_lists date_lists[obj_date].append(obj) for week in cal.monthdatescalendar(date.year, date.month): week_range = set(daterange(week[0], week[6])) week_events = [] for val in multidate_objs: intersect_length = len(week_range.intersection(val['range'])) if intersect_length: # Event happens during this week slot = 1 width = intersect_length # How many days is the event during this week? nowrap_previous = True # Does the event continue from the previous week? nowrap_next = True # Does the event continue to the next week? if val['range'][0] >= week[0]: slot = 1 + (val['range'][0] - week[0]).days else: nowrap_previous = False if val['range'][-1] > week[6]: nowrap_next = False week_events.append({ 'event': val['obj'], 'slot': slot, 'width': width, 'nowrap_previous': nowrap_previous, 'nowrap_next': nowrap_next, }) week_calendar = { 'events': week_events, 'date_list': [], } for day in week: week_calendar['date_list'].append({ 'day': day, 'events': date_lists[day], 'today': day == now.date(), 'is_current_month': day.month == date.month, }) month_calendar.append(week_calendar) data['calendar'] = month_calendar data['weekdays'] = [DAYS[x] for x in cal.iterweekdays()] data['month'] = date data['next_month'] = self.get_next_month(date) data['previous_month'] = self.get_previous_month(date) return data
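The view above leans on the standard library's Calendar.monthdatescalendar, which yields whole weeks of datetime.date objects padded with days from neighbouring months. A standalone illustration:

from calendar import Calendar

# Weeks always span seven dates, so the view can safely index week[0]..week[6].
cal = Calendar(firstweekday=0)            # 0 = Monday
for week in cal.monthdatescalendar(2024, 2):
    print(week[0], "...", week[6])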
def readTupleAndExtra(self, stream):
    """Read symbol and extrabits from stream.
    Returns symbol length, symbol, extraBits, extra
    >>> olleke.pos = 6
    >>> MetablockLengthAlphabet().readTupleAndExtra(olleke)
    (2, Symbol(MLEN, 4), 16, 46)
    """
    length, symbol = self.decodePeek(stream.peek(self.maxLength))
    stream.pos += length
    extraBits = self.extraBits(symbol.index)
    return length, symbol, extraBits, stream.read(extraBits)
Read symbol and extrabits from stream. Returns symbol length, symbol, extraBits, extra >>> olleke.pos = 6 >>> MetablockLengthAlphabet().readTupleAndExtra(olleke) (2, Symbol(MLEN, 4), 16, 46)
Below is the the instruction that describes the task: ### Input: Read symbol and extrabits from stream. Returns symbol length, symbol, extraBits, extra >>> olleke.pos = 6 >>> MetablockLengthAlphabet().readTupleAndExtra(olleke) (2, Symbol(MLEN, 4), 16, 46) ### Response: def readTupleAndExtra(self, stream): """Read symbol and extrabits from stream. Returns symbol length, symbol, extraBits, extra >>> olleke.pos = 6 >>> MetablockLengthAlphabet().readTupleAndExtra(olleke) (2, Symbol(MLEN, 4), 16, 46) """ length, symbol = self.decodePeek(stream.peek(self.maxLength)) stream.pos += length extraBits = self.extraBits(symbol.index) return length, symbol, extraBits, stream.read(extraBits)
def updatepLvlGrid(self):
    '''
    Update the grid of permanent income levels.  Currently only works for
    infinite horizon models (cycles=0) and lifecycle models (cycles=1).  Not
    clear what to do about cycles>1.  Identical to version in persistent
    shocks model, but pLvl=0 is manually added to the grid (because there is
    no closed form lower-bounding cFunc for pLvl=0).

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    # Run basic version of this method
    PersistentShockConsumerType.updatepLvlGrid(self)
    for j in range(len(self.pLvlGrid)):  # Then add 0 to the bottom of each pLvlGrid
        this_grid = self.pLvlGrid[j]
        self.pLvlGrid[j] = np.insert(this_grid, 0, 0.0001)
Update the grid of permanent income levels. Currently only works for infinite horizon models (cycles=0) and lifecycle models (cycles=1). Not clear what to do about cycles>1. Identical to version in persistent shocks model, but pLvl=0 is manually added to the grid (because there is no closed form lower-bounding cFunc for pLvl=0). Parameters ---------- None Returns ------- None
Below is the the instruction that describes the task: ### Input: Update the grid of permanent income levels. Currently only works for infinite horizon models (cycles=0) and lifecycle models (cycles=1). Not clear what to do about cycles>1. Identical to version in persistent shocks model, but pLvl=0 is manually added to the grid (because there is no closed form lower-bounding cFunc for pLvl=0). Parameters ---------- None Returns ------- None ### Response: def updatepLvlGrid(self): ''' Update the grid of permanent income levels. Currently only works for infinite horizon models (cycles=0) and lifecycle models (cycles=1). Not clear what to do about cycles>1. Identical to version in persistent shocks model, but pLvl=0 is manually added to the grid (because there is no closed form lower-bounding cFunc for pLvl=0). Parameters ---------- None Returns ------- None ''' # Run basic version of this method PersistentShockConsumerType.updatepLvlGrid(self) for j in range(len(self.pLvlGrid)): # Then add 0 to the bottom of each pLvlGrid this_grid = self.pLvlGrid[j] self.pLvlGrid[j] = np.insert(this_grid,0,0.0001)
def save(self):
    """
    Save existing record
    """
    data = {
        "type": self.type,
        "data": self.data,
        "name": self.name,
        "priority": self.priority,
        "port": self.port,
        "ttl": self.ttl,
        "weight": self.weight,
        "flags": self.flags,
        "tags": self.tags
    }
    return self.get_data(
        "domains/%s/records/%s" % (self.domain, self.id),
        type=PUT,
        params=data
    )
Save existing record
Below is the the instruction that describes the task: ### Input: Save existing record ### Response: def save(self): """ Save existing record """ data = { "type": self.type, "data": self.data, "name": self.name, "priority": self.priority, "port": self.port, "ttl": self.ttl, "weight": self.weight, "flags": self.flags, "tags": self.tags } return self.get_data( "domains/%s/records/%s" % (self.domain, self.id), type=PUT, params=data )
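A hedged usage sketch for the method above; `record` is assumed to be an existing, fully populated domain-record object of the same class (e.g. fetched through the same API wrapper), and the new values are placeholders.

# Hypothetical usage sketch; see assumptions above.
record.ttl = 300                 # shorten the TTL
record.data = "203.0.113.10"     # point the record at a new address
response = record.save()         # issues the PUT request shown above
print(response)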
def calc_retinotopy(note, error, subject, clean, run_lh, run_rh, invert_rh_angle, max_in_eccen, min_in_eccen, angle_lh_file, theta_lh_file, eccen_lh_file, rho_lh_file, weight_lh_file, radius_lh_file, angle_rh_file, theta_rh_file, eccen_rh_file, rho_rh_file, weight_rh_file, radius_rh_file): ''' calc_retinotopy extracts the retinotopy options from the command line, loads the relevant files, and stores them as properties on the subject's lh and rh cortices. ''' ctcs = {} for (h,ang,tht,ecc,rho,wgt,rad,run) in [ ('lh', angle_lh_file,theta_lh_file, eccen_lh_file,rho_lh_file, weight_lh_file, radius_lh_file, run_lh), ('rh', angle_rh_file,theta_rh_file, eccen_rh_file,rho_rh_file, weight_rh_file, radius_rh_file, run_rh)]: if not run: continue hemi = getattr(subject, h) props = {} # load the properties or find them in the auto-properties if ang: try: props['polar_angle'] = _guess_surf_file(ang) except Exception: error('could not load surface file %s' % ang) elif tht: try: tmp = _guess_surf_file(tht) props['polar_angle'] = 90.0 - 180.0 / np.pi * tmp except Exception: error('could not load surface file %s' % tht) else: props['polar_angle'] = empirical_retinotopy_data(hemi, 'polar_angle') if ecc: try: props['eccentricity'] = _guess_surf_file(ecc) except Exception: error('could not load surface file %s' % ecc) elif rho: try: tmp = _guess_surf_file(rhp) props['eccentricity'] = 180.0 / np.pi * tmp except Exception: error('could not load surface file %s' % rho) else: props['eccentricity'] = empirical_retinotopy_data(hemi, 'eccentricity') if wgt: try: props['weight'] = _guess_surf_file(wgt) except Exception: error('could not load surface file %s' % wgt) else: props['weight'] = empirical_retinotopy_data(hemi, 'weight') if rad: try: props['radius'] = _guess_surf_file(rad) except Exception: error('could not load surface file %s' % rad) else: props['radius'] = empirical_retinotopy_data(hemi, 'radius') # Check for inverted rh if h == 'rh' and invert_rh_angle: props['polar_angle'] = -props['polar_angle'] # and zero-out weights for high eccentricities props['weight'] = np.array(props['weight']) if max_in_eccen is not None: props['weight'][props['eccentricity'] > max_in_eccen] = 0 if min_in_eccen is not None: props['weight'][props['eccentricity'] < min_in_eccen] = 0 # Do smoothing, if requested if clean: note('Cleaning %s retinotopy...' % h.upper()) (ang,ecc) = clean_retinotopy(hemi, retinotopy=props, mask=None, weight='weight') props['polar_angle'] = ang props['eccentricity'] = ecc ctcs[h] = hemi.with_prop(props) return {'cortices': pyr.pmap(ctcs)}
calc_retinotopy extracts the retinotopy options from the command line, loads the relevant files, and stores them as properties on the subject's lh and rh cortices.
Below is the the instruction that describes the task: ### Input: calc_retinotopy extracts the retinotopy options from the command line, loads the relevant files, and stores them as properties on the subject's lh and rh cortices. ### Response: def calc_retinotopy(note, error, subject, clean, run_lh, run_rh, invert_rh_angle, max_in_eccen, min_in_eccen, angle_lh_file, theta_lh_file, eccen_lh_file, rho_lh_file, weight_lh_file, radius_lh_file, angle_rh_file, theta_rh_file, eccen_rh_file, rho_rh_file, weight_rh_file, radius_rh_file): ''' calc_retinotopy extracts the retinotopy options from the command line, loads the relevant files, and stores them as properties on the subject's lh and rh cortices. ''' ctcs = {} for (h,ang,tht,ecc,rho,wgt,rad,run) in [ ('lh', angle_lh_file,theta_lh_file, eccen_lh_file,rho_lh_file, weight_lh_file, radius_lh_file, run_lh), ('rh', angle_rh_file,theta_rh_file, eccen_rh_file,rho_rh_file, weight_rh_file, radius_rh_file, run_rh)]: if not run: continue hemi = getattr(subject, h) props = {} # load the properties or find them in the auto-properties if ang: try: props['polar_angle'] = _guess_surf_file(ang) except Exception: error('could not load surface file %s' % ang) elif tht: try: tmp = _guess_surf_file(tht) props['polar_angle'] = 90.0 - 180.0 / np.pi * tmp except Exception: error('could not load surface file %s' % tht) else: props['polar_angle'] = empirical_retinotopy_data(hemi, 'polar_angle') if ecc: try: props['eccentricity'] = _guess_surf_file(ecc) except Exception: error('could not load surface file %s' % ecc) elif rho: try: tmp = _guess_surf_file(rhp) props['eccentricity'] = 180.0 / np.pi * tmp except Exception: error('could not load surface file %s' % rho) else: props['eccentricity'] = empirical_retinotopy_data(hemi, 'eccentricity') if wgt: try: props['weight'] = _guess_surf_file(wgt) except Exception: error('could not load surface file %s' % wgt) else: props['weight'] = empirical_retinotopy_data(hemi, 'weight') if rad: try: props['radius'] = _guess_surf_file(rad) except Exception: error('could not load surface file %s' % rad) else: props['radius'] = empirical_retinotopy_data(hemi, 'radius') # Check for inverted rh if h == 'rh' and invert_rh_angle: props['polar_angle'] = -props['polar_angle'] # and zero-out weights for high eccentricities props['weight'] = np.array(props['weight']) if max_in_eccen is not None: props['weight'][props['eccentricity'] > max_in_eccen] = 0 if min_in_eccen is not None: props['weight'][props['eccentricity'] < min_in_eccen] = 0 # Do smoothing, if requested if clean: note('Cleaning %s retinotopy...' % h.upper()) (ang,ecc) = clean_retinotopy(hemi, retinotopy=props, mask=None, weight='weight') props['polar_angle'] = ang props['eccentricity'] = ecc ctcs[h] = hemi.with_prop(props) return {'cortices': pyr.pmap(ctcs)}
def linear(self, **paircoords): """ Linearize bin indices. This function is called by subclasses. Refer to the source code of :py:class:`RBinning` for an example. Parameters ---------- args : list a list of bin index, (xi, yi, zi, ..) Returns ------- linearlized bin index """ N = len(paircoords[list(paircoords.keys())[0]]) integer = numpy.empty(N, ('i8', (self.Ndim,))).T # do each dimension for i, dim in enumerate(self.dims): if self.spacing[i] == 'linspace': x = paircoords[dim] - self.min[i] integer[i] = numpy.ceil(x * self.inv[i]) elif self.spacing[i] == 'logspace': x = paircoords[dim].copy() x[x == 0] = self.min[i] * 0.9 x = numpy.log10(x / self.min[i]) integer[i] = numpy.ceil(x * self.inv[i]) elif self.spacing[i] is None: edge = self.edges if self.Ndim == 1 else self.edges[i] integer[i] = numpy.searchsorted(edge, paircoords[dim], side='left') return numpy.ravel_multi_index(integer, self.shape, mode='clip')
Linearize bin indices. This function is called by subclasses. Refer to the source code of :py:class:`RBinning` for an example. Parameters ---------- args : list a list of bin index, (xi, yi, zi, ..) Returns ------- linearlized bin index
Below is the the instruction that describes the task: ### Input: Linearize bin indices. This function is called by subclasses. Refer to the source code of :py:class:`RBinning` for an example. Parameters ---------- args : list a list of bin index, (xi, yi, zi, ..) Returns ------- linearlized bin index ### Response: def linear(self, **paircoords): """ Linearize bin indices. This function is called by subclasses. Refer to the source code of :py:class:`RBinning` for an example. Parameters ---------- args : list a list of bin index, (xi, yi, zi, ..) Returns ------- linearlized bin index """ N = len(paircoords[list(paircoords.keys())[0]]) integer = numpy.empty(N, ('i8', (self.Ndim,))).T # do each dimension for i, dim in enumerate(self.dims): if self.spacing[i] == 'linspace': x = paircoords[dim] - self.min[i] integer[i] = numpy.ceil(x * self.inv[i]) elif self.spacing[i] == 'logspace': x = paircoords[dim].copy() x[x == 0] = self.min[i] * 0.9 x = numpy.log10(x / self.min[i]) integer[i] = numpy.ceil(x * self.inv[i]) elif self.spacing[i] is None: edge = self.edges if self.Ndim == 1 else self.edges[i] integer[i] = numpy.searchsorted(edge, paircoords[dim], side='left') return numpy.ravel_multi_index(integer, self.shape, mode='clip')
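The custom-edge branch above boils down to numpy.searchsorted per dimension followed by numpy.ravel_multi_index. A standalone illustration with made-up edges:

import numpy as np

# Map 2-D coordinates onto a flat bin index, as the method above does.
x_edges = np.array([0.0, 1.0, 2.0, 4.0])
y_edges = np.array([0.0, 0.5, 1.0])

x = np.array([0.3, 1.7, 3.9])
y = np.array([0.1, 0.6, 0.9])

ix = np.searchsorted(x_edges, x, side='left')      # per-dimension bin index
iy = np.searchsorted(y_edges, y, side='left')

shape = (len(x_edges) + 1, len(y_edges) + 1)       # room for out-of-range bins
flat = np.ravel_multi_index(np.vstack([ix, iy]), shape, mode='clip')
print(flat)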
def set_hook(self, phase, action):
    """Allows setting hooks (attaching actions) for various uWSGI phases.

    :param str|unicode phase: See constants in ``.phases``.

    :param str|unicode|list|HookAction|list[HookAction] action:

    """
    self._set('hook-%s' % phase, action, multi=True)

    return self._section
Allows setting hooks (attaching actions) for various uWSGI phases. :param str|unicode phase: See constants in ``.phases``. :param str|unicode|list|HookAction|list[HookAction] action:
Below is the the instruction that describes the task: ### Input: Allows setting hooks (attaching actions) for various uWSGI phases. :param str|unicode phase: See constants in ``.phases``. :param str|unicode|list|HookAction|list[HookAction] action: ### Response: def set_hook(self, phase, action): """Allows setting hooks (attaching actions) for various uWSGI phases. :param str|unicode phase: See constants in ``.phases``. :param str|unicode|list|HookAction|list[HookAction] action: """ self._set('hook-%s' % phase, action, multi=True) return self._section
def select_token(request, scopes='', new=False):
    """
    Presents the user with a selection of applicable tokens for the requested view.
    """

    @tokens_required(scopes=scopes, new=new)
    def _token_list(r, tokens):
        context = {
            'tokens': tokens,
            'base_template': app_settings.ESI_BASE_TEMPLATE,
        }
        return render(r, 'esi/select_token.html', context=context)
    return _token_list(request)
Presents the user with a selection of applicable tokens for the requested view.
Below is the the instruction that describes the task: ### Input: Presents the user with a selection of applicable tokens for the requested view. ### Response: def select_token(request, scopes='', new=False): """ Presents the user with a selection of applicable tokens for the requested view. """ @tokens_required(scopes=scopes, new=new) def _token_list(r, tokens): context = { 'tokens': tokens, 'base_template': app_settings.ESI_BASE_TEMPLATE, } return render(r, 'esi/select_token.html', context=context) return _token_list(request)
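Since this is a plain Django view, it can be wired into a URLconf directly; the scope name below is a placeholder assumption, passed through Django's extra-kwargs dictionary.

# Hypothetical URL wiring sketch; the scope string is a placeholder.
from django.urls import path

urlpatterns = [
    path("tokens/select/", select_token,
         {"scopes": "esi-location.read_location.v1"},   # forwarded as the scopes kwarg
         name="select_token"),
]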
def session_preparation(self):
    """Prepare the session after the connection has been established."""
    self.ansi_escape_codes = True
    self._test_channel_read()
    self.set_base_prompt()
    self.disable_paging()
    self.set_terminal_width(command="terminal width 511")

    # Clear the read buffer
    time.sleep(0.3 * self.global_delay_factor)
    self.clear_buffer()
Prepare the session after the connection has been established.
Below is the the instruction that describes the task: ### Input: Prepare the session after the connection has been established. ### Response: def session_preparation(self): """Prepare the session after the connection has been established.""" self.ansi_escape_codes = True self._test_channel_read() self.set_base_prompt() self.disable_paging() self.set_terminal_width(command="terminal width 511") # Clear the read buffer time.sleep(0.3 * self.global_delay_factor) self.clear_buffer()
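session_preparation() runs implicitly when a netmiko driver connects, so a usage sketch only needs ConnectHandler. The device_type value below is an assumption; use the platform name that maps to the driver defining this method.

from netmiko import ConnectHandler

# Hypothetical connection sketch; credentials and device_type are placeholders.
device = {
    "device_type": "cisco_xr",        # assumption: pick the matching platform
    "host": "192.0.2.1",
    "username": "admin",
    "password": "secret",
}

conn = ConnectHandler(**device)        # triggers session_preparation()
print(conn.find_prompt())
conn.disconnect()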
def appendMissingSignatures(self):
    """ Store which accounts/keys are supposed to sign the transaction

        This method is used for an offline-signer!
    """
    missing_signatures = self.get("missing_signatures", [])
    for pub in missing_signatures:
        wif = self.blockchain.wallet.getPrivateKeyForPublicKey(pub)
        if wif:
            self.appendWif(wif)
Store which accounts/keys are supposed to sign the transaction This method is used for an offline-signer!
Below is the the instruction that describes the task: ### Input: Store which accounts/keys are supposed to sign the transaction This method is used for an offline-signer! ### Response: def appendMissingSignatures(self): """ Store which accounts/keys are supposed to sign the transaction This method is used for an offline-signer! """ missing_signatures = self.get("missing_signatures", []) for pub in missing_signatures: wif = self.blockchain.wallet.getPrivateKeyForPublicKey(pub) if wif: self.appendWif(wif)
def confd_state_internal_cdb_client_subscription_twophase(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring")
    internal = ET.SubElement(confd_state, "internal")
    cdb = ET.SubElement(internal, "cdb")
    client = ET.SubElement(cdb, "client")
    subscription = ET.SubElement(client, "subscription")
    twophase = ET.SubElement(subscription, "twophase")

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
Below is the the instruction that describes the task: ### Input: Auto Generated Code ### Response: def confd_state_internal_cdb_client_subscription_twophase(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring") internal = ET.SubElement(confd_state, "internal") cdb = ET.SubElement(internal, "cdb") client = ET.SubElement(cdb, "client") subscription = ET.SubElement(client, "subscription") twophase = ET.SubElement(subscription, "twophase") callback = kwargs.pop('callback', self._callback) return callback(config)
def add_comment(self, page_id, text):
    """
    Add comment into page
    :param page_id
    :param text
    """
    data = {'type': 'comment',
            'container': {'id': page_id, 'type': 'page', 'status': 'current'},
            'body': {'storage': {'value': text, 'representation': 'storage'}}}
    return self.post('rest/api/content/', data=data)
Add comment into page :param page_id :param text
Below is the the instruction that describes the task: ### Input: Add comment into page :param page_id :param text ### Response: def add_comment(self, page_id, text): """ Add comment into page :param page_id :param text """ data = {'type': 'comment', 'container': {'id': page_id, 'type': 'page', 'status': 'current'}, 'body': {'storage': {'value': text, 'representation': 'storage'}}} return self.post('rest/api/content/', data=data)
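A hedged usage sketch through the atlassian-python-api client; URL, credentials and page id are placeholders.

from atlassian import Confluence

# Hypothetical usage sketch; connection details are placeholders.
confluence = Confluence(url="https://confluence.example.com",
                        username="bot", password="secret")

page_id = "123456"                       # assumption: an existing page id
confluence.add_comment(page_id, "<p>Build passed on all platforms.</p>")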
def sys_version(version_tuple):
    """
    Set a temporary sys.version_info tuple

    :param version_tuple: a fake sys.version_info tuple
    """
    old_version = sys.version_info
    sys.version_info = version_tuple
    yield
    sys.version_info = old_version
Set a temporary sys.version_info tuple :param version_tuple: a fake sys.version_info tuple
Below is the the instruction that describes the task: ### Input: Set a temporary sys.version_info tuple :param version_tuple: a fake sys.version_info tuple ### Response: def sys_version(version_tuple): """ Set a temporary sys.version_info tuple :param version_tuple: a fake sys.version_info tuple """ old_version = sys.version_info sys.version_info = version_tuple yield sys.version_info = old_version
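The body above is a generator, so in its original module it is presumably wrapped with contextlib.contextmanager; under that assumption it is used as a with-block:

import sys

# Usage sketch, assuming the generator is decorated with
# contextlib.contextmanager in its original module.
with sys_version((2, 7, 14, 'final', 0)):
    print(sys.version_info)      # temporarily reports the fake tuple
print(sys.version_info)          # restored afterwards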
def set_permission(self, username, virtual_host, configure_regex='.*',
                   write_regex='.*', read_regex='.*'):
    """Set User permissions for the configured virtual host.

    :param str username: Username
    :param str virtual_host: Virtual host name
    :param str configure_regex: Permission pattern for configuration
                                operations for this user.
    :param str write_regex: Permission pattern for write operations
                            for this user.
    :param str read_regex: Permission pattern for read operations
                           for this user.

    :raises ApiError: Raises if the remote server encountered an error.
    :raises ApiConnectionError: Raises if there was a connectivity issue.

    :rtype: dict
    """
    virtual_host = quote(virtual_host, '')
    permission_payload = json.dumps({
        "configure": configure_regex,
        "read": read_regex,
        "write": write_regex
    })
    return self.http_client.put(API_USER_VIRTUAL_HOST_PERMISSIONS % (
        virtual_host,
        username
    ), payload=permission_payload)
Set User permissions for the configured virtual host. :param str username: Username :param str virtual_host: Virtual host name :param str configure_regex: Permission pattern for configuration operations for this user. :param str write_regex: Permission pattern for write operations for this user. :param str read_regex: Permission pattern for read operations for this user. :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :rtype: dict
Below is the the instruction that describes the task: ### Input: Set User permissions for the configured virtual host. :param str username: Username :param str virtual_host: Virtual host name :param str configure_regex: Permission pattern for configuration operations for this user. :param str write_regex: Permission pattern for write operations for this user. :param str read_regex: Permission pattern for read operations for this user. :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :rtype: dict ### Response: def set_permission(self, username, virtual_host, configure_regex='.*', write_regex='.*', read_regex='.*'): """Set User permissions for the configured virtual host. :param str username: Username :param str virtual_host: Virtual host name :param str configure_regex: Permission pattern for configuration operations for this user. :param str write_regex: Permission pattern for write operations for this user. :param str read_regex: Permission pattern for read operations for this user. :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :rtype: dict """ virtual_host = quote(virtual_host, '') permission_payload = json.dumps({ "configure": configure_regex, "read": read_regex, "write": write_regex }) return self.http_client.put(API_USER_VIRTUAL_HOST_PERMISSIONS % ( virtual_host, username ), payload=permission_payload)
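A hedged usage sketch through the AMQPStorm management client; the broker URL, credentials and vhost are placeholders, and exposing the method as api.user.set_permission is an assumption based on where this kind of call normally lives.

from amqpstorm.management import ManagementApi

# Hypothetical usage sketch; see assumptions above.
api = ManagementApi('http://localhost:15672', 'guest', 'guest')

api.user.set_permission('app_user', 'my_vhost',
                        configure_regex='^app\\.',   # may only declare app.* entities
                        write_regex='.*',
                        read_regex='.*')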
def make_node_dict(outer_list, sort="zone"):
    """Convert node data from nested-list to sorted dict."""
    raw_dict = {}
    x = 1
    for inner_list in outer_list:
        for node in inner_list:
            raw_dict[x] = node
            x += 1
    if sort == "name":  # sort by provider - name
        srt_dict = OrderedDict(sorted(raw_dict.items(),
                                      key=lambda k: (k[1].cloud,
                                                     k[1].name.lower())))
    else:  # sort by provider - zone - name
        srt_dict = OrderedDict(sorted(raw_dict.items(),
                                      key=lambda k: (k[1].cloud,
                                                     k[1].zone,
                                                     k[1].name.lower())))
    x = 1
    node_dict = {}
    for i, v in srt_dict.items():
        node_dict[x] = v
        x += 1
    return node_dict
Convert node data from nested-list to sorted dict.
Below is the the instruction that describes the task: ### Input: Convert node data from nested-list to sorted dict. ### Response: def make_node_dict(outer_list, sort="zone"): """Convert node data from nested-list to sorted dict.""" raw_dict = {} x = 1 for inner_list in outer_list: for node in inner_list: raw_dict[x] = node x += 1 if sort == "name": # sort by provider - name srt_dict = OrderedDict(sorted(raw_dict.items(), key=lambda k: (k[1].cloud, k[1].name.lower()))) else: # sort by provider - zone - name srt_dict = OrderedDict(sorted(raw_dict.items(), key=lambda k: (k[1].cloud, k[1].zone, k[1].name.lower()))) x = 1 node_dict = {} for i, v in srt_dict.items(): node_dict[x] = v x += 1 return node_dict
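A self-contained illustration of the sorting above. Node is a stand-in for the real cloud node objects, which only need .cloud, .zone and .name attributes.

from collections import namedtuple

Node = namedtuple("Node", "cloud zone name")

outer_list = [
    [Node("aws", "us-east-1", "web2"), Node("aws", "us-east-1", "db1")],
    [Node("gcp", "europe-west1", "Cache")],
]

by_zone = make_node_dict(outer_list)               # provider - zone - name
by_name = make_node_dict(outer_list, sort="name")  # provider - name
print([n.name for n in by_zone.values()])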
def get_assessment_parts(self): """Gets all ``AssessmentParts``. return: (osid.assessment.authoring.AssessmentPartList) - a list of ``AssessmentParts`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceLookupSession.get_resources # NOTE: This implementation currently ignores plenary view collection = JSONClientValidated('assessment_authoring', collection='AssessmentPart', runtime=self._runtime) result = collection.find(self._view_filter()).sort('_id', DESCENDING) return objects.AssessmentPartList(result, runtime=self._runtime, proxy=self._proxy)
Gets all ``AssessmentParts``. return: (osid.assessment.authoring.AssessmentPartList) - a list of ``AssessmentParts`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
Below is the the instruction that describes the task: ### Input: Gets all ``AssessmentParts``. return: (osid.assessment.authoring.AssessmentPartList) - a list of ``AssessmentParts`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* ### Response: def get_assessment_parts(self): """Gets all ``AssessmentParts``. return: (osid.assessment.authoring.AssessmentPartList) - a list of ``AssessmentParts`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceLookupSession.get_resources # NOTE: This implementation currently ignores plenary view collection = JSONClientValidated('assessment_authoring', collection='AssessmentPart', runtime=self._runtime) result = collection.find(self._view_filter()).sort('_id', DESCENDING) return objects.AssessmentPartList(result, runtime=self._runtime, proxy=self._proxy)
def correct_spurious_inversions(scaffolds, criterion="colinear"): """Invert bins based on orientation neighborhoods. Neighborhoods can be defined by three criteria: -a 'cis' neighborhood is a group of bins belonging to the same initial contig -a 'colinear' neighborhood is a 'cis' neighborhood where bins are ordered the same way they were on the initial contig -a 'contiguous' neighborhood is a 'colinear' neighborhood where all bins are exactly consecutive, i.e. the end position of each bin matches the starting position of the next bin This function looks for such neighborhoods and orients all bins in it according to the majority orientation. An example with three inversions, one for each criterion: >>> scaffolds = { ... "scaffold1": [ ... ["contig1", 1, 100, 200, 1], ... ["contig1", 2, 200, 300, 1], ... ["contig1", 3, 300, 400, -1], # <-- inversion (contiguous) ... ["contig1", 4, 400, 500, 1], ... ["contig1", 10, 1500, 1605, 1], ... ["contig1", 12, 1750, 1850, -1], # <-- inversion (colinear) ... ["contig1", 23, 2100, 2499, 1], ... ["contig1", 28, 2850, 3000, 1], ... ["contig1", 0, 0, 100, -1], # <-- inversion (cis) ... ["contig2", 554, 1850, 1900, -1], ... ], ... } With the 'cis' criterion, pretty much all bins from "contig1" get inverted to the majority orientation (+): >>> sc_cis = correct_spurious_inversions(scaffolds, "cis") >>> for my_bin in sc_cis['scaffold1']: ... print(my_bin) ... ['contig1', 1, 100, 200, 1] ['contig1', 2, 200, 300, 1] ['contig1', 3, 300, 400, 1] ['contig1', 4, 400, 500, 1] ['contig1', 10, 1500, 1605, 1] ['contig1', 12, 1750, 1850, 1] ['contig1', 23, 2100, 2499, 1] ['contig1', 28, 2850, 3000, 1] ['contig1', 0, 0, 100, 1] ['contig2', 554, 1850, 1900, -1] With the 'colinear' criterion, the bin ['contig1', 0, 0, 100, -1] is treated as a different neighborhood from the rest (as it is not colinear with the other bins from 'contig1') and remains untouched: >>> sc_colinear = correct_spurious_inversions(scaffolds, "colinear") >>> for my_bin in sc_colinear['scaffold1']: ... print(my_bin) ... ['contig1', 1, 100, 200, 1] ['contig1', 2, 200, 300, 1] ['contig1', 3, 300, 400, 1] ['contig1', 4, 400, 500, 1] ['contig1', 10, 1500, 1605, 1] ['contig1', 12, 1750, 1850, 1] ['contig1', 23, 2100, 2499, 1] ['contig1', 28, 2850, 3000, 1] ['contig1', 0, 0, 100, -1] ['contig2', 554, 1850, 1900, -1] With the 'contiguous' criterion, the ['contig1', 12, 1750, 1850, -1] breaks with the contiguous region spanning from 100 to 400 bp on 'contig1' and so is treated as a different neighborhood as well: >>> sc_cont = correct_spurious_inversions(scaffolds, "contiguous") >>> for my_bin in sc_cont['scaffold1']: ... print(my_bin) ... ['contig1', 1, 100, 200, 1] ['contig1', 2, 200, 300, 1] ['contig1', 3, 300, 400, 1] ['contig1', 4, 400, 500, 1] ['contig1', 10, 1500, 1605, 1] ['contig1', 12, 1750, 1850, -1] ['contig1', 23, 2100, 2499, 1] ['contig1', 28, 2850, 3000, 1] ['contig1', 0, 0, 100, -1] ['contig2', 554, 1850, 1900, -1] Note that 'contig2' remains untouched at all times since bins in it are never in the same neighborhood as those from 'contig1'. 
""" scaffolds = format_info_frags(scaffolds) new_scaffolds = {} def is_cis(bin1, bin2): return bin1[0] == bin2[0] def is_contiguous(bin1, bin2): return is_cis(bin1, bin2) and bin1[3] == bin2[2] def is_colinear(bin1, bin2): return is_cis(bin1, bin2) and bin1[3] <= bin2[2] condition_callables = { "cis": is_cis, "colinear": is_colinear, "contiguous": is_contiguous, } block_test = condition_callables.get(criterion, "colinear") for name, scaffold in scaffolds.items(): new_scaffold = [] block_cumulative_ori = 0 if len(scaffold) > 2: current_bin = scaffold[0] block_buffer = [] for my_bin in scaffold: if not block_buffer: new_bin = copy.deepcopy(my_bin) block_buffer.append(new_bin) block_cumulative_ori = my_bin[-1] continue elif not block_test(current_bin, my_bin): for my_buf_bin in block_buffer: new_bin = copy.deepcopy(my_buf_bin) if block_cumulative_ori >= 0: new_bin[-1] = 1 else: new_bin[-1] = -1 new_scaffold.append(new_bin) block_cumulative_ori = my_bin[-1] current_bin = copy.deepcopy(my_bin) block_buffer = copy.deepcopy([my_bin]) else: block_cumulative_ori += my_bin[-1] new_bin = copy.deepcopy(my_bin) block_buffer.append(new_bin) current_bin = my_bin for my_bin in block_buffer: new_bin = copy.deepcopy(my_bin) if block_cumulative_ori >= 0: new_bin[-1] = 1 else: new_bin[-1] = -1 new_scaffold.append(new_bin) new_scaffolds[name] = copy.deepcopy(new_scaffold) else: new_scaffolds[name] = copy.deepcopy(scaffold) return new_scaffolds
Invert bins based on orientation neighborhoods. Neighborhoods can be defined by three criteria: -a 'cis' neighborhood is a group of bins belonging to the same initial contig -a 'colinear' neighborhood is a 'cis' neighborhood where bins are ordered the same way they were on the initial contig -a 'contiguous' neighborhood is a 'colinear' neighborhood where all bins are exactly consecutive, i.e. the end position of each bin matches the starting position of the next bin This function looks for such neighborhoods and orients all bins in it according to the majority orientation. An example with three inversions, one for each criterion: >>> scaffolds = { ... "scaffold1": [ ... ["contig1", 1, 100, 200, 1], ... ["contig1", 2, 200, 300, 1], ... ["contig1", 3, 300, 400, -1], # <-- inversion (contiguous) ... ["contig1", 4, 400, 500, 1], ... ["contig1", 10, 1500, 1605, 1], ... ["contig1", 12, 1750, 1850, -1], # <-- inversion (colinear) ... ["contig1", 23, 2100, 2499, 1], ... ["contig1", 28, 2850, 3000, 1], ... ["contig1", 0, 0, 100, -1], # <-- inversion (cis) ... ["contig2", 554, 1850, 1900, -1], ... ], ... } With the 'cis' criterion, pretty much all bins from "contig1" get inverted to the majority orientation (+): >>> sc_cis = correct_spurious_inversions(scaffolds, "cis") >>> for my_bin in sc_cis['scaffold1']: ... print(my_bin) ... ['contig1', 1, 100, 200, 1] ['contig1', 2, 200, 300, 1] ['contig1', 3, 300, 400, 1] ['contig1', 4, 400, 500, 1] ['contig1', 10, 1500, 1605, 1] ['contig1', 12, 1750, 1850, 1] ['contig1', 23, 2100, 2499, 1] ['contig1', 28, 2850, 3000, 1] ['contig1', 0, 0, 100, 1] ['contig2', 554, 1850, 1900, -1] With the 'colinear' criterion, the bin ['contig1', 0, 0, 100, -1] is treated as a different neighborhood from the rest (as it is not colinear with the other bins from 'contig1') and remains untouched: >>> sc_colinear = correct_spurious_inversions(scaffolds, "colinear") >>> for my_bin in sc_colinear['scaffold1']: ... print(my_bin) ... ['contig1', 1, 100, 200, 1] ['contig1', 2, 200, 300, 1] ['contig1', 3, 300, 400, 1] ['contig1', 4, 400, 500, 1] ['contig1', 10, 1500, 1605, 1] ['contig1', 12, 1750, 1850, 1] ['contig1', 23, 2100, 2499, 1] ['contig1', 28, 2850, 3000, 1] ['contig1', 0, 0, 100, -1] ['contig2', 554, 1850, 1900, -1] With the 'contiguous' criterion, the ['contig1', 12, 1750, 1850, -1] breaks with the contiguous region spanning from 100 to 400 bp on 'contig1' and so is treated as a different neighborhood as well: >>> sc_cont = correct_spurious_inversions(scaffolds, "contiguous") >>> for my_bin in sc_cont['scaffold1']: ... print(my_bin) ... ['contig1', 1, 100, 200, 1] ['contig1', 2, 200, 300, 1] ['contig1', 3, 300, 400, 1] ['contig1', 4, 400, 500, 1] ['contig1', 10, 1500, 1605, 1] ['contig1', 12, 1750, 1850, -1] ['contig1', 23, 2100, 2499, 1] ['contig1', 28, 2850, 3000, 1] ['contig1', 0, 0, 100, -1] ['contig2', 554, 1850, 1900, -1] Note that 'contig2' remains untouched at all times since bins in it are never in the same neighborhood as those from 'contig1'.
Below is the the instruction that describes the task: ### Input: Invert bins based on orientation neighborhoods. Neighborhoods can be defined by three criteria: -a 'cis' neighborhood is a group of bins belonging to the same initial contig -a 'colinear' neighborhood is a 'cis' neighborhood where bins are ordered the same way they were on the initial contig -a 'contiguous' neighborhood is a 'colinear' neighborhood where all bins are exactly consecutive, i.e. the end position of each bin matches the starting position of the next bin This function looks for such neighborhoods and orients all bins in it according to the majority orientation. An example with three inversions, one for each criterion: >>> scaffolds = { ... "scaffold1": [ ... ["contig1", 1, 100, 200, 1], ... ["contig1", 2, 200, 300, 1], ... ["contig1", 3, 300, 400, -1], # <-- inversion (contiguous) ... ["contig1", 4, 400, 500, 1], ... ["contig1", 10, 1500, 1605, 1], ... ["contig1", 12, 1750, 1850, -1], # <-- inversion (colinear) ... ["contig1", 23, 2100, 2499, 1], ... ["contig1", 28, 2850, 3000, 1], ... ["contig1", 0, 0, 100, -1], # <-- inversion (cis) ... ["contig2", 554, 1850, 1900, -1], ... ], ... } With the 'cis' criterion, pretty much all bins from "contig1" get inverted to the majority orientation (+): >>> sc_cis = correct_spurious_inversions(scaffolds, "cis") >>> for my_bin in sc_cis['scaffold1']: ... print(my_bin) ... ['contig1', 1, 100, 200, 1] ['contig1', 2, 200, 300, 1] ['contig1', 3, 300, 400, 1] ['contig1', 4, 400, 500, 1] ['contig1', 10, 1500, 1605, 1] ['contig1', 12, 1750, 1850, 1] ['contig1', 23, 2100, 2499, 1] ['contig1', 28, 2850, 3000, 1] ['contig1', 0, 0, 100, 1] ['contig2', 554, 1850, 1900, -1] With the 'colinear' criterion, the bin ['contig1', 0, 0, 100, -1] is treated as a different neighborhood from the rest (as it is not colinear with the other bins from 'contig1') and remains untouched: >>> sc_colinear = correct_spurious_inversions(scaffolds, "colinear") >>> for my_bin in sc_colinear['scaffold1']: ... print(my_bin) ... ['contig1', 1, 100, 200, 1] ['contig1', 2, 200, 300, 1] ['contig1', 3, 300, 400, 1] ['contig1', 4, 400, 500, 1] ['contig1', 10, 1500, 1605, 1] ['contig1', 12, 1750, 1850, 1] ['contig1', 23, 2100, 2499, 1] ['contig1', 28, 2850, 3000, 1] ['contig1', 0, 0, 100, -1] ['contig2', 554, 1850, 1900, -1] With the 'contiguous' criterion, the ['contig1', 12, 1750, 1850, -1] breaks with the contiguous region spanning from 100 to 400 bp on 'contig1' and so is treated as a different neighborhood as well: >>> sc_cont = correct_spurious_inversions(scaffolds, "contiguous") >>> for my_bin in sc_cont['scaffold1']: ... print(my_bin) ... ['contig1', 1, 100, 200, 1] ['contig1', 2, 200, 300, 1] ['contig1', 3, 300, 400, 1] ['contig1', 4, 400, 500, 1] ['contig1', 10, 1500, 1605, 1] ['contig1', 12, 1750, 1850, -1] ['contig1', 23, 2100, 2499, 1] ['contig1', 28, 2850, 3000, 1] ['contig1', 0, 0, 100, -1] ['contig2', 554, 1850, 1900, -1] Note that 'contig2' remains untouched at all times since bins in it are never in the same neighborhood as those from 'contig1'. ### Response: def correct_spurious_inversions(scaffolds, criterion="colinear"): """Invert bins based on orientation neighborhoods. 
Neighborhoods can be defined by three criteria: -a 'cis' neighborhood is a group of bins belonging to the same initial contig -a 'colinear' neighborhood is a 'cis' neighborhood where bins are ordered the same way they were on the initial contig -a 'contiguous' neighborhood is a 'colinear' neighborhood where all bins are exactly consecutive, i.e. the end position of each bin matches the starting position of the next bin This function looks for such neighborhoods and orients all bins in it according to the majority orientation. An example with three inversions, one for each criterion: >>> scaffolds = { ... "scaffold1": [ ... ["contig1", 1, 100, 200, 1], ... ["contig1", 2, 200, 300, 1], ... ["contig1", 3, 300, 400, -1], # <-- inversion (contiguous) ... ["contig1", 4, 400, 500, 1], ... ["contig1", 10, 1500, 1605, 1], ... ["contig1", 12, 1750, 1850, -1], # <-- inversion (colinear) ... ["contig1", 23, 2100, 2499, 1], ... ["contig1", 28, 2850, 3000, 1], ... ["contig1", 0, 0, 100, -1], # <-- inversion (cis) ... ["contig2", 554, 1850, 1900, -1], ... ], ... } With the 'cis' criterion, pretty much all bins from "contig1" get inverted to the majority orientation (+): >>> sc_cis = correct_spurious_inversions(scaffolds, "cis") >>> for my_bin in sc_cis['scaffold1']: ... print(my_bin) ... ['contig1', 1, 100, 200, 1] ['contig1', 2, 200, 300, 1] ['contig1', 3, 300, 400, 1] ['contig1', 4, 400, 500, 1] ['contig1', 10, 1500, 1605, 1] ['contig1', 12, 1750, 1850, 1] ['contig1', 23, 2100, 2499, 1] ['contig1', 28, 2850, 3000, 1] ['contig1', 0, 0, 100, 1] ['contig2', 554, 1850, 1900, -1] With the 'colinear' criterion, the bin ['contig1', 0, 0, 100, -1] is treated as a different neighborhood from the rest (as it is not colinear with the other bins from 'contig1') and remains untouched: >>> sc_colinear = correct_spurious_inversions(scaffolds, "colinear") >>> for my_bin in sc_colinear['scaffold1']: ... print(my_bin) ... ['contig1', 1, 100, 200, 1] ['contig1', 2, 200, 300, 1] ['contig1', 3, 300, 400, 1] ['contig1', 4, 400, 500, 1] ['contig1', 10, 1500, 1605, 1] ['contig1', 12, 1750, 1850, 1] ['contig1', 23, 2100, 2499, 1] ['contig1', 28, 2850, 3000, 1] ['contig1', 0, 0, 100, -1] ['contig2', 554, 1850, 1900, -1] With the 'contiguous' criterion, the ['contig1', 12, 1750, 1850, -1] breaks with the contiguous region spanning from 100 to 400 bp on 'contig1' and so is treated as a different neighborhood as well: >>> sc_cont = correct_spurious_inversions(scaffolds, "contiguous") >>> for my_bin in sc_cont['scaffold1']: ... print(my_bin) ... ['contig1', 1, 100, 200, 1] ['contig1', 2, 200, 300, 1] ['contig1', 3, 300, 400, 1] ['contig1', 4, 400, 500, 1] ['contig1', 10, 1500, 1605, 1] ['contig1', 12, 1750, 1850, -1] ['contig1', 23, 2100, 2499, 1] ['contig1', 28, 2850, 3000, 1] ['contig1', 0, 0, 100, -1] ['contig2', 554, 1850, 1900, -1] Note that 'contig2' remains untouched at all times since bins in it are never in the same neighborhood as those from 'contig1'. 
""" scaffolds = format_info_frags(scaffolds) new_scaffolds = {} def is_cis(bin1, bin2): return bin1[0] == bin2[0] def is_contiguous(bin1, bin2): return is_cis(bin1, bin2) and bin1[3] == bin2[2] def is_colinear(bin1, bin2): return is_cis(bin1, bin2) and bin1[3] <= bin2[2] condition_callables = { "cis": is_cis, "colinear": is_colinear, "contiguous": is_contiguous, } block_test = condition_callables.get(criterion, "colinear") for name, scaffold in scaffolds.items(): new_scaffold = [] block_cumulative_ori = 0 if len(scaffold) > 2: current_bin = scaffold[0] block_buffer = [] for my_bin in scaffold: if not block_buffer: new_bin = copy.deepcopy(my_bin) block_buffer.append(new_bin) block_cumulative_ori = my_bin[-1] continue elif not block_test(current_bin, my_bin): for my_buf_bin in block_buffer: new_bin = copy.deepcopy(my_buf_bin) if block_cumulative_ori >= 0: new_bin[-1] = 1 else: new_bin[-1] = -1 new_scaffold.append(new_bin) block_cumulative_ori = my_bin[-1] current_bin = copy.deepcopy(my_bin) block_buffer = copy.deepcopy([my_bin]) else: block_cumulative_ori += my_bin[-1] new_bin = copy.deepcopy(my_bin) block_buffer.append(new_bin) current_bin = my_bin for my_bin in block_buffer: new_bin = copy.deepcopy(my_bin) if block_cumulative_ori >= 0: new_bin[-1] = 1 else: new_bin[-1] = -1 new_scaffold.append(new_bin) new_scaffolds[name] = copy.deepcopy(new_scaffold) else: new_scaffolds[name] = copy.deepcopy(scaffold) return new_scaffolds
def await_socket(self, timeout): """Wait up to a given timeout for a process to write socket info.""" return self.await_metadata_by_name(self._name, 'socket', timeout, self._socket_type)
Wait up to a given timeout for a process to write socket info.
Below is the the instruction that describes the task: ### Input: Wait up to a given timeout for a process to write socket info. ### Response: def await_socket(self, timeout): """Wait up to a given timeout for a process to write socket info.""" return self.await_metadata_by_name(self._name, 'socket', timeout, self._socket_type)
def find_pids(self, name, search_string, exact_match, ignore_ad=True): """ Create a set of pids of selected processes. Search for search_string """ if not self.should_refresh_pid_cache(name): return self.pid_cache[name] ad_error_logger = self.log.debug if not ignore_ad: ad_error_logger = self.log.error refresh_ad_cache = self.should_refresh_ad_cache(name) matching_pids = set() for proc in psutil.process_iter(): # Skip access denied processes if not refresh_ad_cache and proc.pid in self.ad_cache: continue found = False for string in search_string: try: # FIXME 8.x: All has been deprecated # from the doc, should be removed if string == 'All': found = True if exact_match: if os.name == 'nt': if proc.name().lower() == string.lower(): found = True else: if proc.name() == string: found = True else: cmdline = proc.cmdline() if os.name == 'nt': lstring = string.lower() if re.search(lstring, ' '.join(cmdline).lower()): found = True else: if re.search(string, ' '.join(cmdline)): found = True except psutil.NoSuchProcess: self.log.warning('Process disappeared while scanning') except psutil.AccessDenied as e: ad_error_logger('Access denied to process with PID {}'.format(proc.pid)) ad_error_logger('Error: {}'.format(e)) if refresh_ad_cache: self.ad_cache.add(proc.pid) if not ignore_ad: raise else: if refresh_ad_cache: self.ad_cache.discard(proc.pid) if found: matching_pids.add(proc.pid) break self.pid_cache[name] = matching_pids self.last_pid_cache_ts[name] = time.time() if refresh_ad_cache: self.last_ad_cache_ts[name] = time.time() return matching_pids
Create a set of pids of selected processes. Search for search_string
Below is the the instruction that describes the task: ### Input: Create a set of pids of selected processes. Search for search_string ### Response: def find_pids(self, name, search_string, exact_match, ignore_ad=True): """ Create a set of pids of selected processes. Search for search_string """ if not self.should_refresh_pid_cache(name): return self.pid_cache[name] ad_error_logger = self.log.debug if not ignore_ad: ad_error_logger = self.log.error refresh_ad_cache = self.should_refresh_ad_cache(name) matching_pids = set() for proc in psutil.process_iter(): # Skip access denied processes if not refresh_ad_cache and proc.pid in self.ad_cache: continue found = False for string in search_string: try: # FIXME 8.x: All has been deprecated # from the doc, should be removed if string == 'All': found = True if exact_match: if os.name == 'nt': if proc.name().lower() == string.lower(): found = True else: if proc.name() == string: found = True else: cmdline = proc.cmdline() if os.name == 'nt': lstring = string.lower() if re.search(lstring, ' '.join(cmdline).lower()): found = True else: if re.search(string, ' '.join(cmdline)): found = True except psutil.NoSuchProcess: self.log.warning('Process disappeared while scanning') except psutil.AccessDenied as e: ad_error_logger('Access denied to process with PID {}'.format(proc.pid)) ad_error_logger('Error: {}'.format(e)) if refresh_ad_cache: self.ad_cache.add(proc.pid) if not ignore_ad: raise else: if refresh_ad_cache: self.ad_cache.discard(proc.pid) if found: matching_pids.add(proc.pid) break self.pid_cache[name] = matching_pids self.last_pid_cache_ts[name] = time.time() if refresh_ad_cache: self.last_ad_cache_ts[name] = time.time() return matching_pids
def kernel(self, spread=1): """ This will return whatever kind of kernel we want to use. Must have signature (ndarray size NxM, ndarray size 1xM) -> ndarray size Nx1 """ # TODO: use self.kernel_type to choose function def gaussian(data, pixel): return mvn.pdf(data, mean=pixel, cov=spread) return gaussian
This will return whatever kind of kernel we want to use. Must have signature (ndarray size NxM, ndarray size 1xM) -> ndarray size Nx1
Below is the the instruction that describes the task: ### Input: This will return whatever kind of kernel we want to use. Must have signature (ndarray size NxM, ndarray size 1xM) -> ndarray size Nx1 ### Response: def kernel(self, spread=1): """ This will return whatever kind of kernel we want to use. Must have signature (ndarray size NxM, ndarray size 1xM) -> ndarray size Nx1 """ # TODO: use self.kernel_type to choose function def gaussian(data, pixel): return mvn.pdf(data, mean=pixel, cov=spread) return gaussian
def add_attachment(self, issue_key, filename): """ Add attachment to Issue :param issue_key: str :param filename: str, name, if file in current directory or full path to file """ log.warning('Adding attachment...') headers = {'X-Atlassian-Token': 'no-check'} with open(filename, 'rb') as file: files = {'file': file} url = 'rest/api/2/issue/{}/attachments'.format(issue_key) return self.post(url, headers=headers, files=files)
Add attachment to Issue :param issue_key: str :param filename: str, name, if file in current directory or full path to file
Below is the the instruction that describes the task: ### Input: Add attachment to Issue :param issue_key: str :param filename: str, name, if file in current directory or full path to file ### Response: def add_attachment(self, issue_key, filename): """ Add attachment to Issue :param issue_key: str :param filename: str, name, if file in current directory or full path to file """ log.warning('Adding attachment...') headers = {'X-Atlassian-Token': 'no-check'} with open(filename, 'rb') as file: files = {'file': file} url = 'rest/api/2/issue/{}/attachments'.format(issue_key) return self.post(url, headers=headers, files=files)
def _read_undone_shard_from_datastore(self, shard_id=None): """Reads undone worke pieces which are assigned to shard with given id.""" self._work = {} client = self._datastore_client parent_key = client.key(KIND_WORK_TYPE, self._work_type_entity_id) filters = [('is_completed', '=', False)] if shard_id is not None: filters.append(('shard_id', '=', shard_id)) for entity in client.query_fetch(kind=KIND_WORK, ancestor=parent_key, filters=filters): work_id = entity.key.flat_path[-1] self.work[work_id] = dict(entity) if len(self._work) >= MAX_WORK_RECORDS_READ: break
Reads undone work pieces which are assigned to the shard with the given id.
Below is the the instruction that describes the task: ### Input: Reads undone worke pieces which are assigned to shard with given id. ### Response: def _read_undone_shard_from_datastore(self, shard_id=None): """Reads undone worke pieces which are assigned to shard with given id.""" self._work = {} client = self._datastore_client parent_key = client.key(KIND_WORK_TYPE, self._work_type_entity_id) filters = [('is_completed', '=', False)] if shard_id is not None: filters.append(('shard_id', '=', shard_id)) for entity in client.query_fetch(kind=KIND_WORK, ancestor=parent_key, filters=filters): work_id = entity.key.flat_path[-1] self.work[work_id] = dict(entity) if len(self._work) >= MAX_WORK_RECORDS_READ: break
def publish(self, topic, *args, **kwargs): """Publish an event to a topic. Replace :meth:`autobahn.wamp.interface.IApplicationSession.publish` """ return self._async_session.publish(topic, *args, **kwargs)
Publish an event to a topic. Replace :meth:`autobahn.wamp.interface.IApplicationSession.publish`
Below is the the instruction that describes the task: ### Input: Publish an event to a topic. Replace :meth:`autobahn.wamp.interface.IApplicationSession.publish` ### Response: def publish(self, topic, *args, **kwargs): """Publish an event to a topic. Replace :meth:`autobahn.wamp.interface.IApplicationSession.publish` """ return self._async_session.publish(topic, *args, **kwargs)
def to_yaml(obj): """ This function returns correct YAML representation of a UAVCAN structure (message, request, or response), or a DSDL entity (array or primitive), or a UAVCAN transfer, with comments for human benefit. Args: obj: Object to convert. Returns: Unicode string containing YAML representation of the object. """ if not isinstance(obj, CompoundValue) and hasattr(obj, 'transfer'): if hasattr(obj, 'message'): payload = obj.message header = 'Message' elif hasattr(obj, 'request'): payload = obj.request header = 'Request' elif hasattr(obj, 'response'): payload = obj.response header = 'Response' else: raise ValueError('Cannot generate YAML representation for %r' % type(obj)) prefix = '### %s from %s to %s ts_mono=%.6f ts_real=%.6f\n' % \ (header, obj.transfer.source_node_id or 'Anon', obj.transfer.dest_node_id or 'All', obj.transfer.ts_monotonic, obj.transfer.ts_real) return prefix + _to_yaml_impl(payload) else: return _to_yaml_impl(obj)
This function returns correct YAML representation of a UAVCAN structure (message, request, or response), or a DSDL entity (array or primitive), or a UAVCAN transfer, with comments for human benefit. Args: obj: Object to convert. Returns: Unicode string containing YAML representation of the object.
Below is the the instruction that describes the task: ### Input: This function returns correct YAML representation of a UAVCAN structure (message, request, or response), or a DSDL entity (array or primitive), or a UAVCAN transfer, with comments for human benefit. Args: obj: Object to convert. Returns: Unicode string containing YAML representation of the object. ### Response: def to_yaml(obj): """ This function returns correct YAML representation of a UAVCAN structure (message, request, or response), or a DSDL entity (array or primitive), or a UAVCAN transfer, with comments for human benefit. Args: obj: Object to convert. Returns: Unicode string containing YAML representation of the object. """ if not isinstance(obj, CompoundValue) and hasattr(obj, 'transfer'): if hasattr(obj, 'message'): payload = obj.message header = 'Message' elif hasattr(obj, 'request'): payload = obj.request header = 'Request' elif hasattr(obj, 'response'): payload = obj.response header = 'Response' else: raise ValueError('Cannot generate YAML representation for %r' % type(obj)) prefix = '### %s from %s to %s ts_mono=%.6f ts_real=%.6f\n' % \ (header, obj.transfer.source_node_id or 'Anon', obj.transfer.dest_node_id or 'All', obj.transfer.ts_monotonic, obj.transfer.ts_real) return prefix + _to_yaml_impl(payload) else: return _to_yaml_impl(obj)
def _parse_hparams(hparams): """Split hparams, based on key prefixes. Args: hparams: hyperparameters Returns: Tuple of hparams for respectably: agent, optimizer, runner, replay_buffer. """ prefixes = ["agent_", "optimizer_", "runner_", "replay_buffer_"] ret = [] for prefix in prefixes: ret_dict = {} for key in hparams.values(): if prefix in key: par_name = key[len(prefix):] ret_dict[par_name] = hparams.get(key) ret.append(ret_dict) return ret
Split hparams, based on key prefixes. Args: hparams: hyperparameters Returns: Tuple of hparams for, respectively: agent, optimizer, runner, replay_buffer.
Below is the the instruction that describes the task: ### Input: Split hparams, based on key prefixes. Args: hparams: hyperparameters Returns: Tuple of hparams for respectably: agent, optimizer, runner, replay_buffer. ### Response: def _parse_hparams(hparams): """Split hparams, based on key prefixes. Args: hparams: hyperparameters Returns: Tuple of hparams for respectably: agent, optimizer, runner, replay_buffer. """ prefixes = ["agent_", "optimizer_", "runner_", "replay_buffer_"] ret = [] for prefix in prefixes: ret_dict = {} for key in hparams.values(): if prefix in key: par_name = key[len(prefix):] ret_dict[par_name] = hparams.get(key) ret.append(ret_dict) return ret
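A minimal sketch of driving _parse_hparams above; FakeHParams is a hypothetical stand-in for the real hparams object, whose values() returns a name-to-value dict (iterating it yields the names) and whose get() looks a name up:

class FakeHParams:  # hypothetical stand-in, not part of the record above
    def __init__(self, **kw):
        self._kw = kw
    def values(self):
        return dict(self._kw)
    def get(self, key):
        return self._kw[key]

hparams = FakeHParams(agent_lr=1e-4, optimizer_name="adam",
                      runner_steps=1000, replay_buffer_size=50000)
agent, optimizer, runner, replay_buffer = _parse_hparams(hparams)
print(optimizer)       # {'name': 'adam'}
print(replay_buffer)   # {'size': 50000}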
def _getOpenID1SessionType(self, assoc_response): """Given an association response message, extract the OpenID 1.X session type. This function mostly takes care of the 'no-encryption' default behavior in OpenID 1. If the association type is plain-text, this function will return 'no-encryption' @returns: The association type for this message @rtype: str @raises KeyError: when the session_type field is absent. """ # If it's an OpenID 1 message, allow session_type to default # to None (which signifies "no-encryption") session_type = assoc_response.getArg(OPENID1_NS, 'session_type') # Handle the differences between no-encryption association # respones in OpenID 1 and 2: # no-encryption is not really a valid session type for # OpenID 1, but we'll accept it anyway, while issuing a # warning. if session_type == 'no-encryption': logging.warning('OpenID server sent "no-encryption"' 'for OpenID 1.X') # Missing or empty session type is the way to flag a # 'no-encryption' response. Change the session type to # 'no-encryption' so that it can be handled in the same # way as OpenID 2 'no-encryption' respones. elif session_type == '' or session_type is None: session_type = 'no-encryption' return session_type
Given an association response message, extract the OpenID 1.X session type. This function mostly takes care of the 'no-encryption' default behavior in OpenID 1. If the association type is plain-text, this function will return 'no-encryption'. @returns: The session type for this message @rtype: str @raises KeyError: when the session_type field is absent.
Below is the the instruction that describes the task: ### Input: Given an association response message, extract the OpenID 1.X session type. This function mostly takes care of the 'no-encryption' default behavior in OpenID 1. If the association type is plain-text, this function will return 'no-encryption' @returns: The association type for this message @rtype: str @raises KeyError: when the session_type field is absent. ### Response: def _getOpenID1SessionType(self, assoc_response): """Given an association response message, extract the OpenID 1.X session type. This function mostly takes care of the 'no-encryption' default behavior in OpenID 1. If the association type is plain-text, this function will return 'no-encryption' @returns: The association type for this message @rtype: str @raises KeyError: when the session_type field is absent. """ # If it's an OpenID 1 message, allow session_type to default # to None (which signifies "no-encryption") session_type = assoc_response.getArg(OPENID1_NS, 'session_type') # Handle the differences between no-encryption association # respones in OpenID 1 and 2: # no-encryption is not really a valid session type for # OpenID 1, but we'll accept it anyway, while issuing a # warning. if session_type == 'no-encryption': logging.warning('OpenID server sent "no-encryption"' 'for OpenID 1.X') # Missing or empty session type is the way to flag a # 'no-encryption' response. Change the session type to # 'no-encryption' so that it can be handled in the same # way as OpenID 2 'no-encryption' respones. elif session_type == '' or session_type is None: session_type = 'no-encryption' return session_type
def DropTables(self): """Drop all existing tables.""" rows, _ = self.ExecuteQuery( "SELECT table_name FROM information_schema.tables " "WHERE table_schema='%s'" % self.database_name) for row in rows: self.ExecuteQuery("DROP TABLE `%s`" % row["table_name"])
Drop all existing tables.
Below is the the instruction that describes the task: ### Input: Drop all existing tables. ### Response: def DropTables(self): """Drop all existing tables.""" rows, _ = self.ExecuteQuery( "SELECT table_name FROM information_schema.tables " "WHERE table_schema='%s'" % self.database_name) for row in rows: self.ExecuteQuery("DROP TABLE `%s`" % row["table_name"])
def set_state(self): """ Sets the state required for this vertex region. Currently binds and enables the texture of the material of the region. """ glEnable(self.region.material.target) glBindTexture(self.region.material.target, self.region.material.id) self.region.bone.setRotate(self.data)
Sets the state required for this vertex region. Currently binds and enables the texture of the material of the region.
Below is the the instruction that describes the task: ### Input: Sets the state required for this vertex region. Currently binds and enables the texture of the material of the region. ### Response: def set_state(self): """ Sets the state required for this vertex region. Currently binds and enables the texture of the material of the region. """ glEnable(self.region.material.target) glBindTexture(self.region.material.target, self.region.material.id) self.region.bone.setRotate(self.data)
def set_XY(self, X, Y): """ Set the input / output data of the model This is useful if we wish to change our existing data but maintain the same model :param X: input observations :type X: np.ndarray :param Y: output observations :type Y: np.ndarray or ObsAr """ self.update_model(False) self.set_Y(Y) self.set_X(X) self.update_model(True)
Set the input / output data of the model This is useful if we wish to change our existing data but maintain the same model :param X: input observations :type X: np.ndarray :param Y: output observations :type Y: np.ndarray or ObsAr
Below is the the instruction that describes the task: ### Input: Set the input / output data of the model This is useful if we wish to change our existing data but maintain the same model :param X: input observations :type X: np.ndarray :param Y: output observations :type Y: np.ndarray or ObsAr ### Response: def set_XY(self, X, Y): """ Set the input / output data of the model This is useful if we wish to change our existing data but maintain the same model :param X: input observations :type X: np.ndarray :param Y: output observations :type Y: np.ndarray or ObsAr """ self.update_model(False) self.set_Y(Y) self.set_X(X) self.update_model(True)
def decode(input, output): """Decode a file.""" while True: line = input.readline() if not line: break s = binascii.a2b_base64(line) output.write(s)
Decode a file.
Below is the the instruction that describes the task: ### Input: Decode a file. ### Response: def decode(input, output): """Decode a file.""" while True: line = input.readline() if not line: break s = binascii.a2b_base64(line) output.write(s)
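A runnable usage sketch for decode above, using in-memory streams so no files are needed (the record's own binascii import is assumed to be in scope):

import io

encoded = io.StringIO("aGVsbG8gd29ybGQ=\n")  # base64 for b"hello world"
decoded = io.BytesIO()
decode(encoded, decoded)                      # decode() as defined above
print(decoded.getvalue())                     # b'hello world'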
def get_fields(model_class, field_name='', path=''): """ Get fields and meta data from a model :param model_class: A django model class :param field_name: The field name to get sub fields from :param path: path of our field in format field_name__second_field_name__ect__ :returns: Returns fields and meta data about such fields fields: Django model fields properties: Any properties the model has path: Our new path :rtype: dict """ fields = get_direct_fields_from_model(model_class) app_label = model_class._meta.app_label if field_name != '': field, model, direct, m2m = _get_field_by_name(model_class, field_name) path += field_name path += '__' if direct: # Direct field try: new_model = _get_remote_field(field).parent_model except AttributeError: new_model = _get_remote_field(field).model else: # Indirect related field new_model = field.related_model fields = get_direct_fields_from_model(new_model) app_label = new_model._meta.app_label return { 'fields': fields, 'path': path, 'app_label': app_label, }
Get fields and meta data from a model :param model_class: A Django model class :param field_name: The field name to get sub fields from :param path: path of our field in format field_name__second_field_name__etc__ :returns: Returns fields and meta data about such fields fields: Django model fields properties: Any properties the model has path: Our new path :rtype: dict
Below is the the instruction that describes the task: ### Input: Get fields and meta data from a model :param model_class: A django model class :param field_name: The field name to get sub fields from :param path: path of our field in format field_name__second_field_name__ect__ :returns: Returns fields and meta data about such fields fields: Django model fields properties: Any properties the model has path: Our new path :rtype: dict ### Response: def get_fields(model_class, field_name='', path=''): """ Get fields and meta data from a model :param model_class: A django model class :param field_name: The field name to get sub fields from :param path: path of our field in format field_name__second_field_name__ect__ :returns: Returns fields and meta data about such fields fields: Django model fields properties: Any properties the model has path: Our new path :rtype: dict """ fields = get_direct_fields_from_model(model_class) app_label = model_class._meta.app_label if field_name != '': field, model, direct, m2m = _get_field_by_name(model_class, field_name) path += field_name path += '__' if direct: # Direct field try: new_model = _get_remote_field(field).parent_model except AttributeError: new_model = _get_remote_field(field).model else: # Indirect related field new_model = field.related_model fields = get_direct_fields_from_model(new_model) app_label = new_model._meta.app_label return { 'fields': fields, 'path': path, 'app_label': app_label, }
def _parse_qualimap_rnaseq(table): """ Retrieve metrics of interest from globals table. """ out = {} for row in table.find_all("tr"): col, val = [x.text for x in row.find_all("td")] col = col.replace(":", "").strip() val = val.replace(",", "") m = {col: val} if val.find("/") > -1: m = _parse_num_pct(col, val.replace("%", "")) out.update(m) return out
Retrieve metrics of interest from globals table.
Below is the the instruction that describes the task: ### Input: Retrieve metrics of interest from globals table. ### Response: def _parse_qualimap_rnaseq(table): """ Retrieve metrics of interest from globals table. """ out = {} for row in table.find_all("tr"): col, val = [x.text for x in row.find_all("td")] col = col.replace(":", "").strip() val = val.replace(",", "") m = {col: val} if val.find("/") > -1: m = _parse_num_pct(col, val.replace("%", "")) out.update(m) return out
def button_press(self, terminal, event): """Handles the button press event in the terminal widget. If any match string is caught, another application is open to handle the matched resource uri. """ self.matched_value = '' if (Vte.MAJOR_VERSION, Vte.MINOR_VERSION) >= (0, 46): matched_string = self.match_check_event(event) else: matched_string = self.match_check( int(event.x / self.get_char_width()), int(event.y / self.get_char_height()) ) self.found_link = None if event.button == 1 and (event.get_state() & Gdk.ModifierType.CONTROL_MASK): if (Vte.MAJOR_VERSION, Vte.MINOR_VERSION) > (0, 50): s = self.hyperlink_check_event(event) else: s = None if s is not None: self._on_ctrl_click_matcher((s, None)) elif self.get_has_selection(): self.quick_open() elif matched_string and matched_string[0]: self._on_ctrl_click_matcher(matched_string) elif event.button == 3 and matched_string: self.found_link = self.handleTerminalMatch(matched_string) self.matched_value = matched_string[0]
Handles the button press event in the terminal widget. If any match string is caught, another application is opened to handle the matched resource URI.
Below is the the instruction that describes the task: ### Input: Handles the button press event in the terminal widget. If any match string is caught, another application is open to handle the matched resource uri. ### Response: def button_press(self, terminal, event): """Handles the button press event in the terminal widget. If any match string is caught, another application is open to handle the matched resource uri. """ self.matched_value = '' if (Vte.MAJOR_VERSION, Vte.MINOR_VERSION) >= (0, 46): matched_string = self.match_check_event(event) else: matched_string = self.match_check( int(event.x / self.get_char_width()), int(event.y / self.get_char_height()) ) self.found_link = None if event.button == 1 and (event.get_state() & Gdk.ModifierType.CONTROL_MASK): if (Vte.MAJOR_VERSION, Vte.MINOR_VERSION) > (0, 50): s = self.hyperlink_check_event(event) else: s = None if s is not None: self._on_ctrl_click_matcher((s, None)) elif self.get_has_selection(): self.quick_open() elif matched_string and matched_string[0]: self._on_ctrl_click_matcher(matched_string) elif event.button == 3 and matched_string: self.found_link = self.handleTerminalMatch(matched_string) self.matched_value = matched_string[0]
def average_loss(lc): """ Given a loss curve array with `poe` and `loss` fields, computes the average loss on a period of time. :note: As the loss curve is supposed to be piecewise linear as it is a result of a linear interpolation, we compute an exact integral by using the trapeizodal rule with the width given by the loss bin width. """ losses, poes = (lc['loss'], lc['poe']) if lc.dtype.names else lc return -pairwise_diff(losses) @ pairwise_mean(poes)
Given a loss curve array with `poe` and `loss` fields, computes the average loss over a period of time. :note: As the loss curve is supposed to be piecewise linear, being the result of a linear interpolation, we compute an exact integral by using the trapezoidal rule with the width given by the loss bin width.
Below is the the instruction that describes the task: ### Input: Given a loss curve array with `poe` and `loss` fields, computes the average loss on a period of time. :note: As the loss curve is supposed to be piecewise linear as it is a result of a linear interpolation, we compute an exact integral by using the trapeizodal rule with the width given by the loss bin width. ### Response: def average_loss(lc): """ Given a loss curve array with `poe` and `loss` fields, computes the average loss on a period of time. :note: As the loss curve is supposed to be piecewise linear as it is a result of a linear interpolation, we compute an exact integral by using the trapeizodal rule with the width given by the loss bin width. """ losses, poes = (lc['loss'], lc['poe']) if lc.dtype.names else lc return -pairwise_diff(losses) @ pairwise_mean(poes)
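The pairwise_diff and pairwise_mean helpers are not shown in the record; the sketch below assumes definitions that make the formula reproduce the trapezoidal rule (difference of each value with the next, and mean of each adjacent pair), with made-up loss/poe values:

import numpy as np

def pairwise_diff(x):          # assumed helper: x[i] - x[i+1]
    return x[:-1] - x[1:]

def pairwise_mean(x):          # assumed helper: (x[i] + x[i+1]) / 2
    return (x[:-1] + x[1:]) / 2.0

losses = np.array([0.0, 10.0, 50.0, 100.0])
poes = np.array([0.9, 0.5, 0.1, 0.01])
print(-pairwise_diff(losses) @ pairwise_mean(poes))  # 21.75, the trapezoidal integral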
def flatten(value, prefix=None): """Takes an arbitrary JSON(ish) object and 'flattens' it into a dict with values consisting of either simple types or lists of simple types.""" def issimple(value): # foldr(True, or, value)? for item in value: if isinstance(item, dict) or isinstance(item, list): return False return True if isinstance(value, six.text_type): return value.encode("utf8") if isinstance(value, list): if issimple(value): return value offset = 0 result = {} prefix = "%d" if prefix is None else "%s_%%d" % prefix for item in value: k = prefix % offset v = flatten(item, k) if not isinstance(v, dict): v = {k:v} result.update(v) offset += 1 return result if isinstance(value, dict): result = {} prefix = "%s" if prefix is None else "%s_%%s" % prefix for k, v in six.iteritems(value): k = prefix % str(k) v = flatten(v, k) if not isinstance(v, dict): v = {k:v} result.update(v) return result return value
Takes an arbitrary JSON(ish) object and 'flattens' it into a dict with values consisting of either simple types or lists of simple types.
Below is the the instruction that describes the task: ### Input: Takes an arbitrary JSON(ish) object and 'flattens' it into a dict with values consisting of either simple types or lists of simple types. ### Response: def flatten(value, prefix=None): """Takes an arbitrary JSON(ish) object and 'flattens' it into a dict with values consisting of either simple types or lists of simple types.""" def issimple(value): # foldr(True, or, value)? for item in value: if isinstance(item, dict) or isinstance(item, list): return False return True if isinstance(value, six.text_type): return value.encode("utf8") if isinstance(value, list): if issimple(value): return value offset = 0 result = {} prefix = "%d" if prefix is None else "%s_%%d" % prefix for item in value: k = prefix % offset v = flatten(item, k) if not isinstance(v, dict): v = {k:v} result.update(v) offset += 1 return result if isinstance(value, dict): result = {} prefix = "%s" if prefix is None else "%s_%%s" % prefix for k, v in six.iteritems(value): k = prefix % str(k) v = flatten(v, k) if not isinstance(v, dict): v = {k:v} result.update(v) return result return value
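A short sketch of what flatten above produces on Python 3 (where six.text_type is str, so plain string values come back UTF-8 encoded); the input values are made up:

nested = {"user": {"name": "ada", "tags": ["a", "b"]},
          "items": [{"id": 1}, {"id": 2}]}
print(flatten(nested))
# {'user_name': b'ada', 'user_tags': ['a', 'b'], 'items_0_id': 1, 'items_1_id': 2}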
def split(self, N, force=False): """ There are two modes of splitting the records - batch: splitting is sequentially to records/N chunks - cycle: placing each record in the splitted files and cycles use `cycle` if the len of the record is not evenly distributed """ mode = self.mode assert mode in ("batch", "cycle", "optimal") logging.debug("set split mode=%s" % mode) self.names = self.__class__.get_names(self.filename, N) if self.outputdir: self.names = [op.join(self.outputdir, x) for x in self.names] if not need_update(self.filename, self.names) and not force: logging.error("file %s already existed, skip file splitting" % \ self.names[0]) return filehandles = [open(x, "w") for x in self.names] if mode == "batch": for batch, fw in zip(self._batch_iterator(N), filehandles): count = self.write(fw, batch) logging.debug("write %d records to %s" % (count, fw.name)) elif mode == "cycle": handle = self._open(self.filename) for record, fw in zip(handle, cycle(filehandles)): count = self.write(fw, [record]) elif mode == "optimal": """ This mode is based on Longest Processing Time (LPT) algorithm: A simple, often-used algorithm is the LPT algorithm (Longest Processing Time) which sorts the jobs by its processing time and then assigns them to the machine with the earliest end time so far. This algorithm achieves an upper bound of 4/3 - 1/(3m) OPT. Citation: <http://en.wikipedia.org/wiki/Multiprocessor_scheduling> """ endtime = [0] * N handle = self._open(self.filename) for record in handle: mt, mi = min((x, i) for (i, x) in enumerate(endtime)) fw = filehandles[mi] count = self.write(fw, [record]) endtime[mi] += len(record) for fw in filehandles: fw.close()
There are two modes of splitting the records - batch: the records are split sequentially into chunks of records/N - cycle: each record is placed into the split files in a round-robin cycle use `cycle` if the lengths of the records are not evenly distributed
Below is the the instruction that describes the task: ### Input: There are two modes of splitting the records - batch: splitting is sequentially to records/N chunks - cycle: placing each record in the splitted files and cycles use `cycle` if the len of the record is not evenly distributed ### Response: def split(self, N, force=False): """ There are two modes of splitting the records - batch: splitting is sequentially to records/N chunks - cycle: placing each record in the splitted files and cycles use `cycle` if the len of the record is not evenly distributed """ mode = self.mode assert mode in ("batch", "cycle", "optimal") logging.debug("set split mode=%s" % mode) self.names = self.__class__.get_names(self.filename, N) if self.outputdir: self.names = [op.join(self.outputdir, x) for x in self.names] if not need_update(self.filename, self.names) and not force: logging.error("file %s already existed, skip file splitting" % \ self.names[0]) return filehandles = [open(x, "w") for x in self.names] if mode == "batch": for batch, fw in zip(self._batch_iterator(N), filehandles): count = self.write(fw, batch) logging.debug("write %d records to %s" % (count, fw.name)) elif mode == "cycle": handle = self._open(self.filename) for record, fw in zip(handle, cycle(filehandles)): count = self.write(fw, [record]) elif mode == "optimal": """ This mode is based on Longest Processing Time (LPT) algorithm: A simple, often-used algorithm is the LPT algorithm (Longest Processing Time) which sorts the jobs by its processing time and then assigns them to the machine with the earliest end time so far. This algorithm achieves an upper bound of 4/3 - 1/(3m) OPT. Citation: <http://en.wikipedia.org/wiki/Multiprocessor_scheduling> """ endtime = [0] * N handle = self._open(self.filename) for record in handle: mt, mi = min((x, i) for (i, x) in enumerate(endtime)) fw = filehandles[mi] count = self.write(fw, [record]) endtime[mi] += len(record) for fw in filehandles: fw.close()
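A tiny self-contained illustration of the LPT-style assignment used by the "optimal" mode above: each record goes to whichever output file currently has the earliest end time, keeping the files roughly balanced (the record lengths are made up):

record_lengths = [7, 3, 9, 4, 2]
endtime = [0, 0]                          # two output files
for length in record_lengths:
    _, i = min((t, i) for i, t in enumerate(endtime))
    endtime[i] += length
print(endtime)                            # [13, 12]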
def append_item(self, item): """ Add an item to the end of the menu before the exit item :param MenuItem item: The item to be added """ did_remove = self.remove_exit() item.menu = self self.items.append(item) if did_remove: self.add_exit() if self.screen: max_row, max_cols = self.screen.getmaxyx() if max_row < 6 + len(self.items): self.screen.resize(6 + len(self.items), max_cols) self.draw()
Add an item to the end of the menu before the exit item :param MenuItem item: The item to be added
Below is the the instruction that describes the task: ### Input: Add an item to the end of the menu before the exit item :param MenuItem item: The item to be added ### Response: def append_item(self, item): """ Add an item to the end of the menu before the exit item :param MenuItem item: The item to be added """ did_remove = self.remove_exit() item.menu = self self.items.append(item) if did_remove: self.add_exit() if self.screen: max_row, max_cols = self.screen.getmaxyx() if max_row < 6 + len(self.items): self.screen.resize(6 + len(self.items), max_cols) self.draw()
def confd_state_internal_callpoints_authorization_callbacks_registration_type_daemon_daemon_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring") internal = ET.SubElement(confd_state, "internal") callpoints = ET.SubElement(internal, "callpoints") authorization_callbacks = ET.SubElement(callpoints, "authorization-callbacks") registration_type = ET.SubElement(authorization_callbacks, "registration-type") daemon = ET.SubElement(registration_type, "daemon") daemon = ET.SubElement(daemon, "daemon") name = ET.SubElement(daemon, "name") name.text = kwargs.pop('name') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
Below is the the instruction that describes the task: ### Input: Auto Generated Code ### Response: def confd_state_internal_callpoints_authorization_callbacks_registration_type_daemon_daemon_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring") internal = ET.SubElement(confd_state, "internal") callpoints = ET.SubElement(internal, "callpoints") authorization_callbacks = ET.SubElement(callpoints, "authorization-callbacks") registration_type = ET.SubElement(authorization_callbacks, "registration-type") daemon = ET.SubElement(registration_type, "daemon") daemon = ET.SubElement(daemon, "daemon") name = ET.SubElement(daemon, "name") name.text = kwargs.pop('name') callback = kwargs.pop('callback', self._callback) return callback(config)
def next_task(self, item, **kwargs): """Calls import_batch for the next filename in the queue and "archives" the file. The archive folder is typically the folder for the deserializer queue. """ filename = os.path.basename(item) try: self.tx_importer.import_batch(filename=filename) except TransactionImporterError as e: raise TransactionsFileQueueError(e) from e else: self.archive(filename)
Calls import_batch for the next filename in the queue and "archives" the file. The archive folder is typically the folder for the deserializer queue.
Below is the the instruction that describes the task: ### Input: Calls import_batch for the next filename in the queue and "archives" the file. The archive folder is typically the folder for the deserializer queue. ### Response: def next_task(self, item, **kwargs): """Calls import_batch for the next filename in the queue and "archives" the file. The archive folder is typically the folder for the deserializer queue. """ filename = os.path.basename(item) try: self.tx_importer.import_batch(filename=filename) except TransactionImporterError as e: raise TransactionsFileQueueError(e) from e else: self.archive(filename)
def filter_(predicate, *structures, **kwargs): # pylint: disable=differing-param-doc,missing-param-doc, too-many-branches """Select elements of a nested structure based on a predicate function. If multiple structures are provided as input, their structure must match and the function will be applied to corresponding groups of elements. The nested structure can consist of any combination of lists, tuples, and dicts. Args: predicate: The function to determine whether an element should be kept. Receives one argument for every structure that is provided. *structures: One of more nested structures. flatten: Whether to flatten the resulting structure into a tuple. Keys of dictionaries will be discarded. Returns: Nested structure. """ # Named keyword arguments are not allowed after *args in Python 2. flatten = kwargs.pop('flatten', False) assert not kwargs, 'filter() got unexpected keyword arguments.' def impl(predicate, *structures): if len(structures) == 0: # pylint: disable=len-as-condition return structures if all(isinstance(s, (tuple, list)) for s in structures): if len(set(len(x) for x in structures)) > 1: raise ValueError('Cannot merge tuples or lists of different length.') # Only wrap in tuples if more than one structure provided. if len(structures) > 1: filtered = (impl(predicate, *x) for x in _builtin_zip(*structures)) else: filtered = (impl(predicate, x) for x in structures[0]) # Remove empty containers and construct result structure. if hasattr(structures[0], '_fields'): # namedtuple filtered = (x if x != () else None for x in filtered) return type(structures[0])(*filtered) else: # tuple, list filtered = ( x for x in filtered if not isinstance(x, (tuple, list, dict)) or x) return type(structures[0])(filtered) if all(isinstance(s, dict) for s in structures): if len(set(frozenset(x.keys()) for x in structures)) > 1: raise ValueError('Cannot merge dicts with different keys.') # Only wrap in tuples if more than one structure provided. if len(structures) > 1: filtered = { k: impl(predicate, *(s[k] for s in structures)) for k in structures[0]} else: filtered = {k: impl(predicate, v) for k, v in structures[0].items()} # Remove empty containers and construct result structure. filtered = { k: v for k, v in filtered.items() if not isinstance(v, (tuple, list, dict)) or v} return type(structures[0])(filtered) if len(structures) > 1: return structures if predicate(*structures) else () else: return structures[0] if predicate(structures[0]) else () result = impl(predicate, *structures) if flatten: result = flatten_(result) return result
Select elements of a nested structure based on a predicate function. If multiple structures are provided as input, their structure must match and the function will be applied to corresponding groups of elements. The nested structure can consist of any combination of lists, tuples, and dicts. Args: predicate: The function to determine whether an element should be kept. Receives one argument for every structure that is provided. *structures: One or more nested structures. flatten: Whether to flatten the resulting structure into a tuple. Keys of dictionaries will be discarded. Returns: Nested structure.
Below is the the instruction that describes the task: ### Input: Select elements of a nested structure based on a predicate function. If multiple structures are provided as input, their structure must match and the function will be applied to corresponding groups of elements. The nested structure can consist of any combination of lists, tuples, and dicts. Args: predicate: The function to determine whether an element should be kept. Receives one argument for every structure that is provided. *structures: One of more nested structures. flatten: Whether to flatten the resulting structure into a tuple. Keys of dictionaries will be discarded. Returns: Nested structure. ### Response: def filter_(predicate, *structures, **kwargs): # pylint: disable=differing-param-doc,missing-param-doc, too-many-branches """Select elements of a nested structure based on a predicate function. If multiple structures are provided as input, their structure must match and the function will be applied to corresponding groups of elements. The nested structure can consist of any combination of lists, tuples, and dicts. Args: predicate: The function to determine whether an element should be kept. Receives one argument for every structure that is provided. *structures: One of more nested structures. flatten: Whether to flatten the resulting structure into a tuple. Keys of dictionaries will be discarded. Returns: Nested structure. """ # Named keyword arguments are not allowed after *args in Python 2. flatten = kwargs.pop('flatten', False) assert not kwargs, 'filter() got unexpected keyword arguments.' def impl(predicate, *structures): if len(structures) == 0: # pylint: disable=len-as-condition return structures if all(isinstance(s, (tuple, list)) for s in structures): if len(set(len(x) for x in structures)) > 1: raise ValueError('Cannot merge tuples or lists of different length.') # Only wrap in tuples if more than one structure provided. if len(structures) > 1: filtered = (impl(predicate, *x) for x in _builtin_zip(*structures)) else: filtered = (impl(predicate, x) for x in structures[0]) # Remove empty containers and construct result structure. if hasattr(structures[0], '_fields'): # namedtuple filtered = (x if x != () else None for x in filtered) return type(structures[0])(*filtered) else: # tuple, list filtered = ( x for x in filtered if not isinstance(x, (tuple, list, dict)) or x) return type(structures[0])(filtered) if all(isinstance(s, dict) for s in structures): if len(set(frozenset(x.keys()) for x in structures)) > 1: raise ValueError('Cannot merge dicts with different keys.') # Only wrap in tuples if more than one structure provided. if len(structures) > 1: filtered = { k: impl(predicate, *(s[k] for s in structures)) for k in structures[0]} else: filtered = {k: impl(predicate, v) for k, v in structures[0].items()} # Remove empty containers and construct result structure. filtered = { k: v for k, v in filtered.items() if not isinstance(v, (tuple, list, dict)) or v} return type(structures[0])(filtered) if len(structures) > 1: return structures if predicate(*structures) else () else: return structures[0] if predicate(structures[0]) else () result = impl(predicate, *structures) if flatten: result = flatten_(result) return result
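A minimal sketch of calling filter_ above on a single structure, dropping falsy leaves and pruning containers that become empty (assuming the function and its module-level helpers from the record are importable); the data dict is made up:

data = {"lr": 0.001, "name": "", "steps": 0, "layers": [64, 0, 32]}
print(filter_(lambda x: bool(x), data))
# {'lr': 0.001, 'layers': [64, 32]}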
def _process_docs(self, anexec, docblocks, parent, module, docsearch): """Associates the docstrings from the docblocks with their parameters.""" #The documentation for the parameters is stored outside of the executable #We need to get hold of them from docblocks from the parent text key = "{}.{}".format(parent.name, anexec.name) if key in docblocks: docs = self.docparser.to_doc(docblocks[key][0], anexec.name) anexec.docstart, anexec.docend = (docblocks[key][1], docblocks[key][2]) self.docparser.process_execdocs(docs, anexec, key)
Associates the docstrings from the docblocks with their parameters.
Below is the the instruction that describes the task: ### Input: Associates the docstrings from the docblocks with their parameters. ### Response: def _process_docs(self, anexec, docblocks, parent, module, docsearch): """Associates the docstrings from the docblocks with their parameters.""" #The documentation for the parameters is stored outside of the executable #We need to get hold of them from docblocks from the parent text key = "{}.{}".format(parent.name, anexec.name) if key in docblocks: docs = self.docparser.to_doc(docblocks[key][0], anexec.name) anexec.docstart, anexec.docend = (docblocks[key][1], docblocks[key][2]) self.docparser.process_execdocs(docs, anexec, key)