code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def _get_base_class_names(frame): """ Get baseclass names from the code object """ co, lasti = frame.f_code, frame.f_lasti code = co.co_code extends = [] for (op, oparg) in op_stream(code, lasti): if op in dis.hasconst: if type(co.co_consts[oparg]) == str: extends = [] elif op in dis.hasname: if dis.opname[op] == 'LOAD_NAME': extends.append(('name', co.co_names[oparg])) if dis.opname[op] == 'LOAD_ATTR': extends.append(('attr', co.co_names[oparg])) if dis.opname[op] == 'LOAD_GLOBAL': extends.append(('name', co.co_names[oparg])) items = [] previous_item = [] for t, s in extends: if t == 'name': if previous_item: items.append(previous_item) previous_item = [s] else: previous_item += [s] if previous_item: items.append(previous_item) return items
Get baseclass names from the code object
Below is the the instruction that describes the task: ### Input: Get baseclass names from the code object ### Response: def _get_base_class_names(frame): """ Get baseclass names from the code object """ co, lasti = frame.f_code, frame.f_lasti code = co.co_code extends = [] for (op, oparg) in op_stream(code, lasti): if op in dis.hasconst: if type(co.co_consts[oparg]) == str: extends = [] elif op in dis.hasname: if dis.opname[op] == 'LOAD_NAME': extends.append(('name', co.co_names[oparg])) if dis.opname[op] == 'LOAD_ATTR': extends.append(('attr', co.co_names[oparg])) if dis.opname[op] == 'LOAD_GLOBAL': extends.append(('name', co.co_names[oparg])) items = [] previous_item = [] for t, s in extends: if t == 'name': if previous_item: items.append(previous_item) previous_item = [s] else: previous_item += [s] if previous_item: items.append(previous_item) return items
def get_capability_definitions(profile_manager): ''' Returns a list of all capability definitions. profile_manager Reference to the profile manager. ''' res_type = pbm.profile.ResourceType( resourceType=pbm.profile.ResourceTypeEnum.STORAGE) try: cap_categories = profile_manager.FetchCapabilityMetadata(res_type) except vim.fault.NoPermission as exc: log.exception(exc) raise VMwareApiError('Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise VMwareRuntimeError(exc.msg) cap_definitions = [] for cat in cap_categories: cap_definitions.extend(cat.capabilityMetadata) return cap_definitions
Returns a list of all capability definitions. profile_manager Reference to the profile manager.
Below is the the instruction that describes the task: ### Input: Returns a list of all capability definitions. profile_manager Reference to the profile manager. ### Response: def get_capability_definitions(profile_manager): ''' Returns a list of all capability definitions. profile_manager Reference to the profile manager. ''' res_type = pbm.profile.ResourceType( resourceType=pbm.profile.ResourceTypeEnum.STORAGE) try: cap_categories = profile_manager.FetchCapabilityMetadata(res_type) except vim.fault.NoPermission as exc: log.exception(exc) raise VMwareApiError('Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise VMwareRuntimeError(exc.msg) cap_definitions = [] for cat in cap_categories: cap_definitions.extend(cat.capabilityMetadata) return cap_definitions
def change_svc_event_handler(self, service, event_handler_command): """Modify service event handler Format of the line that triggers function call:: CHANGE_SVC_EVENT_HANDLER;<host_name>;<service_description>;<event_handler_command> :param service: service to modify event handler :type service: alignak.objects.service.Service :param event_handler_command: event handler command line :type event_handler_command: :return: None """ service.modified_attributes |= DICT_MODATTR["MODATTR_EVENT_HANDLER_COMMAND"].value data = {"commands": self.commands, "call": event_handler_command} service.change_event_handler(data) self.send_an_element(service.get_update_status_brok())
Modify service event handler Format of the line that triggers function call:: CHANGE_SVC_EVENT_HANDLER;<host_name>;<service_description>;<event_handler_command> :param service: service to modify event handler :type service: alignak.objects.service.Service :param event_handler_command: event handler command line :type event_handler_command: :return: None
Below is the the instruction that describes the task: ### Input: Modify service event handler Format of the line that triggers function call:: CHANGE_SVC_EVENT_HANDLER;<host_name>;<service_description>;<event_handler_command> :param service: service to modify event handler :type service: alignak.objects.service.Service :param event_handler_command: event handler command line :type event_handler_command: :return: None ### Response: def change_svc_event_handler(self, service, event_handler_command): """Modify service event handler Format of the line that triggers function call:: CHANGE_SVC_EVENT_HANDLER;<host_name>;<service_description>;<event_handler_command> :param service: service to modify event handler :type service: alignak.objects.service.Service :param event_handler_command: event handler command line :type event_handler_command: :return: None """ service.modified_attributes |= DICT_MODATTR["MODATTR_EVENT_HANDLER_COMMAND"].value data = {"commands": self.commands, "call": event_handler_command} service.change_event_handler(data) self.send_an_element(service.get_update_status_brok())
def from_metadata(metadata: Metadata) -> Any: """ Static factory method to create an equivalent instance of this type from the given `Metadata` instance. :param metadata: the `Metadata` instance to create an instance of this class from :return: the created instance of this class """ irods_metadata = IrodsMetadata() for key, value in metadata.items(): irods_metadata[key] = {value} return irods_metadata
Static factory method to create an equivalent instance of this type from the given `Metadata` instance. :param metadata: the `Metadata` instance to create an instance of this class from :return: the created instance of this class
Below is the the instruction that describes the task: ### Input: Static factory method to create an equivalent instance of this type from the given `Metadata` instance. :param metadata: the `Metadata` instance to create an instance of this class from :return: the created instance of this class ### Response: def from_metadata(metadata: Metadata) -> Any: """ Static factory method to create an equivalent instance of this type from the given `Metadata` instance. :param metadata: the `Metadata` instance to create an instance of this class from :return: the created instance of this class """ irods_metadata = IrodsMetadata() for key, value in metadata.items(): irods_metadata[key] = {value} return irods_metadata
def manual_update_license(self, fd, filename='cdrouter.lic'): """Update the license on your CDRouter system manually by uploading a .lic license from the CDRouter Support Lounge. :param fd: File-like object to upload. :param filename: (optional) Filename to use for license as string. :return: :class:`system.Upgrade <system.Upgrade>` object :rtype: system.Upgrade """ schema = UpgradeSchema() resp = self.service.post(self.base+'license/', files={'file': (filename, fd)}) return self.service.decode(schema, resp)
Update the license on your CDRouter system manually by uploading a .lic license from the CDRouter Support Lounge. :param fd: File-like object to upload. :param filename: (optional) Filename to use for license as string. :return: :class:`system.Upgrade <system.Upgrade>` object :rtype: system.Upgrade
Below is the the instruction that describes the task: ### Input: Update the license on your CDRouter system manually by uploading a .lic license from the CDRouter Support Lounge. :param fd: File-like object to upload. :param filename: (optional) Filename to use for license as string. :return: :class:`system.Upgrade <system.Upgrade>` object :rtype: system.Upgrade ### Response: def manual_update_license(self, fd, filename='cdrouter.lic'): """Update the license on your CDRouter system manually by uploading a .lic license from the CDRouter Support Lounge. :param fd: File-like object to upload. :param filename: (optional) Filename to use for license as string. :return: :class:`system.Upgrade <system.Upgrade>` object :rtype: system.Upgrade """ schema = UpgradeSchema() resp = self.service.post(self.base+'license/', files={'file': (filename, fd)}) return self.service.decode(schema, resp)
def get_port_name_from_id(node_id, port_id, nodes): """ Get the name of a port for a given node and port ID :param int node_id: node ID :param int port_id: port ID :param list nodes: list of nodes from :py:meth:`generate_nodes` :return: port name :rtype: str """ port_name = '' for node in nodes: if node['id'] == node_id: for port in node['ports']: if port['id'] == port_id: port_name = port['name'] break return port_name
Get the name of a port for a given node and port ID :param int node_id: node ID :param int port_id: port ID :param list nodes: list of nodes from :py:meth:`generate_nodes` :return: port name :rtype: str
Below is the the instruction that describes the task: ### Input: Get the name of a port for a given node and port ID :param int node_id: node ID :param int port_id: port ID :param list nodes: list of nodes from :py:meth:`generate_nodes` :return: port name :rtype: str ### Response: def get_port_name_from_id(node_id, port_id, nodes): """ Get the name of a port for a given node and port ID :param int node_id: node ID :param int port_id: port ID :param list nodes: list of nodes from :py:meth:`generate_nodes` :return: port name :rtype: str """ port_name = '' for node in nodes: if node['id'] == node_id: for port in node['ports']: if port['id'] == port_id: port_name = port['name'] break return port_name
def _step3(self, word): """step3() deals with -ic-, -full, -ness etc. similar strategy to step2.""" ch = word[-1] if ch == 'e': if word.endswith("icate"): return word[:-3] if self._m(word, len(word)-6) else word elif word.endswith("ative"): return word[:-5] if self._m(word, len(word)-6) else word elif word.endswith("alize"): return word[:-3] if self._m(word, len(word)-6) else word else: return word elif ch == 'i': if word.endswith("iciti"): return word[:-3] if self._m(word, len(word)-6) else word else: return word elif ch == 'l': if word.endswith("ical"): return word[:-2] if self._m(word, len(word)-5) else word elif word.endswith("ful"): return word[:-3] if self._m(word, len(word)-4) else word else: return word elif ch == 's': if word.endswith("ness"): return word[:-4] if self._m(word, len(word)-5) else word else: return word else: return word
step3() deals with -ic-, -full, -ness etc. similar strategy to step2.
Below is the the instruction that describes the task: ### Input: step3() deals with -ic-, -full, -ness etc. similar strategy to step2. ### Response: def _step3(self, word): """step3() deals with -ic-, -full, -ness etc. similar strategy to step2.""" ch = word[-1] if ch == 'e': if word.endswith("icate"): return word[:-3] if self._m(word, len(word)-6) else word elif word.endswith("ative"): return word[:-5] if self._m(word, len(word)-6) else word elif word.endswith("alize"): return word[:-3] if self._m(word, len(word)-6) else word else: return word elif ch == 'i': if word.endswith("iciti"): return word[:-3] if self._m(word, len(word)-6) else word else: return word elif ch == 'l': if word.endswith("ical"): return word[:-2] if self._m(word, len(word)-5) else word elif word.endswith("ful"): return word[:-3] if self._m(word, len(word)-4) else word else: return word elif ch == 's': if word.endswith("ness"): return word[:-4] if self._m(word, len(word)-5) else word else: return word else: return word
def _get_publish(self): """ Find this publish on remote """ publishes = self._get_publishes(self.client) for publish in publishes: if publish['Distribution'] == self.distribution and \ publish['Prefix'].replace("/", "_") == (self.prefix or '.') and \ publish['Storage'] == self.storage: return publish raise NoSuchPublish("Publish %s (%s) does not exist" % (self.name, self.storage or "local"))
Find this publish on remote
Below is the the instruction that describes the task: ### Input: Find this publish on remote ### Response: def _get_publish(self): """ Find this publish on remote """ publishes = self._get_publishes(self.client) for publish in publishes: if publish['Distribution'] == self.distribution and \ publish['Prefix'].replace("/", "_") == (self.prefix or '.') and \ publish['Storage'] == self.storage: return publish raise NoSuchPublish("Publish %s (%s) does not exist" % (self.name, self.storage or "local"))
def cdx_clamp(cdx_iter, from_ts, to_ts): """ Clamp by start and end ts """ if from_ts and len(from_ts) < 14: from_ts = pad_timestamp(from_ts, PAD_14_DOWN) if to_ts and len(to_ts) < 14: to_ts = pad_timestamp(to_ts, PAD_14_UP) for cdx in cdx_iter: if from_ts and cdx[TIMESTAMP] < from_ts: continue if to_ts and cdx[TIMESTAMP] > to_ts: continue yield cdx
Clamp by start and end ts
Below is the the instruction that describes the task: ### Input: Clamp by start and end ts ### Response: def cdx_clamp(cdx_iter, from_ts, to_ts): """ Clamp by start and end ts """ if from_ts and len(from_ts) < 14: from_ts = pad_timestamp(from_ts, PAD_14_DOWN) if to_ts and len(to_ts) < 14: to_ts = pad_timestamp(to_ts, PAD_14_UP) for cdx in cdx_iter: if from_ts and cdx[TIMESTAMP] < from_ts: continue if to_ts and cdx[TIMESTAMP] > to_ts: continue yield cdx
def full_data(self): """ Returns all the info available for the user in the following format: name [username] <id> (locale) bot_or_user If any data is not available, it is not added. """ data = [ self.full_name, self._username(), self._id(), self._language_code(), self._is_bot() ] return " ".join(filter(None, data))
Returns all the info available for the user in the following format: name [username] <id> (locale) bot_or_user If any data is not available, it is not added.
Below is the the instruction that describes the task: ### Input: Returns all the info available for the user in the following format: name [username] <id> (locale) bot_or_user If any data is not available, it is not added. ### Response: def full_data(self): """ Returns all the info available for the user in the following format: name [username] <id> (locale) bot_or_user If any data is not available, it is not added. """ data = [ self.full_name, self._username(), self._id(), self._language_code(), self._is_bot() ] return " ".join(filter(None, data))
def is_valid(arxiv_id): """ Check that a given arXiv ID is a valid one. :param arxiv_id: The arXiv ID to be checked. :returns: Boolean indicating whether the arXiv ID is valid or not. >>> is_valid('1506.06690') True >>> is_valid('1506.06690v1') True >>> is_valid('arXiv:1506.06690') True >>> is_valid('arXiv:1506.06690v1') True >>> is_valid('arxiv:1506.06690') True >>> is_valid('arxiv:1506.06690v1') True >>> is_valid('math.GT/0309136') True >>> is_valid('abcdf') False >>> is_valid('bar1506.06690foo') False >>> is_valid('mare.GG/0309136') False """ match = REGEX.match(arxiv_id) return (match is not None) and (match.group(0) == arxiv_id)
Check that a given arXiv ID is a valid one. :param arxiv_id: The arXiv ID to be checked. :returns: Boolean indicating whether the arXiv ID is valid or not. >>> is_valid('1506.06690') True >>> is_valid('1506.06690v1') True >>> is_valid('arXiv:1506.06690') True >>> is_valid('arXiv:1506.06690v1') True >>> is_valid('arxiv:1506.06690') True >>> is_valid('arxiv:1506.06690v1') True >>> is_valid('math.GT/0309136') True >>> is_valid('abcdf') False >>> is_valid('bar1506.06690foo') False >>> is_valid('mare.GG/0309136') False
Below is the the instruction that describes the task: ### Input: Check that a given arXiv ID is a valid one. :param arxiv_id: The arXiv ID to be checked. :returns: Boolean indicating whether the arXiv ID is valid or not. >>> is_valid('1506.06690') True >>> is_valid('1506.06690v1') True >>> is_valid('arXiv:1506.06690') True >>> is_valid('arXiv:1506.06690v1') True >>> is_valid('arxiv:1506.06690') True >>> is_valid('arxiv:1506.06690v1') True >>> is_valid('math.GT/0309136') True >>> is_valid('abcdf') False >>> is_valid('bar1506.06690foo') False >>> is_valid('mare.GG/0309136') False ### Response: def is_valid(arxiv_id): """ Check that a given arXiv ID is a valid one. :param arxiv_id: The arXiv ID to be checked. :returns: Boolean indicating whether the arXiv ID is valid or not. >>> is_valid('1506.06690') True >>> is_valid('1506.06690v1') True >>> is_valid('arXiv:1506.06690') True >>> is_valid('arXiv:1506.06690v1') True >>> is_valid('arxiv:1506.06690') True >>> is_valid('arxiv:1506.06690v1') True >>> is_valid('math.GT/0309136') True >>> is_valid('abcdf') False >>> is_valid('bar1506.06690foo') False >>> is_valid('mare.GG/0309136') False """ match = REGEX.match(arxiv_id) return (match is not None) and (match.group(0) == arxiv_id)
def run(self): """ Runs all tasks in this runner on the executor. Blocks until all tasks have been completed. :return: """ for task in self.get_next_tasks(None): self.executor.add_task(task, None) while not self.executor.is_done(): done_task_and_result = self.executor.wait_for_tasks() for task, task_result in done_task_and_result: self._add_sub_tasks_to_executor(task, task_result)
Runs all tasks in this runner on the executor. Blocks until all tasks have been completed. :return:
Below is the the instruction that describes the task: ### Input: Runs all tasks in this runner on the executor. Blocks until all tasks have been completed. :return: ### Response: def run(self): """ Runs all tasks in this runner on the executor. Blocks until all tasks have been completed. :return: """ for task in self.get_next_tasks(None): self.executor.add_task(task, None) while not self.executor.is_done(): done_task_and_result = self.executor.wait_for_tasks() for task, task_result in done_task_and_result: self._add_sub_tasks_to_executor(task, task_result)
def disable_FTDI_driver(): """Disable the FTDI drivers for the current platform. This is necessary because they will conflict with libftdi and accessing the FT232H. Note you can enable the FTDI drivers again by calling enable_FTDI_driver. """ logger.debug('Disabling FTDI driver.') if sys.platform == 'darwin': logger.debug('Detected Mac OSX') # Mac OS commands to disable FTDI driver. _check_running_as_root() subprocess.call('kextunload -b com.apple.driver.AppleUSBFTDI', shell=True) subprocess.call('kextunload /System/Library/Extensions/FTDIUSBSerialDriver.kext', shell=True) elif sys.platform.startswith('linux'): logger.debug('Detected Linux') # Linux commands to disable FTDI driver. _check_running_as_root() subprocess.call('modprobe -r -q ftdi_sio', shell=True) subprocess.call('modprobe -r -q usbserial', shell=True)
Disable the FTDI drivers for the current platform. This is necessary because they will conflict with libftdi and accessing the FT232H. Note you can enable the FTDI drivers again by calling enable_FTDI_driver.
Below is the the instruction that describes the task: ### Input: Disable the FTDI drivers for the current platform. This is necessary because they will conflict with libftdi and accessing the FT232H. Note you can enable the FTDI drivers again by calling enable_FTDI_driver. ### Response: def disable_FTDI_driver(): """Disable the FTDI drivers for the current platform. This is necessary because they will conflict with libftdi and accessing the FT232H. Note you can enable the FTDI drivers again by calling enable_FTDI_driver. """ logger.debug('Disabling FTDI driver.') if sys.platform == 'darwin': logger.debug('Detected Mac OSX') # Mac OS commands to disable FTDI driver. _check_running_as_root() subprocess.call('kextunload -b com.apple.driver.AppleUSBFTDI', shell=True) subprocess.call('kextunload /System/Library/Extensions/FTDIUSBSerialDriver.kext', shell=True) elif sys.platform.startswith('linux'): logger.debug('Detected Linux') # Linux commands to disable FTDI driver. _check_running_as_root() subprocess.call('modprobe -r -q ftdi_sio', shell=True) subprocess.call('modprobe -r -q usbserial', shell=True)
def atlas_node_add_callback(atlas_state, callback_name, callback): """ Add a callback to the initialized atlas state """ if callback_name == 'store_zonefile': atlas_state['zonefile_crawler'].set_store_zonefile_callback(callback) else: raise ValueError("Unrecognized callback {}".format(callback_name))
Add a callback to the initialized atlas state
Below is the the instruction that describes the task: ### Input: Add a callback to the initialized atlas state ### Response: def atlas_node_add_callback(atlas_state, callback_name, callback): """ Add a callback to the initialized atlas state """ if callback_name == 'store_zonefile': atlas_state['zonefile_crawler'].set_store_zonefile_callback(callback) else: raise ValueError("Unrecognized callback {}".format(callback_name))
def write_dot_file(G, filename): """ Writes the graph G in dot file format for graphviz visualization. Args: a Networkx graph A filename to name the dot files """ with io.open(filename, "w") as fh: fh.write("strict digraph DependencyDiagram {\n") edge_list = G.edges() node_list = set(G.nodes()) if edge_list: for edge in sorted(edge_list): source, targ = edge node_list = node_list - set(source) node_list = node_list - set(targ) line = '"{}" -> "{}";\n' fh.write(line.format(source, targ)) # draw nodes with no links if node_list: for node in sorted(node_list): line = '"{}"\n'.format(node) fh.write(line) fh.write("}")
Writes the graph G in dot file format for graphviz visualization. Args: a Networkx graph A filename to name the dot files
Below is the the instruction that describes the task: ### Input: Writes the graph G in dot file format for graphviz visualization. Args: a Networkx graph A filename to name the dot files ### Response: def write_dot_file(G, filename): """ Writes the graph G in dot file format for graphviz visualization. Args: a Networkx graph A filename to name the dot files """ with io.open(filename, "w") as fh: fh.write("strict digraph DependencyDiagram {\n") edge_list = G.edges() node_list = set(G.nodes()) if edge_list: for edge in sorted(edge_list): source, targ = edge node_list = node_list - set(source) node_list = node_list - set(targ) line = '"{}" -> "{}";\n' fh.write(line.format(source, targ)) # draw nodes with no links if node_list: for node in sorted(node_list): line = '"{}"\n'.format(node) fh.write(line) fh.write("}")
def _namespace_filtered_iterator(graph, namespace): """Iterate over names in the given namespace.""" for it_namespace, name in _identifier_filtered_iterator(graph): if namespace == it_namespace: yield name
Iterate over names in the given namespace.
Below is the the instruction that describes the task: ### Input: Iterate over names in the given namespace. ### Response: def _namespace_filtered_iterator(graph, namespace): """Iterate over names in the given namespace.""" for it_namespace, name in _identifier_filtered_iterator(graph): if namespace == it_namespace: yield name
def get_offers(self, pair="SWTH_NEO"): """ Function to fetch the open orders on the order book for the trade pair requested. Execution of this function is as follows:: get_offers(pair="SWTH_NEO") The expected return result for this function is as follows:: [{ 'id': '2716c0ca-59bb-4c86-8ee4-6b9528d0e5d2', 'offer_asset': 'GAS', 'want_asset': 'NEO', 'available_amount': 9509259, 'offer_amount': 30000000, 'want_amount': 300000000, 'address': '7f345d1a031c4099540dbbbc220d4e5640ab2b6f' }, { .... }] :param pair: The trading pair that will be used to request open offers on the order book. :type pair: str :return: List of dictionaries consisting of the open offers for the requested trading pair. """ api_params = { "pair": pair, "contract_hash": self.contract_hash } return self.request.get(path='/offers', params=api_params)
Function to fetch the open orders on the order book for the trade pair requested. Execution of this function is as follows:: get_offers(pair="SWTH_NEO") The expected return result for this function is as follows:: [{ 'id': '2716c0ca-59bb-4c86-8ee4-6b9528d0e5d2', 'offer_asset': 'GAS', 'want_asset': 'NEO', 'available_amount': 9509259, 'offer_amount': 30000000, 'want_amount': 300000000, 'address': '7f345d1a031c4099540dbbbc220d4e5640ab2b6f' }, { .... }] :param pair: The trading pair that will be used to request open offers on the order book. :type pair: str :return: List of dictionaries consisting of the open offers for the requested trading pair.
Below is the the instruction that describes the task: ### Input: Function to fetch the open orders on the order book for the trade pair requested. Execution of this function is as follows:: get_offers(pair="SWTH_NEO") The expected return result for this function is as follows:: [{ 'id': '2716c0ca-59bb-4c86-8ee4-6b9528d0e5d2', 'offer_asset': 'GAS', 'want_asset': 'NEO', 'available_amount': 9509259, 'offer_amount': 30000000, 'want_amount': 300000000, 'address': '7f345d1a031c4099540dbbbc220d4e5640ab2b6f' }, { .... }] :param pair: The trading pair that will be used to request open offers on the order book. :type pair: str :return: List of dictionaries consisting of the open offers for the requested trading pair. ### Response: def get_offers(self, pair="SWTH_NEO"): """ Function to fetch the open orders on the order book for the trade pair requested. Execution of this function is as follows:: get_offers(pair="SWTH_NEO") The expected return result for this function is as follows:: [{ 'id': '2716c0ca-59bb-4c86-8ee4-6b9528d0e5d2', 'offer_asset': 'GAS', 'want_asset': 'NEO', 'available_amount': 9509259, 'offer_amount': 30000000, 'want_amount': 300000000, 'address': '7f345d1a031c4099540dbbbc220d4e5640ab2b6f' }, { .... }] :param pair: The trading pair that will be used to request open offers on the order book. :type pair: str :return: List of dictionaries consisting of the open offers for the requested trading pair. """ api_params = { "pair": pair, "contract_hash": self.contract_hash } return self.request.get(path='/offers', params=api_params)
def set_pixel(self, x, y, color): """ Color may be: value, tuple, list etc. If the image is set to contain more color-channels than len(color), the remaining channels will be filled automatically. Example (channels = 4, i.e. RGBA output): color = 17 -> color = [17,17,17,255] color = (17, 99) -> color = [17,99,0,255] Passing in shorthand color-tuples for larger images on a regular basis might result in a very noticeable performance penalty. """ try: # these checks are for convenience, not for safety if len(color) < self.channels: # color is a a tuple (length >= 1) if len(color) == 1: if self.channels == 2: color = [color[0], 255] elif self.channels == 3: color = [color[0], color[0], color[0]] elif self.channels == 4: color = [color[0], color[0], color[0], 255] elif len(color) == 2: if self.channels == 3: color = [color[0], color[1], 0] elif self.channels == 4: color = [color[0], color[1], 0, 255] elif len(color) == 3: if self.channels == 4: color = [color[0], color[1], color[2], 255] except TypeError: # color is not an iterable if self.channels > 1: if self.channels == 2: color = [color, 255] elif self.channels == 3: color = [color, color, color] else: # only values 1..4 are allowed color = [color, color, color, 255] self.array[y, x] = color
Color may be: value, tuple, list etc. If the image is set to contain more color-channels than len(color), the remaining channels will be filled automatically. Example (channels = 4, i.e. RGBA output): color = 17 -> color = [17,17,17,255] color = (17, 99) -> color = [17,99,0,255] Passing in shorthand color-tuples for larger images on a regular basis might result in a very noticeable performance penalty.
Below is the the instruction that describes the task: ### Input: Color may be: value, tuple, list etc. If the image is set to contain more color-channels than len(color), the remaining channels will be filled automatically. Example (channels = 4, i.e. RGBA output): color = 17 -> color = [17,17,17,255] color = (17, 99) -> color = [17,99,0,255] Passing in shorthand color-tuples for larger images on a regular basis might result in a very noticeable performance penalty. ### Response: def set_pixel(self, x, y, color): """ Color may be: value, tuple, list etc. If the image is set to contain more color-channels than len(color), the remaining channels will be filled automatically. Example (channels = 4, i.e. RGBA output): color = 17 -> color = [17,17,17,255] color = (17, 99) -> color = [17,99,0,255] Passing in shorthand color-tuples for larger images on a regular basis might result in a very noticeable performance penalty. """ try: # these checks are for convenience, not for safety if len(color) < self.channels: # color is a a tuple (length >= 1) if len(color) == 1: if self.channels == 2: color = [color[0], 255] elif self.channels == 3: color = [color[0], color[0], color[0]] elif self.channels == 4: color = [color[0], color[0], color[0], 255] elif len(color) == 2: if self.channels == 3: color = [color[0], color[1], 0] elif self.channels == 4: color = [color[0], color[1], 0, 255] elif len(color) == 3: if self.channels == 4: color = [color[0], color[1], color[2], 255] except TypeError: # color is not an iterable if self.channels > 1: if self.channels == 2: color = [color, 255] elif self.channels == 3: color = [color, color, color] else: # only values 1..4 are allowed color = [color, color, color, 255] self.array[y, x] = color
def _on_library_path_changed(self, renderer, path, new_library_path): """Callback handling a change of a library path :param Gtk.CellRenderer renderer: Cell renderer showing the library path :param path: Path of library within the list store :param str new_library_path: New library path """ library_name = self.library_list_store[int(path)][self.KEY_STORAGE_ID] library_config = self.core_config_model.get_current_config_value("LIBRARY_PATHS", use_preliminary=True, default={}) library_config[library_name] = new_library_path self.core_config_model.set_preliminary_config_value("LIBRARY_PATHS", library_config) self._select_row_by_column_value(self.view['library_tree_view'], self.library_list_store, self.KEY_STORAGE_ID, library_name)
Callback handling a change of a library path :param Gtk.CellRenderer renderer: Cell renderer showing the library path :param path: Path of library within the list store :param str new_library_path: New library path
Below is the the instruction that describes the task: ### Input: Callback handling a change of a library path :param Gtk.CellRenderer renderer: Cell renderer showing the library path :param path: Path of library within the list store :param str new_library_path: New library path ### Response: def _on_library_path_changed(self, renderer, path, new_library_path): """Callback handling a change of a library path :param Gtk.CellRenderer renderer: Cell renderer showing the library path :param path: Path of library within the list store :param str new_library_path: New library path """ library_name = self.library_list_store[int(path)][self.KEY_STORAGE_ID] library_config = self.core_config_model.get_current_config_value("LIBRARY_PATHS", use_preliminary=True, default={}) library_config[library_name] = new_library_path self.core_config_model.set_preliminary_config_value("LIBRARY_PATHS", library_config) self._select_row_by_column_value(self.view['library_tree_view'], self.library_list_store, self.KEY_STORAGE_ID, library_name)
def toTag(self, output): ''' This methods returns all data of this feed as feed xml tag :param output: XML Document to which the data should be added :type output: xml.dom.DOMImplementation.createDocument ''' feed = output.createElement('feed') feed.setAttribute('name', self.name) feed.setAttribute('priority', str(self.priority)) # schedule schedule = output.createElement('schedule') schedule.setAttribute('dayOfMonth', self.dayOfMonth) schedule.setAttribute('dayOfWeek', self.dayOfWeek) schedule.setAttribute('hour', self.hour) schedule.setAttribute('minute', self.minute) if self.retry: schedule.setAttribute('retry', self.retry) feed.appendChild(schedule) # url url = output.createElement('url') url.appendChild(output.createTextNode(self.url)) feed.appendChild(url) # source if self.source: source = output.createElement('source') source.appendChild(output.createTextNode(self.source)) feed.appendChild(source) return feed
This methods returns all data of this feed as feed xml tag :param output: XML Document to which the data should be added :type output: xml.dom.DOMImplementation.createDocument
Below is the the instruction that describes the task: ### Input: This methods returns all data of this feed as feed xml tag :param output: XML Document to which the data should be added :type output: xml.dom.DOMImplementation.createDocument ### Response: def toTag(self, output): ''' This methods returns all data of this feed as feed xml tag :param output: XML Document to which the data should be added :type output: xml.dom.DOMImplementation.createDocument ''' feed = output.createElement('feed') feed.setAttribute('name', self.name) feed.setAttribute('priority', str(self.priority)) # schedule schedule = output.createElement('schedule') schedule.setAttribute('dayOfMonth', self.dayOfMonth) schedule.setAttribute('dayOfWeek', self.dayOfWeek) schedule.setAttribute('hour', self.hour) schedule.setAttribute('minute', self.minute) if self.retry: schedule.setAttribute('retry', self.retry) feed.appendChild(schedule) # url url = output.createElement('url') url.appendChild(output.createTextNode(self.url)) feed.appendChild(url) # source if self.source: source = output.createElement('source') source.appendChild(output.createTextNode(self.source)) feed.appendChild(source) return feed
def delete_if_error(path): '''If any exception is raised inside the context, delete the file at the given path, and allow the exception to continue.''' try: yield except Exception: if os.path.exists(path): os.remove(path) raise
If any exception is raised inside the context, delete the file at the given path, and allow the exception to continue.
Below is the the instruction that describes the task: ### Input: If any exception is raised inside the context, delete the file at the given path, and allow the exception to continue. ### Response: def delete_if_error(path): '''If any exception is raised inside the context, delete the file at the given path, and allow the exception to continue.''' try: yield except Exception: if os.path.exists(path): os.remove(path) raise
def _ConvertMethodType(self, methodType): """ Convert vmodl.reflect.DynamicTypeManager.MethodTypeInfo to pyVmomi method definition """ if methodType: name = methodType.name wsdlName = methodType.wsdlName version = methodType.version params = self._Filter(self._ConvertParamType, methodType.paramTypeInfo) privId = methodType.privId faults = methodType.fault # Figure out reture info if methodType.returnTypeInfo: returnTypeInfo = methodType.returnTypeInfo retFlags = self._ConvertAnnotations(returnTypeInfo.annotation) methodRetType = returnTypeInfo.type else: retFlags = 0 methodRetType = "void" if wsdlName.endswith("_Task"): # TODO: Need a seperate task return type for task, instead of # hardcode vim.Task as return type retType = "vim.Task" else: retType = methodRetType retInfo = (retFlags, retType, methodRetType) method = (name, wsdlName, version, params, retInfo, privId, faults) else: method = None return method
Convert vmodl.reflect.DynamicTypeManager.MethodTypeInfo to pyVmomi method definition
Below is the the instruction that describes the task: ### Input: Convert vmodl.reflect.DynamicTypeManager.MethodTypeInfo to pyVmomi method definition ### Response: def _ConvertMethodType(self, methodType): """ Convert vmodl.reflect.DynamicTypeManager.MethodTypeInfo to pyVmomi method definition """ if methodType: name = methodType.name wsdlName = methodType.wsdlName version = methodType.version params = self._Filter(self._ConvertParamType, methodType.paramTypeInfo) privId = methodType.privId faults = methodType.fault # Figure out reture info if methodType.returnTypeInfo: returnTypeInfo = methodType.returnTypeInfo retFlags = self._ConvertAnnotations(returnTypeInfo.annotation) methodRetType = returnTypeInfo.type else: retFlags = 0 methodRetType = "void" if wsdlName.endswith("_Task"): # TODO: Need a seperate task return type for task, instead of # hardcode vim.Task as return type retType = "vim.Task" else: retType = methodRetType retInfo = (retFlags, retType, methodRetType) method = (name, wsdlName, version, params, retInfo, privId, faults) else: method = None return method
def _get_block_storage(kwargs): ''' Construct a block storage instance from passed arguments ''' if kwargs is None: kwargs = {} block_storage_name = kwargs.get('name', None) block_storage_size = kwargs.get('size', None) block_storage_description = kwargs.get('description', None) datacenter_id = kwargs.get('datacenter_id', None) server_id = kwargs.get('server_id', None) block_storage = BlockStorage( name=block_storage_name, size=block_storage_size) if block_storage_description: block_storage.description = block_storage_description if datacenter_id: block_storage.datacenter_id = datacenter_id if server_id: block_storage.server_id = server_id return block_storage
Construct a block storage instance from passed arguments
Below is the the instruction that describes the task: ### Input: Construct a block storage instance from passed arguments ### Response: def _get_block_storage(kwargs): ''' Construct a block storage instance from passed arguments ''' if kwargs is None: kwargs = {} block_storage_name = kwargs.get('name', None) block_storage_size = kwargs.get('size', None) block_storage_description = kwargs.get('description', None) datacenter_id = kwargs.get('datacenter_id', None) server_id = kwargs.get('server_id', None) block_storage = BlockStorage( name=block_storage_name, size=block_storage_size) if block_storage_description: block_storage.description = block_storage_description if datacenter_id: block_storage.datacenter_id = datacenter_id if server_id: block_storage.server_id = server_id return block_storage
def encodeIntoArray(self, inpt, output): """ See the function description in base.py """ # Get the scaled value scaledVal = self._getScaledValue(inpt) if scaledVal is None: output[0:] = 0 else: self.encoder.encodeIntoArray(scaledVal, output) if self.verbosity >= 2: print "input:", inpt, "scaledVal:", scaledVal, "output:", output print "decoded:", self.decodedToStr(self.decode(output))
See the function description in base.py
Below is the the instruction that describes the task: ### Input: See the function description in base.py ### Response: def encodeIntoArray(self, inpt, output): """ See the function description in base.py """ # Get the scaled value scaledVal = self._getScaledValue(inpt) if scaledVal is None: output[0:] = 0 else: self.encoder.encodeIntoArray(scaledVal, output) if self.verbosity >= 2: print "input:", inpt, "scaledVal:", scaledVal, "output:", output print "decoded:", self.decodedToStr(self.decode(output))
def setPadding(self, pad): """setPadding() -> bytes of length 1. Padding character.""" _baseDes.setPadding(self, pad) for key in (self.__key1, self.__key2, self.__key3): key.setPadding(pad)
setPadding() -> bytes of length 1. Padding character.
Below is the the instruction that describes the task: ### Input: setPadding() -> bytes of length 1. Padding character. ### Response: def setPadding(self, pad): """setPadding() -> bytes of length 1. Padding character.""" _baseDes.setPadding(self, pad) for key in (self.__key1, self.__key2, self.__key3): key.setPadding(pad)
def _readXputFile(self, file_cards, card_name, directory, session, spatial=False, spatialReferenceID=None, replaceParamFile=None, **kwargs): """ Read specific IO file for a GSSHA project to the database. """ # Automatically derive the spatial reference system, if possible if spatialReferenceID is None: spatialReferenceID = self._automaticallyDeriveSpatialReferenceId(directory) card = self.getCard(card_name) if card: fileIO = file_cards[card.name] filename = card.value.strip('"').strip("'") # Invoke read method on each file return self._invokeRead(fileIO=fileIO, directory=directory, filename=filename, session=session, spatial=spatial, spatialReferenceID=spatialReferenceID, replaceParamFile=replaceParamFile, **kwargs)
Read specific IO file for a GSSHA project to the database.
Below is the the instruction that describes the task: ### Input: Read specific IO file for a GSSHA project to the database. ### Response: def _readXputFile(self, file_cards, card_name, directory, session, spatial=False, spatialReferenceID=None, replaceParamFile=None, **kwargs): """ Read specific IO file for a GSSHA project to the database. """ # Automatically derive the spatial reference system, if possible if spatialReferenceID is None: spatialReferenceID = self._automaticallyDeriveSpatialReferenceId(directory) card = self.getCard(card_name) if card: fileIO = file_cards[card.name] filename = card.value.strip('"').strip("'") # Invoke read method on each file return self._invokeRead(fileIO=fileIO, directory=directory, filename=filename, session=session, spatial=spatial, spatialReferenceID=spatialReferenceID, replaceParamFile=replaceParamFile, **kwargs)
def modify_instance_groups(self, instance_group_ids, new_sizes): """ Modify the number of nodes and configuration settings in an instance group. :type instance_group_ids: list(str) :param instance_group_ids: A list of the ID's of the instance groups to be modified :type new_sizes: list(int) :param new_sizes: A list of the new sizes for each instance group """ if type(instance_group_ids) != types.ListType: instance_group_ids = [instance_group_ids] if type(new_sizes) != types.ListType: new_sizes = [new_sizes] instance_groups = zip(instance_group_ids, new_sizes) params = {} for k, ig in enumerate(instance_groups): # could be wrong - the example amazon gives uses # InstanceRequestCount, while the api documentation # says InstanceCount params['InstanceGroups.member.%d.InstanceGroupId' % (k+1) ] = ig[0] params['InstanceGroups.member.%d.InstanceCount' % (k+1) ] = ig[1] return self.get_object('ModifyInstanceGroups', params, ModifyInstanceGroupsResponse, verb='POST')
Modify the number of nodes and configuration settings in an instance group. :type instance_group_ids: list(str) :param instance_group_ids: A list of the ID's of the instance groups to be modified :type new_sizes: list(int) :param new_sizes: A list of the new sizes for each instance group
Below is the the instruction that describes the task: ### Input: Modify the number of nodes and configuration settings in an instance group. :type instance_group_ids: list(str) :param instance_group_ids: A list of the ID's of the instance groups to be modified :type new_sizes: list(int) :param new_sizes: A list of the new sizes for each instance group ### Response: def modify_instance_groups(self, instance_group_ids, new_sizes): """ Modify the number of nodes and configuration settings in an instance group. :type instance_group_ids: list(str) :param instance_group_ids: A list of the ID's of the instance groups to be modified :type new_sizes: list(int) :param new_sizes: A list of the new sizes for each instance group """ if type(instance_group_ids) != types.ListType: instance_group_ids = [instance_group_ids] if type(new_sizes) != types.ListType: new_sizes = [new_sizes] instance_groups = zip(instance_group_ids, new_sizes) params = {} for k, ig in enumerate(instance_groups): # could be wrong - the example amazon gives uses # InstanceRequestCount, while the api documentation # says InstanceCount params['InstanceGroups.member.%d.InstanceGroupId' % (k+1) ] = ig[0] params['InstanceGroups.member.%d.InstanceCount' % (k+1) ] = ig[1] return self.get_object('ModifyInstanceGroups', params, ModifyInstanceGroupsResponse, verb='POST')
def internal_assert(condition, message=None, item=None, extra=None): """Raise InternalException if condition is False. If condition is a function, execute it on DEVELOP only.""" if DEVELOP and callable(condition): condition = condition() if not condition: if message is None: message = "assertion failed" if item is None: item = condition raise CoconutInternalException(message, item, extra)
Raise InternalException if condition is False. If condition is a function, execute it on DEVELOP only.
Below is the the instruction that describes the task: ### Input: Raise InternalException if condition is False. If condition is a function, execute it on DEVELOP only. ### Response: def internal_assert(condition, message=None, item=None, extra=None): """Raise InternalException if condition is False. If condition is a function, execute it on DEVELOP only.""" if DEVELOP and callable(condition): condition = condition() if not condition: if message is None: message = "assertion failed" if item is None: item = condition raise CoconutInternalException(message, item, extra)
def create_or_update(self, resource_group, name, container_group): """ Create a new container group :param resource_group: the name of the resource group :type resource_group: str :param name: the name of the container group :type name: str :param container_group: the properties of the container group :type container_group: azure.mgmt.containerinstance.models.ContainerGroup """ self.connection.container_groups.create_or_update(resource_group, name, container_group)
Create a new container group :param resource_group: the name of the resource group :type resource_group: str :param name: the name of the container group :type name: str :param container_group: the properties of the container group :type container_group: azure.mgmt.containerinstance.models.ContainerGroup
Below is the the instruction that describes the task: ### Input: Create a new container group :param resource_group: the name of the resource group :type resource_group: str :param name: the name of the container group :type name: str :param container_group: the properties of the container group :type container_group: azure.mgmt.containerinstance.models.ContainerGroup ### Response: def create_or_update(self, resource_group, name, container_group): """ Create a new container group :param resource_group: the name of the resource group :type resource_group: str :param name: the name of the container group :type name: str :param container_group: the properties of the container group :type container_group: azure.mgmt.containerinstance.models.ContainerGroup """ self.connection.container_groups.create_or_update(resource_group, name, container_group)
def pop(self): """Pop a request""" method_frame, header, body = self.server.basic_get(queue=self.key) if body: return self._decode_request(body)
Pop a request
Below is the the instruction that describes the task: ### Input: Pop a request ### Response: def pop(self): """Pop a request""" method_frame, header, body = self.server.basic_get(queue=self.key) if body: return self._decode_request(body)
def mxmtg(m1, m2, nrow1, nc1c2, nrow2): """ Multiply a matrix and the transpose of a matrix, both of arbitrary size. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/mxmtg_c.html :param m1: Left-hand matrix to be multiplied. :type m1: NxM-Element Array of floats :param m2: Right-hand matrix whose transpose is to be multiplied :type m2: NxM-Element Array of floats :param nrow1: Row dimension of m1 and row dimension of mout. :type nrow1: int :param nc1c2: Column dimension of m1 and column dimension of m2. :type nc1c2: int :param nrow2: Row dimension of m2 and column dimension of mout. :type nrow2: int :return: Product matrix. :rtype: NxM-Element Array of floats """ m1 = stypes.toDoubleMatrix(m1) m2 = stypes.toDoubleMatrix(m2) mout = stypes.emptyDoubleMatrix(x=nrow2, y=nrow1) nrow1 = ctypes.c_int(nrow1) nc1c2 = ctypes.c_int(nc1c2) nrow2 = ctypes.c_int(nrow2) libspice.mxmtg_c(m1, m2, nrow1, nc1c2, nrow2, mout) return stypes.cMatrixToNumpy(mout)
Multiply a matrix and the transpose of a matrix, both of arbitrary size. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/mxmtg_c.html :param m1: Left-hand matrix to be multiplied. :type m1: NxM-Element Array of floats :param m2: Right-hand matrix whose transpose is to be multiplied :type m2: NxM-Element Array of floats :param nrow1: Row dimension of m1 and row dimension of mout. :type nrow1: int :param nc1c2: Column dimension of m1 and column dimension of m2. :type nc1c2: int :param nrow2: Row dimension of m2 and column dimension of mout. :type nrow2: int :return: Product matrix. :rtype: NxM-Element Array of floats
Below is the the instruction that describes the task: ### Input: Multiply a matrix and the transpose of a matrix, both of arbitrary size. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/mxmtg_c.html :param m1: Left-hand matrix to be multiplied. :type m1: NxM-Element Array of floats :param m2: Right-hand matrix whose transpose is to be multiplied :type m2: NxM-Element Array of floats :param nrow1: Row dimension of m1 and row dimension of mout. :type nrow1: int :param nc1c2: Column dimension of m1 and column dimension of m2. :type nc1c2: int :param nrow2: Row dimension of m2 and column dimension of mout. :type nrow2: int :return: Product matrix. :rtype: NxM-Element Array of floats ### Response: def mxmtg(m1, m2, nrow1, nc1c2, nrow2): """ Multiply a matrix and the transpose of a matrix, both of arbitrary size. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/mxmtg_c.html :param m1: Left-hand matrix to be multiplied. :type m1: NxM-Element Array of floats :param m2: Right-hand matrix whose transpose is to be multiplied :type m2: NxM-Element Array of floats :param nrow1: Row dimension of m1 and row dimension of mout. :type nrow1: int :param nc1c2: Column dimension of m1 and column dimension of m2. :type nc1c2: int :param nrow2: Row dimension of m2 and column dimension of mout. :type nrow2: int :return: Product matrix. :rtype: NxM-Element Array of floats """ m1 = stypes.toDoubleMatrix(m1) m2 = stypes.toDoubleMatrix(m2) mout = stypes.emptyDoubleMatrix(x=nrow2, y=nrow1) nrow1 = ctypes.c_int(nrow1) nc1c2 = ctypes.c_int(nc1c2) nrow2 = ctypes.c_int(nrow2) libspice.mxmtg_c(m1, m2, nrow1, nc1c2, nrow2, mout) return stypes.cMatrixToNumpy(mout)
def post_build(self, pkt, pay): """ need to set the length of the whole PDU manually to avoid any bit fiddling use a dummy class to build the layer content also add padding if frame is < 64 bytes Note: padding only handles Ether/n*Dot1Q/EtherCat (no special mumbo jumbo) :param pkt: raw string containing the current layer :param pay: raw string containing the payload :return: <new current layer> + payload """ class _EtherCatLengthCalc(Packet): """ dummy class used to generate str representation easily """ fields_desc = [ LEBitField('length', None, 11), LEBitField('_reserved', 0, 1), LEBitField('type', 0, 4), ] payload_len = len(pay) # length field is 11 bit if payload_len > 2047: raise ValueError('payload size {} exceeds maximum length {} ' 'of EtherCat message.'.format(payload_len, 2047)) self.length = payload_len vlan_headers_total_size = 0 upper_layer = self.underlayer # add size occupied by VLAN tags while upper_layer and isinstance(upper_layer, Dot1Q): vlan_headers_total_size += 4 upper_layer = upper_layer.underlayer if not isinstance(upper_layer, Ether): raise Exception('missing Ether layer') pad_len = EtherCat.ETHER_FRAME_MIN_LEN - (EtherCat.ETHER_HEADER_LEN + vlan_headers_total_size + EtherCat.ETHERCAT_HEADER_LEN + # noqa: E501 payload_len + EtherCat.ETHER_FSC_LEN) if pad_len > 0: pad = Padding() pad.load = b'\x00' * pad_len return raw(_EtherCatLengthCalc(length=self.length, type=self.type)) + pay + raw(pad) return raw(_EtherCatLengthCalc(length=self.length, type=self.type)) + pay
need to set the length of the whole PDU manually to avoid any bit fiddling use a dummy class to build the layer content also add padding if frame is < 64 bytes Note: padding only handles Ether/n*Dot1Q/EtherCat (no special mumbo jumbo) :param pkt: raw string containing the current layer :param pay: raw string containing the payload :return: <new current layer> + payload
Below is the the instruction that describes the task: ### Input: need to set the length of the whole PDU manually to avoid any bit fiddling use a dummy class to build the layer content also add padding if frame is < 64 bytes Note: padding only handles Ether/n*Dot1Q/EtherCat (no special mumbo jumbo) :param pkt: raw string containing the current layer :param pay: raw string containing the payload :return: <new current layer> + payload ### Response: def post_build(self, pkt, pay): """ need to set the length of the whole PDU manually to avoid any bit fiddling use a dummy class to build the layer content also add padding if frame is < 64 bytes Note: padding only handles Ether/n*Dot1Q/EtherCat (no special mumbo jumbo) :param pkt: raw string containing the current layer :param pay: raw string containing the payload :return: <new current layer> + payload """ class _EtherCatLengthCalc(Packet): """ dummy class used to generate str representation easily """ fields_desc = [ LEBitField('length', None, 11), LEBitField('_reserved', 0, 1), LEBitField('type', 0, 4), ] payload_len = len(pay) # length field is 11 bit if payload_len > 2047: raise ValueError('payload size {} exceeds maximum length {} ' 'of EtherCat message.'.format(payload_len, 2047)) self.length = payload_len vlan_headers_total_size = 0 upper_layer = self.underlayer # add size occupied by VLAN tags while upper_layer and isinstance(upper_layer, Dot1Q): vlan_headers_total_size += 4 upper_layer = upper_layer.underlayer if not isinstance(upper_layer, Ether): raise Exception('missing Ether layer') pad_len = EtherCat.ETHER_FRAME_MIN_LEN - (EtherCat.ETHER_HEADER_LEN + vlan_headers_total_size + EtherCat.ETHERCAT_HEADER_LEN + # noqa: E501 payload_len + EtherCat.ETHER_FSC_LEN) if pad_len > 0: pad = Padding() pad.load = b'\x00' * pad_len return raw(_EtherCatLengthCalc(length=self.length, type=self.type)) + pay + raw(pad) return raw(_EtherCatLengthCalc(length=self.length, type=self.type)) + pay
def checkMaxSessions(self, nMax=None): """ check whether max. number of saved sessions is reached if: remove the oldest session """ if nMax is None: nMax = self.opts['maxSessions'] l = self.stateNames() if len(l) > nMax: for f in l[:len(l) - nMax]: self.tmp_dir_session.remove(str(f))
check whether max. number of saved sessions is reached if: remove the oldest session
Below is the the instruction that describes the task: ### Input: check whether max. number of saved sessions is reached if: remove the oldest session ### Response: def checkMaxSessions(self, nMax=None): """ check whether max. number of saved sessions is reached if: remove the oldest session """ if nMax is None: nMax = self.opts['maxSessions'] l = self.stateNames() if len(l) > nMax: for f in l[:len(l) - nMax]: self.tmp_dir_session.remove(str(f))
def list(self, offset=0, limit=0, fields=None, sort=None, **kwargs): """Return filtered list of documents in a collection. For text-based search, we support searching on a name/string field by regex and text index. So strings passed in to a r=text search are used to filter collections by text index and regex on a named field. :param offset: for pagination, which record to start attribute :param limit: for pagination, how many records to return :param fields: list of field names to return (otherwise returns all) :param sort: list of fields to sort by (prefix with '-' for descending) :param kwargs: key/values to find (only supports equality for now) :returns: a tuple of the list of documents and the total count """ try: cursor = self._cursor(offset=offset, limit=limit, fields=fields, sort=sort, **kwargs) return list(cursor), cursor.count() except pymongo.errors.OperationFailure as exc: # This is workaround for mongodb v2.4 and 'q' filter params try: kwargs['$or'][0]['$text']['$search'] except (KeyError, IndexError): raise exc LOG.warn("Falling back to hard-coded mongo v2.4 search behavior") kwargs = self.search_alternative(limit, **kwargs) LOG.debug("Modified kwargs: %s", kwargs) cursor = self._cursor(offset=offset, limit=limit, fields=fields, sort=sort, **kwargs) return list(cursor), cursor.count()
Return filtered list of documents in a collection. For text-based search, we support searching on a name/string field by regex and text index. So strings passed in to a r=text search are used to filter collections by text index and regex on a named field. :param offset: for pagination, which record to start attribute :param limit: for pagination, how many records to return :param fields: list of field names to return (otherwise returns all) :param sort: list of fields to sort by (prefix with '-' for descending) :param kwargs: key/values to find (only supports equality for now) :returns: a tuple of the list of documents and the total count
Below is the the instruction that describes the task: ### Input: Return filtered list of documents in a collection. For text-based search, we support searching on a name/string field by regex and text index. So strings passed in to a r=text search are used to filter collections by text index and regex on a named field. :param offset: for pagination, which record to start attribute :param limit: for pagination, how many records to return :param fields: list of field names to return (otherwise returns all) :param sort: list of fields to sort by (prefix with '-' for descending) :param kwargs: key/values to find (only supports equality for now) :returns: a tuple of the list of documents and the total count ### Response: def list(self, offset=0, limit=0, fields=None, sort=None, **kwargs): """Return filtered list of documents in a collection. For text-based search, we support searching on a name/string field by regex and text index. So strings passed in to a r=text search are used to filter collections by text index and regex on a named field. 
:param offset: for pagination, which record to start attribute :param limit: for pagination, how many records to return :param fields: list of field names to return (otherwise returns all) :param sort: list of fields to sort by (prefix with '-' for descending) :param kwargs: key/values to find (only supports equality for now) :returns: a tuple of the list of documents and the total count """ try: cursor = self._cursor(offset=offset, limit=limit, fields=fields, sort=sort, **kwargs) return list(cursor), cursor.count() except pymongo.errors.OperationFailure as exc: # This is workaround for mongodb v2.4 and 'q' filter params try: kwargs['$or'][0]['$text']['$search'] except (KeyError, IndexError): raise exc LOG.warn("Falling back to hard-coded mongo v2.4 search behavior") kwargs = self.search_alternative(limit, **kwargs) LOG.debug("Modified kwargs: %s", kwargs) cursor = self._cursor(offset=offset, limit=limit, fields=fields, sort=sort, **kwargs) return list(cursor), cursor.count()
def _verifyReturnToArgs(query): """Verify that the arguments in the return_to URL are present in this response. """ message = Message.fromPostArgs(query) return_to = message.getArg(OPENID_NS, 'return_to') if return_to is None: raise ProtocolError('Response has no return_to') parsed_url = urlparse(return_to) rt_query = parsed_url[4] parsed_args = cgi.parse_qsl(rt_query) for rt_key, rt_value in parsed_args: try: value = query[rt_key] if rt_value != value: format = ("parameter %s value %r does not match " "return_to's value %r") raise ProtocolError(format % (rt_key, value, rt_value)) except KeyError: format = "return_to parameter %s absent from query %r" raise ProtocolError(format % (rt_key, query)) # Make sure all non-OpenID arguments in the response are also # in the signed return_to. bare_args = message.getArgs(BARE_NS) for pair in bare_args.iteritems(): if pair not in parsed_args: raise ProtocolError("Parameter %s not in return_to URL" % (pair[0],))
Verify that the arguments in the return_to URL are present in this response.
Below is the the instruction that describes the task: ### Input: Verify that the arguments in the return_to URL are present in this response. ### Response: def _verifyReturnToArgs(query): """Verify that the arguments in the return_to URL are present in this response. """ message = Message.fromPostArgs(query) return_to = message.getArg(OPENID_NS, 'return_to') if return_to is None: raise ProtocolError('Response has no return_to') parsed_url = urlparse(return_to) rt_query = parsed_url[4] parsed_args = cgi.parse_qsl(rt_query) for rt_key, rt_value in parsed_args: try: value = query[rt_key] if rt_value != value: format = ("parameter %s value %r does not match " "return_to's value %r") raise ProtocolError(format % (rt_key, value, rt_value)) except KeyError: format = "return_to parameter %s absent from query %r" raise ProtocolError(format % (rt_key, query)) # Make sure all non-OpenID arguments in the response are also # in the signed return_to. bare_args = message.getArgs(BARE_NS) for pair in bare_args.iteritems(): if pair not in parsed_args: raise ProtocolError("Parameter %s not in return_to URL" % (pair[0],))
def GET_save_modifiedconditionitemvalues(self) -> None: """ToDo: extend functionality and add tests""" for item in state.conditionitems: state.modifiedconditionitemvalues[self._id][item.name] = \ list(item.device2target.values())[0].value
ToDo: extend functionality and add tests
Below is the the instruction that describes the task: ### Input: ToDo: extend functionality and add tests ### Response: def GET_save_modifiedconditionitemvalues(self) -> None: """ToDo: extend functionality and add tests""" for item in state.conditionitems: state.modifiedconditionitemvalues[self._id][item.name] = \ list(item.device2target.values())[0].value
def base64url_to_long(data): """ Stricter then base64_to_long since it really checks that it's base64url encoded :param data: The base64 string :return: """ _data = as_bytes(data) _d = base64.urlsafe_b64decode(_data + b'==') # verify that it's base64url encoded and not just base64 # that is no '+' and '/' characters and not trailing "="s. if [e for e in [b'+', b'/', b'='] if e in _data]: raise ValueError("Not base64url encoded") return intarr2long(struct.unpack('%sB' % len(_d), _d))
Stricter then base64_to_long since it really checks that it's base64url encoded :param data: The base64 string :return:
Below is the the instruction that describes the task: ### Input: Stricter then base64_to_long since it really checks that it's base64url encoded :param data: The base64 string :return: ### Response: def base64url_to_long(data): """ Stricter then base64_to_long since it really checks that it's base64url encoded :param data: The base64 string :return: """ _data = as_bytes(data) _d = base64.urlsafe_b64decode(_data + b'==') # verify that it's base64url encoded and not just base64 # that is no '+' and '/' characters and not trailing "="s. if [e for e in [b'+', b'/', b'='] if e in _data]: raise ValueError("Not base64url encoded") return intarr2long(struct.unpack('%sB' % len(_d), _d))
def normalize_sphere(alpha, delta): """Normalize angles of a point on a sphere. Parameters ---------- alpha: float The alpha (right ascension/longitude like) angle in degrees. delta: float The delta (declination/latitude like) angle in degrees. Returns ------- (alpha, delta): (float, float) Normalized alpha (degrees) and delta (degrees). Notes ----- This function converts given position on a sphere into the simplest normalized values, considering that the points are on a sphere. Input position Output position (180, 91) (0, 89) (180, -91) (0, -89) (0, 91) (180, 89) (0, -91) (180, -89) (120, 280) (120, -80) (h2d(25), 45) (225, 45) (h2d(-25), -45) (345, -45) """ v = CartesianVector.from_spherical(r=1.0, alpha=d2r(alpha), delta=d2r(delta)) angles = v.normalized_angles return r2d(angles[0]), r2d(angles[1])
Normalize angles of a point on a sphere. Parameters ---------- alpha: float The alpha (right ascension/longitude like) angle in degrees. delta: float The delta (declination/latitude like) angle in degrees. Returns ------- (alpha, delta): (float, float) Normalized alpha (degrees) and delta (degrees). Notes ----- This function converts given position on a sphere into the simplest normalized values, considering that the points are on a sphere. Input position Output position (180, 91) (0, 89) (180, -91) (0, -89) (0, 91) (180, 89) (0, -91) (180, -89) (120, 280) (120, -80) (h2d(25), 45) (225, 45) (h2d(-25), -45) (345, -45)
Below is the the instruction that describes the task: ### Input: Normalize angles of a point on a sphere. Parameters ---------- alpha: float The alpha (right ascension/longitude like) angle in degrees. delta: float The delta (declination/latitude like) angle in degrees. Returns ------- (alpha, delta): (float, float) Normalized alpha (degrees) and delta (degrees). Notes ----- This function converts given position on a sphere into the simplest normalized values, considering that the points are on a sphere. Input position Output position (180, 91) (0, 89) (180, -91) (0, -89) (0, 91) (180, 89) (0, -91) (180, -89) (120, 280) (120, -80) (h2d(25), 45) (225, 45) (h2d(-25), -45) (345, -45) ### Response: def normalize_sphere(alpha, delta): """Normalize angles of a point on a sphere. Parameters ---------- alpha: float The alpha (right ascension/longitude like) angle in degrees. delta: float The delta (declination/latitude like) angle in degrees. Returns ------- (alpha, delta): (float, float) Normalized alpha (degrees) and delta (degrees). Notes ----- This function converts given position on a sphere into the simplest normalized values, considering that the points are on a sphere. Input position Output position (180, 91) (0, 89) (180, -91) (0, -89) (0, 91) (180, 89) (0, -91) (180, -89) (120, 280) (120, -80) (h2d(25), 45) (225, 45) (h2d(-25), -45) (345, -45) """ v = CartesianVector.from_spherical(r=1.0, alpha=d2r(alpha), delta=d2r(delta)) angles = v.normalized_angles return r2d(angles[0]), r2d(angles[1])
def fts_contrast2(self, fs, ft_name, inv): """Return `True` if there is a segment in `inv` that contrasts in feature `ft_name`. Args: fs (list): feature specifications used to filter `inv`. ft_name (str): name of the feature where contrast must be present. inv (list): collection of segments represented as Unicode segments. Returns: bool: `True` if two segments in `inv` are identical in features except for feature `ft_name` """ inv_fts = [self.fts(x) for x in inv if set(fs) <= self.fts(x)] for a in inv_fts: for b in inv_fts: if a != b: diff = a ^ b if len(diff) == 2: if all([nm == ft_name for (_, nm) in diff]): return True return False
Return `True` if there is a segment in `inv` that contrasts in feature `ft_name`. Args: fs (list): feature specifications used to filter `inv`. ft_name (str): name of the feature where contrast must be present. inv (list): collection of segments represented as Unicode segments. Returns: bool: `True` if two segments in `inv` are identical in features except for feature `ft_name`
Below is the the instruction that describes the task: ### Input: Return `True` if there is a segment in `inv` that contrasts in feature `ft_name`. Args: fs (list): feature specifications used to filter `inv`. ft_name (str): name of the feature where contrast must be present. inv (list): collection of segments represented as Unicode segments. Returns: bool: `True` if two segments in `inv` are identical in features except for feature `ft_name` ### Response: def fts_contrast2(self, fs, ft_name, inv): """Return `True` if there is a segment in `inv` that contrasts in feature `ft_name`. Args: fs (list): feature specifications used to filter `inv`. ft_name (str): name of the feature where contrast must be present. inv (list): collection of segments represented as Unicode segments. Returns: bool: `True` if two segments in `inv` are identical in features except for feature `ft_name` """ inv_fts = [self.fts(x) for x in inv if set(fs) <= self.fts(x)] for a in inv_fts: for b in inv_fts: if a != b: diff = a ^ b if len(diff) == 2: if all([nm == ft_name for (_, nm) in diff]): return True return False
def needs_refresh(self): ''' This is called when the user is logged in, but they need to be reauthenticated because their session is stale. If you register a callback with `needs_refresh_handler`, then it will be called. Otherwise, it will take the following actions: - Flash :attr:`LoginManager.needs_refresh_message` to the user. - Redirect the user to :attr:`LoginManager.refresh_view`. (The page they were attempting to access will be passed in the ``next`` query string variable, so you can redirect there if present instead of the homepage.) If :attr:`LoginManager.refresh_view` is not defined, then it will simply raise a HTTP 401 (Unauthorized) error instead. This should be returned from a view or before/after_request function, otherwise the redirect will have no effect. ''' user_needs_refresh.send(current_app._get_current_object()) if self.needs_refresh_callback: return self.needs_refresh_callback() if not self.refresh_view: abort(401) if self.localize_callback is not None: flash(self.localize_callback(self.needs_refresh_message), category=self.needs_refresh_message_category) else: flash(self.needs_refresh_message, category=self.needs_refresh_message_category) config = current_app.config if config.get('USE_SESSION_FOR_NEXT', USE_SESSION_FOR_NEXT): login_url = expand_login_view(self.refresh_view) session['_id'] = self._session_identifier_generator() session['next'] = make_next_param(login_url, request.url) redirect_url = make_login_url(self.refresh_view) else: login_url = self.refresh_view redirect_url = make_login_url(login_url, next_url=request.url) return redirect(redirect_url)
This is called when the user is logged in, but they need to be reauthenticated because their session is stale. If you register a callback with `needs_refresh_handler`, then it will be called. Otherwise, it will take the following actions: - Flash :attr:`LoginManager.needs_refresh_message` to the user. - Redirect the user to :attr:`LoginManager.refresh_view`. (The page they were attempting to access will be passed in the ``next`` query string variable, so you can redirect there if present instead of the homepage.) If :attr:`LoginManager.refresh_view` is not defined, then it will simply raise a HTTP 401 (Unauthorized) error instead. This should be returned from a view or before/after_request function, otherwise the redirect will have no effect.
Below is the the instruction that describes the task: ### Input: This is called when the user is logged in, but they need to be reauthenticated because their session is stale. If you register a callback with `needs_refresh_handler`, then it will be called. Otherwise, it will take the following actions: - Flash :attr:`LoginManager.needs_refresh_message` to the user. - Redirect the user to :attr:`LoginManager.refresh_view`. (The page they were attempting to access will be passed in the ``next`` query string variable, so you can redirect there if present instead of the homepage.) If :attr:`LoginManager.refresh_view` is not defined, then it will simply raise a HTTP 401 (Unauthorized) error instead. This should be returned from a view or before/after_request function, otherwise the redirect will have no effect. ### Response: def needs_refresh(self): ''' This is called when the user is logged in, but they need to be reauthenticated because their session is stale. If you register a callback with `needs_refresh_handler`, then it will be called. Otherwise, it will take the following actions: - Flash :attr:`LoginManager.needs_refresh_message` to the user. - Redirect the user to :attr:`LoginManager.refresh_view`. (The page they were attempting to access will be passed in the ``next`` query string variable, so you can redirect there if present instead of the homepage.) If :attr:`LoginManager.refresh_view` is not defined, then it will simply raise a HTTP 401 (Unauthorized) error instead. This should be returned from a view or before/after_request function, otherwise the redirect will have no effect. 
''' user_needs_refresh.send(current_app._get_current_object()) if self.needs_refresh_callback: return self.needs_refresh_callback() if not self.refresh_view: abort(401) if self.localize_callback is not None: flash(self.localize_callback(self.needs_refresh_message), category=self.needs_refresh_message_category) else: flash(self.needs_refresh_message, category=self.needs_refresh_message_category) config = current_app.config if config.get('USE_SESSION_FOR_NEXT', USE_SESSION_FOR_NEXT): login_url = expand_login_view(self.refresh_view) session['_id'] = self._session_identifier_generator() session['next'] = make_next_param(login_url, request.url) redirect_url = make_login_url(self.refresh_view) else: login_url = self.refresh_view redirect_url = make_login_url(login_url, next_url=request.url) return redirect(redirect_url)
def generate_file_rst(fname, target_dir, src_dir, gallery_conf): """ Generate the rst file for a given example. Returns the amout of code (in characters) of the corresponding files. """ src_file = os.path.join(src_dir, fname) example_file = os.path.join(target_dir, fname) shutil.copyfile(src_file, example_file) image_dir = os.path.join(target_dir, 'images') if not os.path.exists(image_dir): os.makedirs(image_dir) base_image_name = os.path.splitext(fname)[0] image_fname = 'sphx_glr_' + base_image_name + '_{0:03}.png' image_path = os.path.join(image_dir, image_fname) script_blocks = split_code_and_text_blocks(example_file) amount_of_code = sum([len(bcontent) for blabel, bcontent in script_blocks if blabel == 'code']) if _plots_are_current(example_file, image_path): return amount_of_code time_elapsed = 0 ref_fname = example_file.replace(os.path.sep, '_') example_rst = """\n\n.. _sphx_glr_{0}:\n\n""".format(ref_fname) example_nb = Notebook(fname, target_dir) filename_pattern = gallery_conf.get('filename_pattern') if re.search(filename_pattern, src_file) and gallery_conf['plot_gallery']: # A lot of examples contains 'print(__doc__)' for example in # scikit-learn so that running the example prints some useful # information. 
Because the docstring has been separated from # the code blocks in sphinx-gallery, __doc__ is actually # __builtin__.__doc__ in the execution context and we do not # want to print it example_globals = {'__doc__': ''} fig_count = 0 # A simple example has two blocks: one for the # example introduction/explanation and one for the code is_example_notebook_like = len(script_blocks) > 2 for blabel, bcontent in script_blocks: if blabel == 'code': code_output, rtime, fig_count = execute_script(bcontent, example_globals, image_path, fig_count, src_file, gallery_conf) time_elapsed += rtime example_nb.add_code_cell(bcontent) if is_example_notebook_like: example_rst += codestr2rst(bcontent) + '\n' example_rst += code_output else: example_rst += code_output example_rst += codestr2rst(bcontent) + '\n' else: example_rst += text2string(bcontent) + '\n' example_nb.add_markdown_cell(text2string(bcontent)) else: for blabel, bcontent in script_blocks: if blabel == 'code': example_rst += codestr2rst(bcontent) + '\n' example_nb.add_code_cell(bcontent) else: example_rst += bcontent + '\n' example_nb.add_markdown_cell(text2string(bcontent)) save_thumbnail(image_path, base_image_name, gallery_conf) time_m, time_s = divmod(time_elapsed, 60) example_nb.save_file() with open(os.path.join(target_dir, base_image_name + '.rst'), 'w') as f: example_rst += CODE_DOWNLOAD.format(time_m, time_s, fname, example_nb.file_name) f.write(example_rst) return amount_of_code
Generate the rst file for a given example. Returns the amout of code (in characters) of the corresponding files.
Below is the the instruction that describes the task: ### Input: Generate the rst file for a given example. Returns the amout of code (in characters) of the corresponding files. ### Response: def generate_file_rst(fname, target_dir, src_dir, gallery_conf): """ Generate the rst file for a given example. Returns the amout of code (in characters) of the corresponding files. """ src_file = os.path.join(src_dir, fname) example_file = os.path.join(target_dir, fname) shutil.copyfile(src_file, example_file) image_dir = os.path.join(target_dir, 'images') if not os.path.exists(image_dir): os.makedirs(image_dir) base_image_name = os.path.splitext(fname)[0] image_fname = 'sphx_glr_' + base_image_name + '_{0:03}.png' image_path = os.path.join(image_dir, image_fname) script_blocks = split_code_and_text_blocks(example_file) amount_of_code = sum([len(bcontent) for blabel, bcontent in script_blocks if blabel == 'code']) if _plots_are_current(example_file, image_path): return amount_of_code time_elapsed = 0 ref_fname = example_file.replace(os.path.sep, '_') example_rst = """\n\n.. _sphx_glr_{0}:\n\n""".format(ref_fname) example_nb = Notebook(fname, target_dir) filename_pattern = gallery_conf.get('filename_pattern') if re.search(filename_pattern, src_file) and gallery_conf['plot_gallery']: # A lot of examples contains 'print(__doc__)' for example in # scikit-learn so that running the example prints some useful # information. 
Because the docstring has been separated from # the code blocks in sphinx-gallery, __doc__ is actually # __builtin__.__doc__ in the execution context and we do not # want to print it example_globals = {'__doc__': ''} fig_count = 0 # A simple example has two blocks: one for the # example introduction/explanation and one for the code is_example_notebook_like = len(script_blocks) > 2 for blabel, bcontent in script_blocks: if blabel == 'code': code_output, rtime, fig_count = execute_script(bcontent, example_globals, image_path, fig_count, src_file, gallery_conf) time_elapsed += rtime example_nb.add_code_cell(bcontent) if is_example_notebook_like: example_rst += codestr2rst(bcontent) + '\n' example_rst += code_output else: example_rst += code_output example_rst += codestr2rst(bcontent) + '\n' else: example_rst += text2string(bcontent) + '\n' example_nb.add_markdown_cell(text2string(bcontent)) else: for blabel, bcontent in script_blocks: if blabel == 'code': example_rst += codestr2rst(bcontent) + '\n' example_nb.add_code_cell(bcontent) else: example_rst += bcontent + '\n' example_nb.add_markdown_cell(text2string(bcontent)) save_thumbnail(image_path, base_image_name, gallery_conf) time_m, time_s = divmod(time_elapsed, 60) example_nb.save_file() with open(os.path.join(target_dir, base_image_name + '.rst'), 'w') as f: example_rst += CODE_DOWNLOAD.format(time_m, time_s, fname, example_nb.file_name) f.write(example_rst) return amount_of_code
def calcparams_cec(self, effective_irradiance, temp_cell, **kwargs): """ Use the :py:func:`calcparams_cec` function, the input parameters and ``self.module_parameters`` to calculate the module currents and resistances. Parameters ---------- effective_irradiance : numeric The irradiance (W/m2) that is converted to photocurrent. temp_cell : float or Series The average cell temperature of cells within a module in C. **kwargs See pvsystem.calcparams_cec for details Returns ------- See pvsystem.calcparams_cec for details """ kwargs = _build_kwargs(['a_ref', 'I_L_ref', 'I_o_ref', 'R_sh_ref', 'R_s', 'alpha_sc', 'Adjust', 'EgRef', 'dEgdT', 'irrad_ref', 'temp_ref'], self.module_parameters) return calcparams_cec(effective_irradiance, temp_cell, **kwargs)
Use the :py:func:`calcparams_cec` function, the input parameters and ``self.module_parameters`` to calculate the module currents and resistances. Parameters ---------- effective_irradiance : numeric The irradiance (W/m2) that is converted to photocurrent. temp_cell : float or Series The average cell temperature of cells within a module in C. **kwargs See pvsystem.calcparams_cec for details Returns ------- See pvsystem.calcparams_cec for details
Below is the the instruction that describes the task: ### Input: Use the :py:func:`calcparams_cec` function, the input parameters and ``self.module_parameters`` to calculate the module currents and resistances. Parameters ---------- effective_irradiance : numeric The irradiance (W/m2) that is converted to photocurrent. temp_cell : float or Series The average cell temperature of cells within a module in C. **kwargs See pvsystem.calcparams_cec for details Returns ------- See pvsystem.calcparams_cec for details ### Response: def calcparams_cec(self, effective_irradiance, temp_cell, **kwargs): """ Use the :py:func:`calcparams_cec` function, the input parameters and ``self.module_parameters`` to calculate the module currents and resistances. Parameters ---------- effective_irradiance : numeric The irradiance (W/m2) that is converted to photocurrent. temp_cell : float or Series The average cell temperature of cells within a module in C. **kwargs See pvsystem.calcparams_cec for details Returns ------- See pvsystem.calcparams_cec for details """ kwargs = _build_kwargs(['a_ref', 'I_L_ref', 'I_o_ref', 'R_sh_ref', 'R_s', 'alpha_sc', 'Adjust', 'EgRef', 'dEgdT', 'irrad_ref', 'temp_ref'], self.module_parameters) return calcparams_cec(effective_irradiance, temp_cell, **kwargs)
def clone(self, folder, git_repository): """Ensures theme destination folder and clone git specified repo in it. :param git_repository: git url of the theme folder :param folder: path of the git managed theme folder """ os.makedirs(folder) git.Git().clone(git_repository, folder)
Ensures theme destination folder and clone git specified repo in it. :param git_repository: git url of the theme folder :param folder: path of the git managed theme folder
Below is the instruction that describes the task: ### Input: Ensures theme destination folder and clone git specified repo in it. :param git_repository: git url of the theme folder :param folder: path of the git managed theme folder ### Response: def clone(self, folder, git_repository): """Ensures theme destination folder and clone git specified repo in it. :param git_repository: git url of the theme folder :param folder: path of the git managed theme folder """ os.makedirs(folder) git.Git().clone(git_repository, folder)
def slice_list(in_list, lens): """Slice a list into several sub lists by a list of given length. Args: in_list (list): The list to be sliced. lens(int or list): The expected length of each out list. Returns: list: A list of sliced list. """ if not isinstance(lens, list): raise TypeError('"indices" must be a list of integers') elif sum(lens) != len(in_list): raise ValueError( 'sum of lens and list length does not match: {} != {}'.format( sum(lens), len(in_list))) out_list = [] idx = 0 for i in range(len(lens)): out_list.append(in_list[idx:idx + lens[i]]) idx += lens[i] return out_list
Slice a list into several sub lists by a list of given length. Args: in_list (list): The list to be sliced. lens(int or list): The expected length of each out list. Returns: list: A list of sliced list.
Below is the instruction that describes the task: ### Input: Slice a list into several sub lists by a list of given length. Args: in_list (list): The list to be sliced. lens(int or list): The expected length of each out list. Returns: list: A list of sliced list. ### Response: def slice_list(in_list, lens): """Slice a list into several sub lists by a list of given length. Args: in_list (list): The list to be sliced. lens(int or list): The expected length of each out list. Returns: list: A list of sliced list. """ if not isinstance(lens, list): raise TypeError('"indices" must be a list of integers') elif sum(lens) != len(in_list): raise ValueError( 'sum of lens and list length does not match: {} != {}'.format( sum(lens), len(in_list))) out_list = [] idx = 0 for i in range(len(lens)): out_list.append(in_list[idx:idx + lens[i]]) idx += lens[i] return out_list
def remove_compute(self, compute, **kwargs): """ Remove a 'compute' from the bundle :parameter str compute: name of the compute options :parameter **kwargs: any other tags to do the filter (except twig or context) :raise NotImplementedError: because it isn't """ kwargs['compute'] = compute kwargs['context'] = 'comute' self.remove_parameters_all(**kwargs)
Remove a 'compute' from the bundle :parameter str compute: name of the compute options :parameter **kwargs: any other tags to do the filter (except twig or context) :raise NotImplementedError: because it isn't
Below is the instruction that describes the task: ### Input: Remove a 'compute' from the bundle :parameter str compute: name of the compute options :parameter **kwargs: any other tags to do the filter (except twig or context) :raise NotImplementedError: because it isn't ### Response: def remove_compute(self, compute, **kwargs): """ Remove a 'compute' from the bundle :parameter str compute: name of the compute options :parameter **kwargs: any other tags to do the filter (except twig or context) :raise NotImplementedError: because it isn't """ kwargs['compute'] = compute kwargs['context'] = 'comute' self.remove_parameters_all(**kwargs)
def _filesec(self, files=None): """ Returns fileSec Element containing all files grouped by use. """ if files is None: files = self.all_files() filesec = etree.Element(utils.lxmlns("mets") + "fileSec") filegrps = {} for file_ in files: if file_.type.lower() not in ("item", AIP_ENTRY_TYPE): continue # Get fileGrp, or create if not exist filegrp = filegrps.get(file_.use) if filegrp is None: filegrp = etree.SubElement( filesec, utils.lxmlns("mets") + "fileGrp", USE=file_.use ) filegrps[file_.use] = filegrp file_el = file_.serialize_filesec() if file_el is not None: filegrp.append(file_el) return filesec
Returns fileSec Element containing all files grouped by use.
Below is the instruction that describes the task: ### Input: Returns fileSec Element containing all files grouped by use. ### Response: def _filesec(self, files=None): """ Returns fileSec Element containing all files grouped by use. """ if files is None: files = self.all_files() filesec = etree.Element(utils.lxmlns("mets") + "fileSec") filegrps = {} for file_ in files: if file_.type.lower() not in ("item", AIP_ENTRY_TYPE): continue # Get fileGrp, or create if not exist filegrp = filegrps.get(file_.use) if filegrp is None: filegrp = etree.SubElement( filesec, utils.lxmlns("mets") + "fileGrp", USE=file_.use ) filegrps[file_.use] = filegrp file_el = file_.serialize_filesec() if file_el is not None: filegrp.append(file_el) return filesec
def subn_filter(s, find, replace, count=0): """A non-optimal implementation of a regex filter""" return re.gsub(find, replace, count, s)
A non-optimal implementation of a regex filter
Below is the instruction that describes the task: ### Input: A non-optimal implementation of a regex filter ### Response: def subn_filter(s, find, replace, count=0): """A non-optimal implementation of a regex filter""" return re.gsub(find, replace, count, s)
def get_seebeck(self, output='eigs', doping_levels=True): """ Gives the seebeck coefficient (microV/K) in either a full 3x3 tensor form, as 3 eigenvalues, or as the average value (trace/3.0) If doping_levels=True, the results are given at different p and n doping levels (given by self.doping), otherwise it is given as a series of electron chemical potential values Args: output (string): the type of output. 'tensor' give the full 3x3 tensor, 'eigs' its 3 eigenvalues and 'average' the average of the three eigenvalues doping_levels (boolean): True for the results to be given at different doping levels, False for results at different electron chemical potentials Returns: If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}. The 'p' links to Seebeck at p-type doping and 'n' to the Seebeck at n-type doping. Otherwise, returns a {temp:[]} dictionary The result contains either the sorted three eigenvalues of the symmetric Seebeck tensor (output='eigs') or a full tensor (3x3 array) ( output='tensor') or as an average (output='average'). units are microV/K """ return BoltztrapAnalyzer._format_to_output(self._seebeck, self._seebeck_doping, output, doping_levels, 1e6)
Gives the seebeck coefficient (microV/K) in either a full 3x3 tensor form, as 3 eigenvalues, or as the average value (trace/3.0) If doping_levels=True, the results are given at different p and n doping levels (given by self.doping), otherwise it is given as a series of electron chemical potential values Args: output (string): the type of output. 'tensor' give the full 3x3 tensor, 'eigs' its 3 eigenvalues and 'average' the average of the three eigenvalues doping_levels (boolean): True for the results to be given at different doping levels, False for results at different electron chemical potentials Returns: If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}. The 'p' links to Seebeck at p-type doping and 'n' to the Seebeck at n-type doping. Otherwise, returns a {temp:[]} dictionary The result contains either the sorted three eigenvalues of the symmetric Seebeck tensor (output='eigs') or a full tensor (3x3 array) ( output='tensor') or as an average (output='average'). units are microV/K
Below is the the instruction that describes the task: ### Input: Gives the seebeck coefficient (microV/K) in either a full 3x3 tensor form, as 3 eigenvalues, or as the average value (trace/3.0) If doping_levels=True, the results are given at different p and n doping levels (given by self.doping), otherwise it is given as a series of electron chemical potential values Args: output (string): the type of output. 'tensor' give the full 3x3 tensor, 'eigs' its 3 eigenvalues and 'average' the average of the three eigenvalues doping_levels (boolean): True for the results to be given at different doping levels, False for results at different electron chemical potentials Returns: If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}. The 'p' links to Seebeck at p-type doping and 'n' to the Seebeck at n-type doping. Otherwise, returns a {temp:[]} dictionary The result contains either the sorted three eigenvalues of the symmetric Seebeck tensor (output='eigs') or a full tensor (3x3 array) ( output='tensor') or as an average (output='average'). units are microV/K ### Response: def get_seebeck(self, output='eigs', doping_levels=True): """ Gives the seebeck coefficient (microV/K) in either a full 3x3 tensor form, as 3 eigenvalues, or as the average value (trace/3.0) If doping_levels=True, the results are given at different p and n doping levels (given by self.doping), otherwise it is given as a series of electron chemical potential values Args: output (string): the type of output. 'tensor' give the full 3x3 tensor, 'eigs' its 3 eigenvalues and 'average' the average of the three eigenvalues doping_levels (boolean): True for the results to be given at different doping levels, False for results at different electron chemical potentials Returns: If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}. The 'p' links to Seebeck at p-type doping and 'n' to the Seebeck at n-type doping. 
Otherwise, returns a {temp:[]} dictionary The result contains either the sorted three eigenvalues of the symmetric Seebeck tensor (output='eigs') or a full tensor (3x3 array) ( output='tensor') or as an average (output='average'). units are microV/K """ return BoltztrapAnalyzer._format_to_output(self._seebeck, self._seebeck_doping, output, doping_levels, 1e6)
def _slugify(text, delim=u'-'): """Generates an ASCII-only slug.""" result = [] for word in _punct_re.split(text.lower()): word = word.encode('utf-8') if word: result.append(word) slugified = delim.join([i.decode('utf-8') for i in result]) return re.sub('[^a-zA-Z0-9\\s\\-]{1}', replace_char, slugified).lower()
Generates an ASCII-only slug.
Below is the instruction that describes the task: ### Input: Generates an ASCII-only slug. ### Response: def _slugify(text, delim=u'-'): """Generates an ASCII-only slug.""" result = [] for word in _punct_re.split(text.lower()): word = word.encode('utf-8') if word: result.append(word) slugified = delim.join([i.decode('utf-8') for i in result]) return re.sub('[^a-zA-Z0-9\\s\\-]{1}', replace_char, slugified).lower()
def int_gps_time_to_str(t): """Takes an integer GPS time, either given as int or lal.LIGOTimeGPS, and converts it to a string. If a LIGOTimeGPS with nonzero decimal part is given, raises a ValueError.""" if isinstance(t, int): return str(t) elif isinstance(t, float): # Wouldn't this just work generically? int_t = int(t) if abs(t - int_t) > 0.: raise ValueError('Need an integer GPS time, got %s' % str(t)) return str(int_t) elif isinstance(t, lal.LIGOTimeGPS): if t.gpsNanoSeconds == 0: return str(t.gpsSeconds) else: raise ValueError('Need an integer GPS time, got %s' % str(t)) else: err_msg = "Didn't understand input type {}".format(type(t)) raise ValueError(err_msg)
Takes an integer GPS time, either given as int or lal.LIGOTimeGPS, and converts it to a string. If a LIGOTimeGPS with nonzero decimal part is given, raises a ValueError.
Below is the instruction that describes the task: ### Input: Takes an integer GPS time, either given as int or lal.LIGOTimeGPS, and converts it to a string. If a LIGOTimeGPS with nonzero decimal part is given, raises a ValueError. ### Response: def int_gps_time_to_str(t): """Takes an integer GPS time, either given as int or lal.LIGOTimeGPS, and converts it to a string. If a LIGOTimeGPS with nonzero decimal part is given, raises a ValueError.""" if isinstance(t, int): return str(t) elif isinstance(t, float): # Wouldn't this just work generically? int_t = int(t) if abs(t - int_t) > 0.: raise ValueError('Need an integer GPS time, got %s' % str(t)) return str(int_t) elif isinstance(t, lal.LIGOTimeGPS): if t.gpsNanoSeconds == 0: return str(t.gpsSeconds) else: raise ValueError('Need an integer GPS time, got %s' % str(t)) else: err_msg = "Didn't understand input type {}".format(type(t)) raise ValueError(err_msg)
def instantiate_client(self, config): """ :param config: The config object. :type config: dict :return: The instantiated class. :rtype: :class:`revision.client.Client` """ modules = config.module.split('.') class_name = modules.pop() module_path = '.'.join(modules) client_instance = getattr( __import__(module_path, {}, {}, ['']), class_name )() client_instance.add_config(config) return client_instance
:param config: The config object. :type config: dict :return: The instantiated class. :rtype: :class:`revision.client.Client`
Below is the instruction that describes the task: ### Input: :param config: The config object. :type config: dict :return: The instantiated class. :rtype: :class:`revision.client.Client` ### Response: def instantiate_client(self, config): """ :param config: The config object. :type config: dict :return: The instantiated class. :rtype: :class:`revision.client.Client` """ modules = config.module.split('.') class_name = modules.pop() module_path = '.'.join(modules) client_instance = getattr( __import__(module_path, {}, {}, ['']), class_name )() client_instance.add_config(config) return client_instance
def _parse_input(incoming): """Performs the actual conversion of tags to ANSI escaped codes. Provides a version of the input without any colors for len() and other methods. Positional arguments: incoming -- the input unicode value. Returns: 2-item tuple. First item is the parsed output. Second item is a version of the input without any colors. """ codes = dict((k, v) for k, v in _AutoCodes().items() if '{%s}' % k in incoming) color_codes = dict((k, '' if _AutoCodes.DISABLE_COLORS else '\033[{0}m'.format(v)) for k, v in codes.items()) incoming_padded = _pad_input(incoming) output_colors = incoming_padded.format(**color_codes) # Simplify: '{b}{red}' -> '\033[1m\033[31m' -> '\033[1;31m' groups = sorted(set(_RE_GROUP_SEARCH.findall(output_colors)), key=len, reverse=True) # Get codes, grouped adjacent. groups_simplified = [[x for n in _RE_NUMBER_SEARCH.findall(i) for x in n.split(';')] for i in groups] groups_compiled = ['\033[{0}m'.format(';'.join(g)) for g in groups_simplified] # Final codes. assert len(groups_compiled) == len(groups) # For testing. output_colors_simplified = output_colors for i in range(len(groups)): output_colors_simplified = output_colors_simplified.replace(groups[i], groups_compiled[i]) output_no_colors = _RE_SPLIT.sub('', output_colors_simplified) # Strip any remaining color codes. if _AutoCodes.DISABLE_COLORS: output_colors_simplified = _RE_NUMBER_SEARCH.sub('', output_colors_simplified) return output_colors_simplified, output_no_colors
Performs the actual conversion of tags to ANSI escaped codes. Provides a version of the input without any colors for len() and other methods. Positional arguments: incoming -- the input unicode value. Returns: 2-item tuple. First item is the parsed output. Second item is a version of the input without any colors.
Below is the the instruction that describes the task: ### Input: Performs the actual conversion of tags to ANSI escaped codes. Provides a version of the input without any colors for len() and other methods. Positional arguments: incoming -- the input unicode value. Returns: 2-item tuple. First item is the parsed output. Second item is a version of the input without any colors. ### Response: def _parse_input(incoming): """Performs the actual conversion of tags to ANSI escaped codes. Provides a version of the input without any colors for len() and other methods. Positional arguments: incoming -- the input unicode value. Returns: 2-item tuple. First item is the parsed output. Second item is a version of the input without any colors. """ codes = dict((k, v) for k, v in _AutoCodes().items() if '{%s}' % k in incoming) color_codes = dict((k, '' if _AutoCodes.DISABLE_COLORS else '\033[{0}m'.format(v)) for k, v in codes.items()) incoming_padded = _pad_input(incoming) output_colors = incoming_padded.format(**color_codes) # Simplify: '{b}{red}' -> '\033[1m\033[31m' -> '\033[1;31m' groups = sorted(set(_RE_GROUP_SEARCH.findall(output_colors)), key=len, reverse=True) # Get codes, grouped adjacent. groups_simplified = [[x for n in _RE_NUMBER_SEARCH.findall(i) for x in n.split(';')] for i in groups] groups_compiled = ['\033[{0}m'.format(';'.join(g)) for g in groups_simplified] # Final codes. assert len(groups_compiled) == len(groups) # For testing. output_colors_simplified = output_colors for i in range(len(groups)): output_colors_simplified = output_colors_simplified.replace(groups[i], groups_compiled[i]) output_no_colors = _RE_SPLIT.sub('', output_colors_simplified) # Strip any remaining color codes. if _AutoCodes.DISABLE_COLORS: output_colors_simplified = _RE_NUMBER_SEARCH.sub('', output_colors_simplified) return output_colors_simplified, output_no_colors
async def set_avatar(self, url: str): """ Set the avatar of the user. See also: `API reference`_ Args: url: The new avatar URL for the user. Must be a MXC URI. .. _API reference: https://matrix.org/docs/spec/client_server/r0.3.0.html#put-matrix-client-r0-profile-userid-avatar-url """ await self.ensure_registered() content = {"avatar_url": url} await self.client.request("PUT", f"/profile/{self.mxid}/avatar_url", content)
Set the avatar of the user. See also: `API reference`_ Args: url: The new avatar URL for the user. Must be a MXC URI. .. _API reference: https://matrix.org/docs/spec/client_server/r0.3.0.html#put-matrix-client-r0-profile-userid-avatar-url
Below is the the instruction that describes the task: ### Input: Set the avatar of the user. See also: `API reference`_ Args: url: The new avatar URL for the user. Must be a MXC URI. .. _API reference: https://matrix.org/docs/spec/client_server/r0.3.0.html#put-matrix-client-r0-profile-userid-avatar-url ### Response: async def set_avatar(self, url: str): """ Set the avatar of the user. See also: `API reference`_ Args: url: The new avatar URL for the user. Must be a MXC URI. .. _API reference: https://matrix.org/docs/spec/client_server/r0.3.0.html#put-matrix-client-r0-profile-userid-avatar-url """ await self.ensure_registered() content = {"avatar_url": url} await self.client.request("PUT", f"/profile/{self.mxid}/avatar_url", content)
def get_rendered_fields(self, ctx=None): ''' :param ctx: rendering context in which the method was called :return: ordered list of the fields that will be rendered ''' if ctx is None: ctx = RenderContext() ctx.push(self) current = self._fields[self._field_idx] res = current.get_rendered_fields(ctx) ctx.pop() return res
:param ctx: rendering context in which the method was called :return: ordered list of the fields that will be rendered
Below is the the instruction that describes the task: ### Input: :param ctx: rendering context in which the method was called :return: ordered list of the fields that will be rendered ### Response: def get_rendered_fields(self, ctx=None): ''' :param ctx: rendering context in which the method was called :return: ordered list of the fields that will be rendered ''' if ctx is None: ctx = RenderContext() ctx.push(self) current = self._fields[self._field_idx] res = current.get_rendered_fields(ctx) ctx.pop() return res
def get_dynamical_matrix_at_q(self, q): """Calculate dynamical matrix at a given q-point Parameters ---------- q: array_like A q-vector. shape=(3,), dtype='double' Returns ------- dynamical_matrix: ndarray Dynamical matrix. shape=(bands, bands), dtype='complex' """ self._set_dynamical_matrix() if self._dynamical_matrix is None: msg = ("Dynamical matrix has not yet built.") raise RuntimeError(msg) self._dynamical_matrix.set_dynamical_matrix(q) return self._dynamical_matrix.get_dynamical_matrix()
Calculate dynamical matrix at a given q-point Parameters ---------- q: array_like A q-vector. shape=(3,), dtype='double' Returns ------- dynamical_matrix: ndarray Dynamical matrix. shape=(bands, bands), dtype='complex'
Below is the the instruction that describes the task: ### Input: Calculate dynamical matrix at a given q-point Parameters ---------- q: array_like A q-vector. shape=(3,), dtype='double' Returns ------- dynamical_matrix: ndarray Dynamical matrix. shape=(bands, bands), dtype='complex' ### Response: def get_dynamical_matrix_at_q(self, q): """Calculate dynamical matrix at a given q-point Parameters ---------- q: array_like A q-vector. shape=(3,), dtype='double' Returns ------- dynamical_matrix: ndarray Dynamical matrix. shape=(bands, bands), dtype='complex' """ self._set_dynamical_matrix() if self._dynamical_matrix is None: msg = ("Dynamical matrix has not yet built.") raise RuntimeError(msg) self._dynamical_matrix.set_dynamical_matrix(q) return self._dynamical_matrix.get_dynamical_matrix()
def _parse_date(date): """Parse from the user input `date`. e.g. current year 2016: input 6-26, 626, ... return 2016626 input 2016-6-26, 2016/6/26, ... retrun 2016626 This fn wouldn't check the date, it only gather the number as a string. """ result = ''.join(re.findall('\d', date)) l = len(result) # User only input month and day, eg 6-1, 6.26, 0626... if l in (2, 3, 4): year = str(datetime.today().year) return year + result # User input full format date, eg 201661, 2016-6-26, 20160626... if l in (6, 7, 8): return result return ''
Parse from the user input `date`. e.g. current year 2016: input 6-26, 626, ... return 2016626 input 2016-6-26, 2016/6/26, ... retrun 2016626 This fn wouldn't check the date, it only gather the number as a string.
Below is the the instruction that describes the task: ### Input: Parse from the user input `date`. e.g. current year 2016: input 6-26, 626, ... return 2016626 input 2016-6-26, 2016/6/26, ... retrun 2016626 This fn wouldn't check the date, it only gather the number as a string. ### Response: def _parse_date(date): """Parse from the user input `date`. e.g. current year 2016: input 6-26, 626, ... return 2016626 input 2016-6-26, 2016/6/26, ... retrun 2016626 This fn wouldn't check the date, it only gather the number as a string. """ result = ''.join(re.findall('\d', date)) l = len(result) # User only input month and day, eg 6-1, 6.26, 0626... if l in (2, 3, 4): year = str(datetime.today().year) return year + result # User input full format date, eg 201661, 2016-6-26, 20160626... if l in (6, 7, 8): return result return ''
def get_secret(secret_name, default=None): """ Gets contents of secret file :param secret_name: The name of the secret present in BANANAS_SECRETS_DIR :param default: Default value to return if no secret was found :return: The secret or default if not found """ secrets_dir = get_secrets_dir() secret_path = os.path.join(secrets_dir, secret_name) try: with open(secret_path, "r") as secret_file: return secret_file.read() except OSError: return default
Gets contents of secret file :param secret_name: The name of the secret present in BANANAS_SECRETS_DIR :param default: Default value to return if no secret was found :return: The secret or default if not found
Below is the the instruction that describes the task: ### Input: Gets contents of secret file :param secret_name: The name of the secret present in BANANAS_SECRETS_DIR :param default: Default value to return if no secret was found :return: The secret or default if not found ### Response: def get_secret(secret_name, default=None): """ Gets contents of secret file :param secret_name: The name of the secret present in BANANAS_SECRETS_DIR :param default: Default value to return if no secret was found :return: The secret or default if not found """ secrets_dir = get_secrets_dir() secret_path = os.path.join(secrets_dir, secret_name) try: with open(secret_path, "r") as secret_file: return secret_file.read() except OSError: return default
def terminate_all(self): """Terminate all worker processes.""" for worker in self._workers: worker.terminate() # for thread in self._threads: # try: # thread.terminate() # thread.wait() # except Exception: # pass self._queue_workers = deque()
Terminate all worker processes.
Below is the the instruction that describes the task: ### Input: Terminate all worker processes. ### Response: def terminate_all(self): """Terminate all worker processes.""" for worker in self._workers: worker.terminate() # for thread in self._threads: # try: # thread.terminate() # thread.wait() # except Exception: # pass self._queue_workers = deque()
def plot_coordinates(network, pores=None, fig=None, **kwargs): r""" Produces a 3D plot showing specified pore coordinates as markers Parameters ---------- network : OpenPNM Network Object The network whose topological connections to plot pores : array_like (optional) The list of pores to plot if only a sub-sample is desired. This is useful for inspecting a small region of the network. If no pores are specified then all are shown. fig : Matplotlib figure handle If a ``fig`` is supplied, then the coordinates will be overlaid. This enables the plotting of multiple different sets of pores as well as throat connections from ``plot_connections``. kwargs : dict By also in different marker properties such as size (``s``) and color (``c``). For information on available marker style options, visit the Matplotlib documentation on the `web <http://matplotlib.org/api/lines_api.html#matplotlib.lines.Line2D>`_ Notes ----- The figure handle returned by this method can be passed into ``plot_topology`` to create a plot that combines pore coordinates and throat connections, and vice versa. See Also -------- plot_connections Examples -------- >>> import openpnm as op >>> pn = op.network.Cubic(shape=[10, 10, 3]) >>> pn.add_boundary_pores() >>> Ps = pn.pores('internal') >>> # Create figure showing internal pores >>> fig = op.topotools.plot_coordinates(network=pn, pores=Ps, c='b') >>> Ps = pn.pores('*boundary') >>> # Pass existing fig back into function to plot boundary pores >>> fig = op.topotools.plot_coordinates(network=pn, pores=Ps, fig=fig, ... 
c='r') """ import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D if pores is None: Ps = network.Ps else: Ps = network._parse_indices(indices=pores) if len(sp.unique(network['pore.coords'][:, 2])) == 1: ThreeD = False else: ThreeD = True if fig is None: fig = plt.figure() if ThreeD: ax = fig.add_subplot(111, projection='3d') else: ax = fig.add_subplot(111) else: ax = fig.gca() # Collect specified coordinates X = network['pore.coords'][Ps, 0] Y = network['pore.coords'][Ps, 1] Z = network['pore.coords'][Ps, 2] if ThreeD: _scale_3d_axes(ax=ax, X=X, Y=Y, Z=Z) if ThreeD: ax.scatter(xs=X, ys=Y, zs=Z, **kwargs) else: ax.scatter(X, Y, **kwargs) return fig
r""" Produces a 3D plot showing specified pore coordinates as markers Parameters ---------- network : OpenPNM Network Object The network whose topological connections to plot pores : array_like (optional) The list of pores to plot if only a sub-sample is desired. This is useful for inspecting a small region of the network. If no pores are specified then all are shown. fig : Matplotlib figure handle If a ``fig`` is supplied, then the coordinates will be overlaid. This enables the plotting of multiple different sets of pores as well as throat connections from ``plot_connections``. kwargs : dict By also in different marker properties such as size (``s``) and color (``c``). For information on available marker style options, visit the Matplotlib documentation on the `web <http://matplotlib.org/api/lines_api.html#matplotlib.lines.Line2D>`_ Notes ----- The figure handle returned by this method can be passed into ``plot_topology`` to create a plot that combines pore coordinates and throat connections, and vice versa. See Also -------- plot_connections Examples -------- >>> import openpnm as op >>> pn = op.network.Cubic(shape=[10, 10, 3]) >>> pn.add_boundary_pores() >>> Ps = pn.pores('internal') >>> # Create figure showing internal pores >>> fig = op.topotools.plot_coordinates(network=pn, pores=Ps, c='b') >>> Ps = pn.pores('*boundary') >>> # Pass existing fig back into function to plot boundary pores >>> fig = op.topotools.plot_coordinates(network=pn, pores=Ps, fig=fig, ... c='r')
Below is the the instruction that describes the task: ### Input: r""" Produces a 3D plot showing specified pore coordinates as markers Parameters ---------- network : OpenPNM Network Object The network whose topological connections to plot pores : array_like (optional) The list of pores to plot if only a sub-sample is desired. This is useful for inspecting a small region of the network. If no pores are specified then all are shown. fig : Matplotlib figure handle If a ``fig`` is supplied, then the coordinates will be overlaid. This enables the plotting of multiple different sets of pores as well as throat connections from ``plot_connections``. kwargs : dict By also in different marker properties such as size (``s``) and color (``c``). For information on available marker style options, visit the Matplotlib documentation on the `web <http://matplotlib.org/api/lines_api.html#matplotlib.lines.Line2D>`_ Notes ----- The figure handle returned by this method can be passed into ``plot_topology`` to create a plot that combines pore coordinates and throat connections, and vice versa. See Also -------- plot_connections Examples -------- >>> import openpnm as op >>> pn = op.network.Cubic(shape=[10, 10, 3]) >>> pn.add_boundary_pores() >>> Ps = pn.pores('internal') >>> # Create figure showing internal pores >>> fig = op.topotools.plot_coordinates(network=pn, pores=Ps, c='b') >>> Ps = pn.pores('*boundary') >>> # Pass existing fig back into function to plot boundary pores >>> fig = op.topotools.plot_coordinates(network=pn, pores=Ps, fig=fig, ... c='r') ### Response: def plot_coordinates(network, pores=None, fig=None, **kwargs): r""" Produces a 3D plot showing specified pore coordinates as markers Parameters ---------- network : OpenPNM Network Object The network whose topological connections to plot pores : array_like (optional) The list of pores to plot if only a sub-sample is desired. This is useful for inspecting a small region of the network. 
If no pores are specified then all are shown. fig : Matplotlib figure handle If a ``fig`` is supplied, then the coordinates will be overlaid. This enables the plotting of multiple different sets of pores as well as throat connections from ``plot_connections``. kwargs : dict By also in different marker properties such as size (``s``) and color (``c``). For information on available marker style options, visit the Matplotlib documentation on the `web <http://matplotlib.org/api/lines_api.html#matplotlib.lines.Line2D>`_ Notes ----- The figure handle returned by this method can be passed into ``plot_topology`` to create a plot that combines pore coordinates and throat connections, and vice versa. See Also -------- plot_connections Examples -------- >>> import openpnm as op >>> pn = op.network.Cubic(shape=[10, 10, 3]) >>> pn.add_boundary_pores() >>> Ps = pn.pores('internal') >>> # Create figure showing internal pores >>> fig = op.topotools.plot_coordinates(network=pn, pores=Ps, c='b') >>> Ps = pn.pores('*boundary') >>> # Pass existing fig back into function to plot boundary pores >>> fig = op.topotools.plot_coordinates(network=pn, pores=Ps, fig=fig, ... c='r') """ import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D if pores is None: Ps = network.Ps else: Ps = network._parse_indices(indices=pores) if len(sp.unique(network['pore.coords'][:, 2])) == 1: ThreeD = False else: ThreeD = True if fig is None: fig = plt.figure() if ThreeD: ax = fig.add_subplot(111, projection='3d') else: ax = fig.add_subplot(111) else: ax = fig.gca() # Collect specified coordinates X = network['pore.coords'][Ps, 0] Y = network['pore.coords'][Ps, 1] Z = network['pore.coords'][Ps, 2] if ThreeD: _scale_3d_axes(ax=ax, X=X, Y=Y, Z=Z) if ThreeD: ax.scatter(xs=X, ys=Y, zs=Z, **kwargs) else: ax.scatter(X, Y, **kwargs) return fig
def corrections(self, word, prefix=1, distance=2): """Get corrections for word, if word is an invalid word. :prefix: is the number of characters the prefix of the word must have in common with the suggested corrections. :distance: is the character distance the corrections may have between the input word. This limits the number of available corrections but decreases the correction search space. The return value of this function is a Result tuple, with the :valid: member indicating whether the input word is a valid one and :suggestions: member containing a list of suggestions. """ if word not in self._words: return Dictionary.Result(False, self._corrector.suggest(word, prefix=prefix, maxdist=distance)) else: return Dictionary.Result(True, list())
Get corrections for word, if word is an invalid word. :prefix: is the number of characters the prefix of the word must have in common with the suggested corrections. :distance: is the character distance the corrections may have between the input word. This limits the number of available corrections but decreases the correction search space. The return value of this function is a Result tuple, with the :valid: member indicating whether the input word is a valid one and :suggestions: member containing a list of suggestions.
Below is the the instruction that describes the task: ### Input: Get corrections for word, if word is an invalid word. :prefix: is the number of characters the prefix of the word must have in common with the suggested corrections. :distance: is the character distance the corrections may have between the input word. This limits the number of available corrections but decreases the correction search space. The return value of this function is a Result tuple, with the :valid: member indicating whether the input word is a valid one and :suggestions: member containing a list of suggestions. ### Response: def corrections(self, word, prefix=1, distance=2): """Get corrections for word, if word is an invalid word. :prefix: is the number of characters the prefix of the word must have in common with the suggested corrections. :distance: is the character distance the corrections may have between the input word. This limits the number of available corrections but decreases the correction search space. The return value of this function is a Result tuple, with the :valid: member indicating whether the input word is a valid one and :suggestions: member containing a list of suggestions. """ if word not in self._words: return Dictionary.Result(False, self._corrector.suggest(word, prefix=prefix, maxdist=distance)) else: return Dictionary.Result(True, list())
def mgd(self, mgdid=None, hgnc_symbol=None, hgnc_identifier=None, limit=None, as_df=False): """Method to query :class:`.models.MGD` objects in database :param mgdid: Mouse genome informatics database ID(s) :type mgdid: str or tuple(str) or None :param hgnc_symbol: HGNC symbol(s) :type hgnc_symbol: str or tuple(str) or None :param hgnc_identifier: identifiers(s) in :class:`.models.HGNC` :type hgnc_identifier: int or tuple(int) or None :param limit: - if `isinstance(limit,int)==True` -> limit - if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page) - if limit == None -> all results :type limit: int or tuple(int) or None :param bool as_df: if `True` results are returned as :class:`pandas.DataFrame` :return: - if `as_df == False` -> list(:class:`.models.MGD`) - if `as_df == True` -> :class:`pandas.DataFrame` :rtype: list(:class:`.models.MGD`) or :class:`pandas.DataFrame` """ q = self.session.query(models.MGD) model_queries_config = ( (mgdid, models.MGD.mgdid), ) q = self.get_model_queries(q, model_queries_config) many_to_many_queries_config = ( (hgnc_symbol, models.MGD.hgncs, models.HGNC.symbol), (hgnc_identifier, models.MGD.hgncs, models.HGNC.identifier), ) q = self.get_many_to_many_queries(q, many_to_many_queries_config) return self._limit_and_df(q, limit, as_df)
Method to query :class:`.models.MGD` objects in database :param mgdid: Mouse genome informatics database ID(s) :type mgdid: str or tuple(str) or None :param hgnc_symbol: HGNC symbol(s) :type hgnc_symbol: str or tuple(str) or None :param hgnc_identifier: identifiers(s) in :class:`.models.HGNC` :type hgnc_identifier: int or tuple(int) or None :param limit: - if `isinstance(limit,int)==True` -> limit - if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page) - if limit == None -> all results :type limit: int or tuple(int) or None :param bool as_df: if `True` results are returned as :class:`pandas.DataFrame` :return: - if `as_df == False` -> list(:class:`.models.MGD`) - if `as_df == True` -> :class:`pandas.DataFrame` :rtype: list(:class:`.models.MGD`) or :class:`pandas.DataFrame`
Below is the the instruction that describes the task: ### Input: Method to query :class:`.models.MGD` objects in database :param mgdid: Mouse genome informatics database ID(s) :type mgdid: str or tuple(str) or None :param hgnc_symbol: HGNC symbol(s) :type hgnc_symbol: str or tuple(str) or None :param hgnc_identifier: identifiers(s) in :class:`.models.HGNC` :type hgnc_identifier: int or tuple(int) or None :param limit: - if `isinstance(limit,int)==True` -> limit - if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page) - if limit == None -> all results :type limit: int or tuple(int) or None :param bool as_df: if `True` results are returned as :class:`pandas.DataFrame` :return: - if `as_df == False` -> list(:class:`.models.MGD`) - if `as_df == True` -> :class:`pandas.DataFrame` :rtype: list(:class:`.models.MGD`) or :class:`pandas.DataFrame` ### Response: def mgd(self, mgdid=None, hgnc_symbol=None, hgnc_identifier=None, limit=None, as_df=False): """Method to query :class:`.models.MGD` objects in database :param mgdid: Mouse genome informatics database ID(s) :type mgdid: str or tuple(str) or None :param hgnc_symbol: HGNC symbol(s) :type hgnc_symbol: str or tuple(str) or None :param hgnc_identifier: identifiers(s) in :class:`.models.HGNC` :type hgnc_identifier: int or tuple(int) or None :param limit: - if `isinstance(limit,int)==True` -> limit - if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page) - if limit == None -> all results :type limit: int or tuple(int) or None :param bool as_df: if `True` results are returned as :class:`pandas.DataFrame` :return: - if `as_df == False` -> list(:class:`.models.MGD`) - if `as_df == True` -> :class:`pandas.DataFrame` :rtype: list(:class:`.models.MGD`) or :class:`pandas.DataFrame` """ q = self.session.query(models.MGD) model_queries_config = ( (mgdid, models.MGD.mgdid), ) q = self.get_model_queries(q, model_queries_config) many_to_many_queries_config = ( (hgnc_symbol, 
models.MGD.hgncs, models.HGNC.symbol), (hgnc_identifier, models.MGD.hgncs, models.HGNC.identifier), ) q = self.get_many_to_many_queries(q, many_to_many_queries_config) return self._limit_and_df(q, limit, as_df)
def GetPatternAs(self, patternId: int, riid): """ Call IUIAutomationElement::GetCurrentPatternAs. Get a new pattern by pattern id if it supports the pattern, todo. patternId: int, a value in class `PatternId`. riid: GUID. Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-getcurrentpatternas """ return self.Element.GetCurrentPatternAs(patternId, riid)
Call IUIAutomationElement::GetCurrentPatternAs. Get a new pattern by pattern id if it supports the pattern, todo. patternId: int, a value in class `PatternId`. riid: GUID. Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-getcurrentpatternas
Below is the the instruction that describes the task: ### Input: Call IUIAutomationElement::GetCurrentPatternAs. Get a new pattern by pattern id if it supports the pattern, todo. patternId: int, a value in class `PatternId`. riid: GUID. Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-getcurrentpatternas ### Response: def GetPatternAs(self, patternId: int, riid): """ Call IUIAutomationElement::GetCurrentPatternAs. Get a new pattern by pattern id if it supports the pattern, todo. patternId: int, a value in class `PatternId`. riid: GUID. Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-getcurrentpatternas """ return self.Element.GetCurrentPatternAs(patternId, riid)
def pmap(f, iterable, n=None, dummy=False, p=None): """ parallel map of a function to an iterable if each item in iterable is itself an iterable, then automatically call f(*item) instead of f(item) Arguments: f: function iterable: any iterable where each item is sent to f n: number of cpus (default is number on machine) dummy: use dummy pool. p: existing pool to re-use """ # make it easier to debug. if n == 1: for r in it.starmap(f, iterable): yield r raise StopIteration if p is None: po = pool(n, dummy) else: po = p assert hasattr(po, 'imap') f = _func_star(f) try: for r in po.imap(f, iterable): yield r # explicitly clean up created pool finally: if p is None: try: po.close() po.join() except: pass
parallel map of a function to an iterable if each item in iterable is itself an iterable, then automatically call f(*item) instead of f(item) Arguments: f: function iterable: any iterable where each item is sent to f n: number of cpus (default is number on machine) dummy: use dummy pool. p: existing pool to re-use
Below is the the instruction that describes the task: ### Input: parallel map of a function to an iterable if each item in iterable is itself an iterable, then automatically call f(*item) instead of f(item) Arguments: f: function iterable: any iterable where each item is sent to f n: number of cpus (default is number on machine) dummy: use dummy pool. p: existing pool to re-use ### Response: def pmap(f, iterable, n=None, dummy=False, p=None): """ parallel map of a function to an iterable if each item in iterable is itself an iterable, then automatically call f(*item) instead of f(item) Arguments: f: function iterable: any iterable where each item is sent to f n: number of cpus (default is number on machine) dummy: use dummy pool. p: existing pool to re-use """ # make it easier to debug. if n == 1: for r in it.starmap(f, iterable): yield r raise StopIteration if p is None: po = pool(n, dummy) else: po = p assert hasattr(po, 'imap') f = _func_star(f) try: for r in po.imap(f, iterable): yield r # explicitly clean up created pool finally: if p is None: try: po.close() po.join() except: pass
def write_graph(self, filename): """ Write raw graph data which can be post-processed using graphviz. """ f = open(filename, 'w') f.write(self._get_graphviz_data()) f.close()
Write raw graph data which can be post-processed using graphviz.
Below is the the instruction that describes the task: ### Input: Write raw graph data which can be post-processed using graphviz. ### Response: def write_graph(self, filename): """ Write raw graph data which can be post-processed using graphviz. """ f = open(filename, 'w') f.write(self._get_graphviz_data()) f.close()
def call_service(self, service_id, action): """Call a Vera service. This will call the Vera api to change device state. """ result = self.vera_request(id='action', serviceId=service_id, action=action) logger.debug("call_service: " "result of vera_request with id %s: %s", service_id, result.text) return result
Call a Vera service. This will call the Vera api to change device state.
Below is the the instruction that describes the task: ### Input: Call a Vera service. This will call the Vera api to change device state. ### Response: def call_service(self, service_id, action): """Call a Vera service. This will call the Vera api to change device state. """ result = self.vera_request(id='action', serviceId=service_id, action=action) logger.debug("call_service: " "result of vera_request with id %s: %s", service_id, result.text) return result
def textContent(self, text: str) -> None: # type: ignore """Set text content to inner node.""" if self._inner_element: self._inner_element.textContent = text else: # Need a trick to call property of super-class super().textContent = text
Set text content to inner node.
Below is the the instruction that describes the task: ### Input: Set text content to inner node. ### Response: def textContent(self, text: str) -> None: # type: ignore """Set text content to inner node.""" if self._inner_element: self._inner_element.textContent = text else: # Need a trick to call property of super-class super().textContent = text
def get_cache_key(page, language): """ Create the cache key for the current page and language """ from cms.cache import _get_cache_key try: site_id = page.node.site_id except AttributeError: # CMS_3_4 site_id = page.site_id return _get_cache_key('page_meta', page, language, site_id)
Create the cache key for the current page and language
Below is the the instruction that describes the task: ### Input: Create the cache key for the current page and language ### Response: def get_cache_key(page, language): """ Create the cache key for the current page and language """ from cms.cache import _get_cache_key try: site_id = page.node.site_id except AttributeError: # CMS_3_4 site_id = page.site_id return _get_cache_key('page_meta', page, language, site_id)
def geno(self, y, from_bounds=None, copy_if_changed=True, copy_always=False, repair=None, archive=None): """maps the phenotypic input argument into the genotypic space, that is, computes essentially the inverse of ``pheno``. By default a copy is made only to prevent to modify ``y``. The inverse of the user-defined transformation (if any) is only needed if external solutions are injected, it is not applied to the initial solution x0. Details ======= ``geno`` searches first in ``archive`` for the genotype of ``y`` and returns the found value, typically unrepaired. Otherwise, first ``from_bounds`` is applied, to revert a projection into the bound domain (if necessary) and ``pheno`` is reverted. ``repair`` is applied last, and is usually the method ``CMAEvolutionStrategy.repair_genotype`` that limits the Mahalanobis norm of ``geno(y) - mean``. """ if from_bounds is None: from_bounds = lambda x, copy=False: x # not change, no copy if archive is not None: try: x = archive[y]['geno'] except (KeyError, TypeError): x = None if x is not None: if archive[y]['iteration'] < archive.last_iteration \ and repair is not None: x = repair(x, copy_if_changed=copy_always) return x input_type = type(y) x = y if copy_always: x = array(y, copy=True) copy = False else: copy = copy_if_changed x = from_bounds(x, copy) if self.isidentity: if repair is not None: x = repair(x, copy) return x if copy: # could be improved? 
x = array(x, copy=True) copy = False # user-defined transformation if self.tf_geno is not None: x = array(self.tf_geno(x), copy=False) elif self.tf_pheno is not None: raise ValueError('t1 of options transformation was not defined but is needed as being the inverse of t0') # affine-linear transformation: shift and scaling if self.typical_x is not 0: x -= self.typical_x if self.scales is not 1: # just for efficiency x /= self.scales # kick out fixed_values if self.fixed_values is not None: # keeping the transformed values does not help much # therefore it is omitted if 1 < 3: keys = sorted(self.fixed_values.keys()) x = array([x[i] for i in xrange(len(x)) if i not in keys], copy=False) # repair injected solutions if repair is not None: x = repair(x, copy) if input_type is np.ndarray: x = array(x, copy=False) return x
maps the phenotypic input argument into the genotypic space, that is, computes essentially the inverse of ``pheno``. By default a copy is made only to prevent to modify ``y``. The inverse of the user-defined transformation (if any) is only needed if external solutions are injected, it is not applied to the initial solution x0. Details ======= ``geno`` searches first in ``archive`` for the genotype of ``y`` and returns the found value, typically unrepaired. Otherwise, first ``from_bounds`` is applied, to revert a projection into the bound domain (if necessary) and ``pheno`` is reverted. ``repair`` is applied last, and is usually the method ``CMAEvolutionStrategy.repair_genotype`` that limits the Mahalanobis norm of ``geno(y) - mean``.
Below is the the instruction that describes the task: ### Input: maps the phenotypic input argument into the genotypic space, that is, computes essentially the inverse of ``pheno``. By default a copy is made only to prevent to modify ``y``. The inverse of the user-defined transformation (if any) is only needed if external solutions are injected, it is not applied to the initial solution x0. Details ======= ``geno`` searches first in ``archive`` for the genotype of ``y`` and returns the found value, typically unrepaired. Otherwise, first ``from_bounds`` is applied, to revert a projection into the bound domain (if necessary) and ``pheno`` is reverted. ``repair`` is applied last, and is usually the method ``CMAEvolutionStrategy.repair_genotype`` that limits the Mahalanobis norm of ``geno(y) - mean``. ### Response: def geno(self, y, from_bounds=None, copy_if_changed=True, copy_always=False, repair=None, archive=None): """maps the phenotypic input argument into the genotypic space, that is, computes essentially the inverse of ``pheno``. By default a copy is made only to prevent to modify ``y``. The inverse of the user-defined transformation (if any) is only needed if external solutions are injected, it is not applied to the initial solution x0. Details ======= ``geno`` searches first in ``archive`` for the genotype of ``y`` and returns the found value, typically unrepaired. Otherwise, first ``from_bounds`` is applied, to revert a projection into the bound domain (if necessary) and ``pheno`` is reverted. ``repair`` is applied last, and is usually the method ``CMAEvolutionStrategy.repair_genotype`` that limits the Mahalanobis norm of ``geno(y) - mean``. 
""" if from_bounds is None: from_bounds = lambda x, copy=False: x # not change, no copy if archive is not None: try: x = archive[y]['geno'] except (KeyError, TypeError): x = None if x is not None: if archive[y]['iteration'] < archive.last_iteration \ and repair is not None: x = repair(x, copy_if_changed=copy_always) return x input_type = type(y) x = y if copy_always: x = array(y, copy=True) copy = False else: copy = copy_if_changed x = from_bounds(x, copy) if self.isidentity: if repair is not None: x = repair(x, copy) return x if copy: # could be improved? x = array(x, copy=True) copy = False # user-defined transformation if self.tf_geno is not None: x = array(self.tf_geno(x), copy=False) elif self.tf_pheno is not None: raise ValueError('t1 of options transformation was not defined but is needed as being the inverse of t0') # affine-linear transformation: shift and scaling if self.typical_x is not 0: x -= self.typical_x if self.scales is not 1: # just for efficiency x /= self.scales # kick out fixed_values if self.fixed_values is not None: # keeping the transformed values does not help much # therefore it is omitted if 1 < 3: keys = sorted(self.fixed_values.keys()) x = array([x[i] for i in xrange(len(x)) if i not in keys], copy=False) # repair injected solutions if repair is not None: x = repair(x, copy) if input_type is np.ndarray: x = array(x, copy=False) return x
def kernelparams(): ''' Return the kernel boot parameters ''' if salt.utils.platform.is_windows(): # TODO: add grains using `bcdedit /enum {current}` return {} else: try: with salt.utils.files.fopen('/proc/cmdline', 'r') as fhr: cmdline = fhr.read() grains = {'kernelparams': []} for data in [item.split('=') for item in salt.utils.args.shlex_split(cmdline)]: value = None if len(data) == 2: value = data[1].strip('"') grains['kernelparams'] += [(data[0], value)] except IOError as exc: grains = {} log.debug('Failed to read /proc/cmdline: %s', exc) return grains
Return the kernel boot parameters
Below is the the instruction that describes the task: ### Input: Return the kernel boot parameters ### Response: def kernelparams(): ''' Return the kernel boot parameters ''' if salt.utils.platform.is_windows(): # TODO: add grains using `bcdedit /enum {current}` return {} else: try: with salt.utils.files.fopen('/proc/cmdline', 'r') as fhr: cmdline = fhr.read() grains = {'kernelparams': []} for data in [item.split('=') for item in salt.utils.args.shlex_split(cmdline)]: value = None if len(data) == 2: value = data[1].strip('"') grains['kernelparams'] += [(data[0], value)] except IOError as exc: grains = {} log.debug('Failed to read /proc/cmdline: %s', exc) return grains
def setup_menu(self): """Setup context menu""" self.copy_action = create_action(self, _('Copy'), shortcut=keybinding('Copy'), icon=ima.icon('editcopy'), triggered=self.copy, context=Qt.WidgetShortcut) menu = QMenu(self) add_actions(menu, [self.copy_action, ]) return menu
Setup context menu
Below is the the instruction that describes the task: ### Input: Setup context menu ### Response: def setup_menu(self): """Setup context menu""" self.copy_action = create_action(self, _('Copy'), shortcut=keybinding('Copy'), icon=ima.icon('editcopy'), triggered=self.copy, context=Qt.WidgetShortcut) menu = QMenu(self) add_actions(menu, [self.copy_action, ]) return menu
def get_video_transcript_url(video_id, language_code): """ Returns course video transcript url or None if no transcript Arguments: video_id: it can be an edx_video_id or an external_id extracted from external sources in a video component. language_code: language code of a video transcript """ video_transcript = VideoTranscript.get_or_none(video_id, language_code) if video_transcript: return video_transcript.url()
Returns course video transcript url or None if no transcript Arguments: video_id: it can be an edx_video_id or an external_id extracted from external sources in a video component. language_code: language code of a video transcript
Below is the the instruction that describes the task: ### Input: Returns course video transcript url or None if no transcript Arguments: video_id: it can be an edx_video_id or an external_id extracted from external sources in a video component. language_code: language code of a video transcript ### Response: def get_video_transcript_url(video_id, language_code): """ Returns course video transcript url or None if no transcript Arguments: video_id: it can be an edx_video_id or an external_id extracted from external sources in a video component. language_code: language code of a video transcript """ video_transcript = VideoTranscript.get_or_none(video_id, language_code) if video_transcript: return video_transcript.url()
def _find(self, url): """Return properties document for path.""" # Query the permanent view to find a url vr = self.db.view("properties/by_url", key=url, include_docs=True) _logger.debug("find(%r) returned %s" % (url, len(vr))) assert len(vr) <= 1, "Found multiple matches for %r" % url for row in vr: assert row.doc return row.doc return None
Return properties document for path.
Below is the the instruction that describes the task: ### Input: Return properties document for path. ### Response: def _find(self, url): """Return properties document for path.""" # Query the permanent view to find a url vr = self.db.view("properties/by_url", key=url, include_docs=True) _logger.debug("find(%r) returned %s" % (url, len(vr))) assert len(vr) <= 1, "Found multiple matches for %r" % url for row in vr: assert row.doc return row.doc return None
def init(access_token, environment='production', scrub_fields=None, url_fields=None, **kw): """ Saves configuration variables in this module's SETTINGS. access_token: project access token. Get this from the Rollbar UI: - click "Settings" in the top nav - click "Projects" in the left nav - copy-paste the appropriate token. environment: environment name. Can be any string; suggestions: 'production', 'development', 'staging', 'yourname' **kw: provided keyword arguments will override keys in SETTINGS. """ global SETTINGS, agent_log, _initialized, _transforms, _serialize_transform, _threads if scrub_fields is not None: SETTINGS['scrub_fields'] = list(scrub_fields) if url_fields is not None: SETTINGS['url_fields'] = list(url_fields) # Merge the extra config settings into SETTINGS SETTINGS = dict_merge(SETTINGS, kw) if _initialized: # NOTE: Temp solution to not being able to re-init. # New versions of pyrollbar will support re-initialization # via the (not-yet-implemented) configure() method. if not SETTINGS.get('suppress_reinit_warning'): log.warning('Rollbar already initialized. Ignoring re-init.') return SETTINGS['access_token'] = access_token SETTINGS['environment'] = environment if SETTINGS.get('allow_logging_basic_config'): logging.basicConfig() if SETTINGS.get('handler') == 'agent': agent_log = _create_agent_log() # We will perform these transforms in order: # 1. Serialize the payload to be all python built-in objects # 2. Scrub the payloads based on the key suffixes in SETTINGS['scrub_fields'] # 3. Scrub URLs in the payload for keys that end with 'url' # 4. Optional - If local variable gathering is enabled, transform the # trace frame values using the ShortReprTransform. 
_serialize_transform = SerializableTransform(safe_repr=SETTINGS['locals']['safe_repr'], whitelist_types=SETTINGS['locals']['whitelisted_types']) _transforms = [ ScrubRedactTransform(), _serialize_transform, ScrubTransform(suffixes=[(field,) for field in SETTINGS['scrub_fields']], redact_char='*'), ScrubUrlTransform(suffixes=[(field,) for field in SETTINGS['url_fields']], params_to_scrub=SETTINGS['scrub_fields']) ] # A list of key prefixes to apply our shortener transform to. The request # being included in the body key is old behavior and is being retained for # backwards compatibility. shortener_keys = [ ('request', 'POST'), ('request', 'json'), ('body', 'request', 'POST'), ('body', 'request', 'json'), ] if SETTINGS['locals']['enabled']: shortener_keys.append(('body', 'trace', 'frames', '*', 'code')) shortener_keys.append(('body', 'trace', 'frames', '*', 'args', '*')) shortener_keys.append(('body', 'trace', 'frames', '*', 'kwargs', '*')) shortener_keys.append(('body', 'trace', 'frames', '*', 'locals', '*')) shortener_keys.extend(SETTINGS['shortener_keys']) shortener = ShortenerTransform(safe_repr=SETTINGS['locals']['safe_repr'], keys=shortener_keys, **SETTINGS['locals']['sizes']) _transforms.append(shortener) _threads = queue.Queue() events.reset() filters.add_builtin_filters(SETTINGS) _initialized = True
Saves configuration variables in this module's SETTINGS. access_token: project access token. Get this from the Rollbar UI: - click "Settings" in the top nav - click "Projects" in the left nav - copy-paste the appropriate token. environment: environment name. Can be any string; suggestions: 'production', 'development', 'staging', 'yourname' **kw: provided keyword arguments will override keys in SETTINGS.
Below is the the instruction that describes the task: ### Input: Saves configuration variables in this module's SETTINGS. access_token: project access token. Get this from the Rollbar UI: - click "Settings" in the top nav - click "Projects" in the left nav - copy-paste the appropriate token. environment: environment name. Can be any string; suggestions: 'production', 'development', 'staging', 'yourname' **kw: provided keyword arguments will override keys in SETTINGS. ### Response: def init(access_token, environment='production', scrub_fields=None, url_fields=None, **kw): """ Saves configuration variables in this module's SETTINGS. access_token: project access token. Get this from the Rollbar UI: - click "Settings" in the top nav - click "Projects" in the left nav - copy-paste the appropriate token. environment: environment name. Can be any string; suggestions: 'production', 'development', 'staging', 'yourname' **kw: provided keyword arguments will override keys in SETTINGS. """ global SETTINGS, agent_log, _initialized, _transforms, _serialize_transform, _threads if scrub_fields is not None: SETTINGS['scrub_fields'] = list(scrub_fields) if url_fields is not None: SETTINGS['url_fields'] = list(url_fields) # Merge the extra config settings into SETTINGS SETTINGS = dict_merge(SETTINGS, kw) if _initialized: # NOTE: Temp solution to not being able to re-init. # New versions of pyrollbar will support re-initialization # via the (not-yet-implemented) configure() method. if not SETTINGS.get('suppress_reinit_warning'): log.warning('Rollbar already initialized. Ignoring re-init.') return SETTINGS['access_token'] = access_token SETTINGS['environment'] = environment if SETTINGS.get('allow_logging_basic_config'): logging.basicConfig() if SETTINGS.get('handler') == 'agent': agent_log = _create_agent_log() # We will perform these transforms in order: # 1. Serialize the payload to be all python built-in objects # 2. 
Scrub the payloads based on the key suffixes in SETTINGS['scrub_fields'] # 3. Scrub URLs in the payload for keys that end with 'url' # 4. Optional - If local variable gathering is enabled, transform the # trace frame values using the ShortReprTransform. _serialize_transform = SerializableTransform(safe_repr=SETTINGS['locals']['safe_repr'], whitelist_types=SETTINGS['locals']['whitelisted_types']) _transforms = [ ScrubRedactTransform(), _serialize_transform, ScrubTransform(suffixes=[(field,) for field in SETTINGS['scrub_fields']], redact_char='*'), ScrubUrlTransform(suffixes=[(field,) for field in SETTINGS['url_fields']], params_to_scrub=SETTINGS['scrub_fields']) ] # A list of key prefixes to apply our shortener transform to. The request # being included in the body key is old behavior and is being retained for # backwards compatibility. shortener_keys = [ ('request', 'POST'), ('request', 'json'), ('body', 'request', 'POST'), ('body', 'request', 'json'), ] if SETTINGS['locals']['enabled']: shortener_keys.append(('body', 'trace', 'frames', '*', 'code')) shortener_keys.append(('body', 'trace', 'frames', '*', 'args', '*')) shortener_keys.append(('body', 'trace', 'frames', '*', 'kwargs', '*')) shortener_keys.append(('body', 'trace', 'frames', '*', 'locals', '*')) shortener_keys.extend(SETTINGS['shortener_keys']) shortener = ShortenerTransform(safe_repr=SETTINGS['locals']['safe_repr'], keys=shortener_keys, **SETTINGS['locals']['sizes']) _transforms.append(shortener) _threads = queue.Queue() events.reset() filters.add_builtin_filters(SETTINGS) _initialized = True
def set_wd_mult(self, args_wd_mult): """Sets an individual weight decay multiplier for each parameter. By default, if `param_idx2name` was provided in the constructor, the weight decay multipler is set as 0 for all parameters whose name don't end with ``_weight`` or ``_gamma``. .. note:: The default weight decay multiplier for a `Variable` can be set with its `wd_mult` argument in the constructor. Parameters ---------- args_wd_mult : dict of string/int to float For each of its key-value entries, the weight decay multipler for the parameter specified in the key will be set as the given value. You can specify the parameter with either its name or its index. If you use the name, you should pass `sym` in the constructor, and the name you specified in the key of `args_lr_mult` should match the name of the parameter in `sym`. If you use the index, it should correspond to the index of the parameter used in the `update` method. Specifying a parameter by its index is only supported for backward compatibility, and we recommend to use the name instead. """ self.wd_mult = {} for n in self.idx2name.values(): if not (n.endswith('_weight') or n.endswith('_gamma')): self.wd_mult[n] = 0.0 if self.sym_info: attr, arg_names = self.sym_info for name in arg_names: if name in attr and '__wd_mult__' in attr[name]: self.wd_mult[name] = float(attr[name]['__wd_mult__']) self.wd_mult.update(args_wd_mult)
Sets an individual weight decay multiplier for each parameter. By default, if `param_idx2name` was provided in the constructor, the weight decay multipler is set as 0 for all parameters whose name don't end with ``_weight`` or ``_gamma``. .. note:: The default weight decay multiplier for a `Variable` can be set with its `wd_mult` argument in the constructor. Parameters ---------- args_wd_mult : dict of string/int to float For each of its key-value entries, the weight decay multipler for the parameter specified in the key will be set as the given value. You can specify the parameter with either its name or its index. If you use the name, you should pass `sym` in the constructor, and the name you specified in the key of `args_lr_mult` should match the name of the parameter in `sym`. If you use the index, it should correspond to the index of the parameter used in the `update` method. Specifying a parameter by its index is only supported for backward compatibility, and we recommend to use the name instead.
Below is the the instruction that describes the task: ### Input: Sets an individual weight decay multiplier for each parameter. By default, if `param_idx2name` was provided in the constructor, the weight decay multipler is set as 0 for all parameters whose name don't end with ``_weight`` or ``_gamma``. .. note:: The default weight decay multiplier for a `Variable` can be set with its `wd_mult` argument in the constructor. Parameters ---------- args_wd_mult : dict of string/int to float For each of its key-value entries, the weight decay multipler for the parameter specified in the key will be set as the given value. You can specify the parameter with either its name or its index. If you use the name, you should pass `sym` in the constructor, and the name you specified in the key of `args_lr_mult` should match the name of the parameter in `sym`. If you use the index, it should correspond to the index of the parameter used in the `update` method. Specifying a parameter by its index is only supported for backward compatibility, and we recommend to use the name instead. ### Response: def set_wd_mult(self, args_wd_mult): """Sets an individual weight decay multiplier for each parameter. By default, if `param_idx2name` was provided in the constructor, the weight decay multipler is set as 0 for all parameters whose name don't end with ``_weight`` or ``_gamma``. .. note:: The default weight decay multiplier for a `Variable` can be set with its `wd_mult` argument in the constructor. Parameters ---------- args_wd_mult : dict of string/int to float For each of its key-value entries, the weight decay multipler for the parameter specified in the key will be set as the given value. You can specify the parameter with either its name or its index. If you use the name, you should pass `sym` in the constructor, and the name you specified in the key of `args_lr_mult` should match the name of the parameter in `sym`. 
If you use the index, it should correspond to the index of the parameter used in the `update` method. Specifying a parameter by its index is only supported for backward compatibility, and we recommend to use the name instead. """ self.wd_mult = {} for n in self.idx2name.values(): if not (n.endswith('_weight') or n.endswith('_gamma')): self.wd_mult[n] = 0.0 if self.sym_info: attr, arg_names = self.sym_info for name in arg_names: if name in attr and '__wd_mult__' in attr[name]: self.wd_mult[name] = float(attr[name]['__wd_mult__']) self.wd_mult.update(args_wd_mult)
def _init_metadata(self): """stub""" super(CalculationInteractionQuestionFormRecord, self)._init_metadata() QuestionTextFormRecord._init_metadata(self) QuestionFilesFormRecord._init_metadata(self) self._variables_metadata = { 'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'variables'), 'element_label': 'variables', 'instructions': 'Enter the variables', 'required': True, 'read_only': False, 'linked': False, 'array': True, 'default_object_values': [{}], 'syntax': 'OBJECT', 'object_set': [] } self._expression_metadata = { 'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'expression'), 'element_label': 'expression', 'instructions': 'enter the expression', 'required': True, 'read_only': False, 'linked': False, 'array': False, 'default_string_values': [{ 'text': '', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }], 'syntax': 'STRING', 'minimum_string_length': 0, 'maximum_string_length': 1024, 'string_set': [] }
stub
Below is the the instruction that describes the task: ### Input: stub ### Response: def _init_metadata(self): """stub""" super(CalculationInteractionQuestionFormRecord, self)._init_metadata() QuestionTextFormRecord._init_metadata(self) QuestionFilesFormRecord._init_metadata(self) self._variables_metadata = { 'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'variables'), 'element_label': 'variables', 'instructions': 'Enter the variables', 'required': True, 'read_only': False, 'linked': False, 'array': True, 'default_object_values': [{}], 'syntax': 'OBJECT', 'object_set': [] } self._expression_metadata = { 'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'expression'), 'element_label': 'expression', 'instructions': 'enter the expression', 'required': True, 'read_only': False, 'linked': False, 'array': False, 'default_string_values': [{ 'text': '', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }], 'syntax': 'STRING', 'minimum_string_length': 0, 'maximum_string_length': 1024, 'string_set': [] }
def list_lbaas_loadbalancers(self, retrieve_all=True, **_params): """Fetches a list of all lbaas_loadbalancers for a project.""" return self.list('loadbalancers', self.lbaas_loadbalancers_path, retrieve_all, **_params)
Fetches a list of all lbaas_loadbalancers for a project.
Below is the the instruction that describes the task: ### Input: Fetches a list of all lbaas_loadbalancers for a project. ### Response: def list_lbaas_loadbalancers(self, retrieve_all=True, **_params): """Fetches a list of all lbaas_loadbalancers for a project.""" return self.list('loadbalancers', self.lbaas_loadbalancers_path, retrieve_all, **_params)
def addonModules(cls, recurse=True): """ Returns all the modules that this addon class uses to load plugins from. :param recurse | <bool> :return [<str> || <module>, ..] """ prop = '_{0}__addon_modules'.format(cls.__name__) out = set() # lookup base classes if recurse: for base in cls.__bases__: if issubclass(base, AddonManager): out.update(base.addonModules(recurse)) # always use the highest level for any given key out.update(getattr(cls, prop, set())) return out
Returns all the modules that this addon class uses to load plugins from. :param recurse | <bool> :return [<str> || <module>, ..]
Below is the the instruction that describes the task: ### Input: Returns all the modules that this addon class uses to load plugins from. :param recurse | <bool> :return [<str> || <module>, ..] ### Response: def addonModules(cls, recurse=True): """ Returns all the modules that this addon class uses to load plugins from. :param recurse | <bool> :return [<str> || <module>, ..] """ prop = '_{0}__addon_modules'.format(cls.__name__) out = set() # lookup base classes if recurse: for base in cls.__bases__: if issubclass(base, AddonManager): out.update(base.addonModules(recurse)) # always use the highest level for any given key out.update(getattr(cls, prop, set())) return out
def annual_heating_design_day_996(self): """A design day object representing the annual 99.6% heating design day.""" self._load_header_check() if bool(self._heating_dict) is True: avg_press = self.atmospheric_station_pressure.average avg_press = None if avg_press == 999999 else avg_press return DesignDay.from_ashrae_dict_heating( self._heating_dict, self.location, False, avg_press) else: return None
A design day object representing the annual 99.6% heating design day.
Below is the the instruction that describes the task: ### Input: A design day object representing the annual 99.6% heating design day. ### Response: def annual_heating_design_day_996(self): """A design day object representing the annual 99.6% heating design day.""" self._load_header_check() if bool(self._heating_dict) is True: avg_press = self.atmospheric_station_pressure.average avg_press = None if avg_press == 999999 else avg_press return DesignDay.from_ashrae_dict_heating( self._heating_dict, self.location, False, avg_press) else: return None
def include_sqlalchemy_models(nc, Base): """Include all SQLAlchemy models in the script context. :param nc: notebook_context dictionary :param Base: SQLAlchemy model Base class from where the all models inherit. """ from sqlalchemy.ext.declarative.clsregistry import _ModuleMarker # Include all SQLAlchemy models in the local namespace for name, klass in Base._decl_class_registry.items(): print(name, klass) if isinstance(klass, _ModuleMarker): continue add_script(nc, get_import_statement(klass)) add_greeting(nc, "* **{}** - {}".format(klass.__name__, get_dotted_path(klass)))
Include all SQLAlchemy models in the script context. :param nc: notebook_context dictionary :param Base: SQLAlchemy model Base class from where the all models inherit.
Below is the the instruction that describes the task: ### Input: Include all SQLAlchemy models in the script context. :param nc: notebook_context dictionary :param Base: SQLAlchemy model Base class from where the all models inherit. ### Response: def include_sqlalchemy_models(nc, Base): """Include all SQLAlchemy models in the script context. :param nc: notebook_context dictionary :param Base: SQLAlchemy model Base class from where the all models inherit. """ from sqlalchemy.ext.declarative.clsregistry import _ModuleMarker # Include all SQLAlchemy models in the local namespace for name, klass in Base._decl_class_registry.items(): print(name, klass) if isinstance(klass, _ModuleMarker): continue add_script(nc, get_import_statement(klass)) add_greeting(nc, "* **{}** - {}".format(klass.__name__, get_dotted_path(klass)))
def apply_transformation(self, structure, return_ranked_list=False): """ Apply the transformation. Args: structure: input structure return_ranked_list (bool): Whether or not multiple structures are returned. If return_ranked_list is a number, that number of structures is returned. Returns: Depending on returned_ranked list, either a transformed structure or a list of dictionaries, where each dictionary is of the form {"structure" = .... , "other_arguments"} the key "transformation" is reserved for the transformation that was actually applied to the structure. This transformation is parsed by the alchemy classes for generating a more specific transformation history. Any other information will be stored in the transformation_parameters dictionary in the transmuted structure class. """ num_remove_dict = {} total_combis = 0 for indices, frac in zip(self.indices, self.fractions): num_to_remove = len(indices) * frac if abs(num_to_remove - int(round(num_to_remove))) > 1e-3: raise ValueError("Fraction to remove must be consistent with " "integer amounts in structure.") else: num_to_remove = int(round(num_to_remove)) num_remove_dict[tuple(indices)] = num_to_remove n = len(indices) total_combis += int(round(math.factorial(n) / math.factorial(num_to_remove) / math.factorial(n - num_to_remove))) self.logger.debug("Total combinations = {}".format(total_combis)) try: num_to_return = int(return_ranked_list) except ValueError: num_to_return = 1 num_to_return = max(1, num_to_return) self.logger.debug("Will return {} best structures." 
.format(num_to_return)) if self.algo == PartialRemoveSitesTransformation.ALGO_FAST: all_structures = self.fast_ordering(structure, num_remove_dict, num_to_return) elif self.algo == PartialRemoveSitesTransformation.ALGO_COMPLETE: all_structures = self.complete_ordering(structure, num_remove_dict) elif self.algo == PartialRemoveSitesTransformation.ALGO_BEST_FIRST: all_structures = self.best_first_ordering(structure, num_remove_dict) elif self.algo == PartialRemoveSitesTransformation.ALGO_ENUMERATE: all_structures = self.enumerate_ordering(structure) else: raise ValueError("Invalid algo.") opt_s = all_structures[0]["structure"] return opt_s if not return_ranked_list \ else all_structures[0:num_to_return]
Apply the transformation. Args: structure: input structure return_ranked_list (bool): Whether or not multiple structures are returned. If return_ranked_list is a number, that number of structures is returned. Returns: Depending on returned_ranked list, either a transformed structure or a list of dictionaries, where each dictionary is of the form {"structure" = .... , "other_arguments"} the key "transformation" is reserved for the transformation that was actually applied to the structure. This transformation is parsed by the alchemy classes for generating a more specific transformation history. Any other information will be stored in the transformation_parameters dictionary in the transmuted structure class.
Below is the the instruction that describes the task: ### Input: Apply the transformation. Args: structure: input structure return_ranked_list (bool): Whether or not multiple structures are returned. If return_ranked_list is a number, that number of structures is returned. Returns: Depending on returned_ranked list, either a transformed structure or a list of dictionaries, where each dictionary is of the form {"structure" = .... , "other_arguments"} the key "transformation" is reserved for the transformation that was actually applied to the structure. This transformation is parsed by the alchemy classes for generating a more specific transformation history. Any other information will be stored in the transformation_parameters dictionary in the transmuted structure class. ### Response: def apply_transformation(self, structure, return_ranked_list=False): """ Apply the transformation. Args: structure: input structure return_ranked_list (bool): Whether or not multiple structures are returned. If return_ranked_list is a number, that number of structures is returned. Returns: Depending on returned_ranked list, either a transformed structure or a list of dictionaries, where each dictionary is of the form {"structure" = .... , "other_arguments"} the key "transformation" is reserved for the transformation that was actually applied to the structure. This transformation is parsed by the alchemy classes for generating a more specific transformation history. Any other information will be stored in the transformation_parameters dictionary in the transmuted structure class. 
""" num_remove_dict = {} total_combis = 0 for indices, frac in zip(self.indices, self.fractions): num_to_remove = len(indices) * frac if abs(num_to_remove - int(round(num_to_remove))) > 1e-3: raise ValueError("Fraction to remove must be consistent with " "integer amounts in structure.") else: num_to_remove = int(round(num_to_remove)) num_remove_dict[tuple(indices)] = num_to_remove n = len(indices) total_combis += int(round(math.factorial(n) / math.factorial(num_to_remove) / math.factorial(n - num_to_remove))) self.logger.debug("Total combinations = {}".format(total_combis)) try: num_to_return = int(return_ranked_list) except ValueError: num_to_return = 1 num_to_return = max(1, num_to_return) self.logger.debug("Will return {} best structures." .format(num_to_return)) if self.algo == PartialRemoveSitesTransformation.ALGO_FAST: all_structures = self.fast_ordering(structure, num_remove_dict, num_to_return) elif self.algo == PartialRemoveSitesTransformation.ALGO_COMPLETE: all_structures = self.complete_ordering(structure, num_remove_dict) elif self.algo == PartialRemoveSitesTransformation.ALGO_BEST_FIRST: all_structures = self.best_first_ordering(structure, num_remove_dict) elif self.algo == PartialRemoveSitesTransformation.ALGO_ENUMERATE: all_structures = self.enumerate_ordering(structure) else: raise ValueError("Invalid algo.") opt_s = all_structures[0]["structure"] return opt_s if not return_ranked_list \ else all_structures[0:num_to_return]
def get_config_input_source_config_source_running_running(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_config = ET.Element("get_config") config = get_config input = ET.SubElement(get_config, "input") source = ET.SubElement(input, "source") config_source = ET.SubElement(source, "config-source") running = ET.SubElement(config_source, "running") running = ET.SubElement(running, "running") callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
Below is the the instruction that describes the task: ### Input: Auto Generated Code ### Response: def get_config_input_source_config_source_running_running(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_config = ET.Element("get_config") config = get_config input = ET.SubElement(get_config, "input") source = ET.SubElement(input, "source") config_source = ET.SubElement(source, "config-source") running = ET.SubElement(config_source, "running") running = ET.SubElement(running, "running") callback = kwargs.pop('callback', self._callback) return callback(config)
def _checksum(self): """Performs the checksum for the current TLE.""" for line in [self._line1, self._line2]: check = 0 for char in line[:-1]: if char.isdigit(): check += int(char) if char == "-": check += 1 if (check % 10) != int(line[-1]): raise ChecksumError(self._platform + " " + line)
Performs the checksum for the current TLE.
Below is the the instruction that describes the task: ### Input: Performs the checksum for the current TLE. ### Response: def _checksum(self): """Performs the checksum for the current TLE.""" for line in [self._line1, self._line2]: check = 0 for char in line[:-1]: if char.isdigit(): check += int(char) if char == "-": check += 1 if (check % 10) != int(line[-1]): raise ChecksumError(self._platform + " " + line)
def publish(cls, message, client_filter=None): """Publish messages to subscribers. Args: message: The message to publish. client_filter: A filter function to call passing in each client. Only clients for whom the function returns True will have the message sent to them. """ with cls._lock: for client in cls.subscribers: if (not client_filter) or client_filter(client): client.send(message)
Publish messages to subscribers. Args: message: The message to publish. client_filter: A filter function to call passing in each client. Only clients for whom the function returns True will have the message sent to them.
Below is the the instruction that describes the task: ### Input: Publish messages to subscribers. Args: message: The message to publish. client_filter: A filter function to call passing in each client. Only clients for whom the function returns True will have the message sent to them. ### Response: def publish(cls, message, client_filter=None): """Publish messages to subscribers. Args: message: The message to publish. client_filter: A filter function to call passing in each client. Only clients for whom the function returns True will have the message sent to them. """ with cls._lock: for client in cls.subscribers: if (not client_filter) or client_filter(client): client.send(message)
def make_stream( name, # type: Text bin_file, # type: RawIOBase mode="r", # type: Text buffering=-1, # type: int encoding=None, # type: Optional[Text] errors=None, # type: Optional[Text] newline="", # type: Optional[Text] line_buffering=False, # type: bool **kwargs # type: Any ): # type: (...) -> IO """Take a Python 2.x binary file and return an IO Stream. """ reading = "r" in mode writing = "w" in mode appending = "a" in mode binary = "b" in mode if "+" in mode: reading = True writing = True encoding = None if binary else (encoding or "utf-8") io_object = RawWrapper(bin_file, mode=mode, name=name) # type: io.IOBase if buffering >= 0: if reading and writing: io_object = io.BufferedRandom( typing.cast(io.RawIOBase, io_object), buffering or io.DEFAULT_BUFFER_SIZE, ) elif reading: io_object = io.BufferedReader( typing.cast(io.RawIOBase, io_object), buffering or io.DEFAULT_BUFFER_SIZE, ) elif writing or appending: io_object = io.BufferedWriter( typing.cast(io.RawIOBase, io_object), buffering or io.DEFAULT_BUFFER_SIZE, ) if not binary: io_object = io.TextIOWrapper( io_object, encoding=encoding, errors=errors, newline=newline, line_buffering=line_buffering, ) return io_object
Take a Python 2.x binary file and return an IO Stream.
Below is the the instruction that describes the task: ### Input: Take a Python 2.x binary file and return an IO Stream. ### Response: def make_stream( name, # type: Text bin_file, # type: RawIOBase mode="r", # type: Text buffering=-1, # type: int encoding=None, # type: Optional[Text] errors=None, # type: Optional[Text] newline="", # type: Optional[Text] line_buffering=False, # type: bool **kwargs # type: Any ): # type: (...) -> IO """Take a Python 2.x binary file and return an IO Stream. """ reading = "r" in mode writing = "w" in mode appending = "a" in mode binary = "b" in mode if "+" in mode: reading = True writing = True encoding = None if binary else (encoding or "utf-8") io_object = RawWrapper(bin_file, mode=mode, name=name) # type: io.IOBase if buffering >= 0: if reading and writing: io_object = io.BufferedRandom( typing.cast(io.RawIOBase, io_object), buffering or io.DEFAULT_BUFFER_SIZE, ) elif reading: io_object = io.BufferedReader( typing.cast(io.RawIOBase, io_object), buffering or io.DEFAULT_BUFFER_SIZE, ) elif writing or appending: io_object = io.BufferedWriter( typing.cast(io.RawIOBase, io_object), buffering or io.DEFAULT_BUFFER_SIZE, ) if not binary: io_object = io.TextIOWrapper( io_object, encoding=encoding, errors=errors, newline=newline, line_buffering=line_buffering, ) return io_object
def update_reminder_item(self, reminder_item_id, reminder_item_dict): """ Updates a reminder item :param reminder_item_id: the reminder item id :param reminder_item_dict: dict :return: dict """ return self._create_put_request( resource=REMINDER_ITEMS, billomat_id=reminder_item_id, send_data=reminder_item_dict )
Updates a reminder item :param reminder_item_id: the reminder item id :param reminder_item_dict: dict :return: dict
Below is the the instruction that describes the task: ### Input: Updates a reminder item :param reminder_item_id: the reminder item id :param reminder_item_dict: dict :return: dict ### Response: def update_reminder_item(self, reminder_item_id, reminder_item_dict): """ Updates a reminder item :param reminder_item_id: the reminder item id :param reminder_item_dict: dict :return: dict """ return self._create_put_request( resource=REMINDER_ITEMS, billomat_id=reminder_item_id, send_data=reminder_item_dict )
def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): """Create the specified flavor.""" try: nova.flavors.find(name=name) except (exceptions.NotFound, exceptions.NoUniqueMatch): self.log.debug('Creating flavor ({})'.format(name)) nova.flavors.create(name, ram, vcpus, disk, flavorid, ephemeral, swap, rxtx_factor, is_public)
Create the specified flavor.
Below is the the instruction that describes the task: ### Input: Create the specified flavor. ### Response: def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): """Create the specified flavor.""" try: nova.flavors.find(name=name) except (exceptions.NotFound, exceptions.NoUniqueMatch): self.log.debug('Creating flavor ({})'.format(name)) nova.flavors.create(name, ram, vcpus, disk, flavorid, ephemeral, swap, rxtx_factor, is_public)
def list_bundled_profiles(): """list profiles that are bundled with IPython.""" path = os.path.join(get_ipython_package_dir(), u'config', u'profile') files = os.listdir(path) profiles = [] for profile in files: full_path = os.path.join(path, profile) if os.path.isdir(full_path) and profile != "__pycache__": profiles.append(profile) return profiles
list profiles that are bundled with IPython.
Below is the the instruction that describes the task: ### Input: list profiles that are bundled with IPython. ### Response: def list_bundled_profiles(): """list profiles that are bundled with IPython.""" path = os.path.join(get_ipython_package_dir(), u'config', u'profile') files = os.listdir(path) profiles = [] for profile in files: full_path = os.path.join(path, profile) if os.path.isdir(full_path) and profile != "__pycache__": profiles.append(profile) return profiles
def get_normed_points(point_array, norm): # good to go """ input: point_array, norm output: normed array """ norm = float(norm) #floated_array = [] #for p in point_array: # need to make sure each point is a float #floated_array.append(float(p)) points = old_div(numpy.array(point_array), norm) return points
input: point_array, norm output: normed array
Below is the the instruction that describes the task: ### Input: input: point_array, norm output: normed array ### Response: def get_normed_points(point_array, norm): # good to go """ input: point_array, norm output: normed array """ norm = float(norm) #floated_array = [] #for p in point_array: # need to make sure each point is a float #floated_array.append(float(p)) points = old_div(numpy.array(point_array), norm) return points
def get_single_color_func(color): """Create a color function which returns a single hue and saturation with. different values (HSV). Accepted values are color strings as usable by PIL/Pillow. >>> color_func1 = get_single_color_func('deepskyblue') >>> color_func2 = get_single_color_func('#00b4d2') """ old_r, old_g, old_b = ImageColor.getrgb(color) rgb_max = 255. h, s, v = colorsys.rgb_to_hsv(old_r / rgb_max, old_g / rgb_max, old_b / rgb_max) def single_color_func(word=None, font_size=None, position=None, orientation=None, font_path=None, random_state=None): """Random color generation. Additional coloring method. It picks a random value with hue and saturation based on the color given to the generating function. Parameters ---------- word, font_size, position, orientation : ignored. random_state : random.Random object or None, (default=None) If a random object is given, this is used for generating random numbers. """ if random_state is None: random_state = Random() r, g, b = colorsys.hsv_to_rgb(h, s, random_state.uniform(0.2, 1)) return 'rgb({:.0f}, {:.0f}, {:.0f})'.format(r * rgb_max, g * rgb_max, b * rgb_max) return single_color_func
Create a color function which returns a single hue and saturation with. different values (HSV). Accepted values are color strings as usable by PIL/Pillow. >>> color_func1 = get_single_color_func('deepskyblue') >>> color_func2 = get_single_color_func('#00b4d2')
Below is the the instruction that describes the task: ### Input: Create a color function which returns a single hue and saturation with. different values (HSV). Accepted values are color strings as usable by PIL/Pillow. >>> color_func1 = get_single_color_func('deepskyblue') >>> color_func2 = get_single_color_func('#00b4d2') ### Response: def get_single_color_func(color): """Create a color function which returns a single hue and saturation with. different values (HSV). Accepted values are color strings as usable by PIL/Pillow. >>> color_func1 = get_single_color_func('deepskyblue') >>> color_func2 = get_single_color_func('#00b4d2') """ old_r, old_g, old_b = ImageColor.getrgb(color) rgb_max = 255. h, s, v = colorsys.rgb_to_hsv(old_r / rgb_max, old_g / rgb_max, old_b / rgb_max) def single_color_func(word=None, font_size=None, position=None, orientation=None, font_path=None, random_state=None): """Random color generation. Additional coloring method. It picks a random value with hue and saturation based on the color given to the generating function. Parameters ---------- word, font_size, position, orientation : ignored. random_state : random.Random object or None, (default=None) If a random object is given, this is used for generating random numbers. """ if random_state is None: random_state = Random() r, g, b = colorsys.hsv_to_rgb(h, s, random_state.uniform(0.2, 1)) return 'rgb({:.0f}, {:.0f}, {:.0f})'.format(r * rgb_max, g * rgb_max, b * rgb_max) return single_color_func
def plot_graph(G, bbox=None, fig_height=6, fig_width=None, margin=0.02, axis_off=True, equal_aspect=False, bgcolor='w', show=True, save=False, close=True, file_format='png', filename='temp', dpi=300, annotate=False, node_color='#66ccff', node_size=15, node_alpha=1, node_edgecolor='none', node_zorder=1, edge_color='#999999', edge_linewidth=1, edge_alpha=1, use_geom=True): """ Plot a networkx spatial graph. Parameters ---------- G : networkx multidigraph bbox : tuple bounding box as north,south,east,west - if None will calculate from spatial extents of data. if passing a bbox, you probably also want to pass margin=0 to constrain it. fig_height : int matplotlib figure height in inches fig_width : int matplotlib figure width in inches margin : float relative margin around the figure axis_off : bool if True turn off the matplotlib axis equal_aspect : bool if True set the axis aspect ratio equal bgcolor : string the background color of the figure and axis show : bool if True, show the figure save : bool if True, save the figure as an image file to disk close : bool close the figure (only if show equals False) to prevent display file_format : string the format of the file to save (e.g., 'jpg', 'png', 'svg') filename : string the name of the file if saving dpi : int the resolution of the image file if saving annotate : bool if True, annotate the nodes in the figure node_color : string the color of the nodes node_size : int the size of the nodes node_alpha : float the opacity of the nodes node_edgecolor : string the color of the node's marker's border node_zorder : int zorder to plot nodes, edges are always 2, so make node_zorder 1 to plot nodes beneath them or 3 to plot nodes atop them edge_color : string the color of the edges' lines edge_linewidth : float the width of the edges' lines edge_alpha : float the opacity of the edges' lines use_geom : bool if True, use the spatial geometry attribute of the edges to draw geographically accurate edges, rather than just lines 
straight from node to node Returns ------- fig, ax : tuple """ log('Begin plotting the graph...') node_Xs = [float(x) for _, x in G.nodes(data='x')] node_Ys = [float(y) for _, y in G.nodes(data='y')] # get north, south, east, west values either from bbox parameter or from the # spatial extent of the edges' geometries if bbox is None: edges = graph_to_gdfs(G, nodes=False, fill_edge_geometry=True) west, south, east, north = edges.total_bounds else: north, south, east, west = bbox # if caller did not pass in a fig_width, calculate it proportionately from # the fig_height and bounding box aspect ratio bbox_aspect_ratio = (north-south)/(east-west) if fig_width is None: fig_width = fig_height / bbox_aspect_ratio # create the figure and axis fig, ax = plt.subplots(figsize=(fig_width, fig_height), facecolor=bgcolor) ax.set_facecolor(bgcolor) # draw the edges as lines from node to node start_time = time.time() lines = [] for u, v, data in G.edges(keys=False, data=True): if 'geometry' in data and use_geom: # if it has a geometry attribute (a list of line segments), add them # to the list of lines to plot xs, ys = data['geometry'].xy lines.append(list(zip(xs, ys))) else: # if it doesn't have a geometry attribute, the edge is a straight # line from node to node x1 = G.nodes[u]['x'] y1 = G.nodes[u]['y'] x2 = G.nodes[v]['x'] y2 = G.nodes[v]['y'] line = [(x1, y1), (x2, y2)] lines.append(line) # add the lines to the axis as a linecollection lc = LineCollection(lines, colors=edge_color, linewidths=edge_linewidth, alpha=edge_alpha, zorder=2) ax.add_collection(lc) log('Drew the graph edges in {:,.2f} seconds'.format(time.time()-start_time)) # scatter plot the nodes ax.scatter(node_Xs, node_Ys, s=node_size, c=node_color, alpha=node_alpha, edgecolor=node_edgecolor, zorder=node_zorder) # set the extent of the figure margin_ns = (north - south) * margin margin_ew = (east - west) * margin ax.set_ylim((south - margin_ns, north + margin_ns)) ax.set_xlim((west - margin_ew, east + margin_ew)) 
# configure axis appearance xaxis = ax.get_xaxis() yaxis = ax.get_yaxis() xaxis.get_major_formatter().set_useOffset(False) yaxis.get_major_formatter().set_useOffset(False) # if axis_off, turn off the axis display set the margins to zero and point # the ticks in so there's no space around the plot if axis_off: ax.axis('off') ax.margins(0) ax.tick_params(which='both', direction='in') xaxis.set_visible(False) yaxis.set_visible(False) fig.canvas.draw() if equal_aspect: # make everything square ax.set_aspect('equal') fig.canvas.draw() else: # if the graph is not projected, conform the aspect ratio to not stretch the plot if G.graph['crs'] == settings.default_crs: coslat = np.cos((min(node_Ys) + max(node_Ys)) / 2. / 180. * np.pi) ax.set_aspect(1. / coslat) fig.canvas.draw() # annotate the axis with node IDs if annotate=True if annotate: for node, data in G.nodes(data=True): ax.annotate(node, xy=(data['x'], data['y'])) # save and show the figure as specified fig, ax = save_and_show(fig, ax, save, show, close, filename, file_format, dpi, axis_off) return fig, ax
Plot a networkx spatial graph. Parameters ---------- G : networkx multidigraph bbox : tuple bounding box as north,south,east,west - if None will calculate from spatial extents of data. if passing a bbox, you probably also want to pass margin=0 to constrain it. fig_height : int matplotlib figure height in inches fig_width : int matplotlib figure width in inches margin : float relative margin around the figure axis_off : bool if True turn off the matplotlib axis equal_aspect : bool if True set the axis aspect ratio equal bgcolor : string the background color of the figure and axis show : bool if True, show the figure save : bool if True, save the figure as an image file to disk close : bool close the figure (only if show equals False) to prevent display file_format : string the format of the file to save (e.g., 'jpg', 'png', 'svg') filename : string the name of the file if saving dpi : int the resolution of the image file if saving annotate : bool if True, annotate the nodes in the figure node_color : string the color of the nodes node_size : int the size of the nodes node_alpha : float the opacity of the nodes node_edgecolor : string the color of the node's marker's border node_zorder : int zorder to plot nodes, edges are always 2, so make node_zorder 1 to plot nodes beneath them or 3 to plot nodes atop them edge_color : string the color of the edges' lines edge_linewidth : float the width of the edges' lines edge_alpha : float the opacity of the edges' lines use_geom : bool if True, use the spatial geometry attribute of the edges to draw geographically accurate edges, rather than just lines straight from node to node Returns ------- fig, ax : tuple
Below is the the instruction that describes the task: ### Input: Plot a networkx spatial graph. Parameters ---------- G : networkx multidigraph bbox : tuple bounding box as north,south,east,west - if None will calculate from spatial extents of data. if passing a bbox, you probably also want to pass margin=0 to constrain it. fig_height : int matplotlib figure height in inches fig_width : int matplotlib figure width in inches margin : float relative margin around the figure axis_off : bool if True turn off the matplotlib axis equal_aspect : bool if True set the axis aspect ratio equal bgcolor : string the background color of the figure and axis show : bool if True, show the figure save : bool if True, save the figure as an image file to disk close : bool close the figure (only if show equals False) to prevent display file_format : string the format of the file to save (e.g., 'jpg', 'png', 'svg') filename : string the name of the file if saving dpi : int the resolution of the image file if saving annotate : bool if True, annotate the nodes in the figure node_color : string the color of the nodes node_size : int the size of the nodes node_alpha : float the opacity of the nodes node_edgecolor : string the color of the node's marker's border node_zorder : int zorder to plot nodes, edges are always 2, so make node_zorder 1 to plot nodes beneath them or 3 to plot nodes atop them edge_color : string the color of the edges' lines edge_linewidth : float the width of the edges' lines edge_alpha : float the opacity of the edges' lines use_geom : bool if True, use the spatial geometry attribute of the edges to draw geographically accurate edges, rather than just lines straight from node to node Returns ------- fig, ax : tuple ### Response: def plot_graph(G, bbox=None, fig_height=6, fig_width=None, margin=0.02, axis_off=True, equal_aspect=False, bgcolor='w', show=True, save=False, close=True, file_format='png', filename='temp', dpi=300, annotate=False, node_color='#66ccff', 
node_size=15, node_alpha=1, node_edgecolor='none', node_zorder=1, edge_color='#999999', edge_linewidth=1, edge_alpha=1, use_geom=True): """ Plot a networkx spatial graph. Parameters ---------- G : networkx multidigraph bbox : tuple bounding box as north,south,east,west - if None will calculate from spatial extents of data. if passing a bbox, you probably also want to pass margin=0 to constrain it. fig_height : int matplotlib figure height in inches fig_width : int matplotlib figure width in inches margin : float relative margin around the figure axis_off : bool if True turn off the matplotlib axis equal_aspect : bool if True set the axis aspect ratio equal bgcolor : string the background color of the figure and axis show : bool if True, show the figure save : bool if True, save the figure as an image file to disk close : bool close the figure (only if show equals False) to prevent display file_format : string the format of the file to save (e.g., 'jpg', 'png', 'svg') filename : string the name of the file if saving dpi : int the resolution of the image file if saving annotate : bool if True, annotate the nodes in the figure node_color : string the color of the nodes node_size : int the size of the nodes node_alpha : float the opacity of the nodes node_edgecolor : string the color of the node's marker's border node_zorder : int zorder to plot nodes, edges are always 2, so make node_zorder 1 to plot nodes beneath them or 3 to plot nodes atop them edge_color : string the color of the edges' lines edge_linewidth : float the width of the edges' lines edge_alpha : float the opacity of the edges' lines use_geom : bool if True, use the spatial geometry attribute of the edges to draw geographically accurate edges, rather than just lines straight from node to node Returns ------- fig, ax : tuple """ log('Begin plotting the graph...') node_Xs = [float(x) for _, x in G.nodes(data='x')] node_Ys = [float(y) for _, y in G.nodes(data='y')] # get north, south, east, west values 
either from bbox parameter or from the # spatial extent of the edges' geometries if bbox is None: edges = graph_to_gdfs(G, nodes=False, fill_edge_geometry=True) west, south, east, north = edges.total_bounds else: north, south, east, west = bbox # if caller did not pass in a fig_width, calculate it proportionately from # the fig_height and bounding box aspect ratio bbox_aspect_ratio = (north-south)/(east-west) if fig_width is None: fig_width = fig_height / bbox_aspect_ratio # create the figure and axis fig, ax = plt.subplots(figsize=(fig_width, fig_height), facecolor=bgcolor) ax.set_facecolor(bgcolor) # draw the edges as lines from node to node start_time = time.time() lines = [] for u, v, data in G.edges(keys=False, data=True): if 'geometry' in data and use_geom: # if it has a geometry attribute (a list of line segments), add them # to the list of lines to plot xs, ys = data['geometry'].xy lines.append(list(zip(xs, ys))) else: # if it doesn't have a geometry attribute, the edge is a straight # line from node to node x1 = G.nodes[u]['x'] y1 = G.nodes[u]['y'] x2 = G.nodes[v]['x'] y2 = G.nodes[v]['y'] line = [(x1, y1), (x2, y2)] lines.append(line) # add the lines to the axis as a linecollection lc = LineCollection(lines, colors=edge_color, linewidths=edge_linewidth, alpha=edge_alpha, zorder=2) ax.add_collection(lc) log('Drew the graph edges in {:,.2f} seconds'.format(time.time()-start_time)) # scatter plot the nodes ax.scatter(node_Xs, node_Ys, s=node_size, c=node_color, alpha=node_alpha, edgecolor=node_edgecolor, zorder=node_zorder) # set the extent of the figure margin_ns = (north - south) * margin margin_ew = (east - west) * margin ax.set_ylim((south - margin_ns, north + margin_ns)) ax.set_xlim((west - margin_ew, east + margin_ew)) # configure axis appearance xaxis = ax.get_xaxis() yaxis = ax.get_yaxis() xaxis.get_major_formatter().set_useOffset(False) yaxis.get_major_formatter().set_useOffset(False) # if axis_off, turn off the axis display set the margins to zero 
and point # the ticks in so there's no space around the plot if axis_off: ax.axis('off') ax.margins(0) ax.tick_params(which='both', direction='in') xaxis.set_visible(False) yaxis.set_visible(False) fig.canvas.draw() if equal_aspect: # make everything square ax.set_aspect('equal') fig.canvas.draw() else: # if the graph is not projected, conform the aspect ratio to not stretch the plot if G.graph['crs'] == settings.default_crs: coslat = np.cos((min(node_Ys) + max(node_Ys)) / 2. / 180. * np.pi) ax.set_aspect(1. / coslat) fig.canvas.draw() # annotate the axis with node IDs if annotate=True if annotate: for node, data in G.nodes(data=True): ax.annotate(node, xy=(data['x'], data['y'])) # save and show the figure as specified fig, ax = save_and_show(fig, ax, save, show, close, filename, file_format, dpi, axis_off) return fig, ax
def participating_ec_states(self): '''The state of each execution context this component is participating in. ''' with self._mutex: if not self._participating_ec_states: if self.participating_ecs: states = [] for ec in self.participating_ecs: states.append(self._get_ec_state(ec)) self._participating_ec_states = states else: self._participating_ec_states = [] return self._participating_ec_states
The state of each execution context this component is participating in.
Below is the the instruction that describes the task: ### Input: The state of each execution context this component is participating in. ### Response: def participating_ec_states(self): '''The state of each execution context this component is participating in. ''' with self._mutex: if not self._participating_ec_states: if self.participating_ecs: states = [] for ec in self.participating_ecs: states.append(self._get_ec_state(ec)) self._participating_ec_states = states else: self._participating_ec_states = [] return self._participating_ec_states
def delete(self, ids): """ Method to delete vrf's by their id's :param ids: Identifiers of vrf's :return: None """ url = build_uri_with_ids('api/v3/vrf/%s/', ids) return super(ApiVrf, self).delete(url)
Method to delete vrf's by their id's :param ids: Identifiers of vrf's :return: None
Below is the the instruction that describes the task: ### Input: Method to delete vrf's by their id's :param ids: Identifiers of vrf's :return: None ### Response: def delete(self, ids): """ Method to delete vrf's by their id's :param ids: Identifiers of vrf's :return: None """ url = build_uri_with_ids('api/v3/vrf/%s/', ids) return super(ApiVrf, self).delete(url)
def add_ssh_key(self, name, ssh_pub_key): """ This method allows you to add a new public SSH key to your account. Required parameters name: String, the name you want to give this SSH key. ssh_pub_key: String, the actual public SSH key. """ params = {'name': name, 'ssh_pub_key': ssh_pub_key} json = self.request('/ssh_keys/new', method='GET', params=params) status = json.get('status') if status == 'OK': ssh_key_json = json.get('ssh_key') ssh_key = SSHKey.from_json(ssh_key_json) return ssh_key else: message = json.get('message') raise DOPException('[%s]: %s' % (status, message))
This method allows you to add a new public SSH key to your account. Required parameters name: String, the name you want to give this SSH key. ssh_pub_key: String, the actual public SSH key.
Below is the the instruction that describes the task: ### Input: This method allows you to add a new public SSH key to your account. Required parameters name: String, the name you want to give this SSH key. ssh_pub_key: String, the actual public SSH key. ### Response: def add_ssh_key(self, name, ssh_pub_key): """ This method allows you to add a new public SSH key to your account. Required parameters name: String, the name you want to give this SSH key. ssh_pub_key: String, the actual public SSH key. """ params = {'name': name, 'ssh_pub_key': ssh_pub_key} json = self.request('/ssh_keys/new', method='GET', params=params) status = json.get('status') if status == 'OK': ssh_key_json = json.get('ssh_key') ssh_key = SSHKey.from_json(ssh_key_json) return ssh_key else: message = json.get('message') raise DOPException('[%s]: %s' % (status, message))
def _extract_error(self, resp): """ Extract the actual error message from a solr response. """ reason = resp.headers.get('reason', None) full_response = None if reason is None: try: # if response is in json format reason = resp.json()['error']['msg'] except KeyError: # if json response has unexpected structure full_response = resp.content except ValueError: # otherwise we assume it's html reason, full_html = self._scrape_response(resp.headers, resp.content) full_response = unescape_html(full_html) msg = "[Reason: %s]" % reason if reason is None: msg += "\n%s" % full_response return msg
Extract the actual error message from a solr response.
Below is the the instruction that describes the task: ### Input: Extract the actual error message from a solr response. ### Response: def _extract_error(self, resp): """ Extract the actual error message from a solr response. """ reason = resp.headers.get('reason', None) full_response = None if reason is None: try: # if response is in json format reason = resp.json()['error']['msg'] except KeyError: # if json response has unexpected structure full_response = resp.content except ValueError: # otherwise we assume it's html reason, full_html = self._scrape_response(resp.headers, resp.content) full_response = unescape_html(full_html) msg = "[Reason: %s]" % reason if reason is None: msg += "\n%s" % full_response return msg
def __CombineGlobalParams(self, global_params, default_params): """Combine the given params with the defaults.""" util.Typecheck(global_params, (type(None), self.__client.params_type)) result = self.__client.params_type() global_params = global_params or self.__client.params_type() for field in result.all_fields(): value = global_params.get_assigned_value(field.name) if value is None: value = default_params.get_assigned_value(field.name) if value not in (None, [], ()): setattr(result, field.name, value) return result
Combine the given params with the defaults.
Below is the the instruction that describes the task: ### Input: Combine the given params with the defaults. ### Response: def __CombineGlobalParams(self, global_params, default_params): """Combine the given params with the defaults.""" util.Typecheck(global_params, (type(None), self.__client.params_type)) result = self.__client.params_type() global_params = global_params or self.__client.params_type() for field in result.all_fields(): value = global_params.get_assigned_value(field.name) if value is None: value = default_params.get_assigned_value(field.name) if value not in (None, [], ()): setattr(result, field.name, value) return result
def narrow(self): """Decrease the interval size.""" t, h = self.time, self.half_duration h /= self.scaling_coeff_x self.set_interval((t - h, t + h))
Decrease the interval size.
Below is the the instruction that describes the task: ### Input: Decrease the interval size. ### Response: def narrow(self): """Decrease the interval size.""" t, h = self.time, self.half_duration h /= self.scaling_coeff_x self.set_interval((t - h, t + h))
def get_instance(self, payload): """ Build an instance of MessageInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.message.MessageInstance :rtype: twilio.rest.api.v2010.account.message.MessageInstance """ return MessageInstance(self._version, payload, account_sid=self._solution['account_sid'], )
Build an instance of MessageInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.message.MessageInstance :rtype: twilio.rest.api.v2010.account.message.MessageInstance
Below is the the instruction that describes the task: ### Input: Build an instance of MessageInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.message.MessageInstance :rtype: twilio.rest.api.v2010.account.message.MessageInstance ### Response: def get_instance(self, payload): """ Build an instance of MessageInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.message.MessageInstance :rtype: twilio.rest.api.v2010.account.message.MessageInstance """ return MessageInstance(self._version, payload, account_sid=self._solution['account_sid'], )
def read_chip_sn(self): '''Reading Chip S/N Note ---- Bits [MSB-LSB] | [15] | [14-6] | [5-0] Content | reserved | wafer number | chip number ''' commands = [] commands.extend(self.register.get_commands("ConfMode")) self.register_utils.send_commands(commands) with self.readout(fill_buffer=True, callback=None, errback=None): if self.register.fei4b: commands = [] self.register.set_global_register_value('Efuse_Sense', 1) commands.extend(self.register.get_commands("WrRegister", name=['Efuse_Sense'])) commands.extend(self.register.get_commands("GlobalPulse", Width=0)) self.register.set_global_register_value('Efuse_Sense', 0) commands.extend(self.register.get_commands("WrRegister", name=['Efuse_Sense'])) self.register_utils.send_commands(commands) commands = [] self.register.set_global_register_value('Conf_AddrEnable', 1) commands.extend(self.register.get_commands("WrRegister", name=['Conf_AddrEnable'])) chip_sn_address = self.register.get_global_register_attributes("addresses", name="Chip_SN") commands.extend(self.register.get_commands("RdRegister", addresses=chip_sn_address)) self.register_utils.send_commands(commands) data = self.read_data() if data.shape[0] == 0: logging.error('Chip S/N: No data') return read_values = [] for index, word in enumerate(np.nditer(data)): fei4_data_word = FEI4Record(word, self.register.chip_flavor) if fei4_data_word == 'AR': fei4_next_data_word = FEI4Record(data[index + 1], self.register.chip_flavor) if fei4_next_data_word == 'VR': read_value = fei4_next_data_word['value'] read_values.append(read_value) # commands = [] # commands.extend(self.register.get_commands("RunMode")) # self.register_utils.send_commands(commands) if len(read_values) == 0: logging.error('No Chip S/N was found') elif len(read_values) == 1: logging.info('Chip S/N: %d', read_values[0]) else: logging.warning('Ambiguous Chip S/N: %s', read_values)
Reading Chip S/N Note ---- Bits [MSB-LSB] | [15] | [14-6] | [5-0] Content | reserved | wafer number | chip number
Below is the the instruction that describes the task: ### Input: Reading Chip S/N Note ---- Bits [MSB-LSB] | [15] | [14-6] | [5-0] Content | reserved | wafer number | chip number ### Response: def read_chip_sn(self): '''Reading Chip S/N Note ---- Bits [MSB-LSB] | [15] | [14-6] | [5-0] Content | reserved | wafer number | chip number ''' commands = [] commands.extend(self.register.get_commands("ConfMode")) self.register_utils.send_commands(commands) with self.readout(fill_buffer=True, callback=None, errback=None): if self.register.fei4b: commands = [] self.register.set_global_register_value('Efuse_Sense', 1) commands.extend(self.register.get_commands("WrRegister", name=['Efuse_Sense'])) commands.extend(self.register.get_commands("GlobalPulse", Width=0)) self.register.set_global_register_value('Efuse_Sense', 0) commands.extend(self.register.get_commands("WrRegister", name=['Efuse_Sense'])) self.register_utils.send_commands(commands) commands = [] self.register.set_global_register_value('Conf_AddrEnable', 1) commands.extend(self.register.get_commands("WrRegister", name=['Conf_AddrEnable'])) chip_sn_address = self.register.get_global_register_attributes("addresses", name="Chip_SN") commands.extend(self.register.get_commands("RdRegister", addresses=chip_sn_address)) self.register_utils.send_commands(commands) data = self.read_data() if data.shape[0] == 0: logging.error('Chip S/N: No data') return read_values = [] for index, word in enumerate(np.nditer(data)): fei4_data_word = FEI4Record(word, self.register.chip_flavor) if fei4_data_word == 'AR': fei4_next_data_word = FEI4Record(data[index + 1], self.register.chip_flavor) if fei4_next_data_word == 'VR': read_value = fei4_next_data_word['value'] read_values.append(read_value) # commands = [] # commands.extend(self.register.get_commands("RunMode")) # self.register_utils.send_commands(commands) if len(read_values) == 0: logging.error('No Chip S/N was found') elif len(read_values) == 1: logging.info('Chip S/N: %d', 
read_values[0]) else: logging.warning('Ambiguous Chip S/N: %s', read_values)
def classifier(self): """ Returns classifier from classifier.pkl """ clf = pickle.load(open(os.path.join(self.repopath, 'classifier.pkl'))) return clf
Returns classifier from classifier.pkl
Below is the the instruction that describes the task: ### Input: Returns classifier from classifier.pkl ### Response: def classifier(self): """ Returns classifier from classifier.pkl """ clf = pickle.load(open(os.path.join(self.repopath, 'classifier.pkl'))) return clf
def listen_loop(self): """Starts the listen loop and executes the receieve_datagram method whenever a packet is receieved. Args: None Returns: None """ while self.listening: try: data, address = self.sock.recvfrom(self.bufsize) self.receive_datagram(data, address) if self.stats_enabled: self.stats['bytes_recieved'] += len(data) except socket.error as error: if error.errno == errno.WSAECONNRESET: logger.info("connection reset") else: raise logger.info("Shutting down the listener...")
Starts the listen loop and executes the receieve_datagram method whenever a packet is receieved. Args: None Returns: None
Below is the the instruction that describes the task: ### Input: Starts the listen loop and executes the receieve_datagram method whenever a packet is receieved. Args: None Returns: None ### Response: def listen_loop(self): """Starts the listen loop and executes the receieve_datagram method whenever a packet is receieved. Args: None Returns: None """ while self.listening: try: data, address = self.sock.recvfrom(self.bufsize) self.receive_datagram(data, address) if self.stats_enabled: self.stats['bytes_recieved'] += len(data) except socket.error as error: if error.errno == errno.WSAECONNRESET: logger.info("connection reset") else: raise logger.info("Shutting down the listener...")