code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def generate_handler(): """Create the Blockade user and give them permissions.""" logger.debug("[#] Setting up user, group and permissions") client = boto3.client("iam", region_name=PRIMARY_REGION) # Create the user try: response = client.create_user( UserName=BLOCKADE_USER ) except client.exceptions.EntityAlreadyExistsException: logger.debug("[!] Blockade user already exists") logger.info("[#] %s user successfully created" % (BLOCKADE_USER)) # Create the role try: logger.debug("[#] Creating %s role" % (BLOCKADE_ROLE)) response = client.create_role( RoleName=BLOCKADE_ROLE, AssumeRolePolicyDocument=BLOCKADE_ROLE_POLICY, Description="Allow a user to manage the administration of Blockade." ) except client.exceptions.EntityAlreadyExistsException: logger.debug("[!] Blockade role already exists") logger.info("[#] %s role successfully created" % (BLOCKADE_ROLE)) # Create the group try: logger.debug("[#] Creating %s group" % (BLOCKADE_GROUP)) response = client.create_group( GroupName=BLOCKADE_GROUP, ) except client.exceptions.EntityAlreadyExistsException: logger.debug("[!] Blockade group already exists") logger.info("[#] %s group successfully created" % (BLOCKADE_GROUP)) # Generate all policy items logger.debug("[#] Creating Blockade IAM policies") for label in BLOCKADE_POLICIES: logger.debug("[#] Creating %s policy" % (label)) try: response = client.create_policy( PolicyName=label, PolicyDocument=POLICIES[label], Description="Generated policy from Blockade bootstrap tool" ) except client.exceptions.EntityAlreadyExistsException: logger.debug("[!] 
Blockade policy %s already exists" % (label)) logger.info("[#] Blockade %s policy successfully created" % (label)) logger.info("[#] Blockade policies successfully created") # Attach policies to all entity types iam = boto3.resource('iam') account_id = iam.CurrentUser().arn.split(':')[4] for label in BLOCKADE_POLICIES + ['PushToCloud', 'APIGatewayAdmin']: logger.debug("[#] Attaching %s policy" % (label)) arn = 'arn:aws:iam::{id}:policy/{policy}'.format(id=account_id, policy=label) if label == 'PushToCloud': arn = "arn:aws:iam::aws:policy/service-role/AmazonAPIGatewayPushToCloudWatchLogs" if label == 'APIGatewayAdmin': arn = "arn:aws:iam::aws:policy/AmazonAPIGatewayAdministrator" client.attach_role_policy(RoleName=BLOCKADE_ROLE, PolicyArn=arn) client.attach_group_policy(GroupName=BLOCKADE_GROUP, PolicyArn=arn) logger.info("[#] Blockade policies successfully attached") logger.debug("[#] Adding %s to %s group" % (BLOCKADE_USER, BLOCKADE_GROUP)) response = client.add_user_to_group( GroupName=BLOCKADE_GROUP, UserName=BLOCKADE_USER ) logger.info("[#] %s user is part of %s group" % (BLOCKADE_USER, BLOCKADE_GROUP)) return True
Create the Blockade user and give them permissions.
Below is the the instruction that describes the task: ### Input: Create the Blockade user and give them permissions. ### Response: def generate_handler(): """Create the Blockade user and give them permissions.""" logger.debug("[#] Setting up user, group and permissions") client = boto3.client("iam", region_name=PRIMARY_REGION) # Create the user try: response = client.create_user( UserName=BLOCKADE_USER ) except client.exceptions.EntityAlreadyExistsException: logger.debug("[!] Blockade user already exists") logger.info("[#] %s user successfully created" % (BLOCKADE_USER)) # Create the role try: logger.debug("[#] Creating %s role" % (BLOCKADE_ROLE)) response = client.create_role( RoleName=BLOCKADE_ROLE, AssumeRolePolicyDocument=BLOCKADE_ROLE_POLICY, Description="Allow a user to manage the administration of Blockade." ) except client.exceptions.EntityAlreadyExistsException: logger.debug("[!] Blockade role already exists") logger.info("[#] %s role successfully created" % (BLOCKADE_ROLE)) # Create the group try: logger.debug("[#] Creating %s group" % (BLOCKADE_GROUP)) response = client.create_group( GroupName=BLOCKADE_GROUP, ) except client.exceptions.EntityAlreadyExistsException: logger.debug("[!] Blockade group already exists") logger.info("[#] %s group successfully created" % (BLOCKADE_GROUP)) # Generate all policy items logger.debug("[#] Creating Blockade IAM policies") for label in BLOCKADE_POLICIES: logger.debug("[#] Creating %s policy" % (label)) try: response = client.create_policy( PolicyName=label, PolicyDocument=POLICIES[label], Description="Generated policy from Blockade bootstrap tool" ) except client.exceptions.EntityAlreadyExistsException: logger.debug("[!] 
Blockade policy %s already exists" % (label)) logger.info("[#] Blockade %s policy successfully created" % (label)) logger.info("[#] Blockade policies successfully created") # Attach policies to all entity types iam = boto3.resource('iam') account_id = iam.CurrentUser().arn.split(':')[4] for label in BLOCKADE_POLICIES + ['PushToCloud', 'APIGatewayAdmin']: logger.debug("[#] Attaching %s policy" % (label)) arn = 'arn:aws:iam::{id}:policy/{policy}'.format(id=account_id, policy=label) if label == 'PushToCloud': arn = "arn:aws:iam::aws:policy/service-role/AmazonAPIGatewayPushToCloudWatchLogs" if label == 'APIGatewayAdmin': arn = "arn:aws:iam::aws:policy/AmazonAPIGatewayAdministrator" client.attach_role_policy(RoleName=BLOCKADE_ROLE, PolicyArn=arn) client.attach_group_policy(GroupName=BLOCKADE_GROUP, PolicyArn=arn) logger.info("[#] Blockade policies successfully attached") logger.debug("[#] Adding %s to %s group" % (BLOCKADE_USER, BLOCKADE_GROUP)) response = client.add_user_to_group( GroupName=BLOCKADE_GROUP, UserName=BLOCKADE_USER ) logger.info("[#] %s user is part of %s group" % (BLOCKADE_USER, BLOCKADE_GROUP)) return True
def get_schema(self, db_name, table_name): """ Parameters: - db_name - table_name """ self.send_get_schema(db_name, table_name) return self.recv_get_schema()
Parameters: - db_name - table_name
Below is the the instruction that describes the task: ### Input: Parameters: - db_name - table_name ### Response: def get_schema(self, db_name, table_name): """ Parameters: - db_name - table_name """ self.send_get_schema(db_name, table_name) return self.recv_get_schema()
def get_composition_search_session(self): """Gets a composition search session. return: (osid.repository.CompositionSearchSession) - a ``CompositionSearchSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_composition_search()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_composition_search()`` is ``true``.* """ if not self.supports_composition_search(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.CompositionSearchSession(runtime=self._runtime)
Gets a composition search session. return: (osid.repository.CompositionSearchSession) - a ``CompositionSearchSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_composition_search()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_composition_search()`` is ``true``.*
Below is the the instruction that describes the task: ### Input: Gets a composition search session. return: (osid.repository.CompositionSearchSession) - a ``CompositionSearchSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_composition_search()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_composition_search()`` is ``true``.* ### Response: def get_composition_search_session(self): """Gets a composition search session. return: (osid.repository.CompositionSearchSession) - a ``CompositionSearchSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_composition_search()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_composition_search()`` is ``true``.* """ if not self.supports_composition_search(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.CompositionSearchSession(runtime=self._runtime)
def Create(alias=None,location=None,session=None): """Claims a new network within a given account. https://www.ctl.io/api-docs/v2/#networks-claim-network Returns operation id and link to check status """ if not alias: alias = clc.v2.Account.GetAlias(session=session) if not location: location = clc.v2.Account.GetLocation(session=session) return clc.v2.Requests( clc.v2.API.Call('POST','/v2-experimental/networks/%s/%s/claim' % (alias, location),session=session), alias=alias, session=session)
Claims a new network within a given account. https://www.ctl.io/api-docs/v2/#networks-claim-network Returns operation id and link to check status
Below is the the instruction that describes the task: ### Input: Claims a new network within a given account. https://www.ctl.io/api-docs/v2/#networks-claim-network Returns operation id and link to check status ### Response: def Create(alias=None,location=None,session=None): """Claims a new network within a given account. https://www.ctl.io/api-docs/v2/#networks-claim-network Returns operation id and link to check status """ if not alias: alias = clc.v2.Account.GetAlias(session=session) if not location: location = clc.v2.Account.GetLocation(session=session) return clc.v2.Requests( clc.v2.API.Call('POST','/v2-experimental/networks/%s/%s/claim' % (alias, location),session=session), alias=alias, session=session)
def go_inactive(self, dt=datetime.utcnow().replace(tzinfo=pytz.UTC)): """Make the configuration object inactive. Keyword arguments: dt -- datetime of the moment when the configuration go inactive """ self.end = dt self.save()
Make the configuration object inactive. Keyword arguments: dt -- datetime of the moment when the configuration go inactive
Below is the the instruction that describes the task: ### Input: Make the configuration object inactive. Keyword arguments: dt -- datetime of the moment when the configuration go inactive ### Response: def go_inactive(self, dt=datetime.utcnow().replace(tzinfo=pytz.UTC)): """Make the configuration object inactive. Keyword arguments: dt -- datetime of the moment when the configuration go inactive """ self.end = dt self.save()
def _ParseValueData(self, parser_mediator, registry_key, registry_value): """Extracts event objects from a Explorer ProgramsCache value data. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key. registry_value (dfwinreg.WinRegistryValue): Windows Registry value. Raises: ParseError: if the value data could not be parsed. """ value_data = registry_value.data value_data_size = len(value_data) if value_data_size < 4: return header_map = self._GetDataTypeMap('programscache_header') try: header = self._ReadStructureFromByteStream( value_data, 0, header_map) except (ValueError, errors.ParseError) as exception: parser_mediator.ProduceExtractionWarning( 'unable to parse header value with error: {0!s}'.format( exception)) return if header.format_version not in (1, 9, 12, 19): parser_mediator.ProduceExtractionWarning( 'unsupported format version: {0:d}'.format(header.format_version)) return known_folder_identifier = None if header.format_version == 1: value_data_offset = 8 elif header.format_version == 9: value_data_offset = 6 elif header.format_version in (12, 19): known_folder_identifier = uuid.UUID(bytes_le=value_data[4:20]) value_data_offset = 20 entry_header_map = self._GetDataTypeMap('programscache_entry_header') entry_footer_map = self._GetDataTypeMap('programscache_entry_footer') sentinel = 0 if header.format_version != 9: try: entry_footer = self._ReadStructureFromByteStream( value_data[value_data_offset:], value_data_offset, entry_footer_map) except (ValueError, errors.ParseError) as exception: parser_mediator.ProduceExtractionWarning(( 'unable to parse sentinel at offset: 0x{0:08x} ' 'with error: {1!s}').format(value_data_offset, exception)) return value_data_offset += entry_footer_map.GetByteSize() sentinel = entry_footer.sentinel link_targets = [] while sentinel in (0x00, 0x01): if value_data_offset >= value_data_size: break try: 
entry_header = self._ReadStructureFromByteStream( value_data[value_data_offset:], value_data_offset, entry_header_map) except (ValueError, errors.ParseError) as exception: parser_mediator.ProduceExtractionWarning(( 'unable to parse entry header at offset: 0x{0:08x} ' 'with error: {1!s}').format(value_data_offset, exception)) break value_data_offset += entry_header_map.GetByteSize() display_name = '{0:s} {1:s}'.format( registry_key.path, registry_value.name) shell_items_parser = shell_items.ShellItemsParser(display_name) shell_items_parser.ParseByteStream( parser_mediator, value_data[value_data_offset:], codepage=parser_mediator.codepage) link_target = shell_items_parser.CopyToPath() link_targets.append(link_target) value_data_offset += entry_header.data_size try: entry_footer = self._ReadStructureFromByteStream( value_data[value_data_offset:], value_data_offset, entry_footer_map) except (ValueError, errors.ParseError) as exception: parser_mediator.ProduceExtractionWarning(( 'unable to parse entry footer at offset: 0x{0:08x} ' 'with error: {1!s}').format(value_data_offset, exception)) return value_data_offset += entry_footer_map.GetByteSize() sentinel = entry_footer.sentinel # TODO: recover remaining items. if known_folder_identifier: known_folder_identifier = '{0!s}'.format(known_folder_identifier) event_data = windows_events.WindowsRegistryListEventData() event_data.key_path = registry_key.path event_data.known_folder_identifier = known_folder_identifier event_data.list_name = registry_value.name event_data.list_values = ' '.join([ '{0:d}: {1:s}'.format(index, link_target) for index, link_target in enumerate(link_targets)]) event_data.value_name = registry_value.name event = time_events.DateTimeValuesEvent( registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
Extracts event objects from a Explorer ProgramsCache value data. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key. registry_value (dfwinreg.WinRegistryValue): Windows Registry value. Raises: ParseError: if the value data could not be parsed.
Below is the the instruction that describes the task: ### Input: Extracts event objects from a Explorer ProgramsCache value data. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key. registry_value (dfwinreg.WinRegistryValue): Windows Registry value. Raises: ParseError: if the value data could not be parsed. ### Response: def _ParseValueData(self, parser_mediator, registry_key, registry_value): """Extracts event objects from a Explorer ProgramsCache value data. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key. registry_value (dfwinreg.WinRegistryValue): Windows Registry value. Raises: ParseError: if the value data could not be parsed. """ value_data = registry_value.data value_data_size = len(value_data) if value_data_size < 4: return header_map = self._GetDataTypeMap('programscache_header') try: header = self._ReadStructureFromByteStream( value_data, 0, header_map) except (ValueError, errors.ParseError) as exception: parser_mediator.ProduceExtractionWarning( 'unable to parse header value with error: {0!s}'.format( exception)) return if header.format_version not in (1, 9, 12, 19): parser_mediator.ProduceExtractionWarning( 'unsupported format version: {0:d}'.format(header.format_version)) return known_folder_identifier = None if header.format_version == 1: value_data_offset = 8 elif header.format_version == 9: value_data_offset = 6 elif header.format_version in (12, 19): known_folder_identifier = uuid.UUID(bytes_le=value_data[4:20]) value_data_offset = 20 entry_header_map = self._GetDataTypeMap('programscache_entry_header') entry_footer_map = self._GetDataTypeMap('programscache_entry_footer') sentinel = 0 if header.format_version != 9: try: entry_footer = self._ReadStructureFromByteStream( 
value_data[value_data_offset:], value_data_offset, entry_footer_map) except (ValueError, errors.ParseError) as exception: parser_mediator.ProduceExtractionWarning(( 'unable to parse sentinel at offset: 0x{0:08x} ' 'with error: {1!s}').format(value_data_offset, exception)) return value_data_offset += entry_footer_map.GetByteSize() sentinel = entry_footer.sentinel link_targets = [] while sentinel in (0x00, 0x01): if value_data_offset >= value_data_size: break try: entry_header = self._ReadStructureFromByteStream( value_data[value_data_offset:], value_data_offset, entry_header_map) except (ValueError, errors.ParseError) as exception: parser_mediator.ProduceExtractionWarning(( 'unable to parse entry header at offset: 0x{0:08x} ' 'with error: {1!s}').format(value_data_offset, exception)) break value_data_offset += entry_header_map.GetByteSize() display_name = '{0:s} {1:s}'.format( registry_key.path, registry_value.name) shell_items_parser = shell_items.ShellItemsParser(display_name) shell_items_parser.ParseByteStream( parser_mediator, value_data[value_data_offset:], codepage=parser_mediator.codepage) link_target = shell_items_parser.CopyToPath() link_targets.append(link_target) value_data_offset += entry_header.data_size try: entry_footer = self._ReadStructureFromByteStream( value_data[value_data_offset:], value_data_offset, entry_footer_map) except (ValueError, errors.ParseError) as exception: parser_mediator.ProduceExtractionWarning(( 'unable to parse entry footer at offset: 0x{0:08x} ' 'with error: {1!s}').format(value_data_offset, exception)) return value_data_offset += entry_footer_map.GetByteSize() sentinel = entry_footer.sentinel # TODO: recover remaining items. 
if known_folder_identifier: known_folder_identifier = '{0!s}'.format(known_folder_identifier) event_data = windows_events.WindowsRegistryListEventData() event_data.key_path = registry_key.path event_data.known_folder_identifier = known_folder_identifier event_data.list_name = registry_value.name event_data.list_values = ' '.join([ '{0:d}: {1:s}'.format(index, link_target) for index, link_target in enumerate(link_targets)]) event_data.value_name = registry_value.name event = time_events.DateTimeValuesEvent( registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
def cachestr_repr(val): """ Representation of an object as a cache string. """ try: memview = memoryview(val) return memview.tobytes() except Exception: try: return to_json(val) except Exception: # SUPER HACK if repr(val.__class__) == "<class 'ibeis.control.IBEISControl.IBEISController'>": return val.get_dbname()
Representation of an object as a cache string.
Below is the the instruction that describes the task: ### Input: Representation of an object as a cache string. ### Response: def cachestr_repr(val): """ Representation of an object as a cache string. """ try: memview = memoryview(val) return memview.tobytes() except Exception: try: return to_json(val) except Exception: # SUPER HACK if repr(val.__class__) == "<class 'ibeis.control.IBEISControl.IBEISController'>": return val.get_dbname()
def init_user_ns(self): """Initialize all user-visible namespaces to their minimum defaults. Certain history lists are also initialized here, as they effectively act as user namespaces. Notes ----- All data structures here are only filled in, they are NOT reset by this method. If they were not empty before, data will simply be added to therm. """ # This function works in two parts: first we put a few things in # user_ns, and we sync that contents into user_ns_hidden so that these # initial variables aren't shown by %who. After the sync, we add the # rest of what we *do* want the user to see with %who even on a new # session (probably nothing, so theye really only see their own stuff) # The user dict must *always* have a __builtin__ reference to the # Python standard __builtin__ namespace, which must be imported. # This is so that certain operations in prompt evaluation can be # reliably executed with builtins. Note that we can NOT use # __builtins__ (note the 's'), because that can either be a dict or a # module, and can even mutate at runtime, depending on the context # (Python makes no guarantees on it). In contrast, __builtin__ is # always a module object, though it must be explicitly imported. # For more details: # http://mail.python.org/pipermail/python-dev/2001-April/014068.html ns = dict() # Put 'help' in the user namespace try: from site import _Helper ns['help'] = _Helper() except ImportError: warn('help() not available - check site.py') # make global variables for user access to the histories ns['_ih'] = self.history_manager.input_hist_parsed ns['_oh'] = self.history_manager.output_hist ns['_dh'] = self.history_manager.dir_hist ns['_sh'] = shadowns # user aliases to input and output histories. These shouldn't show up # in %who, as they can have very large reprs. ns['In'] = self.history_manager.input_hist_parsed ns['Out'] = self.history_manager.output_hist # Store myself as the public api!!! 
ns['get_ipython'] = self.get_ipython ns['exit'] = self.exiter ns['quit'] = self.exiter # Sync what we've added so far to user_ns_hidden so these aren't seen # by %who self.user_ns_hidden.update(ns) # Anything put into ns now would show up in %who. Think twice before # putting anything here, as we really want %who to show the user their # stuff, not our variables. # Finally, update the real user's namespace self.user_ns.update(ns)
Initialize all user-visible namespaces to their minimum defaults. Certain history lists are also initialized here, as they effectively act as user namespaces. Notes ----- All data structures here are only filled in, they are NOT reset by this method. If they were not empty before, data will simply be added to therm.
Below is the the instruction that describes the task: ### Input: Initialize all user-visible namespaces to their minimum defaults. Certain history lists are also initialized here, as they effectively act as user namespaces. Notes ----- All data structures here are only filled in, they are NOT reset by this method. If they were not empty before, data will simply be added to therm. ### Response: def init_user_ns(self): """Initialize all user-visible namespaces to their minimum defaults. Certain history lists are also initialized here, as they effectively act as user namespaces. Notes ----- All data structures here are only filled in, they are NOT reset by this method. If they were not empty before, data will simply be added to therm. """ # This function works in two parts: first we put a few things in # user_ns, and we sync that contents into user_ns_hidden so that these # initial variables aren't shown by %who. After the sync, we add the # rest of what we *do* want the user to see with %who even on a new # session (probably nothing, so theye really only see their own stuff) # The user dict must *always* have a __builtin__ reference to the # Python standard __builtin__ namespace, which must be imported. # This is so that certain operations in prompt evaluation can be # reliably executed with builtins. Note that we can NOT use # __builtins__ (note the 's'), because that can either be a dict or a # module, and can even mutate at runtime, depending on the context # (Python makes no guarantees on it). In contrast, __builtin__ is # always a module object, though it must be explicitly imported. 
# For more details: # http://mail.python.org/pipermail/python-dev/2001-April/014068.html ns = dict() # Put 'help' in the user namespace try: from site import _Helper ns['help'] = _Helper() except ImportError: warn('help() not available - check site.py') # make global variables for user access to the histories ns['_ih'] = self.history_manager.input_hist_parsed ns['_oh'] = self.history_manager.output_hist ns['_dh'] = self.history_manager.dir_hist ns['_sh'] = shadowns # user aliases to input and output histories. These shouldn't show up # in %who, as they can have very large reprs. ns['In'] = self.history_manager.input_hist_parsed ns['Out'] = self.history_manager.output_hist # Store myself as the public api!!! ns['get_ipython'] = self.get_ipython ns['exit'] = self.exiter ns['quit'] = self.exiter # Sync what we've added so far to user_ns_hidden so these aren't seen # by %who self.user_ns_hidden.update(ns) # Anything put into ns now would show up in %who. Think twice before # putting anything here, as we really want %who to show the user their # stuff, not our variables. # Finally, update the real user's namespace self.user_ns.update(ns)
def absent(name, service_name, auth=None, **kwargs): ''' Ensure an endpoint does not exists name Interface name url URL of the endpoint service_name Service name or ID region The region name to assign the endpoint ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} __salt__['keystoneng.setup_clouds'](auth) success, val = _, endpoint = _common(ret, name, service_name, kwargs) if not success: return val if endpoint: if __opts__['test'] is True: ret['result'] = None ret['changes'] = {'id': endpoint.id} ret['comment'] = 'Endpoint will be deleted.' return ret __salt__['keystoneng.endpoint_delete'](id=endpoint.id) ret['changes']['id'] = endpoint.id ret['comment'] = 'Deleted endpoint' return ret
Ensure an endpoint does not exists name Interface name url URL of the endpoint service_name Service name or ID region The region name to assign the endpoint
Below is the the instruction that describes the task: ### Input: Ensure an endpoint does not exists name Interface name url URL of the endpoint service_name Service name or ID region The region name to assign the endpoint ### Response: def absent(name, service_name, auth=None, **kwargs): ''' Ensure an endpoint does not exists name Interface name url URL of the endpoint service_name Service name or ID region The region name to assign the endpoint ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} __salt__['keystoneng.setup_clouds'](auth) success, val = _, endpoint = _common(ret, name, service_name, kwargs) if not success: return val if endpoint: if __opts__['test'] is True: ret['result'] = None ret['changes'] = {'id': endpoint.id} ret['comment'] = 'Endpoint will be deleted.' return ret __salt__['keystoneng.endpoint_delete'](id=endpoint.id) ret['changes']['id'] = endpoint.id ret['comment'] = 'Deleted endpoint' return ret
def mouseMoveEvent(self, event): """ Handle the mouse move event for a drag operation. """ #if event.buttons() & Qt.LeftButton and self._drag_origin is not None: #dist = (event.pos() - self._drag_origin).manhattanLength() #if dist >= QApplication.startDragDistance(): #self.do_drag(event.widget()) #self._drag_origin = None #return # Don't returns widget = self.widget type(widget).mouseMoveEvent(widget, event)
Handle the mouse move event for a drag operation.
Below is the the instruction that describes the task: ### Input: Handle the mouse move event for a drag operation. ### Response: def mouseMoveEvent(self, event): """ Handle the mouse move event for a drag operation. """ #if event.buttons() & Qt.LeftButton and self._drag_origin is not None: #dist = (event.pos() - self._drag_origin).manhattanLength() #if dist >= QApplication.startDragDistance(): #self.do_drag(event.widget()) #self._drag_origin = None #return # Don't returns widget = self.widget type(widget).mouseMoveEvent(widget, event)
def _get_components(self): """ Subclasses may override this method. """ return tuple([self._getitem__components(i) for i in range(self._len__components())])
Subclasses may override this method.
Below is the the instruction that describes the task: ### Input: Subclasses may override this method. ### Response: def _get_components(self): """ Subclasses may override this method. """ return tuple([self._getitem__components(i) for i in range(self._len__components())])
def lines_from_tree(tree, nodes_and_set:bool=False) -> iter: """Yield lines of bubble describing given BubbleTree""" NODE = 'NODE\t{}' INCL = 'IN\t{}\t{}' EDGE = 'EDGE\t{}\t{}\t1.0' SET = 'SET\t{}' if nodes_and_set: for node in tree.nodes(): yield NODE.format(node) for node in tree.powernodes(): yield SET.format(node) for node, includeds in tree.inclusions.items(): for included in includeds: yield INCL.format(included, node) for node, succs in tree.edges.items(): for succ in succs: yield EDGE.format(node, succ)
Yield lines of bubble describing given BubbleTree
Below is the the instruction that describes the task: ### Input: Yield lines of bubble describing given BubbleTree ### Response: def lines_from_tree(tree, nodes_and_set:bool=False) -> iter: """Yield lines of bubble describing given BubbleTree""" NODE = 'NODE\t{}' INCL = 'IN\t{}\t{}' EDGE = 'EDGE\t{}\t{}\t1.0' SET = 'SET\t{}' if nodes_and_set: for node in tree.nodes(): yield NODE.format(node) for node in tree.powernodes(): yield SET.format(node) for node, includeds in tree.inclusions.items(): for included in includeds: yield INCL.format(included, node) for node, succs in tree.edges.items(): for succ in succs: yield EDGE.format(node, succ)
def progress(iterator, enum=False, length=None): """ A helper utility to display a progress bar when iterating over a collection of a fixed length or a generator (with a declared length). If enum=True, then equivalent to enumerate with a progress bar. """ progress = ProgressBar() length = len(iterator) if length is None else length gen = enumerate(iterator) while True: i, val = next(gen) progress((i+1.0)/length * 100) if enum: yield i, val else: yield val
A helper utility to display a progress bar when iterating over a collection of a fixed length or a generator (with a declared length). If enum=True, then equivalent to enumerate with a progress bar.
Below is the the instruction that describes the task: ### Input: A helper utility to display a progress bar when iterating over a collection of a fixed length or a generator (with a declared length). If enum=True, then equivalent to enumerate with a progress bar. ### Response: def progress(iterator, enum=False, length=None): """ A helper utility to display a progress bar when iterating over a collection of a fixed length or a generator (with a declared length). If enum=True, then equivalent to enumerate with a progress bar. """ progress = ProgressBar() length = len(iterator) if length is None else length gen = enumerate(iterator) while True: i, val = next(gen) progress((i+1.0)/length * 100) if enum: yield i, val else: yield val
def mysql_aes_decrypt(encrypted_val, key): """Mysql AES decrypt value with secret key. :param encrypted_val: Encrypted value. :param key: The AES key. :returns: The AES value decrypted. """ assert isinstance(encrypted_val, binary_type) \ or isinstance(encrypted_val, text_type) assert isinstance(key, binary_type) or isinstance(key, text_type) k = _mysql_aes_key(_to_binary(key)) d = _mysql_aes_engine(_to_binary(k)).decryptor() return _mysql_aes_unpad(d.update(_to_binary(encrypted_val)) + d.finalize())
Mysql AES decrypt value with secret key. :param encrypted_val: Encrypted value. :param key: The AES key. :returns: The AES value decrypted.
Below is the the instruction that describes the task: ### Input: Mysql AES decrypt value with secret key. :param encrypted_val: Encrypted value. :param key: The AES key. :returns: The AES value decrypted. ### Response: def mysql_aes_decrypt(encrypted_val, key): """Mysql AES decrypt value with secret key. :param encrypted_val: Encrypted value. :param key: The AES key. :returns: The AES value decrypted. """ assert isinstance(encrypted_val, binary_type) \ or isinstance(encrypted_val, text_type) assert isinstance(key, binary_type) or isinstance(key, text_type) k = _mysql_aes_key(_to_binary(key)) d = _mysql_aes_engine(_to_binary(k)).decryptor() return _mysql_aes_unpad(d.update(_to_binary(encrypted_val)) + d.finalize())
def create_widget(self): """ Create the toolkit widget for the proxy object. This method is called during the top-down pass, just before the 'init_widget()' method is called. This method should create the toolkit widget and assign it to the 'widget' attribute. """ self.widget = SubElement(self.parent_widget(), self.declaration.tag)
Create the toolkit widget for the proxy object. This method is called during the top-down pass, just before the 'init_widget()' method is called. This method should create the toolkit widget and assign it to the 'widget' attribute.
Below is the the instruction that describes the task: ### Input: Create the toolkit widget for the proxy object. This method is called during the top-down pass, just before the 'init_widget()' method is called. This method should create the toolkit widget and assign it to the 'widget' attribute. ### Response: def create_widget(self): """ Create the toolkit widget for the proxy object. This method is called during the top-down pass, just before the 'init_widget()' method is called. This method should create the toolkit widget and assign it to the 'widget' attribute. """ self.widget = SubElement(self.parent_widget(), self.declaration.tag)
def get(self, query: Mapping[str, Any], context: PipelineContext = None) -> T: """Gets a query from the data source. 1) Extracts the query from the data source. 2) Inserts the result into any data sinks. 3) Transforms the result into the requested type if it wasn't already. 4) Inserts the transformed result into any data sinks. Args: query: The query being requested. context: The context for the extraction (mutable). Returns: The requested object. """ result = self._source.get(self._source_type, deepcopy(query), context) LOGGER.info("Got result \"{result}\" from query \"{query}\" of source \"{source}\"".format(result=result, query=query, source=self._source)) LOGGER.info("Sending result \"{result}\" to sinks before converting".format(result=result)) for sink in self._before_transform: sink.put(result, context) LOGGER.info("Converting result \"{result}\" to request type".format(result=result)) result = self._transform(data=result, context=context) LOGGER.info("Sending result \"{result}\" to sinks after converting".format(result=result)) for sink in self._after_transform: sink.put(result, context) return result
Gets a query from the data source. 1) Extracts the query from the data source. 2) Inserts the result into any data sinks. 3) Transforms the result into the requested type if it wasn't already. 4) Inserts the transformed result into any data sinks. Args: query: The query being requested. context: The context for the extraction (mutable). Returns: The requested object.
Below is the the instruction that describes the task: ### Input: Gets a query from the data source. 1) Extracts the query from the data source. 2) Inserts the result into any data sinks. 3) Transforms the result into the requested type if it wasn't already. 4) Inserts the transformed result into any data sinks. Args: query: The query being requested. context: The context for the extraction (mutable). Returns: The requested object. ### Response: def get(self, query: Mapping[str, Any], context: PipelineContext = None) -> T: """Gets a query from the data source. 1) Extracts the query from the data source. 2) Inserts the result into any data sinks. 3) Transforms the result into the requested type if it wasn't already. 4) Inserts the transformed result into any data sinks. Args: query: The query being requested. context: The context for the extraction (mutable). Returns: The requested object. """ result = self._source.get(self._source_type, deepcopy(query), context) LOGGER.info("Got result \"{result}\" from query \"{query}\" of source \"{source}\"".format(result=result, query=query, source=self._source)) LOGGER.info("Sending result \"{result}\" to sinks before converting".format(result=result)) for sink in self._before_transform: sink.put(result, context) LOGGER.info("Converting result \"{result}\" to request type".format(result=result)) result = self._transform(data=result, context=context) LOGGER.info("Sending result \"{result}\" to sinks after converting".format(result=result)) for sink in self._after_transform: sink.put(result, context) return result
def trips(self, val): """ Update ``self._trips_i`` if ``self.trips`` changes. """ self._trips = val if val is not None and not val.empty: self._trips_i = self._trips.set_index("trip_id") else: self._trips_i = None
Update ``self._trips_i`` if ``self.trips`` changes.
Below is the the instruction that describes the task: ### Input: Update ``self._trips_i`` if ``self.trips`` changes. ### Response: def trips(self, val): """ Update ``self._trips_i`` if ``self.trips`` changes. """ self._trips = val if val is not None and not val.empty: self._trips_i = self._trips.set_index("trip_id") else: self._trips_i = None
def compute(self, activeColumns, basalInput, apicalInput=(), basalGrowthCandidates=None, apicalGrowthCandidates=None, learn=True): """ Perform one timestep. Use the basal and apical input to form a set of predictions, then activate the specified columns, then learn. @param activeColumns (numpy array) List of active columns @param basalInput (numpy array) List of active input bits for the basal dendrite segments @param apicalInput (numpy array) List of active input bits for the apical dendrite segments @param basalGrowthCandidates (numpy array or None) List of bits that the active cells may grow new basal synapses to. If None, the basalInput is assumed to be growth candidates. @param apicalGrowthCandidates (numpy array or None) List of bits that the active cells may grow new apical synapses to If None, the apicalInput is assumed to be growth candidates. @param learn (bool) Whether to grow / reinforce / punish synapses """ activeColumns = np.asarray(activeColumns) basalInput = np.asarray(basalInput) apicalInput = np.asarray(apicalInput) if basalGrowthCandidates is None: basalGrowthCandidates = basalInput basalGrowthCandidates = np.asarray(basalGrowthCandidates) if apicalGrowthCandidates is None: apicalGrowthCandidates = apicalInput apicalGrowthCandidates = np.asarray(apicalGrowthCandidates) self.depolarizeCells(basalInput, apicalInput, learn) self.activateCells(activeColumns, basalInput, apicalInput, basalGrowthCandidates, apicalGrowthCandidates, learn)
Perform one timestep. Use the basal and apical input to form a set of predictions, then activate the specified columns, then learn. @param activeColumns (numpy array) List of active columns @param basalInput (numpy array) List of active input bits for the basal dendrite segments @param apicalInput (numpy array) List of active input bits for the apical dendrite segments @param basalGrowthCandidates (numpy array or None) List of bits that the active cells may grow new basal synapses to. If None, the basalInput is assumed to be growth candidates. @param apicalGrowthCandidates (numpy array or None) List of bits that the active cells may grow new apical synapses to If None, the apicalInput is assumed to be growth candidates. @param learn (bool) Whether to grow / reinforce / punish synapses
Below is the the instruction that describes the task: ### Input: Perform one timestep. Use the basal and apical input to form a set of predictions, then activate the specified columns, then learn. @param activeColumns (numpy array) List of active columns @param basalInput (numpy array) List of active input bits for the basal dendrite segments @param apicalInput (numpy array) List of active input bits for the apical dendrite segments @param basalGrowthCandidates (numpy array or None) List of bits that the active cells may grow new basal synapses to. If None, the basalInput is assumed to be growth candidates. @param apicalGrowthCandidates (numpy array or None) List of bits that the active cells may grow new apical synapses to If None, the apicalInput is assumed to be growth candidates. @param learn (bool) Whether to grow / reinforce / punish synapses ### Response: def compute(self, activeColumns, basalInput, apicalInput=(), basalGrowthCandidates=None, apicalGrowthCandidates=None, learn=True): """ Perform one timestep. Use the basal and apical input to form a set of predictions, then activate the specified columns, then learn. @param activeColumns (numpy array) List of active columns @param basalInput (numpy array) List of active input bits for the basal dendrite segments @param apicalInput (numpy array) List of active input bits for the apical dendrite segments @param basalGrowthCandidates (numpy array or None) List of bits that the active cells may grow new basal synapses to. If None, the basalInput is assumed to be growth candidates. @param apicalGrowthCandidates (numpy array or None) List of bits that the active cells may grow new apical synapses to If None, the apicalInput is assumed to be growth candidates. 
@param learn (bool) Whether to grow / reinforce / punish synapses """ activeColumns = np.asarray(activeColumns) basalInput = np.asarray(basalInput) apicalInput = np.asarray(apicalInput) if basalGrowthCandidates is None: basalGrowthCandidates = basalInput basalGrowthCandidates = np.asarray(basalGrowthCandidates) if apicalGrowthCandidates is None: apicalGrowthCandidates = apicalInput apicalGrowthCandidates = np.asarray(apicalGrowthCandidates) self.depolarizeCells(basalInput, apicalInput, learn) self.activateCells(activeColumns, basalInput, apicalInput, basalGrowthCandidates, apicalGrowthCandidates, learn)
def call(self, itemMethod): """ Invoke the given bound item method in the batch process. Return a Deferred which fires when the method has been invoked. """ item = itemMethod.im_self method = itemMethod.im_func.func_name return self.batchController.getProcess().addCallback( CallItemMethod(storepath=item.store.dbdir, storeid=item.storeID, method=method).do)
Invoke the given bound item method in the batch process. Return a Deferred which fires when the method has been invoked.
Below is the the instruction that describes the task: ### Input: Invoke the given bound item method in the batch process. Return a Deferred which fires when the method has been invoked. ### Response: def call(self, itemMethod): """ Invoke the given bound item method in the batch process. Return a Deferred which fires when the method has been invoked. """ item = itemMethod.im_self method = itemMethod.im_func.func_name return self.batchController.getProcess().addCallback( CallItemMethod(storepath=item.store.dbdir, storeid=item.storeID, method=method).do)
def calc_A(Ys): '''Return the matrix A from a list of Y vectors.''' return sum(np.dot(np.reshape(Y, (3,1)), np.reshape(Y, (1, 3))) for Y in Ys)
Return the matrix A from a list of Y vectors.
Below is the the instruction that describes the task: ### Input: Return the matrix A from a list of Y vectors. ### Response: def calc_A(Ys): '''Return the matrix A from a list of Y vectors.''' return sum(np.dot(np.reshape(Y, (3,1)), np.reshape(Y, (1, 3))) for Y in Ys)
def create_contract(self, price=0, address=None, caller=None, balance=0, init=None, gas=None): """ Create a contract account. Sends a transaction to initialize the contract :param address: the address of the new account, if known. If omitted, a new address will be generated as closely to the Yellow Paper as possible. :param balance: the initial balance of the account in Wei :param init: the initialization code of the contract The way that the Solidity compiler expects the constructor arguments to be passed is by appending the arguments to the byte code produced by the Solidity compiler. The arguments are formatted as defined in the Ethereum ABI2. The arguments are then copied from the init byte array to the EVM memory through the CODECOPY opcode with appropriate values on the stack. This is done when the byte code in the init byte array is actually run on the network. """ expected_address = self.create_account(self.new_address(sender=caller)) if address is None: address = expected_address elif caller is not None and address != expected_address: raise EthereumError(f"Error: contract created from address {hex(caller)} with nonce {self.get_nonce(caller)} was expected to be at address {hex(expected_address)}, but create_contract was called with address={hex(address)}") self.start_transaction('CREATE', address, price, init, caller, balance, gas=gas) self._process_pending_transaction() return address
Create a contract account. Sends a transaction to initialize the contract :param address: the address of the new account, if known. If omitted, a new address will be generated as closely to the Yellow Paper as possible. :param balance: the initial balance of the account in Wei :param init: the initialization code of the contract The way that the Solidity compiler expects the constructor arguments to be passed is by appending the arguments to the byte code produced by the Solidity compiler. The arguments are formatted as defined in the Ethereum ABI2. The arguments are then copied from the init byte array to the EVM memory through the CODECOPY opcode with appropriate values on the stack. This is done when the byte code in the init byte array is actually run on the network.
Below is the the instruction that describes the task: ### Input: Create a contract account. Sends a transaction to initialize the contract :param address: the address of the new account, if known. If omitted, a new address will be generated as closely to the Yellow Paper as possible. :param balance: the initial balance of the account in Wei :param init: the initialization code of the contract The way that the Solidity compiler expects the constructor arguments to be passed is by appending the arguments to the byte code produced by the Solidity compiler. The arguments are formatted as defined in the Ethereum ABI2. The arguments are then copied from the init byte array to the EVM memory through the CODECOPY opcode with appropriate values on the stack. This is done when the byte code in the init byte array is actually run on the network. ### Response: def create_contract(self, price=0, address=None, caller=None, balance=0, init=None, gas=None): """ Create a contract account. Sends a transaction to initialize the contract :param address: the address of the new account, if known. If omitted, a new address will be generated as closely to the Yellow Paper as possible. :param balance: the initial balance of the account in Wei :param init: the initialization code of the contract The way that the Solidity compiler expects the constructor arguments to be passed is by appending the arguments to the byte code produced by the Solidity compiler. The arguments are formatted as defined in the Ethereum ABI2. The arguments are then copied from the init byte array to the EVM memory through the CODECOPY opcode with appropriate values on the stack. This is done when the byte code in the init byte array is actually run on the network. 
""" expected_address = self.create_account(self.new_address(sender=caller)) if address is None: address = expected_address elif caller is not None and address != expected_address: raise EthereumError(f"Error: contract created from address {hex(caller)} with nonce {self.get_nonce(caller)} was expected to be at address {hex(expected_address)}, but create_contract was called with address={hex(address)}") self.start_transaction('CREATE', address, price, init, caller, balance, gas=gas) self._process_pending_transaction() return address
async def random_connection(self): """Connect to random agent from current :attr:`connections`. :returns: :class:`aiomas.Proxy` object for the connected agent. """ addr = choice(list(self._connections.keys())) return await self.env.connect(addr)
Connect to random agent from current :attr:`connections`. :returns: :class:`aiomas.Proxy` object for the connected agent.
Below is the the instruction that describes the task: ### Input: Connect to random agent from current :attr:`connections`. :returns: :class:`aiomas.Proxy` object for the connected agent. ### Response: async def random_connection(self): """Connect to random agent from current :attr:`connections`. :returns: :class:`aiomas.Proxy` object for the connected agent. """ addr = choice(list(self._connections.keys())) return await self.env.connect(addr)
def gradient(self): """The gradient operator.""" # First we store the functional in a variable functional = self # The class corresponding to the gradient operator. class MyGradientOperator(odl.Operator): """Class implementing the gradient operator.""" def __init__(self): """Initialize a new instance.""" super(MyGradientOperator, self).__init__( domain=functional.domain, range=functional.domain) def _call(self, x): """Evaluate the gradient.""" # Here we can access the store functional from a few lines # above return 2.0 * x + functional.y return MyGradientOperator()
The gradient operator.
Below is the the instruction that describes the task: ### Input: The gradient operator. ### Response: def gradient(self): """The gradient operator.""" # First we store the functional in a variable functional = self # The class corresponding to the gradient operator. class MyGradientOperator(odl.Operator): """Class implementing the gradient operator.""" def __init__(self): """Initialize a new instance.""" super(MyGradientOperator, self).__init__( domain=functional.domain, range=functional.domain) def _call(self, x): """Evaluate the gradient.""" # Here we can access the store functional from a few lines # above return 2.0 * x + functional.y return MyGradientOperator()
def SystemFee(self): """ Get the system fee. Returns: Fixed8: currently fixed to 0. """ tx_name = TransactionType.ToName(self.Type) return Fixed8.FromDecimal(settings.ALL_FEES.get(tx_name, 0))
Get the system fee. Returns: Fixed8: currently fixed to 0.
Below is the the instruction that describes the task: ### Input: Get the system fee. Returns: Fixed8: currently fixed to 0. ### Response: def SystemFee(self): """ Get the system fee. Returns: Fixed8: currently fixed to 0. """ tx_name = TransactionType.ToName(self.Type) return Fixed8.FromDecimal(settings.ALL_FEES.get(tx_name, 0))
def _analyze_pages_with_partial_read(self): """! @brief Estimate how many pages are the same by reading data. Pages are analyzed by reading the first 32 bytes and comparing with data to be programmed. """ # Quickly estimate how many pages are the same as current flash contents. # Init the flash algo in case it is required in order to access the flash memory. self._enable_read_access() for page in self.page_list: # Analyze pages that haven't been analyzed yet if page.same is None: size = min(PAGE_ESTIMATE_SIZE, len(page.data)) data = self.flash.target.read_memory_block8(page.addr, size) page_same = same(data, page.data[0:size]) if page_same is False: page.same = False else: # Save the data read for estimation so we don't need to read it again. page.cached_estimate_data = data
! @brief Estimate how many pages are the same by reading data. Pages are analyzed by reading the first 32 bytes and comparing with data to be programmed.
Below is the the instruction that describes the task: ### Input: ! @brief Estimate how many pages are the same by reading data. Pages are analyzed by reading the first 32 bytes and comparing with data to be programmed. ### Response: def _analyze_pages_with_partial_read(self): """! @brief Estimate how many pages are the same by reading data. Pages are analyzed by reading the first 32 bytes and comparing with data to be programmed. """ # Quickly estimate how many pages are the same as current flash contents. # Init the flash algo in case it is required in order to access the flash memory. self._enable_read_access() for page in self.page_list: # Analyze pages that haven't been analyzed yet if page.same is None: size = min(PAGE_ESTIMATE_SIZE, len(page.data)) data = self.flash.target.read_memory_block8(page.addr, size) page_same = same(data, page.data[0:size]) if page_same is False: page.same = False else: # Save the data read for estimation so we don't need to read it again. page.cached_estimate_data = data
def align_options(options): """ Indents flags and aligns help texts. """ l = 0 for opt in options: if len(opt[0]) > l: l = len(opt[0]) s = [] for opt in options: s.append(' {0}{1} {2}'.format(opt[0], ' ' * (l - len(opt[0])), opt[1])) return '\n'.join(s)
Indents flags and aligns help texts.
Below is the the instruction that describes the task: ### Input: Indents flags and aligns help texts. ### Response: def align_options(options): """ Indents flags and aligns help texts. """ l = 0 for opt in options: if len(opt[0]) > l: l = len(opt[0]) s = [] for opt in options: s.append(' {0}{1} {2}'.format(opt[0], ' ' * (l - len(opt[0])), opt[1])) return '\n'.join(s)
def _refresh(self): """Background refreshing thread.""" while not self._stopevent.isSet(): line = self._serial.readline() #this is for python2/python3 compatibility. Is there a better way? try: line = line.encode().decode('utf-8') except AttributeError: line = line.decode('utf-8') if LaCrosseSensor.re_reading.match(line): sensor = LaCrosseSensor(line) self.sensors[sensor.sensorid] = sensor if self._callback: self._callback(sensor, self._callback_data) if sensor.sensorid in self._registry: for cbs in self._registry[sensor.sensorid]: cbs[0](sensor, cbs[1])
Background refreshing thread.
Below is the the instruction that describes the task: ### Input: Background refreshing thread. ### Response: def _refresh(self): """Background refreshing thread.""" while not self._stopevent.isSet(): line = self._serial.readline() #this is for python2/python3 compatibility. Is there a better way? try: line = line.encode().decode('utf-8') except AttributeError: line = line.decode('utf-8') if LaCrosseSensor.re_reading.match(line): sensor = LaCrosseSensor(line) self.sensors[sensor.sensorid] = sensor if self._callback: self._callback(sensor, self._callback_data) if sensor.sensorid in self._registry: for cbs in self._registry[sensor.sensorid]: cbs[0](sensor, cbs[1])
def load_all_methods(self): r'''Method which picks out coefficients for the specified chemical from the various dictionaries and DataFrames storing it. All data is stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`, :obj:`all_methods` and obj:`all_methods_P` as a set of methods for which the data exists for. Called on initialization only. See the source code for the variables at which the coefficients are stored. The coefficients can safely be altered once the class is initialized. This method can be called again to reset the parameters. ''' methods, methods_P = [], [] Tmins, Tmaxs = [], [] if self.CASRN in _VDISaturationDict: methods.append(VDI_TABULAR) Ts, props = VDI_tabular_data(self.CASRN, 'Mu (g)') self.VDI_Tmin = Ts[0] self.VDI_Tmax = Ts[-1] self.tabular_data[VDI_TABULAR] = (Ts, props) Tmins.append(self.VDI_Tmin); Tmaxs.append(self.VDI_Tmax) if has_CoolProp and self.CASRN in coolprop_dict: methods.append(COOLPROP); methods_P.append(COOLPROP) self.CP_f = coolprop_fluids[self.CASRN] Tmins.append(self.CP_f.Tmin); Tmaxs.append(self.CP_f.Tmax) if self.CASRN in Perrys2_312.index: methods.append(DIPPR_PERRY_8E) _, C1, C2, C3, C4, self.Perrys2_312_Tmin, self.Perrys2_312_Tmax = _Perrys2_312_values[Perrys2_312.index.get_loc(self.CASRN)].tolist() self.Perrys2_312_coeffs = [C1, C2, C3, C4] Tmins.append(self.Perrys2_312_Tmin); Tmaxs.append(self.Perrys2_312_Tmax) if self.CASRN in VDI_PPDS_8.index: methods.append(VDI_PPDS) self.VDI_PPDS_coeffs = _VDI_PPDS_8_values[VDI_PPDS_8.index.get_loc(self.CASRN)].tolist()[1:] self.VDI_PPDS_coeffs.reverse() # in format for horner's scheme if all([self.Tc, self.Pc, self.MW]): methods.append(GHARAGHEIZI) methods.append(YOON_THODOS) methods.append(STIEL_THODOS) Tmins.append(0); Tmaxs.append(5E3) # Intelligently set limit # GHARAGHEIZI turns nonsesical at ~15 K, YOON_THODOS fine to 0 K, # same as STIEL_THODOS if all([self.Tc, self.Pc, self.Zc, self.MW]): methods.append(LUCAS_GAS) Tmins.append(0); Tmaxs.append(1E3) 
self.all_methods = set(methods) self.all_methods_P = set(methods_P) if Tmins and Tmaxs: self.Tmin, self.Tmax = min(Tmins), max(Tmaxs)
r'''Method which picks out coefficients for the specified chemical from the various dictionaries and DataFrames storing it. All data is stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`, :obj:`all_methods` and obj:`all_methods_P` as a set of methods for which the data exists for. Called on initialization only. See the source code for the variables at which the coefficients are stored. The coefficients can safely be altered once the class is initialized. This method can be called again to reset the parameters.
Below is the the instruction that describes the task: ### Input: r'''Method which picks out coefficients for the specified chemical from the various dictionaries and DataFrames storing it. All data is stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`, :obj:`all_methods` and obj:`all_methods_P` as a set of methods for which the data exists for. Called on initialization only. See the source code for the variables at which the coefficients are stored. The coefficients can safely be altered once the class is initialized. This method can be called again to reset the parameters. ### Response: def load_all_methods(self): r'''Method which picks out coefficients for the specified chemical from the various dictionaries and DataFrames storing it. All data is stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`, :obj:`all_methods` and obj:`all_methods_P` as a set of methods for which the data exists for. Called on initialization only. See the source code for the variables at which the coefficients are stored. The coefficients can safely be altered once the class is initialized. This method can be called again to reset the parameters. 
''' methods, methods_P = [], [] Tmins, Tmaxs = [], [] if self.CASRN in _VDISaturationDict: methods.append(VDI_TABULAR) Ts, props = VDI_tabular_data(self.CASRN, 'Mu (g)') self.VDI_Tmin = Ts[0] self.VDI_Tmax = Ts[-1] self.tabular_data[VDI_TABULAR] = (Ts, props) Tmins.append(self.VDI_Tmin); Tmaxs.append(self.VDI_Tmax) if has_CoolProp and self.CASRN in coolprop_dict: methods.append(COOLPROP); methods_P.append(COOLPROP) self.CP_f = coolprop_fluids[self.CASRN] Tmins.append(self.CP_f.Tmin); Tmaxs.append(self.CP_f.Tmax) if self.CASRN in Perrys2_312.index: methods.append(DIPPR_PERRY_8E) _, C1, C2, C3, C4, self.Perrys2_312_Tmin, self.Perrys2_312_Tmax = _Perrys2_312_values[Perrys2_312.index.get_loc(self.CASRN)].tolist() self.Perrys2_312_coeffs = [C1, C2, C3, C4] Tmins.append(self.Perrys2_312_Tmin); Tmaxs.append(self.Perrys2_312_Tmax) if self.CASRN in VDI_PPDS_8.index: methods.append(VDI_PPDS) self.VDI_PPDS_coeffs = _VDI_PPDS_8_values[VDI_PPDS_8.index.get_loc(self.CASRN)].tolist()[1:] self.VDI_PPDS_coeffs.reverse() # in format for horner's scheme if all([self.Tc, self.Pc, self.MW]): methods.append(GHARAGHEIZI) methods.append(YOON_THODOS) methods.append(STIEL_THODOS) Tmins.append(0); Tmaxs.append(5E3) # Intelligently set limit # GHARAGHEIZI turns nonsesical at ~15 K, YOON_THODOS fine to 0 K, # same as STIEL_THODOS if all([self.Tc, self.Pc, self.Zc, self.MW]): methods.append(LUCAS_GAS) Tmins.append(0); Tmaxs.append(1E3) self.all_methods = set(methods) self.all_methods_P = set(methods_P) if Tmins and Tmaxs: self.Tmin, self.Tmax = min(Tmins), max(Tmaxs)
def check_part(state, name, part_msg, missing_msg=None, expand_msg=None): """Return child state with name part as its ast tree""" if missing_msg is None: missing_msg = "Are you sure you defined the {{part}}? " if expand_msg is None: expand_msg = "Did you correctly specify the {{part}}? " if not part_msg: part_msg = name append_message = {"msg": expand_msg, "kwargs": {"part": part_msg}} has_part(state, name, missing_msg, append_message["kwargs"]) stu_part = state.student_parts[name] sol_part = state.solution_parts[name] assert_ast(state, sol_part, append_message["kwargs"]) return part_to_child(stu_part, sol_part, append_message, state)
Return child state with name part as its ast tree
Below is the the instruction that describes the task: ### Input: Return child state with name part as its ast tree ### Response: def check_part(state, name, part_msg, missing_msg=None, expand_msg=None): """Return child state with name part as its ast tree""" if missing_msg is None: missing_msg = "Are you sure you defined the {{part}}? " if expand_msg is None: expand_msg = "Did you correctly specify the {{part}}? " if not part_msg: part_msg = name append_message = {"msg": expand_msg, "kwargs": {"part": part_msg}} has_part(state, name, missing_msg, append_message["kwargs"]) stu_part = state.student_parts[name] sol_part = state.solution_parts[name] assert_ast(state, sol_part, append_message["kwargs"]) return part_to_child(stu_part, sol_part, append_message, state)
def setup(app, version): """Called by Sphinx during phase 0 (initialization). :param sphinx.application.Sphinx app: Sphinx application object. :param str version: Version of sphinxcontrib-imgur. :returns: Extension metadata. :rtype: dict """ app.add_config_value('imgur_api_cache_ttl', 172800, False) app.add_config_value('imgur_client_id', None, False) app.add_config_value('imgur_hide_post_details', False, True) app.add_config_value('imgur_target_default_gallery', False, True) app.add_config_value('imgur_target_default_largest', False, True) app.add_config_value('imgur_target_default_page', False, True) app.add_directive('imgur-embed', ImgurEmbedDirective) app.add_directive('imgur-image', ImgurImageDirective) app.add_node(ImgurEmbedNode, html=(ImgurEmbedNode.visit, ImgurEmbedNode.depart)) app.add_node(ImgurImageNode, html=(ImgurImageNode.visit, ImgurImageNode.depart)) app.add_node(ImgurJavaScriptNode, html=(ImgurJavaScriptNode.visit, ImgurJavaScriptNode.depart)) app.add_role('imgur-description', imgur_role) app.add_role('imgur-title', imgur_role) app.connect('env-before-read-docs', event_before_read_docs) app.connect('doctree-read', event_doctree_read) app.connect('env-merge-info', event_env_merge_info) app.connect('env-updated', event_env_updated) app.connect('doctree-resolved', event_doctree_resolved) return dict(parallel_read_safe=True, version=version)
Called by Sphinx during phase 0 (initialization). :param sphinx.application.Sphinx app: Sphinx application object. :param str version: Version of sphinxcontrib-imgur. :returns: Extension metadata. :rtype: dict
Below is the the instruction that describes the task: ### Input: Called by Sphinx during phase 0 (initialization). :param sphinx.application.Sphinx app: Sphinx application object. :param str version: Version of sphinxcontrib-imgur. :returns: Extension metadata. :rtype: dict ### Response: def setup(app, version): """Called by Sphinx during phase 0 (initialization). :param sphinx.application.Sphinx app: Sphinx application object. :param str version: Version of sphinxcontrib-imgur. :returns: Extension metadata. :rtype: dict """ app.add_config_value('imgur_api_cache_ttl', 172800, False) app.add_config_value('imgur_client_id', None, False) app.add_config_value('imgur_hide_post_details', False, True) app.add_config_value('imgur_target_default_gallery', False, True) app.add_config_value('imgur_target_default_largest', False, True) app.add_config_value('imgur_target_default_page', False, True) app.add_directive('imgur-embed', ImgurEmbedDirective) app.add_directive('imgur-image', ImgurImageDirective) app.add_node(ImgurEmbedNode, html=(ImgurEmbedNode.visit, ImgurEmbedNode.depart)) app.add_node(ImgurImageNode, html=(ImgurImageNode.visit, ImgurImageNode.depart)) app.add_node(ImgurJavaScriptNode, html=(ImgurJavaScriptNode.visit, ImgurJavaScriptNode.depart)) app.add_role('imgur-description', imgur_role) app.add_role('imgur-title', imgur_role) app.connect('env-before-read-docs', event_before_read_docs) app.connect('doctree-read', event_doctree_read) app.connect('env-merge-info', event_env_merge_info) app.connect('env-updated', event_env_updated) app.connect('doctree-resolved', event_doctree_resolved) return dict(parallel_read_safe=True, version=version)
def forward_search_history(self, e): # (C-s) u'''Search forward starting at the current line and moving down through the the history as necessary. This is an incremental search.''' log("fwd_search_history") self._init_incremental_search(self._history.forward_search_history, e) self.finalize()
u'''Search forward starting at the current line and moving down through the the history as necessary. This is an incremental search.
Below is the the instruction that describes the task: ### Input: u'''Search forward starting at the current line and moving down through the the history as necessary. This is an incremental search. ### Response: def forward_search_history(self, e): # (C-s) u'''Search forward starting at the current line and moving down through the the history as necessary. This is an incremental search.''' log("fwd_search_history") self._init_incremental_search(self._history.forward_search_history, e) self.finalize()
def predict_subsequences(self, sequence_dict, peptide_lengths=None): """Given a dictionary mapping unique keys to amino acid sequences, run MHC binding predictions on all candidate epitopes extracted from sequences and return a EpitopeCollection. Parameters ---------- fasta_dictionary : dict or string Mapping of protein identifiers to protein amino acid sequences. If string then converted to dictionary. """ sequence_dict = check_sequence_dictionary(sequence_dict) peptide_lengths = self._check_peptide_lengths(peptide_lengths) # take each mutated sequence in the dataframe # and general MHC binding scores for all k-mer substrings binding_predictions = [] expected_peptides = set([]) normalized_alleles = [] for key, amino_acid_sequence in sequence_dict.items(): for l in peptide_lengths: for i in range(len(amino_acid_sequence) - l + 1): expected_peptides.add(amino_acid_sequence[i:i + l]) self._check_peptide_inputs(expected_peptides) for allele in self.alleles: # IEDB MHCII predictor expects DRA1 to be omitted. allele = normalize_allele_name(allele, omit_dra1=True) normalized_alleles.append(allele) request = self._get_iedb_request_params( amino_acid_sequence, allele) logger.info( "Calling IEDB (%s) with request %s", self.url, request) response_df = _query_iedb(request, self.url) for _, row in response_df.iterrows(): binding_predictions.append( BindingPrediction( source_sequence_name=key, offset=row['start'] - 1, allele=row['allele'], peptide=row['peptide'], affinity=row['ic50'], percentile_rank=row['rank'], prediction_method_name="iedb-" + self.prediction_method)) self._check_results( binding_predictions, alleles=normalized_alleles, peptides=expected_peptides) return BindingPredictionCollection(binding_predictions)
Given a dictionary mapping unique keys to amino acid sequences, run MHC binding predictions on all candidate epitopes extracted from sequences and return a EpitopeCollection. Parameters ---------- fasta_dictionary : dict or string Mapping of protein identifiers to protein amino acid sequences. If string then converted to dictionary.
Below is the the instruction that describes the task: ### Input: Given a dictionary mapping unique keys to amino acid sequences, run MHC binding predictions on all candidate epitopes extracted from sequences and return a EpitopeCollection. Parameters ---------- fasta_dictionary : dict or string Mapping of protein identifiers to protein amino acid sequences. If string then converted to dictionary. ### Response: def predict_subsequences(self, sequence_dict, peptide_lengths=None): """Given a dictionary mapping unique keys to amino acid sequences, run MHC binding predictions on all candidate epitopes extracted from sequences and return a EpitopeCollection. Parameters ---------- fasta_dictionary : dict or string Mapping of protein identifiers to protein amino acid sequences. If string then converted to dictionary. """ sequence_dict = check_sequence_dictionary(sequence_dict) peptide_lengths = self._check_peptide_lengths(peptide_lengths) # take each mutated sequence in the dataframe # and general MHC binding scores for all k-mer substrings binding_predictions = [] expected_peptides = set([]) normalized_alleles = [] for key, amino_acid_sequence in sequence_dict.items(): for l in peptide_lengths: for i in range(len(amino_acid_sequence) - l + 1): expected_peptides.add(amino_acid_sequence[i:i + l]) self._check_peptide_inputs(expected_peptides) for allele in self.alleles: # IEDB MHCII predictor expects DRA1 to be omitted. 
allele = normalize_allele_name(allele, omit_dra1=True) normalized_alleles.append(allele) request = self._get_iedb_request_params( amino_acid_sequence, allele) logger.info( "Calling IEDB (%s) with request %s", self.url, request) response_df = _query_iedb(request, self.url) for _, row in response_df.iterrows(): binding_predictions.append( BindingPrediction( source_sequence_name=key, offset=row['start'] - 1, allele=row['allele'], peptide=row['peptide'], affinity=row['ic50'], percentile_rank=row['rank'], prediction_method_name="iedb-" + self.prediction_method)) self._check_results( binding_predictions, alleles=normalized_alleles, peptides=expected_peptides) return BindingPredictionCollection(binding_predictions)
def _ready_for_het_analysis(items): """Check if a sample has input information for heterogeneity analysis. We currently require a tumor/normal sample containing both CNV and variant calls. """ paired = vcfutils.get_paired_bams([dd.get_align_bam(d) for d in items], items) has_het = any(dd.get_hetcaller(d) for d in items) if has_het and paired: return get_variants(paired.tumor_data) and _get_calls(paired.tumor_data, cnv_only=True)
Check if a sample has input information for heterogeneity analysis. We currently require a tumor/normal sample containing both CNV and variant calls.
Below is the the instruction that describes the task: ### Input: Check if a sample has input information for heterogeneity analysis. We currently require a tumor/normal sample containing both CNV and variant calls. ### Response: def _ready_for_het_analysis(items): """Check if a sample has input information for heterogeneity analysis. We currently require a tumor/normal sample containing both CNV and variant calls. """ paired = vcfutils.get_paired_bams([dd.get_align_bam(d) for d in items], items) has_het = any(dd.get_hetcaller(d) for d in items) if has_het and paired: return get_variants(paired.tumor_data) and _get_calls(paired.tumor_data, cnv_only=True)
def optimizeTransforms(element, options): """ Attempts to optimise transform specifications on the given node and its children. Returns the number of bytes saved after performing these reductions. """ num = 0 for transformAttr in ['transform', 'patternTransform', 'gradientTransform']: val = element.getAttribute(transformAttr) if val != '': transform = svg_transform_parser.parse(val) optimizeTransform(transform) newVal = serializeTransform(transform) if len(newVal) < len(val): if len(newVal): element.setAttribute(transformAttr, newVal) else: element.removeAttribute(transformAttr) num += len(val) - len(newVal) for child in element.childNodes: if child.nodeType == Node.ELEMENT_NODE: num += optimizeTransforms(child, options) return num
Attempts to optimise transform specifications on the given node and its children. Returns the number of bytes saved after performing these reductions.
Below is the the instruction that describes the task: ### Input: Attempts to optimise transform specifications on the given node and its children. Returns the number of bytes saved after performing these reductions. ### Response: def optimizeTransforms(element, options): """ Attempts to optimise transform specifications on the given node and its children. Returns the number of bytes saved after performing these reductions. """ num = 0 for transformAttr in ['transform', 'patternTransform', 'gradientTransform']: val = element.getAttribute(transformAttr) if val != '': transform = svg_transform_parser.parse(val) optimizeTransform(transform) newVal = serializeTransform(transform) if len(newVal) < len(val): if len(newVal): element.setAttribute(transformAttr, newVal) else: element.removeAttribute(transformAttr) num += len(val) - len(newVal) for child in element.childNodes: if child.nodeType == Node.ELEMENT_NODE: num += optimizeTransforms(child, options) return num
def Runtime_callFunctionOn(self, functionDeclaration, **kwargs): """ Function path: Runtime.callFunctionOn Domain: Runtime Method name: callFunctionOn Parameters: Required arguments: 'functionDeclaration' (type: string) -> Declaration of the function to call. Optional arguments: 'objectId' (type: RemoteObjectId) -> Identifier of the object to call function on. Either objectId or executionContextId should be specified. 'arguments' (type: array) -> Call arguments. All call arguments must belong to the same JavaScript world as the target object. 'silent' (type: boolean) -> In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state. 'returnByValue' (type: boolean) -> Whether the result is expected to be a JSON object which should be sent by value. 'generatePreview' (type: boolean) -> Whether preview should be generated for the result. 'userGesture' (type: boolean) -> Whether execution should be treated as initiated by user in the UI. 'awaitPromise' (type: boolean) -> Whether execution should <code>await</code> for resulting value and return once awaited promise is resolved. 'executionContextId' (type: ExecutionContextId) -> Specifies execution context which global object will be used to call function on. Either executionContextId or objectId should be specified. 'objectGroup' (type: string) -> Symbolic group name that can be used to release multiple objects. If objectGroup is not specified and objectId is, objectGroup will be inherited from object. Returns: 'result' (type: RemoteObject) -> Call result. 'exceptionDetails' (type: ExceptionDetails) -> Exception details. Description: Calls function with given declaration on the given object. Object group of the result is inherited from the target object. """ assert isinstance(functionDeclaration, (str,) ), "Argument 'functionDeclaration' must be of type '['str']'. 
Received type: '%s'" % type( functionDeclaration) if 'arguments' in kwargs: assert isinstance(kwargs['arguments'], (list, tuple) ), "Optional argument 'arguments' must be of type '['list', 'tuple']'. Received type: '%s'" % type( kwargs['arguments']) if 'silent' in kwargs: assert isinstance(kwargs['silent'], (bool,) ), "Optional argument 'silent' must be of type '['bool']'. Received type: '%s'" % type( kwargs['silent']) if 'returnByValue' in kwargs: assert isinstance(kwargs['returnByValue'], (bool,) ), "Optional argument 'returnByValue' must be of type '['bool']'. Received type: '%s'" % type( kwargs['returnByValue']) if 'generatePreview' in kwargs: assert isinstance(kwargs['generatePreview'], (bool,) ), "Optional argument 'generatePreview' must be of type '['bool']'. Received type: '%s'" % type( kwargs['generatePreview']) if 'userGesture' in kwargs: assert isinstance(kwargs['userGesture'], (bool,) ), "Optional argument 'userGesture' must be of type '['bool']'. Received type: '%s'" % type( kwargs['userGesture']) if 'awaitPromise' in kwargs: assert isinstance(kwargs['awaitPromise'], (bool,) ), "Optional argument 'awaitPromise' must be of type '['bool']'. Received type: '%s'" % type( kwargs['awaitPromise']) if 'objectGroup' in kwargs: assert isinstance(kwargs['objectGroup'], (str,) ), "Optional argument 'objectGroup' must be of type '['str']'. Received type: '%s'" % type( kwargs['objectGroup']) expected = ['objectId', 'arguments', 'silent', 'returnByValue', 'generatePreview', 'userGesture', 'awaitPromise', 'executionContextId', 'objectGroup'] passed_keys = list(kwargs.keys()) assert all([(key in expected) for key in passed_keys] ), "Allowed kwargs are ['objectId', 'arguments', 'silent', 'returnByValue', 'generatePreview', 'userGesture', 'awaitPromise', 'executionContextId', 'objectGroup']. Passed kwargs: %s" % passed_keys subdom_funcs = self.synchronous_command('Runtime.callFunctionOn', functionDeclaration=functionDeclaration, **kwargs) return subdom_funcs
Function path: Runtime.callFunctionOn Domain: Runtime Method name: callFunctionOn Parameters: Required arguments: 'functionDeclaration' (type: string) -> Declaration of the function to call. Optional arguments: 'objectId' (type: RemoteObjectId) -> Identifier of the object to call function on. Either objectId or executionContextId should be specified. 'arguments' (type: array) -> Call arguments. All call arguments must belong to the same JavaScript world as the target object. 'silent' (type: boolean) -> In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state. 'returnByValue' (type: boolean) -> Whether the result is expected to be a JSON object which should be sent by value. 'generatePreview' (type: boolean) -> Whether preview should be generated for the result. 'userGesture' (type: boolean) -> Whether execution should be treated as initiated by user in the UI. 'awaitPromise' (type: boolean) -> Whether execution should <code>await</code> for resulting value and return once awaited promise is resolved. 'executionContextId' (type: ExecutionContextId) -> Specifies execution context which global object will be used to call function on. Either executionContextId or objectId should be specified. 'objectGroup' (type: string) -> Symbolic group name that can be used to release multiple objects. If objectGroup is not specified and objectId is, objectGroup will be inherited from object. Returns: 'result' (type: RemoteObject) -> Call result. 'exceptionDetails' (type: ExceptionDetails) -> Exception details. Description: Calls function with given declaration on the given object. Object group of the result is inherited from the target object.
Below is the the instruction that describes the task: ### Input: Function path: Runtime.callFunctionOn Domain: Runtime Method name: callFunctionOn Parameters: Required arguments: 'functionDeclaration' (type: string) -> Declaration of the function to call. Optional arguments: 'objectId' (type: RemoteObjectId) -> Identifier of the object to call function on. Either objectId or executionContextId should be specified. 'arguments' (type: array) -> Call arguments. All call arguments must belong to the same JavaScript world as the target object. 'silent' (type: boolean) -> In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state. 'returnByValue' (type: boolean) -> Whether the result is expected to be a JSON object which should be sent by value. 'generatePreview' (type: boolean) -> Whether preview should be generated for the result. 'userGesture' (type: boolean) -> Whether execution should be treated as initiated by user in the UI. 'awaitPromise' (type: boolean) -> Whether execution should <code>await</code> for resulting value and return once awaited promise is resolved. 'executionContextId' (type: ExecutionContextId) -> Specifies execution context which global object will be used to call function on. Either executionContextId or objectId should be specified. 'objectGroup' (type: string) -> Symbolic group name that can be used to release multiple objects. If objectGroup is not specified and objectId is, objectGroup will be inherited from object. Returns: 'result' (type: RemoteObject) -> Call result. 'exceptionDetails' (type: ExceptionDetails) -> Exception details. Description: Calls function with given declaration on the given object. Object group of the result is inherited from the target object. 
### Response: def Runtime_callFunctionOn(self, functionDeclaration, **kwargs): """ Function path: Runtime.callFunctionOn Domain: Runtime Method name: callFunctionOn Parameters: Required arguments: 'functionDeclaration' (type: string) -> Declaration of the function to call. Optional arguments: 'objectId' (type: RemoteObjectId) -> Identifier of the object to call function on. Either objectId or executionContextId should be specified. 'arguments' (type: array) -> Call arguments. All call arguments must belong to the same JavaScript world as the target object. 'silent' (type: boolean) -> In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state. 'returnByValue' (type: boolean) -> Whether the result is expected to be a JSON object which should be sent by value. 'generatePreview' (type: boolean) -> Whether preview should be generated for the result. 'userGesture' (type: boolean) -> Whether execution should be treated as initiated by user in the UI. 'awaitPromise' (type: boolean) -> Whether execution should <code>await</code> for resulting value and return once awaited promise is resolved. 'executionContextId' (type: ExecutionContextId) -> Specifies execution context which global object will be used to call function on. Either executionContextId or objectId should be specified. 'objectGroup' (type: string) -> Symbolic group name that can be used to release multiple objects. If objectGroup is not specified and objectId is, objectGroup will be inherited from object. Returns: 'result' (type: RemoteObject) -> Call result. 'exceptionDetails' (type: ExceptionDetails) -> Exception details. Description: Calls function with given declaration on the given object. Object group of the result is inherited from the target object. """ assert isinstance(functionDeclaration, (str,) ), "Argument 'functionDeclaration' must be of type '['str']'. 
Received type: '%s'" % type( functionDeclaration) if 'arguments' in kwargs: assert isinstance(kwargs['arguments'], (list, tuple) ), "Optional argument 'arguments' must be of type '['list', 'tuple']'. Received type: '%s'" % type( kwargs['arguments']) if 'silent' in kwargs: assert isinstance(kwargs['silent'], (bool,) ), "Optional argument 'silent' must be of type '['bool']'. Received type: '%s'" % type( kwargs['silent']) if 'returnByValue' in kwargs: assert isinstance(kwargs['returnByValue'], (bool,) ), "Optional argument 'returnByValue' must be of type '['bool']'. Received type: '%s'" % type( kwargs['returnByValue']) if 'generatePreview' in kwargs: assert isinstance(kwargs['generatePreview'], (bool,) ), "Optional argument 'generatePreview' must be of type '['bool']'. Received type: '%s'" % type( kwargs['generatePreview']) if 'userGesture' in kwargs: assert isinstance(kwargs['userGesture'], (bool,) ), "Optional argument 'userGesture' must be of type '['bool']'. Received type: '%s'" % type( kwargs['userGesture']) if 'awaitPromise' in kwargs: assert isinstance(kwargs['awaitPromise'], (bool,) ), "Optional argument 'awaitPromise' must be of type '['bool']'. Received type: '%s'" % type( kwargs['awaitPromise']) if 'objectGroup' in kwargs: assert isinstance(kwargs['objectGroup'], (str,) ), "Optional argument 'objectGroup' must be of type '['str']'. Received type: '%s'" % type( kwargs['objectGroup']) expected = ['objectId', 'arguments', 'silent', 'returnByValue', 'generatePreview', 'userGesture', 'awaitPromise', 'executionContextId', 'objectGroup'] passed_keys = list(kwargs.keys()) assert all([(key in expected) for key in passed_keys] ), "Allowed kwargs are ['objectId', 'arguments', 'silent', 'returnByValue', 'generatePreview', 'userGesture', 'awaitPromise', 'executionContextId', 'objectGroup']. Passed kwargs: %s" % passed_keys subdom_funcs = self.synchronous_command('Runtime.callFunctionOn', functionDeclaration=functionDeclaration, **kwargs) return subdom_funcs
def sflow_profile_profile_sampling_rate(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") sflow_profile = ET.SubElement(config, "sflow-profile", xmlns="urn:brocade.com:mgmt:brocade-sflow") profile_name_key = ET.SubElement(sflow_profile, "profile-name") profile_name_key.text = kwargs.pop('profile_name') profile_sampling_rate = ET.SubElement(sflow_profile, "profile-sampling-rate") profile_sampling_rate.text = kwargs.pop('profile_sampling_rate') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
Below is the the instruction that describes the task: ### Input: Auto Generated Code ### Response: def sflow_profile_profile_sampling_rate(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") sflow_profile = ET.SubElement(config, "sflow-profile", xmlns="urn:brocade.com:mgmt:brocade-sflow") profile_name_key = ET.SubElement(sflow_profile, "profile-name") profile_name_key.text = kwargs.pop('profile_name') profile_sampling_rate = ET.SubElement(sflow_profile, "profile-sampling-rate") profile_sampling_rate.text = kwargs.pop('profile_sampling_rate') callback = kwargs.pop('callback', self._callback) return callback(config)
def treewalk(self, root, parsed, unparsed): """ Recursively walks the syntax tree at root and returns the items parsed, unparsed and possible suggestions """ suggestions = dict() if not unparsed: logger.debug("no tokens left unparsed. returning %s, %s", parsed, suggestions) return parsed, unparsed, suggestions token = unparsed.pop().strip() logger.debug("begin parsing at %s w/ tokens: %s", root.node, unparsed) if root.node == token: logger.debug("root node: %s matches next token:%s", root.node, token) parsed.append(token) if self.peekForOption(unparsed): # check for localFlags and globalFlags logger.debug("option(s) upcoming %s", unparsed) parsed_opts, unparsed, suggestions = self.evalOptions(root, list(), unparsed[:]) if parsed_opts: logger.debug("parsed option(s): %s", parsed_opts) parsed.extend(parsed_opts) if unparsed and not self.peekForOption(unparsed): # unparsed bits without options logger.debug("begin subtree %s parsing", root.node) for child in root.children: parsed_subtree, unparsed, suggestions = self.treewalk(child, list(), unparsed[:]) if parsed_subtree: # subtree returned further parsed tokens parsed.extend(parsed_subtree) logger.debug("subtree at: %s has matches. %s, %s", child.node, parsed, unparsed) break else: # no matches found in command tree # return children of root as suggestions logger.debug("no matches in subtree: %s. returning children as suggestions", root.node) for child in root.children: suggestions[child.node] = child.help else: logger.debug("no token or option match") unparsed.append(token) return parsed, unparsed, suggestions
Recursively walks the syntax tree at root and returns the items parsed, unparsed and possible suggestions
Below is the the instruction that describes the task: ### Input: Recursively walks the syntax tree at root and returns the items parsed, unparsed and possible suggestions ### Response: def treewalk(self, root, parsed, unparsed): """ Recursively walks the syntax tree at root and returns the items parsed, unparsed and possible suggestions """ suggestions = dict() if not unparsed: logger.debug("no tokens left unparsed. returning %s, %s", parsed, suggestions) return parsed, unparsed, suggestions token = unparsed.pop().strip() logger.debug("begin parsing at %s w/ tokens: %s", root.node, unparsed) if root.node == token: logger.debug("root node: %s matches next token:%s", root.node, token) parsed.append(token) if self.peekForOption(unparsed): # check for localFlags and globalFlags logger.debug("option(s) upcoming %s", unparsed) parsed_opts, unparsed, suggestions = self.evalOptions(root, list(), unparsed[:]) if parsed_opts: logger.debug("parsed option(s): %s", parsed_opts) parsed.extend(parsed_opts) if unparsed and not self.peekForOption(unparsed): # unparsed bits without options logger.debug("begin subtree %s parsing", root.node) for child in root.children: parsed_subtree, unparsed, suggestions = self.treewalk(child, list(), unparsed[:]) if parsed_subtree: # subtree returned further parsed tokens parsed.extend(parsed_subtree) logger.debug("subtree at: %s has matches. %s, %s", child.node, parsed, unparsed) break else: # no matches found in command tree # return children of root as suggestions logger.debug("no matches in subtree: %s. returning children as suggestions", root.node) for child in root.children: suggestions[child.node] = child.help else: logger.debug("no token or option match") unparsed.append(token) return parsed, unparsed, suggestions
def true_num_reactions(model, custom_spont_id=None): """Return the number of reactions associated with a gene. Args: model (Model): custom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001`` Returns: int: Number of reactions associated with a gene """ true_num = 0 for rxn in model.reactions: if len(rxn.genes) == 0: continue if len(rxn.genes) == 1 and is_spontaneous(list(rxn.genes)[0], custom_id=custom_spont_id): continue else: true_num += 1 return true_num
Return the number of reactions associated with a gene. Args: model (Model): custom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001`` Returns: int: Number of reactions associated with a gene
Below is the the instruction that describes the task: ### Input: Return the number of reactions associated with a gene. Args: model (Model): custom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001`` Returns: int: Number of reactions associated with a gene ### Response: def true_num_reactions(model, custom_spont_id=None): """Return the number of reactions associated with a gene. Args: model (Model): custom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001`` Returns: int: Number of reactions associated with a gene """ true_num = 0 for rxn in model.reactions: if len(rxn.genes) == 0: continue if len(rxn.genes) == 1 and is_spontaneous(list(rxn.genes)[0], custom_id=custom_spont_id): continue else: true_num += 1 return true_num
def is_valid(self): '''checks if type is a valid type''' return (self.is_primitive and self.name) \ or (self.is_complex and self.name) \ or (self.is_list and self.nested) \ or (self.is_map and self.nested) \ or (self.is_model and self.nested)
checks if type is a valid type
Below is the the instruction that describes the task: ### Input: checks if type is a valid type ### Response: def is_valid(self): '''checks if type is a valid type''' return (self.is_primitive and self.name) \ or (self.is_complex and self.name) \ or (self.is_list and self.nested) \ or (self.is_map and self.nested) \ or (self.is_model and self.nested)
def _forward_outbound(self, channel): """ Forward outbound traffic (ssh -> websockets) """ try: while True: wait_read(channel.fileno()) data = channel.recv(1024) if not len(data): return self._websocket.send(json.dumps({'data': data})) finally: self.close()
Forward outbound traffic (ssh -> websockets)
Below is the the instruction that describes the task: ### Input: Forward outbound traffic (ssh -> websockets) ### Response: def _forward_outbound(self, channel): """ Forward outbound traffic (ssh -> websockets) """ try: while True: wait_read(channel.fileno()) data = channel.recv(1024) if not len(data): return self._websocket.send(json.dumps({'data': data})) finally: self.close()
def endpoint_deactivate(endpoint_id): """ Executor for `globus endpoint deactivate` """ client = get_client() res = client.endpoint_deactivate(endpoint_id) formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
Executor for `globus endpoint deactivate`
Below is the the instruction that describes the task: ### Input: Executor for `globus endpoint deactivate` ### Response: def endpoint_deactivate(endpoint_id): """ Executor for `globus endpoint deactivate` """ client = get_client() res = client.endpoint_deactivate(endpoint_id) formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
def _set_radius_server(self, v, load=False): """ Setter method for radius_server, mapped from YANG variable /radius_server (container) If this variable is read-only (config: false) in the source YANG file, then _set_radius_server is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_radius_server() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=radius_server.radius_server, is_container='container', presence=False, yang_name="radius-server", rest_name="radius-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'RADIUS server configuration', u'cli-incomplete-no': None, u'sort-priority': u'10'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """radius_server must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=radius_server.radius_server, is_container='container', presence=False, yang_name="radius-server", rest_name="radius-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'RADIUS server configuration', u'cli-incomplete-no': None, u'sort-priority': u'10'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)""", }) self.__radius_server = t if hasattr(self, '_set'): self._set()
Setter method for radius_server, mapped from YANG variable /radius_server (container) If this variable is read-only (config: false) in the source YANG file, then _set_radius_server is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_radius_server() directly.
Below is the the instruction that describes the task: ### Input: Setter method for radius_server, mapped from YANG variable /radius_server (container) If this variable is read-only (config: false) in the source YANG file, then _set_radius_server is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_radius_server() directly. ### Response: def _set_radius_server(self, v, load=False): """ Setter method for radius_server, mapped from YANG variable /radius_server (container) If this variable is read-only (config: false) in the source YANG file, then _set_radius_server is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_radius_server() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=radius_server.radius_server, is_container='container', presence=False, yang_name="radius-server", rest_name="radius-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'RADIUS server configuration', u'cli-incomplete-no': None, u'sort-priority': u'10'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """radius_server must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=radius_server.radius_server, is_container='container', presence=False, yang_name="radius-server", rest_name="radius-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'RADIUS server configuration', u'cli-incomplete-no': None, u'sort-priority': u'10'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)""", }) self.__radius_server = 
t if hasattr(self, '_set'): self._set()
def process_files(): """ Use Manager and Counter as context managers """ with enlighten.Manager() as manager: with manager.counter(total=SPLINES, desc='Reticulating:', unit='splines') as retic: for num in range(SPLINES): # pylint: disable=unused-variable time.sleep(random.uniform(0.1, 0.5)) # Random processing time retic.update() with manager.counter(total=LLAMAS, desc='Herding:', unit='llamas') as herd: for num in range(SPLINES): # pylint: disable=unused-variable time.sleep(random.uniform(0.1, 0.5)) # Random processing time herd.update()
Use Manager and Counter as context managers
Below is the the instruction that describes the task: ### Input: Use Manager and Counter as context managers ### Response: def process_files(): """ Use Manager and Counter as context managers """ with enlighten.Manager() as manager: with manager.counter(total=SPLINES, desc='Reticulating:', unit='splines') as retic: for num in range(SPLINES): # pylint: disable=unused-variable time.sleep(random.uniform(0.1, 0.5)) # Random processing time retic.update() with manager.counter(total=LLAMAS, desc='Herding:', unit='llamas') as herd: for num in range(SPLINES): # pylint: disable=unused-variable time.sleep(random.uniform(0.1, 0.5)) # Random processing time herd.update()
def writeline(self, line): """print a line of python, indenting it according to the current indent level. this also adjusts the indentation counter according to the content of the line. """ if not self.in_indent_lines: self._flush_adjusted_lines() self.in_indent_lines = True if (line is None or re.match(r"^\s*#",line) or re.match(r"^\s*$", line) ): hastext = False else: hastext = True is_comment = line and len(line) and line[0] == '#' # see if this line should decrease the indentation level if (not is_comment and (not hastext or self._is_unindentor(line)) ): if self.indent > 0: self.indent -= 1 # if the indent_detail stack is empty, the user # probably put extra closures - the resulting # module wont compile. if len(self.indent_detail) == 0: raise exceptions.SyntaxException( "Too many whitespace closures") self.indent_detail.pop() if line is None: return # write the line self.stream.write(self._indent_line(line) + "\n") self._update_lineno(len(line.split("\n"))) # see if this line should increase the indentation level. # note that a line can both decrase (before printing) and # then increase (after printing) the indentation level. if re.search(r":[ \t]*(?:#.*)?$", line): # increment indentation count, and also # keep track of what the keyword was that indented us, # if it is a python compound statement keyword # where we might have to look for an "unindent" keyword match = re.match(r"^\s*(if|try|elif|while|for|with)", line) if match: # its a "compound" keyword, so we will check for "unindentors" indentor = match.group(1) self.indent += 1 self.indent_detail.append(indentor) else: indentor = None # its not a "compound" keyword. but lets also # test for valid Python keywords that might be indenting us, # else assume its a non-indenting line m2 = re.match(r"^\s*(def|class|else|elif|except|finally)", line) if m2: self.indent += 1 self.indent_detail.append(indentor)
print a line of python, indenting it according to the current indent level. this also adjusts the indentation counter according to the content of the line.
Below is the the instruction that describes the task: ### Input: print a line of python, indenting it according to the current indent level. this also adjusts the indentation counter according to the content of the line. ### Response: def writeline(self, line): """print a line of python, indenting it according to the current indent level. this also adjusts the indentation counter according to the content of the line. """ if not self.in_indent_lines: self._flush_adjusted_lines() self.in_indent_lines = True if (line is None or re.match(r"^\s*#",line) or re.match(r"^\s*$", line) ): hastext = False else: hastext = True is_comment = line and len(line) and line[0] == '#' # see if this line should decrease the indentation level if (not is_comment and (not hastext or self._is_unindentor(line)) ): if self.indent > 0: self.indent -= 1 # if the indent_detail stack is empty, the user # probably put extra closures - the resulting # module wont compile. if len(self.indent_detail) == 0: raise exceptions.SyntaxException( "Too many whitespace closures") self.indent_detail.pop() if line is None: return # write the line self.stream.write(self._indent_line(line) + "\n") self._update_lineno(len(line.split("\n"))) # see if this line should increase the indentation level. # note that a line can both decrase (before printing) and # then increase (after printing) the indentation level. if re.search(r":[ \t]*(?:#.*)?$", line): # increment indentation count, and also # keep track of what the keyword was that indented us, # if it is a python compound statement keyword # where we might have to look for an "unindent" keyword match = re.match(r"^\s*(if|try|elif|while|for|with)", line) if match: # its a "compound" keyword, so we will check for "unindentors" indentor = match.group(1) self.indent += 1 self.indent_detail.append(indentor) else: indentor = None # its not a "compound" keyword. 
but lets also # test for valid Python keywords that might be indenting us, # else assume its a non-indenting line m2 = re.match(r"^\s*(def|class|else|elif|except|finally)", line) if m2: self.indent += 1 self.indent_detail.append(indentor)
def make_transient(std, DMmax, Amin=6., Amax=20., rmax=20., rmin=0., DMmin=0.): """ Produce a mock transient pulse source for the purposes of characterizing the detection success of the current pipeline. Assumes - Code to inject the transients does so by inserting at an array index - Noise level at the center of the data array is characteristic of the noise level throughout Input std - noise level in visibilities(?) at mid-point of segment DMmax - maximum DM at which mock transient can be inserted [pc/cm^3] Amin/Amax is amplitude in units of the std (calculated below) rmax/rmin is radius range in arcmin DMmin is min DM Returns loff - direction cosine offset of mock transient from phase center [radians] moff - direction cosine offset of mock transient from phase center [radians] A - amplitude of transient [std units] DM - dispersion measure of mock transient [pc/cm^3] """ rad_arcmin = math.pi/(180*60) phimin = 0.0 phimax = 2*math.pi # Amplitude of transient, done in units of the std # std is calculated assuming that noise level in the middle of the data, # at index d['readints']/2, is characteristic of that throughout the data A = random.uniform(Amin, Amax) * std # Position of transient, in direction cosines r = random.uniform(rmin, rmax) phi = random.uniform(phimin, phimax) loff = r*math.cos(phi) * rad_arcmin moff = r*math.sin(phi) * rad_arcmin # Dispersion measure DM = random.uniform(DMmin, DMmax) return loff, moff, A, DM
Produce a mock transient pulse source for the purposes of characterizing the detection success of the current pipeline. Assumes - Code to inject the transients does so by inserting at an array index - Noise level at the center of the data array is characteristic of the noise level throughout Input std - noise level in visibilities(?) at mid-point of segment DMmax - maximum DM at which mock transient can be inserted [pc/cm^3] Amin/Amax is amplitude in units of the std (calculated below) rmax/rmin is radius range in arcmin DMmin is min DM Returns loff - direction cosine offset of mock transient from phase center [radians] moff - direction cosine offset of mock transient from phase center [radians] A - amplitude of transient [std units] DM - dispersion measure of mock transient [pc/cm^3]
Below is the the instruction that describes the task: ### Input: Produce a mock transient pulse source for the purposes of characterizing the detection success of the current pipeline. Assumes - Code to inject the transients does so by inserting at an array index - Noise level at the center of the data array is characteristic of the noise level throughout Input std - noise level in visibilities(?) at mid-point of segment DMmax - maximum DM at which mock transient can be inserted [pc/cm^3] Amin/Amax is amplitude in units of the std (calculated below) rmax/rmin is radius range in arcmin DMmin is min DM Returns loff - direction cosine offset of mock transient from phase center [radians] moff - direction cosine offset of mock transient from phase center [radians] A - amplitude of transient [std units] DM - dispersion measure of mock transient [pc/cm^3] ### Response: def make_transient(std, DMmax, Amin=6., Amax=20., rmax=20., rmin=0., DMmin=0.): """ Produce a mock transient pulse source for the purposes of characterizing the detection success of the current pipeline. Assumes - Code to inject the transients does so by inserting at an array index - Noise level at the center of the data array is characteristic of the noise level throughout Input std - noise level in visibilities(?) 
at mid-point of segment DMmax - maximum DM at which mock transient can be inserted [pc/cm^3] Amin/Amax is amplitude in units of the std (calculated below) rmax/rmin is radius range in arcmin DMmin is min DM Returns loff - direction cosine offset of mock transient from phase center [radians] moff - direction cosine offset of mock transient from phase center [radians] A - amplitude of transient [std units] DM - dispersion measure of mock transient [pc/cm^3] """ rad_arcmin = math.pi/(180*60) phimin = 0.0 phimax = 2*math.pi # Amplitude of transient, done in units of the std # std is calculated assuming that noise level in the middle of the data, # at index d['readints']/2, is characteristic of that throughout the data A = random.uniform(Amin, Amax) * std # Position of transient, in direction cosines r = random.uniform(rmin, rmax) phi = random.uniform(phimin, phimax) loff = r*math.cos(phi) * rad_arcmin moff = r*math.sin(phi) * rad_arcmin # Dispersion measure DM = random.uniform(DMmin, DMmax) return loff, moff, A, DM
def bisine_wahwah_wave(frequency): """Emit two sine waves with balance oscillating left and right.""" # # This is clearly intended to build on the bisine wave defined above, # so we can start by generating that. waves_a = bisine_wave(frequency) # # Then, by reversing axis 2, we swap the stereo channels. By mixing # this with `waves_a`, we'll be able to create the desired effect. waves_b = tf.reverse(waves_a, axis=[2]) # # Let's have the balance oscillate from left to right four times. iterations = 4 # # Now, we compute the balance for each sample: `ts` has values # in [0, 1] that indicate how much we should use `waves_a`. xs = tf.reshape(tf.range(_samples(), dtype=tf.float32), [1, _samples(), 1]) thetas = xs / _samples() * iterations ts = (tf.sin(math.pi * 2 * thetas) + 1) / 2 # # Finally, we can mix the two together, and we're done. wave = ts * waves_a + (1.0 - ts) * waves_b # # Alternately, we can make the effect more pronounced by exaggerating # the sample data. Let's emit both variations. exaggerated_wave = wave ** 3.0 return tf.concat([wave, exaggerated_wave], axis=0)
Emit two sine waves with balance oscillating left and right.
Below is the the instruction that describes the task: ### Input: Emit two sine waves with balance oscillating left and right. ### Response: def bisine_wahwah_wave(frequency): """Emit two sine waves with balance oscillating left and right.""" # # This is clearly intended to build on the bisine wave defined above, # so we can start by generating that. waves_a = bisine_wave(frequency) # # Then, by reversing axis 2, we swap the stereo channels. By mixing # this with `waves_a`, we'll be able to create the desired effect. waves_b = tf.reverse(waves_a, axis=[2]) # # Let's have the balance oscillate from left to right four times. iterations = 4 # # Now, we compute the balance for each sample: `ts` has values # in [0, 1] that indicate how much we should use `waves_a`. xs = tf.reshape(tf.range(_samples(), dtype=tf.float32), [1, _samples(), 1]) thetas = xs / _samples() * iterations ts = (tf.sin(math.pi * 2 * thetas) + 1) / 2 # # Finally, we can mix the two together, and we're done. wave = ts * waves_a + (1.0 - ts) * waves_b # # Alternately, we can make the effect more pronounced by exaggerating # the sample data. Let's emit both variations. exaggerated_wave = wave ** 3.0 return tf.concat([wave, exaggerated_wave], axis=0)
def create_avg_rsp(rsp_fn, skydir, ltc, event_class, event_types, x, egy, cth_bins, npts=None): """Calculate the weighted response function. """ if npts is None: npts = int(np.ceil(np.max(cth_bins[1:] - cth_bins[:-1]) / 0.05)) wrsp = np.zeros((len(x), len(egy), len(cth_bins) - 1)) exps = np.zeros((len(egy), len(cth_bins) - 1)) cth_bins = utils.split_bin_edges(cth_bins, npts) cth = edge_to_center(cth_bins) ltw = ltc.get_skydir_lthist(skydir, cth_bins) ltw = ltw.reshape(-1, npts) for et in event_types: rsp = rsp_fn(event_class, et, x, egy, cth) aeff = create_aeff(event_class, et, egy, cth) rsp = rsp.reshape(wrsp.shape + (npts,)) aeff = aeff.reshape(exps.shape + (npts,)) wrsp += np.sum(rsp * aeff[np.newaxis, :, :, :] * ltw[np.newaxis, np.newaxis, :, :], axis=-1) exps += np.sum(aeff * ltw[np.newaxis, :, :], axis=-1) exps_inv = np.zeros_like(exps) exps_inv[exps > 0] = 1./exps[exps>0] wrsp *= exps_inv[np.newaxis, :, :] return wrsp
Calculate the weighted response function.
Below is the the instruction that describes the task: ### Input: Calculate the weighted response function. ### Response: def create_avg_rsp(rsp_fn, skydir, ltc, event_class, event_types, x, egy, cth_bins, npts=None): """Calculate the weighted response function. """ if npts is None: npts = int(np.ceil(np.max(cth_bins[1:] - cth_bins[:-1]) / 0.05)) wrsp = np.zeros((len(x), len(egy), len(cth_bins) - 1)) exps = np.zeros((len(egy), len(cth_bins) - 1)) cth_bins = utils.split_bin_edges(cth_bins, npts) cth = edge_to_center(cth_bins) ltw = ltc.get_skydir_lthist(skydir, cth_bins) ltw = ltw.reshape(-1, npts) for et in event_types: rsp = rsp_fn(event_class, et, x, egy, cth) aeff = create_aeff(event_class, et, egy, cth) rsp = rsp.reshape(wrsp.shape + (npts,)) aeff = aeff.reshape(exps.shape + (npts,)) wrsp += np.sum(rsp * aeff[np.newaxis, :, :, :] * ltw[np.newaxis, np.newaxis, :, :], axis=-1) exps += np.sum(aeff * ltw[np.newaxis, :, :], axis=-1) exps_inv = np.zeros_like(exps) exps_inv[exps > 0] = 1./exps[exps>0] wrsp *= exps_inv[np.newaxis, :, :] return wrsp
def get_ajax_xeditable_choices(self, request, *args, **kwargs): """ AJAX GET handler for xeditable queries asking for field choice lists. """ field_name = request.GET.get(self.xeditable_fieldname_param) if not field_name: return HttpResponseBadRequest("Field name must be given") queryset = self.get_queryset() if not self.model: self.model = queryset.model # Sanitize the requested field name by limiting valid names to the datatable_options columns from datatableview.views import legacy if isinstance(self, legacy.LegacyDatatableMixin): columns = self._get_datatable_options()['columns'] for name in columns: if isinstance(name, (list, tuple)): name = name[1] if name == field_name: break else: return HttpResponseBadRequest("Invalid field name") else: datatable = self.get_datatable() if not hasattr(datatable, 'config'): datatable.configure() if field_name not in datatable.config['columns']: return HttpResponseBadRequest("Invalid field name") field = self.model._meta.get_field(field_name) choices = self.get_field_choices(field, field_name) return HttpResponse(json.dumps(choices))
AJAX GET handler for xeditable queries asking for field choice lists.
Below is the the instruction that describes the task: ### Input: AJAX GET handler for xeditable queries asking for field choice lists. ### Response: def get_ajax_xeditable_choices(self, request, *args, **kwargs): """ AJAX GET handler for xeditable queries asking for field choice lists. """ field_name = request.GET.get(self.xeditable_fieldname_param) if not field_name: return HttpResponseBadRequest("Field name must be given") queryset = self.get_queryset() if not self.model: self.model = queryset.model # Sanitize the requested field name by limiting valid names to the datatable_options columns from datatableview.views import legacy if isinstance(self, legacy.LegacyDatatableMixin): columns = self._get_datatable_options()['columns'] for name in columns: if isinstance(name, (list, tuple)): name = name[1] if name == field_name: break else: return HttpResponseBadRequest("Invalid field name") else: datatable = self.get_datatable() if not hasattr(datatable, 'config'): datatable.configure() if field_name not in datatable.config['columns']: return HttpResponseBadRequest("Invalid field name") field = self.model._meta.get_field(field_name) choices = self.get_field_choices(field, field_name) return HttpResponse(json.dumps(choices))
def process_rr(data, record_type, record_keys, field, template): """ Meta method: Replace $field in template with the serialized $record_type records, using @record_key from each datum. """ if data is None: return template.replace(field, "") if type(record_keys) == list: pass elif type(record_keys) == str: record_keys = [record_keys] else: raise ValueError("Invalid record keys") assert type(data) == list, "Data must be a list" record = "" for i in xrange(0, len(data)): for record_key in record_keys: assert record_key in data[i].keys(), "Missing '%s'" % record_key record_data = [] record_data.append( str(data[i].get('name', '@')) ) if data[i].get('ttl') is not None: record_data.append( str(data[i]['ttl']) ) record_data.append(record_type) record_data += [str(data[i][record_key]) for record_key in record_keys] record += " ".join(record_data) + "\n" return template.replace(field, record)
Meta method: Replace $field in template with the serialized $record_type records, using @record_key from each datum.
Below is the the instruction that describes the task: ### Input: Meta method: Replace $field in template with the serialized $record_type records, using @record_key from each datum. ### Response: def process_rr(data, record_type, record_keys, field, template): """ Meta method: Replace $field in template with the serialized $record_type records, using @record_key from each datum. """ if data is None: return template.replace(field, "") if type(record_keys) == list: pass elif type(record_keys) == str: record_keys = [record_keys] else: raise ValueError("Invalid record keys") assert type(data) == list, "Data must be a list" record = "" for i in xrange(0, len(data)): for record_key in record_keys: assert record_key in data[i].keys(), "Missing '%s'" % record_key record_data = [] record_data.append( str(data[i].get('name', '@')) ) if data[i].get('ttl') is not None: record_data.append( str(data[i]['ttl']) ) record_data.append(record_type) record_data += [str(data[i][record_key]) for record_key in record_keys] record += " ".join(record_data) + "\n" return template.replace(field, record)
def get(query, *args, **kwargs): """Get action definitions to dump.""" run_sql = _get_run_sql() actions = [ dict(id=row[0], name=row[1], allowedkeywords=row[2], optional=row[3]) for action in query.split(',') for row in run_sql( 'select id, name, description, allowedkeywords, optional ' 'from accACTION where name like %s', (action, ), run_on_slave=True) ] return len(actions), actions
Get action definitions to dump.
Below is the the instruction that describes the task: ### Input: Get action definitions to dump. ### Response: def get(query, *args, **kwargs): """Get action definitions to dump.""" run_sql = _get_run_sql() actions = [ dict(id=row[0], name=row[1], allowedkeywords=row[2], optional=row[3]) for action in query.split(',') for row in run_sql( 'select id, name, description, allowedkeywords, optional ' 'from accACTION where name like %s', (action, ), run_on_slave=True) ] return len(actions), actions
def add_review(self, reviewer, product, review, date=None): """Add a new review from a given reviewer to a given product. Args: reviewer: an instance of Reviewer. product: an instance of Product. review: a float value. date: date the review issued. Returns: the added new review object. Raises: TypeError: when given reviewer and product aren't instance of specified reviewer and product class when this graph is constructed. """ if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) elif not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) r = self._review_cls(review, date=date) self.graph.add_edge(reviewer, product, review=r) return r
Add a new review from a given reviewer to a given product. Args: reviewer: an instance of Reviewer. product: an instance of Product. review: a float value. date: date the review issued. Returns: the added new review object. Raises: TypeError: when given reviewer and product aren't instance of specified reviewer and product class when this graph is constructed.
Below is the the instruction that describes the task: ### Input: Add a new review from a given reviewer to a given product. Args: reviewer: an instance of Reviewer. product: an instance of Product. review: a float value. date: date the review issued. Returns: the added new review object. Raises: TypeError: when given reviewer and product aren't instance of specified reviewer and product class when this graph is constructed. ### Response: def add_review(self, reviewer, product, review, date=None): """Add a new review from a given reviewer to a given product. Args: reviewer: an instance of Reviewer. product: an instance of Product. review: a float value. date: date the review issued. Returns: the added new review object. Raises: TypeError: when given reviewer and product aren't instance of specified reviewer and product class when this graph is constructed. """ if not isinstance(reviewer, self._reviewer_cls): raise TypeError( "Type of given reviewer isn't acceptable:", reviewer, ", expected:", self._reviewer_cls) elif not isinstance(product, self._product_cls): raise TypeError( "Type of given product isn't acceptable:", product, ", expected:", self._product_cls) r = self._review_cls(review, date=date) self.graph.add_edge(reviewer, product, review=r) return r
def salt_ssh(): ''' Execute the salt-ssh system ''' import salt.cli.ssh if '' in sys.path: sys.path.remove('') try: client = salt.cli.ssh.SaltSSH() _install_signal_handlers(client) client.run() except SaltClientError as err: trace = traceback.format_exc() try: hardcrash = client.options.hard_crash except (AttributeError, KeyError): hardcrash = False _handle_interrupt( SystemExit(err), err, hardcrash, trace=trace)
Execute the salt-ssh system
Below is the instruction that describes the task: ### Input: Execute the salt-ssh system ### Response: def salt_ssh(): ''' Execute the salt-ssh system ''' import salt.cli.ssh if '' in sys.path: sys.path.remove('') try: client = salt.cli.ssh.SaltSSH() _install_signal_handlers(client) client.run() except SaltClientError as err: trace = traceback.format_exc() try: hardcrash = client.options.hard_crash except (AttributeError, KeyError): hardcrash = False _handle_interrupt( SystemExit(err), err, hardcrash, trace=trace)
def quick_response(self, status_code): """ Quickly construct response using a status code """ translator = Translator(environ=self.environ) if status_code == 404: self.status(404) self.message(translator.trans('http_messages.404')) elif status_code == 401: self.status(401) self.message(translator.trans('http_messages.401')) elif status_code == 400: self.status(400) self.message(translator.trans('http_messages.400')) elif status_code == 200: self.status(200) self.message(translator.trans('http_messages.200'))
Quickly construct response using a status code
Below is the the instruction that describes the task: ### Input: Quickly construct response using a status code ### Response: def quick_response(self, status_code): """ Quickly construct response using a status code """ translator = Translator(environ=self.environ) if status_code == 404: self.status(404) self.message(translator.trans('http_messages.404')) elif status_code == 401: self.status(401) self.message(translator.trans('http_messages.401')) elif status_code == 400: self.status(400) self.message(translator.trans('http_messages.400')) elif status_code == 200: self.status(200) self.message(translator.trans('http_messages.200'))
def get_current_user(self, objectmask=None): """Calls SoftLayer_Account::getCurrentUser""" if objectmask is None: objectmask = "mask[userStatus[name], parent[id, username]]" return self.account_service.getCurrentUser(mask=objectmask)
Calls SoftLayer_Account::getCurrentUser
Below is the instruction that describes the task: ### Input: Calls SoftLayer_Account::getCurrentUser ### Response: def get_current_user(self, objectmask=None): """Calls SoftLayer_Account::getCurrentUser""" if objectmask is None: objectmask = "mask[userStatus[name], parent[id, username]]" return self.account_service.getCurrentUser(mask=objectmask)
def _get_key_redis_key(bank, key): ''' Return the Redis key given the bank name and the key name. ''' opts = _get_redis_keys_opts() return '{prefix}{separator}{bank}/{key}'.format( prefix=opts['key_prefix'], separator=opts['separator'], bank=bank, key=key )
Return the Redis key given the bank name and the key name.
Below is the instruction that describes the task: ### Input: Return the Redis key given the bank name and the key name. ### Response: def _get_key_redis_key(bank, key): ''' Return the Redis key given the bank name and the key name. ''' opts = _get_redis_keys_opts() return '{prefix}{separator}{bank}/{key}'.format( prefix=opts['key_prefix'], separator=opts['separator'], bank=bank, key=key )
def upload(df, gfile="/New Spreadsheet", wks_name=None, col_names=True, row_names=True, clean=True, credentials=None, start_cell = 'A1', df_size = False, new_sheet_dimensions = (1000,100)): ''' Upload given Pandas DataFrame to Google Drive and returns gspread Worksheet object :param df: Pandas DataFrame :param gfile: path to Google Spreadsheet or gspread ID :param wks_name: worksheet name :param col_names: passing top row to column names for Pandas DataFrame :param row_names: passing left column to row names for Pandas DataFrame :param clean: clean all data in worksheet before uploading :param credentials: provide own credentials :param start_cell: specify where to insert the DataFrame; default is A1 :param df_size: -If True and worksheet name does NOT exist, will create a new worksheet that is the size of the df; otherwise, by default, creates sheet of 1000x100 cells. -If True and worksheet does exist, will resize larger or smaller to fit new dataframe. -If False and dataframe is larger than existing sheet, will resize the sheet larger. -If False and dataframe is smaller than existing sheet, does not resize. 
:param new_sheet_dimensions: tuple of (row, cols) for size of a new sheet :type df: class 'pandas.core.frame.DataFrame' :type gfile: str :type wks_name: str :type col_names: bool :type row_names: bool :type clean: bool :type credentials: class 'oauth2client.client.OAuth2Credentials' :type start_cell: str :type df_size: bool :type new_sheet_dimensions: tuple :returns: gspread Worksheet :rtype: class 'gspread.models.Worksheet' :Example: >>> from df2gspread import df2gspread as d2g >>> import pandas as pd >>> df = pd.DataFrame([1 2 3]) >>> wks = d2g.upload(df, wks_name='Example worksheet') >>> wks.title 'Example worksheet' ''' # access credentials credentials = get_credentials(credentials) # auth for gspread gc = gspread.authorize(credentials) try: gc.open_by_key(gfile).__repr__() gfile_id = gfile except: gfile_id = get_file_id(credentials, gfile, write_access=True) # Tuple of rows, cols in the dataframe. # If user did not explicitly specify to resize sheet to dataframe size # then for new sheets set it to new_sheet_dimensions, which is by default 1000x100 if df_size: new_sheet_dimensions = (len(df), len(df.columns)) wks = get_worksheet(gc, gfile_id, wks_name, write_access=True, new_sheet_dimensions=new_sheet_dimensions) if clean: wks = clean_worksheet(wks, gfile_id, wks_name, credentials) start_col = re.split(r'(\d+)',start_cell)[0].upper() start_row = re.split(r'(\d+)',start_cell)[1] start_row_int, start_col_int = gspread.utils.a1_to_rowcol(start_cell) # find last index and column name (A B ... Z AA AB ... 
AZ BA) num_rows = len(df.index) + 1 if col_names else len(df.index) last_idx_adjust = start_row_int - 1 last_idx = num_rows + last_idx_adjust num_cols = len(df.columns) + 1 if row_names else len(df.columns) last_col_adjust = start_col_int - 1 last_col_int = num_cols + last_col_adjust last_col = re.split(r'(\d+)',(gspread.utils.rowcol_to_a1(1, last_col_int)))[0].upper() # If user requested to resize sheet to fit dataframe, go ahead and # resize larger or smaller to better match new size of pandas dataframe. # Otherwise, leave it the same size unless the sheet needs to be expanded # to accomodate a larger dataframe. if df_size: wks.resize(rows=len(df.index) + col_names, cols=len(df.columns) + row_names) if len(df.index) + col_names + last_idx_adjust > wks.row_count: wks.add_rows(len(df.index) - wks.row_count + col_names + last_idx_adjust) if len(df.columns) + row_names + last_col_adjust > wks.col_count: wks.add_cols(len(df.columns) - wks.col_count + row_names + last_col_adjust) # Define first cell for rows and columns first_col = re.split(r'(\d+)',(gspread.utils.rowcol_to_a1(1, start_col_int + 1)))[0].upper() if row_names else start_col first_row = str(start_row_int + 1) if col_names else start_row # Addition of col names if col_names: cell_list = wks.range('%s%s:%s%s' % (first_col, start_row, last_col, start_row)) for idx, cell in enumerate(cell_list): cell.value = df.columns.astype(str)[idx] wks.update_cells(cell_list) # Addition of row names if row_names: cell_list = wks.range('%s%s:%s%d' % ( start_col, first_row, start_col, last_idx)) for idx, cell in enumerate(cell_list): cell.value = df.index.astype(str)[idx] wks.update_cells(cell_list) # convert df values to string df = df.applymap(str) # Addition of cell values cell_list = wks.range('%s%s:%s%d' % ( first_col, first_row, last_col, last_idx)) for j, idx in enumerate(df.index): for i, col in enumerate(df.columns.values): if not pd.isnull(df[col][idx]): cell_list[i + j * len(df.columns.values)].value = 
df[col][idx] wks.update_cells(cell_list) return wks
Upload given Pandas DataFrame to Google Drive and returns gspread Worksheet object :param df: Pandas DataFrame :param gfile: path to Google Spreadsheet or gspread ID :param wks_name: worksheet name :param col_names: passing top row to column names for Pandas DataFrame :param row_names: passing left column to row names for Pandas DataFrame :param clean: clean all data in worksheet before uploading :param credentials: provide own credentials :param start_cell: specify where to insert the DataFrame; default is A1 :param df_size: -If True and worksheet name does NOT exist, will create a new worksheet that is the size of the df; otherwise, by default, creates sheet of 1000x100 cells. -If True and worksheet does exist, will resize larger or smaller to fit new dataframe. -If False and dataframe is larger than existing sheet, will resize the sheet larger. -If False and dataframe is smaller than existing sheet, does not resize. :param new_sheet_dimensions: tuple of (row, cols) for size of a new sheet :type df: class 'pandas.core.frame.DataFrame' :type gfile: str :type wks_name: str :type col_names: bool :type row_names: bool :type clean: bool :type credentials: class 'oauth2client.client.OAuth2Credentials' :type start_cell: str :type df_size: bool :type new_sheet_dimensions: tuple :returns: gspread Worksheet :rtype: class 'gspread.models.Worksheet' :Example: >>> from df2gspread import df2gspread as d2g >>> import pandas as pd >>> df = pd.DataFrame([1 2 3]) >>> wks = d2g.upload(df, wks_name='Example worksheet') >>> wks.title 'Example worksheet'
Below is the the instruction that describes the task: ### Input: Upload given Pandas DataFrame to Google Drive and returns gspread Worksheet object :param df: Pandas DataFrame :param gfile: path to Google Spreadsheet or gspread ID :param wks_name: worksheet name :param col_names: passing top row to column names for Pandas DataFrame :param row_names: passing left column to row names for Pandas DataFrame :param clean: clean all data in worksheet before uploading :param credentials: provide own credentials :param start_cell: specify where to insert the DataFrame; default is A1 :param df_size: -If True and worksheet name does NOT exist, will create a new worksheet that is the size of the df; otherwise, by default, creates sheet of 1000x100 cells. -If True and worksheet does exist, will resize larger or smaller to fit new dataframe. -If False and dataframe is larger than existing sheet, will resize the sheet larger. -If False and dataframe is smaller than existing sheet, does not resize. :param new_sheet_dimensions: tuple of (row, cols) for size of a new sheet :type df: class 'pandas.core.frame.DataFrame' :type gfile: str :type wks_name: str :type col_names: bool :type row_names: bool :type clean: bool :type credentials: class 'oauth2client.client.OAuth2Credentials' :type start_cell: str :type df_size: bool :type new_sheet_dimensions: tuple :returns: gspread Worksheet :rtype: class 'gspread.models.Worksheet' :Example: >>> from df2gspread import df2gspread as d2g >>> import pandas as pd >>> df = pd.DataFrame([1 2 3]) >>> wks = d2g.upload(df, wks_name='Example worksheet') >>> wks.title 'Example worksheet' ### Response: def upload(df, gfile="/New Spreadsheet", wks_name=None, col_names=True, row_names=True, clean=True, credentials=None, start_cell = 'A1', df_size = False, new_sheet_dimensions = (1000,100)): ''' Upload given Pandas DataFrame to Google Drive and returns gspread Worksheet object :param df: Pandas DataFrame :param gfile: path to Google Spreadsheet or gspread ID 
:param wks_name: worksheet name :param col_names: passing top row to column names for Pandas DataFrame :param row_names: passing left column to row names for Pandas DataFrame :param clean: clean all data in worksheet before uploading :param credentials: provide own credentials :param start_cell: specify where to insert the DataFrame; default is A1 :param df_size: -If True and worksheet name does NOT exist, will create a new worksheet that is the size of the df; otherwise, by default, creates sheet of 1000x100 cells. -If True and worksheet does exist, will resize larger or smaller to fit new dataframe. -If False and dataframe is larger than existing sheet, will resize the sheet larger. -If False and dataframe is smaller than existing sheet, does not resize. :param new_sheet_dimensions: tuple of (row, cols) for size of a new sheet :type df: class 'pandas.core.frame.DataFrame' :type gfile: str :type wks_name: str :type col_names: bool :type row_names: bool :type clean: bool :type credentials: class 'oauth2client.client.OAuth2Credentials' :type start_cell: str :type df_size: bool :type new_sheet_dimensions: tuple :returns: gspread Worksheet :rtype: class 'gspread.models.Worksheet' :Example: >>> from df2gspread import df2gspread as d2g >>> import pandas as pd >>> df = pd.DataFrame([1 2 3]) >>> wks = d2g.upload(df, wks_name='Example worksheet') >>> wks.title 'Example worksheet' ''' # access credentials credentials = get_credentials(credentials) # auth for gspread gc = gspread.authorize(credentials) try: gc.open_by_key(gfile).__repr__() gfile_id = gfile except: gfile_id = get_file_id(credentials, gfile, write_access=True) # Tuple of rows, cols in the dataframe. 
# If user did not explicitly specify to resize sheet to dataframe size # then for new sheets set it to new_sheet_dimensions, which is by default 1000x100 if df_size: new_sheet_dimensions = (len(df), len(df.columns)) wks = get_worksheet(gc, gfile_id, wks_name, write_access=True, new_sheet_dimensions=new_sheet_dimensions) if clean: wks = clean_worksheet(wks, gfile_id, wks_name, credentials) start_col = re.split(r'(\d+)',start_cell)[0].upper() start_row = re.split(r'(\d+)',start_cell)[1] start_row_int, start_col_int = gspread.utils.a1_to_rowcol(start_cell) # find last index and column name (A B ... Z AA AB ... AZ BA) num_rows = len(df.index) + 1 if col_names else len(df.index) last_idx_adjust = start_row_int - 1 last_idx = num_rows + last_idx_adjust num_cols = len(df.columns) + 1 if row_names else len(df.columns) last_col_adjust = start_col_int - 1 last_col_int = num_cols + last_col_adjust last_col = re.split(r'(\d+)',(gspread.utils.rowcol_to_a1(1, last_col_int)))[0].upper() # If user requested to resize sheet to fit dataframe, go ahead and # resize larger or smaller to better match new size of pandas dataframe. # Otherwise, leave it the same size unless the sheet needs to be expanded # to accomodate a larger dataframe. 
if df_size: wks.resize(rows=len(df.index) + col_names, cols=len(df.columns) + row_names) if len(df.index) + col_names + last_idx_adjust > wks.row_count: wks.add_rows(len(df.index) - wks.row_count + col_names + last_idx_adjust) if len(df.columns) + row_names + last_col_adjust > wks.col_count: wks.add_cols(len(df.columns) - wks.col_count + row_names + last_col_adjust) # Define first cell for rows and columns first_col = re.split(r'(\d+)',(gspread.utils.rowcol_to_a1(1, start_col_int + 1)))[0].upper() if row_names else start_col first_row = str(start_row_int + 1) if col_names else start_row # Addition of col names if col_names: cell_list = wks.range('%s%s:%s%s' % (first_col, start_row, last_col, start_row)) for idx, cell in enumerate(cell_list): cell.value = df.columns.astype(str)[idx] wks.update_cells(cell_list) # Addition of row names if row_names: cell_list = wks.range('%s%s:%s%d' % ( start_col, first_row, start_col, last_idx)) for idx, cell in enumerate(cell_list): cell.value = df.index.astype(str)[idx] wks.update_cells(cell_list) # convert df values to string df = df.applymap(str) # Addition of cell values cell_list = wks.range('%s%s:%s%d' % ( first_col, first_row, last_col, last_idx)) for j, idx in enumerate(df.index): for i, col in enumerate(df.columns.values): if not pd.isnull(df[col][idx]): cell_list[i + j * len(df.columns.values)].value = df[col][idx] wks.update_cells(cell_list) return wks
def is_leaf(obj):
    """Return True when *obj* should be treated as a leaf of a nested dict.

    Anything that is not a dict counts as a leaf; an empty dict is also
    treated as a leaf.

    from edict.edict import *
    is_leaf(1)      # True
    is_leaf({1:2})  # False
    is_leaf({})     # True
    """
    if not is_dict(obj):
        # Non-dict values terminate the nesting, so they are leaves.
        return True
    # A dict is a leaf only when it holds no entries.
    return len(obj) == 0
the below is for nested-dict any type is not dict will be treated as a leaf empty dict will be treated as a leaf from edict.edict import * is_leaf(1) is_leaf({1:2}) is_leaf({})
Below is the the instruction that describes the task: ### Input: the below is for nested-dict any type is not dict will be treated as a leaf empty dict will be treated as a leaf from edict.edict import * is_leaf(1) is_leaf({1:2}) is_leaf({}) ### Response: def is_leaf(obj): ''' the below is for nested-dict any type is not dict will be treated as a leaf empty dict will be treated as a leaf from edict.edict import * is_leaf(1) is_leaf({1:2}) is_leaf({}) ''' if(is_dict(obj)): length = obj.__len__() if(length == 0): return(True) else: return(False) else: return(True)
def from_array(array):
    """
    Deserialize a new EncryptedPassportElement from a given dictionary.

    :param array: dict payload as received from the Telegram Bot API,
                  or None/empty for "no element".
    :return: new EncryptedPassportElement instance, or None when the
             input is None or empty.
    :rtype: EncryptedPassportElement
    """
    # Empty/None payloads deserialize to None instead of raising.
    if array is None or not array:
        return None
    # end if
    assert_type_or_raise(array, dict, parameter_name="array")
    # Local import — presumably avoids a circular import at module load; confirm.
    from pytgbot.api_types.receivable.passport import PassportFile

    data = {}
    # Required fields: converted unconditionally.
    data['type'] = u(array.get('type'))
    data['hash'] = u(array.get('hash'))
    # Optional scalar fields: kept as None when absent from the payload.
    data['data'] = u(array.get('data')) if array.get('data') is not None else None
    data['phone_number'] = u(array.get('phone_number')) if array.get('phone_number') is not None else None
    data['email'] = u(array.get('email')) if array.get('email') is not None else None
    # Optional file fields: list-valued ones go through from_array_list,
    # single files through from_array; absent fields stay None.
    data['files'] = PassportFile.from_array_list(array.get('files'), list_level=1) if array.get('files') is not None else None
    data['front_side'] = PassportFile.from_array(array.get('front_side')) if array.get('front_side') is not None else None
    data['reverse_side'] = PassportFile.from_array(array.get('reverse_side')) if array.get('reverse_side') is not None else None
    data['selfie'] = PassportFile.from_array(array.get('selfie')) if array.get('selfie') is not None else None
    data['translation'] = PassportFile.from_array_list(array.get('translation'), list_level=1) if array.get('translation') is not None else None
    # Keep the raw payload around for debugging / round-tripping.
    data['_raw'] = array
    return EncryptedPassportElement(**data)
Deserialize a new EncryptedPassportElement from a given dictionary. :return: new EncryptedPassportElement instance. :rtype: EncryptedPassportElement
Below is the the instruction that describes the task: ### Input: Deserialize a new EncryptedPassportElement from a given dictionary. :return: new EncryptedPassportElement instance. :rtype: EncryptedPassportElement ### Response: def from_array(array): """ Deserialize a new EncryptedPassportElement from a given dictionary. :return: new EncryptedPassportElement instance. :rtype: EncryptedPassportElement """ if array is None or not array: return None # end if assert_type_or_raise(array, dict, parameter_name="array") from pytgbot.api_types.receivable.passport import PassportFile data = {} data['type'] = u(array.get('type')) data['hash'] = u(array.get('hash')) data['data'] = u(array.get('data')) if array.get('data') is not None else None data['phone_number'] = u(array.get('phone_number')) if array.get('phone_number') is not None else None data['email'] = u(array.get('email')) if array.get('email') is not None else None data['files'] = PassportFile.from_array_list(array.get('files'), list_level=1) if array.get('files') is not None else None data['front_side'] = PassportFile.from_array(array.get('front_side')) if array.get('front_side') is not None else None data['reverse_side'] = PassportFile.from_array(array.get('reverse_side')) if array.get('reverse_side') is not None else None data['selfie'] = PassportFile.from_array(array.get('selfie')) if array.get('selfie') is not None else None data['translation'] = PassportFile.from_array_list(array.get('translation'), list_level=1) if array.get('translation') is not None else None data['_raw'] = array return EncryptedPassportElement(**data)
def http_error_307(self, url, fp, errcode, errmsg, headers, data=None):
    """Error 307 -- relocated, but turn POST into error.

    With no request body (GET), the redirect is handled exactly like a
    302; when a body is present (POST), the request is not replayed and
    the default error handler is invoked instead.
    """
    if data is not None:
        return self.http_error_default(url, fp, errcode, errmsg, headers)
    return self.http_error_302(url, fp, errcode, errmsg, headers, data)
Error 307 -- relocated, but turn POST into error.
Below is the the instruction that describes the task: ### Input: Error 307 -- relocated, but turn POST into error. ### Response: def http_error_307(self, url, fp, errcode, errmsg, headers, data=None): """Error 307 -- relocated, but turn POST into error.""" if data is None: return self.http_error_302(url, fp, errcode, errmsg, headers, data) else: return self.http_error_default(url, fp, errcode, errmsg, headers)
def get_key_section_header(self, key, spaces):
    """Build a numpydoc-style section header: the title line followed by
    a dashed underline of the same length, both indented.

    :param key: the key name
    :param spaces: indentation prefix for each emitted line
    """
    title = super(NumpydocTools, self).get_key_section_header(key, spaces)
    underline = '-' * len(title)
    return '{0}{1}\n{0}{2}\n'.format(spaces, title, underline)
Get the key of the header section :param key: the key name :param spaces: spaces to set at the beginning of the header
Below is the the instruction that describes the task: ### Input: Get the key of the header section :param key: the key name :param spaces: spaces to set at the beginning of the header ### Response: def get_key_section_header(self, key, spaces): """Get the key of the header section :param key: the key name :param spaces: spaces to set at the beginning of the header """ header = super(NumpydocTools, self).get_key_section_header(key, spaces) header = spaces + header + '\n' + spaces + '-' * len(header) + '\n' return header
def compare(cls, match, subject):
    """
    Accepts two OrganizationName objects and returns an arbitrary,
    numerical score based upon how well the names match.
    """
    # Exact match on the fully expanded names is the strongest signal.
    if match.expand().lower() == subject.expand().lower():
        return 4
    # Matching kernels (core words of the name) rank just below exact.
    elif match.kernel().lower() == subject.kernel().lower():
        return 3
    # law and lobbying firms in CRP data typically list only the first two partners
    # before 'et al'
    elif ',' in subject.expand():  # we may have a list of partners
        if subject.crp_style_firm_name() == str(match).lower():
            return 3
        else:
            return 2
    # NOTE(review): when no branch matches (no comma in the expanded
    # subject), the method implicitly returns None rather than a numeric
    # score — confirm that callers treat None as "no match".
Accepts two OrganizationName objects and returns an arbitrary, numerical score based upon how well the names match.
Below is the the instruction that describes the task: ### Input: Accepts two OrganizationName objects and returns an arbitrary, numerical score based upon how well the names match. ### Response: def compare(cls, match, subject): """ Accepts two OrganizationName objects and returns an arbitrary, numerical score based upon how well the names match. """ if match.expand().lower() == subject.expand().lower(): return 4 elif match.kernel().lower() == subject.kernel().lower(): return 3 # law and lobbying firms in CRP data typically list only the first two partners # before 'et al' elif ',' in subject.expand(): # we may have a list of partners if subject.crp_style_firm_name() == str(match).lower(): return 3 else: return 2
def get_field_min_max(self, name, **query_dict):
    """Returns the minimum and maximum values of the specified field. This
    requires two search calls to the service, each requesting a single
    value of a single field.

    @param name(string) Name of the field
    @param q(string) Query identifying range of records for min and max values
    @param fq(string) Filter restricting range of query
    @return list of [min, max]
    """
    # Copy so the caller's query dict is not mutated by the overrides below.
    param_dict = query_dict.copy()
    # Request a single row containing only the target field, sorted
    # ascending, so the first document holds the minimum value.
    param_dict.update({'rows': 1,
                       'fl': name,
                       'sort': '%s asc' % name})
    try:
        min_resp_dict = self._post_query(**param_dict)
        # Flip the sort to descending and query again for the maximum.
        param_dict['sort'] = '%s desc' % name
        max_resp_dict = self._post_query(**param_dict)
        return (
            min_resp_dict['response']['docs'][0][name],
            max_resp_dict['response']['docs'][0][name],
        )
    except Exception:
        # Log the full traceback, then propagate so callers can react.
        self._log.exception('Exception')
        raise
Returns the minimum and maximum values of the specified field. This requires two search calls to the service, each requesting a single value of a single field. @param name(string) Name of the field @param q(string) Query identifying range of records for min and max values @param fq(string) Filter restricting range of query @return list of [min, max]
Below is the the instruction that describes the task: ### Input: Returns the minimum and maximum values of the specified field. This requires two search calls to the service, each requesting a single value of a single field. @param name(string) Name of the field @param q(string) Query identifying range of records for min and max values @param fq(string) Filter restricting range of query @return list of [min, max] ### Response: def get_field_min_max(self, name, **query_dict): """Returns the minimum and maximum values of the specified field. This requires two search calls to the service, each requesting a single value of a single field. @param name(string) Name of the field @param q(string) Query identifying range of records for min and max values @param fq(string) Filter restricting range of query @return list of [min, max] """ param_dict = query_dict.copy() param_dict.update({'rows': 1, 'fl': name, 'sort': '%s asc' % name}) try: min_resp_dict = self._post_query(**param_dict) param_dict['sort'] = '%s desc' % name max_resp_dict = self._post_query(**param_dict) return ( min_resp_dict['response']['docs'][0][name], max_resp_dict['response']['docs'][0][name], ) except Exception: self._log.exception('Exception') raise
def transform_region(self, from_, to, transform_callback):
    """
    Transform a part of the input string.

    :param from_: (int) start position.
    :param to: (int) end position.
    :param transform_callback: Callable which accepts a string and returns
        the transformed string.
    """
    assert from_ < to

    # Rebuild the text as prefix + transformed slice + suffix.
    before = self.text[:from_]
    middle = transform_callback(self.text[from_:to])
    after = self.text[to:]
    self.text = before + middle + after
Transform a part of the input string. :param from_: (int) start position. :param to: (int) end position. :param transform_callback: Callable which accepts a string and returns the transformed string.
Below is the the instruction that describes the task: ### Input: Transform a part of the input string. :param from_: (int) start position. :param to: (int) end position. :param transform_callback: Callable which accepts a string and returns the transformed string. ### Response: def transform_region(self, from_, to, transform_callback): """ Transform a part of the input string. :param from_: (int) start position. :param to: (int) end position. :param transform_callback: Callable which accepts a string and returns the transformed string. """ assert from_ < to self.text = ''.join([ self.text[:from_] + transform_callback(self.text[from_:to]) + self.text[to:] ])
def _check_status(self, ibsta):
    """Checks ibsta value.

    Raises LinuxGpib.Timeout or LinuxGpib.Error when the corresponding
    status bits are set; returns None when the status word is clean.

    :param ibsta: GPIB status word (ibsta) returned by the library call.
    """
    # Bit 0x4000 — presumably the TIMO (timeout) flag; confirm against
    # the linux-gpib ibsta bit definitions.
    if ibsta & 0x4000:
        raise LinuxGpib.Timeout()
    # Bit 0x8000 — presumably the ERR flag; report the detailed error code.
    elif ibsta & 0x8000:
        raise LinuxGpib.Error(self.error_status)
Checks ibsta value.
Below is the instruction that describes the task: ### Input: Checks ibsta value. ### Response: def _check_status(self, ibsta): """Checks ibsta value.""" if ibsta & 0x4000: raise LinuxGpib.Timeout() elif ibsta & 0x8000: raise LinuxGpib.Error(self.error_status)
def dataframe(df, **kwargs):
    """Print table with data from the given pandas DataFrame

    Parameters
    ----------
    df : DataFrame
        A pandas DataFrame with the table to print
    """
    # Delegate to table(), forwarding the DataFrame's values as the rows
    # and its column labels as the header; extra kwargs pass straight through.
    table(df.values, list(df.columns), **kwargs)
Print table with data from the given pandas DataFrame Parameters ---------- df : DataFrame A pandas DataFrame with the table to print
Below is the the instruction that describes the task: ### Input: Print table with data from the given pandas DataFrame Parameters ---------- df : DataFrame A pandas DataFrame with the table to print ### Response: def dataframe(df, **kwargs): """Print table with data from the given pandas DataFrame Parameters ---------- df : DataFrame A pandas DataFrame with the table to print """ table(df.values, list(df.columns), **kwargs)
def set_options(self, options):
    """Copy known entries of *options* onto this instance.

    Leading underscores are stripped from each key; a value is stored
    only when the instance already has an attribute of that name.

    :parameters:
        options : Dict
            All known instance attributes and more if the childclass has
            defined them before this call.
    :rtype: None
    """
    for raw_key, value in options.items():
        attr = raw_key.lstrip('_')
        if not hasattr(self, attr):
            # Unknown option — silently ignored by design.
            continue
        setattr(self, attr, value)
Sets the given options as instance attributes (only if they are known). :parameters: options : Dict All known instance attributes and more if the childclass has defined them before this call. :rtype: None
Below is the the instruction that describes the task: ### Input: Sets the given options as instance attributes (only if they are known). :parameters: options : Dict All known instance attributes and more if the childclass has defined them before this call. :rtype: None ### Response: def set_options(self, options): """Sets the given options as instance attributes (only if they are known). :parameters: options : Dict All known instance attributes and more if the childclass has defined them before this call. :rtype: None """ for key, val in options.items(): key = key.lstrip('_') if hasattr(self, key): setattr(self, key, val)
def get_available_label_stores(self, usefield='tryall'):
    """
    Get the label store values that may be used
    for writing this annotation.

    Available store values include:
    - the undefined values in the standard wfdb labels
    - the store values not used in the current
      annotation object.
    - the store values whose standard wfdb symbols/descriptions
      match those of the custom labels (if custom_labels exists)

    If 'usefield' is explicitly specified, the function will use that
    field to figure out available label stores. If 'usefield'
    is set to 'tryall', the function will choose one of the contained
    attributes by checking availability in the order: label_store, symbol,
    description
    """
    # Figure out which field to use to get available labels stores.
    if usefield == 'tryall':
        # Pick the first populated attribute, then recurse once with the
        # explicit field name.
        if self.label_store is not None:
            usefield = 'label_store'
        elif self.symbol is not None:
            usefield = 'symbol'
        elif self.description is not None:
            usefield = 'description'
        else:
            raise ValueError('No label fields are defined. At least one of the following is required: ', ann_label_fields)
        return self.get_available_label_stores(usefield = usefield)
    # Use the explicitly stated field to get available stores.
    else:
        # If usefield == 'label_store', there are slightly fewer/different steps
        # compared to if it were another option
        contained_field = getattr(self, usefield)

        # Get the unused label_store values
        if usefield == 'label_store':
            unused_label_stores = set(ann_label_table['label_store'].values) - contained_field
        else:
            # the label_store values from the standard wfdb annotation labels
            # whose symbols are not contained in this annotation
            unused_field = set(ann_label_table[usefield].values) - contained_field
            # NOTE(review): `ann_label_table[usefield] in unused_field` tests a
            # pandas Series for membership in a set — this looks wrong (it
            # would not select rows element-wise); `.isin(unused_field)` is
            # presumably intended. Confirm before relying on this branch.
            unused_label_stores = ann_label_table.loc[ann_label_table[usefield] in unused_field, 'label_store'].values

        # Get the standard wfdb label_store values overwritten by the
        # custom_labels if any
        if self.custom_symbols is not None:
            custom_field = set(self.get_custom_label_attribute(usefield))
            if usefield == 'label_store':
                overwritten_label_stores = set(custom_field).intersection(set(ann_label_table['label_store']))
            else:
                overwritten_fields = set(custom_field).intersection(set(ann_label_table[usefield]))
                # NOTE(review): same Series-in-set concern as above.
                overwritten_label_stores = ann_label_table.loc[ann_label_table[usefield] in overwritten_fields, 'label_store'].values
        else:
            overwritten_label_stores = set()

        # The undefined values in the standard wfdb labels
        undefined_label_stores = self.get_undefined_label_stores()

        # Final available label stores = undefined + unused + overwritten
        available_label_stores = set(undefined_label_stores).union(set(unused_label_stores)).union(overwritten_label_stores)

        return available_label_stores
Get the label store values that may be used for writing this annotation. Available store values include: - the undefined values in the standard wfdb labels - the store values not used in the current annotation object. - the store values whose standard wfdb symbols/descriptions match those of the custom labels (if custom_labels exists) If 'usefield' is explicitly specified, the function will use that field to figure out available label stores. If 'usefield' is set to 'tryall', the function will choose one of the contained attributes by checking availability in the order: label_store, symbol, description
Below is the the instruction that describes the task: ### Input: Get the label store values that may be used for writing this annotation. Available store values include: - the undefined values in the standard wfdb labels - the store values not used in the current annotation object. - the store values whose standard wfdb symbols/descriptions match those of the custom labels (if custom_labels exists) If 'usefield' is explicitly specified, the function will use that field to figure out available label stores. If 'usefield' is set to 'tryall', the function will choose one of the contained attributes by checking availability in the order: label_store, symbol, description ### Response: def get_available_label_stores(self, usefield='tryall'): """ Get the label store values that may be used for writing this annotation. Available store values include: - the undefined values in the standard wfdb labels - the store values not used in the current annotation object. - the store values whose standard wfdb symbols/descriptions match those of the custom labels (if custom_labels exists) If 'usefield' is explicitly specified, the function will use that field to figure out available label stores. If 'usefield' is set to 'tryall', the function will choose one of the contained attributes by checking availability in the order: label_store, symbol, description """ # Figure out which field to use to get available labels stores. if usefield == 'tryall': if self.label_store is not None: usefield = 'label_store' elif self.symbol is not None: usefield = 'symbol' elif self.description is not None: usefield = 'description' else: raise ValueError('No label fields are defined. At least one of the following is required: ', ann_label_fields) return self.get_available_label_stores(usefield = usefield) # Use the explicitly stated field to get available stores. 
else: # If usefield == 'label_store', there are slightly fewer/different steps # compared to if it were another option contained_field = getattr(self, usefield) # Get the unused label_store values if usefield == 'label_store': unused_label_stores = set(ann_label_table['label_store'].values) - contained_field else: # the label_store values from the standard wfdb annotation labels # whose symbols are not contained in this annotation unused_field = set(ann_label_table[usefield].values) - contained_field unused_label_stores = ann_label_table.loc[ann_label_table[usefield] in unused_field, 'label_store'].values # Get the standard wfdb label_store values overwritten by the # custom_labels if any if self.custom_symbols is not None: custom_field = set(self.get_custom_label_attribute(usefield)) if usefield == 'label_store': overwritten_label_stores = set(custom_field).intersection(set(ann_label_table['label_store'])) else: overwritten_fields = set(custom_field).intersection(set(ann_label_table[usefield])) overwritten_label_stores = ann_label_table.loc[ann_label_table[usefield] in overwritten_fields, 'label_store'].values else: overwritten_label_stores = set() # The undefined values in the standard wfdb labels undefined_label_stores = self.get_undefined_label_stores() # Final available label stores = undefined + unused + overwritten available_label_stores = set(undefined_label_stores).union(set(unused_label_stores)).union(overwritten_label_stores) return available_label_stores
def get_local_time(index):
    """Localize datetime for better output in graphs

    :param pandas.DateTimeIndex index: pandas datetime index
    :return: aware time object
    :rtype: datetime.time
    """
    # NOTE(review): DatetimeIndex.to_pydatetime() returns an array of
    # datetimes, while the .replace() call below is a single-datetime
    # method — this looks like a single Timestamp is actually expected;
    # confirm against the callers.
    dt = index.to_pydatetime()
    # Treat the naive value as UTC, then convert to the machine's local
    # timezone and keep only the time-of-day component.
    dt = dt.replace(tzinfo=pytz.utc)
    return dt.astimezone(tzlocal()).time()
Localize datetime for better output in graphs :param pandas.DateTimeIndex index: pandas datetime index :return: aware time objet :rtype: datetime.time
Below is the the instruction that describes the task: ### Input: Localize datetime for better output in graphs :param pandas.DateTimeIndex index: pandas datetime index :return: aware time objet :rtype: datetime.time ### Response: def get_local_time(index): """Localize datetime for better output in graphs :param pandas.DateTimeIndex index: pandas datetime index :return: aware time objet :rtype: datetime.time """ dt = index.to_pydatetime() dt = dt.replace(tzinfo=pytz.utc) return dt.astimezone(tzlocal()).time()
def black(cls):
    """Make the text foreground color black."""
    wAttributes = cls._get_text_attributes()
    # Clear every foreground color bit; black is the all-zeros foreground,
    # so nothing needs to be OR-ed back in afterwards.
    wAttributes &= ~win32.FOREGROUND_MASK
    #wAttributes |= win32.FOREGROUND_BLACK
    cls._set_text_attributes(wAttributes)
Make the text foreground color black.
Below is the instruction that describes the task: ### Input: Make the text foreground color black. ### Response: def black(cls): "Make the text foreground color black." wAttributes = cls._get_text_attributes() wAttributes &= ~win32.FOREGROUND_MASK #wAttributes |= win32.FOREGROUND_BLACK cls._set_text_attributes(wAttributes)
def modelsGetResultAndStatus(self, modelIDs):
    """ Get the results string and other status fields for a set of models.

    WARNING!!!: The order of the results are NOT necessarily in the same
    order as the order of the model IDs passed in!!!

    For each model, this returns a tuple containing:
     (modelID, results, status, updateCounter, numRecords, completionReason,
      completionMsg, engParamsHash)

    Parameters:
    ----------------------------------------------------------------
    modelIDs:    list of model IDs
    retval:      list of result tuples. Each tuple contains:
                    (modelID, results, status, updateCounter, numRecords,
                     completionReason, completionMsg, engParamsHash)
    """
    assert isinstance(modelIDs, self._SEQUENCE_TYPES), (
      "Wrong modelIDs type: %r") % type(modelIDs)
    assert len(modelIDs) >= 1, "modelIDs is empty"

    # Fetch the rows for all requested models in one query, selecting the
    # DB column names that back the public fields of the result namedtuple.
    rows = self._getMatchingRowsWithRetries(
      self._models, {'model_id' : modelIDs},
      [self._models.pubToDBNameDict[f]
       for f in self._models.getResultAndStatusNamedTuple._fields])

    # NOTE: assertion will also fail when modelIDs contains duplicates
    assert len(rows) == len(modelIDs), "Didn't find modelIDs: %r" % (
      (set(modelIDs) - set(r[0] for r in rows)),)

    # Return the results as a list of namedtuples
    return [self._models.getResultAndStatusNamedTuple._make(r)
            for r in rows]
Get the results string and other status fields for a set of models. WARNING!!!: The order of the results are NOT necessarily in the same order as the order of the model IDs passed in!!! For each model, this returns a tuple containing: (modelID, results, status, updateCounter, numRecords, completionReason, completionMsg, engParamsHash Parameters: ---------------------------------------------------------------- modelIDs: list of model IDs retval: list of result tuples. Each tuple contains: (modelID, results, status, updateCounter, numRecords, completionReason, completionMsg, engParamsHash)
Below is the the instruction that describes the task: ### Input: Get the results string and other status fields for a set of models. WARNING!!!: The order of the results are NOT necessarily in the same order as the order of the model IDs passed in!!! For each model, this returns a tuple containing: (modelID, results, status, updateCounter, numRecords, completionReason, completionMsg, engParamsHash Parameters: ---------------------------------------------------------------- modelIDs: list of model IDs retval: list of result tuples. Each tuple contains: (modelID, results, status, updateCounter, numRecords, completionReason, completionMsg, engParamsHash) ### Response: def modelsGetResultAndStatus(self, modelIDs): """ Get the results string and other status fields for a set of models. WARNING!!!: The order of the results are NOT necessarily in the same order as the order of the model IDs passed in!!! For each model, this returns a tuple containing: (modelID, results, status, updateCounter, numRecords, completionReason, completionMsg, engParamsHash Parameters: ---------------------------------------------------------------- modelIDs: list of model IDs retval: list of result tuples. Each tuple contains: (modelID, results, status, updateCounter, numRecords, completionReason, completionMsg, engParamsHash) """ assert isinstance(modelIDs, self._SEQUENCE_TYPES), ( "Wrong modelIDs type: %r") % type(modelIDs) assert len(modelIDs) >= 1, "modelIDs is empty" rows = self._getMatchingRowsWithRetries( self._models, {'model_id' : modelIDs}, [self._models.pubToDBNameDict[f] for f in self._models.getResultAndStatusNamedTuple._fields]) # NOTE: assertion will also fail when modelIDs contains duplicates assert len(rows) == len(modelIDs), "Didn't find modelIDs: %r" % ( (set(modelIDs) - set(r[0] for r in rows)),) # Return the results as a list of namedtuples return [self._models.getResultAndStatusNamedTuple._make(r) for r in rows]
def duration(self):
    """Duration of this series in seconds

    :type: `~astropy.units.Quantity` scalar
    """
    # span is (start, end); tag the difference with the x-axis unit to
    # produce a float Quantity giving the total extent of the series.
    return units.Quantity(self.span[1] - self.span[0], self.xunit,
                          dtype=float)
Duration of this series in seconds :type: `~astropy.units.Quantity` scalar
Below is the the instruction that describes the task: ### Input: Duration of this series in seconds :type: `~astropy.units.Quantity` scalar ### Response: def duration(self): """Duration of this series in seconds :type: `~astropy.units.Quantity` scalar """ return units.Quantity(self.span[1] - self.span[0], self.xunit, dtype=float)
def splitext_files_only(filepath): "Custom version of splitext that doesn't perform splitext on directories" return ( (filepath, '') if os.path.isdir(filepath) else os.path.splitext(filepath) )
Custom version of splitext that doesn't perform splitext on directories
Below is the the instruction that describes the task: ### Input: Custom version of splitext that doesn't perform splitext on directories ### Response: def splitext_files_only(filepath): "Custom version of splitext that doesn't perform splitext on directories" return ( (filepath, '') if os.path.isdir(filepath) else os.path.splitext(filepath) )
def read_bonedata(self, fid): """Read bone data from an acclaim skeleton file stream.""" bone_count = 0 lin = self.read_line(fid) while lin[0]!=':': parts = lin.split() if parts[0] == 'begin': bone_count += 1 self.vertices.append(vertex(name = '', id=np.NaN, meta={'name': [], 'id': [], 'offset': [], 'orientation': [], 'axis': [0., 0., 0.], 'axis_order': [], 'C': np.eye(3), 'Cinv': np.eye(3), 'channels': [], 'bodymass': [], 'confmass': [], 'order': [], 'rot_ind': [], 'pos_ind': [], 'limits': [], 'xyz': np.array([0., 0., 0.]), 'rot': np.eye(3)})) lin = self.read_line(fid) elif parts[0]=='id': self.vertices[bone_count].id = int(parts[1]) lin = self.read_line(fid) self.vertices[bone_count].children = [] elif parts[0]=='name': self.vertices[bone_count].name = parts[1] lin = self.read_line(fid) elif parts[0]=='direction': direction = np.array([float(parts[1]), float(parts[2]), float(parts[3])]) lin = self.read_line(fid) elif parts[0]=='length': lgth = float(parts[1]) lin = self.read_line(fid) elif parts[0]=='axis': self.vertices[bone_count].meta['axis'] = np.array([float(parts[1]), float(parts[2]), float(parts[3])]) # order is reversed compared to bvh self.vertices[bone_count].meta['axis_order'] = parts[-1][::-1].lower() lin = self.read_line(fid) elif parts[0]=='dof': order = [] for i in range(1, len(parts)): if parts[i]== 'rx': chan = 'Xrotation' order.append('x') elif parts[i] =='ry': chan = 'Yrotation' order.append('y') elif parts[i] == 'rz': chan = 'Zrotation' order.append('z') elif parts[i] == 'tx': chan = 'Xposition' elif parts[i] == 'ty': chan = 'Yposition' elif parts[i] == 'tz': chan = 'Zposition' elif parts[i] == 'l': chan = 'length' self.vertices[bone_count].meta['channels'].append(chan) # order is reversed compared to bvh self.vertices[bone_count].meta['order'] = order[::-1] lin = self.read_line(fid) elif parts[0]=='limits': self.vertices[bone_count].meta['limits'] = [[float(parts[1][1:]), float(parts[2][:-1])]] lin = self.read_line(fid) while lin !='end': 
parts = lin.split() self.vertices[bone_count].meta['limits'].append([float(parts[0][1:]), float(parts[1][:-1])]) lin = self.read_line(fid) self.vertices[bone_count].meta['limits'] = np.array(self.vertices[bone_count].meta['limits']) elif parts[0]=='end': self.vertices[bone_count].meta['offset'] = direction*lgth lin = self.read_line(fid) return lin
Read bone data from an acclaim skeleton file stream.
Below is the the instruction that describes the task: ### Input: Read bone data from an acclaim skeleton file stream. ### Response: def read_bonedata(self, fid): """Read bone data from an acclaim skeleton file stream.""" bone_count = 0 lin = self.read_line(fid) while lin[0]!=':': parts = lin.split() if parts[0] == 'begin': bone_count += 1 self.vertices.append(vertex(name = '', id=np.NaN, meta={'name': [], 'id': [], 'offset': [], 'orientation': [], 'axis': [0., 0., 0.], 'axis_order': [], 'C': np.eye(3), 'Cinv': np.eye(3), 'channels': [], 'bodymass': [], 'confmass': [], 'order': [], 'rot_ind': [], 'pos_ind': [], 'limits': [], 'xyz': np.array([0., 0., 0.]), 'rot': np.eye(3)})) lin = self.read_line(fid) elif parts[0]=='id': self.vertices[bone_count].id = int(parts[1]) lin = self.read_line(fid) self.vertices[bone_count].children = [] elif parts[0]=='name': self.vertices[bone_count].name = parts[1] lin = self.read_line(fid) elif parts[0]=='direction': direction = np.array([float(parts[1]), float(parts[2]), float(parts[3])]) lin = self.read_line(fid) elif parts[0]=='length': lgth = float(parts[1]) lin = self.read_line(fid) elif parts[0]=='axis': self.vertices[bone_count].meta['axis'] = np.array([float(parts[1]), float(parts[2]), float(parts[3])]) # order is reversed compared to bvh self.vertices[bone_count].meta['axis_order'] = parts[-1][::-1].lower() lin = self.read_line(fid) elif parts[0]=='dof': order = [] for i in range(1, len(parts)): if parts[i]== 'rx': chan = 'Xrotation' order.append('x') elif parts[i] =='ry': chan = 'Yrotation' order.append('y') elif parts[i] == 'rz': chan = 'Zrotation' order.append('z') elif parts[i] == 'tx': chan = 'Xposition' elif parts[i] == 'ty': chan = 'Yposition' elif parts[i] == 'tz': chan = 'Zposition' elif parts[i] == 'l': chan = 'length' self.vertices[bone_count].meta['channels'].append(chan) # order is reversed compared to bvh self.vertices[bone_count].meta['order'] = order[::-1] lin = self.read_line(fid) elif parts[0]=='limits': 
self.vertices[bone_count].meta['limits'] = [[float(parts[1][1:]), float(parts[2][:-1])]] lin = self.read_line(fid) while lin !='end': parts = lin.split() self.vertices[bone_count].meta['limits'].append([float(parts[0][1:]), float(parts[1][:-1])]) lin = self.read_line(fid) self.vertices[bone_count].meta['limits'] = np.array(self.vertices[bone_count].meta['limits']) elif parts[0]=='end': self.vertices[bone_count].meta['offset'] = direction*lgth lin = self.read_line(fid) return lin
def vol_per_rev_LS(id_number): """Look up the volume per revolution output by a Masterflex L/S pump through L/S tubing of the given ID number. :param id_number: Identification number of the L/S tubing. Valid numbers are 13-18, 24, 35, and 36. :type id_number: int :return: Volume per revolution output by a Masterflex L/S pump through the L/S tubing :rtype: float :Examples: >>> from aguaclara.research.peristaltic_pump import vol_per_rev_LS >>> from aguaclara.core.units import unit_registry as u >>> vol_per_rev_LS(13) <Quantity(0.06, 'milliliter / turn')> >>> vol_per_rev_LS(18) <Quantity(3.8, 'milliliter / turn')> """ tubing_data_path = os.path.join(os.path.dirname(__file__), "data", "LS_tubing.txt") df = pd.read_csv(tubing_data_path, delimiter='\t') idx = df["Number"] == id_number return df[idx]['Flow (mL/rev)'].values[0] * u.mL/u.turn
Look up the volume per revolution output by a Masterflex L/S pump through L/S tubing of the given ID number. :param id_number: Identification number of the L/S tubing. Valid numbers are 13-18, 24, 35, and 36. :type id_number: int :return: Volume per revolution output by a Masterflex L/S pump through the L/S tubing :rtype: float :Examples: >>> from aguaclara.research.peristaltic_pump import vol_per_rev_LS >>> from aguaclara.core.units import unit_registry as u >>> vol_per_rev_LS(13) <Quantity(0.06, 'milliliter / turn')> >>> vol_per_rev_LS(18) <Quantity(3.8, 'milliliter / turn')>
Below is the the instruction that describes the task: ### Input: Look up the volume per revolution output by a Masterflex L/S pump through L/S tubing of the given ID number. :param id_number: Identification number of the L/S tubing. Valid numbers are 13-18, 24, 35, and 36. :type id_number: int :return: Volume per revolution output by a Masterflex L/S pump through the L/S tubing :rtype: float :Examples: >>> from aguaclara.research.peristaltic_pump import vol_per_rev_LS >>> from aguaclara.core.units import unit_registry as u >>> vol_per_rev_LS(13) <Quantity(0.06, 'milliliter / turn')> >>> vol_per_rev_LS(18) <Quantity(3.8, 'milliliter / turn')> ### Response: def vol_per_rev_LS(id_number): """Look up the volume per revolution output by a Masterflex L/S pump through L/S tubing of the given ID number. :param id_number: Identification number of the L/S tubing. Valid numbers are 13-18, 24, 35, and 36. :type id_number: int :return: Volume per revolution output by a Masterflex L/S pump through the L/S tubing :rtype: float :Examples: >>> from aguaclara.research.peristaltic_pump import vol_per_rev_LS >>> from aguaclara.core.units import unit_registry as u >>> vol_per_rev_LS(13) <Quantity(0.06, 'milliliter / turn')> >>> vol_per_rev_LS(18) <Quantity(3.8, 'milliliter / turn')> """ tubing_data_path = os.path.join(os.path.dirname(__file__), "data", "LS_tubing.txt") df = pd.read_csv(tubing_data_path, delimiter='\t') idx = df["Number"] == id_number return df[idx]['Flow (mL/rev)'].values[0] * u.mL/u.turn
def route( self, uri, methods=frozenset({"GET"}), host=None, strict_slashes=None, stream=False, version=None, name=None, ): """Create a blueprint route from a decorated function. :param uri: endpoint at which the route will be accessible. :param methods: list of acceptable HTTP methods. :param host: IP Address of FQDN for the sanic server to use. :param strict_slashes: Enforce the API urls are requested with a training */* :param stream: If the route should provide a streaming support :param version: Blueprint Version :param name: Unique name to identify the Route :return a decorated method that when invoked will return an object of type :class:`FutureRoute` """ if strict_slashes is None: strict_slashes = self.strict_slashes def decorator(handler): route = FutureRoute( handler, uri, methods, host, strict_slashes, stream, version, name, ) self.routes.append(route) return handler return decorator
Create a blueprint route from a decorated function. :param uri: endpoint at which the route will be accessible. :param methods: list of acceptable HTTP methods. :param host: IP Address of FQDN for the sanic server to use. :param strict_slashes: Enforce the API urls are requested with a training */* :param stream: If the route should provide a streaming support :param version: Blueprint Version :param name: Unique name to identify the Route :return a decorated method that when invoked will return an object of type :class:`FutureRoute`
Below is the the instruction that describes the task: ### Input: Create a blueprint route from a decorated function. :param uri: endpoint at which the route will be accessible. :param methods: list of acceptable HTTP methods. :param host: IP Address of FQDN for the sanic server to use. :param strict_slashes: Enforce the API urls are requested with a training */* :param stream: If the route should provide a streaming support :param version: Blueprint Version :param name: Unique name to identify the Route :return a decorated method that when invoked will return an object of type :class:`FutureRoute` ### Response: def route( self, uri, methods=frozenset({"GET"}), host=None, strict_slashes=None, stream=False, version=None, name=None, ): """Create a blueprint route from a decorated function. :param uri: endpoint at which the route will be accessible. :param methods: list of acceptable HTTP methods. :param host: IP Address of FQDN for the sanic server to use. :param strict_slashes: Enforce the API urls are requested with a training */* :param stream: If the route should provide a streaming support :param version: Blueprint Version :param name: Unique name to identify the Route :return a decorated method that when invoked will return an object of type :class:`FutureRoute` """ if strict_slashes is None: strict_slashes = self.strict_slashes def decorator(handler): route = FutureRoute( handler, uri, methods, host, strict_slashes, stream, version, name, ) self.routes.append(route) return handler return decorator
def isIn(val, schema, name = None): # pylint: disable-msg=W0613 """ !~~isIn(data) """ if name is None: name = schema if not _lists.has_key(name): return False try: return val in _lists[name] except TypeError: return False
!~~isIn(data)
Below is the the instruction that describes the task: ### Input: !~~isIn(data) ### Response: def isIn(val, schema, name = None): # pylint: disable-msg=W0613 """ !~~isIn(data) """ if name is None: name = schema if not _lists.has_key(name): return False try: return val in _lists[name] except TypeError: return False
def assertStructIsInline(self, obj): """ Structs are always stored inline, so need to be created right where they are used. You'll get this error if you created it elsewhere. """ N.enforce_number(obj, N.UOffsetTFlags) if obj != self.Offset(): msg = ("flatbuffers: Tried to write a Struct at an Offset that " "is different from the current Offset of the Builder.") raise StructIsNotInlineError(msg)
Structs are always stored inline, so need to be created right where they are used. You'll get this error if you created it elsewhere.
Below is the the instruction that describes the task: ### Input: Structs are always stored inline, so need to be created right where they are used. You'll get this error if you created it elsewhere. ### Response: def assertStructIsInline(self, obj): """ Structs are always stored inline, so need to be created right where they are used. You'll get this error if you created it elsewhere. """ N.enforce_number(obj, N.UOffsetTFlags) if obj != self.Offset(): msg = ("flatbuffers: Tried to write a Struct at an Offset that " "is different from the current Offset of the Builder.") raise StructIsNotInlineError(msg)
def fixed_point_quantized_convolution(inp, outmaps, kernel, pad=None, stride=None, dilation=None, group=1, w_init=None, b_init=None, base_axis=1, fix_parameters=False, rng=None, with_bias=True, quantize_w=True, sign_w=True, n_w=8, delta_w=2**-4, ste_fine_grained_w=True, quantize_b=True, sign_b=True, n_b=8, delta_b=2**-4, ste_fine_grained_b=True,): """Fixed-Point Quantized Convolution. Fixed-Point Quantized Convolution is the convolution function, except the definition of the inner product is modified. The input-output relation of this function is as follows: .. math:: y_{n, a, b} = \sum_{m} \sum_{i} \sum_{j} Q(w_{n, m, i, j}) x_{m, a + i, b + j}, where :math:`Q(w_{n, m, i, j})` is the fixed-point quantization function. .. note:: 1) if you would like to share weights between some layers, please make sure to share the standard, floating value weights (`weight`) and not the quantized weights (`quantized weight`) 2) The weights and the quantized weights become synced only after :func:`~nnabla._variable.Variable.forward` is called, and not after a call to :func:`~nnabla._variable.Variable.backward`. To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the float weights and the quantized weights will not be in sync. 3) CPU and GPU implementations now use float value for `quantized weight`, since this function is only for simulation purposes. Args: inp (~nnabla.Variable): N-D array. outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16. kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5). pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions. stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions. 
dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions. group (int): Number of groups of channels. This makes connections across channels more sparse by grouping connections along map direction. w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`. base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions. fix_parameters (bool): When set to `True`, the weights and biases will not be updated. rng (numpy.random.RandomState): Random generator for Initializer. with_bias (bool): Specify whether to include the bias term. quantize_w (bool): Quantize weights if `True`. quantize_bias (bool): Quantize bias if `True`. sign_w (bool): Use signed quantization if `True`. n_w (int): Bit width used for weight. delta_w (float): Step size for weight. ste_fine_grained_w (bool): STE is fine-grained if `True`. quantize_b (bool): Quantize bias if `True`. n_b (int): Bit width used for bias. delta_w (float): Step size for bias. ste_fine_grained_b (bool): STE is fine-grained if `True`. Returns: :class:`~nnabla.Variable`: N-D array. 
""" if w_init is None: w_init = UniformInitializer( calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng) if with_bias and b_init is None: b_init = ConstantInitializer() # Floating Weight w = get_parameter_or_create( "W", (outmaps, inp.shape[base_axis] // group) + tuple(kernel), w_init, True, not fix_parameters) # Quantized Weight if quantize_w: w_q = get_parameter_or_create( "W_q", (outmaps, inp.shape[base_axis] // group) + tuple(kernel), w_init, False) # Link computation graph real_w_q = F.fixed_point_quantize(w, quantize=quantize_w, sign=sign_w, n=n_w, delta=delta_w, ste_fine_grained=ste_fine_grained_w, outputs=[w_q.data]) real_w_q.persistent = True else: real_w_q = w # Bias # Floating b = None b_q = None real_b_q = None if with_bias: b = get_parameter_or_create( "b", (outmaps,), b_init, True, not fix_parameters) if quantize_b: b_q = get_parameter_or_create( "b_q", (outmaps,), b_init, False) # Link computation graph real_b_q = F.fixed_point_quantize(b, quantize=quantize_b, sign=sign_b, n=n_b, delta=delta_b, ste_fine_grained=ste_fine_grained_b, outputs=[b_q.data]) real_b_q.persistent = True else: real_b_q = b return F.convolution(inp, real_w_q, real_b_q, base_axis, pad, stride, dilation, group)
Fixed-Point Quantized Convolution. Fixed-Point Quantized Convolution is the convolution function, except the definition of the inner product is modified. The input-output relation of this function is as follows: .. math:: y_{n, a, b} = \sum_{m} \sum_{i} \sum_{j} Q(w_{n, m, i, j}) x_{m, a + i, b + j}, where :math:`Q(w_{n, m, i, j})` is the fixed-point quantization function. .. note:: 1) if you would like to share weights between some layers, please make sure to share the standard, floating value weights (`weight`) and not the quantized weights (`quantized weight`) 2) The weights and the quantized weights become synced only after :func:`~nnabla._variable.Variable.forward` is called, and not after a call to :func:`~nnabla._variable.Variable.backward`. To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the float weights and the quantized weights will not be in sync. 3) CPU and GPU implementations now use float value for `quantized weight`, since this function is only for simulation purposes. Args: inp (~nnabla.Variable): N-D array. outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16. kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5). pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions. stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions. dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions. group (int): Number of groups of channels. This makes connections across channels more sparse by grouping connections along map direction. w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. 
By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`. base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions. fix_parameters (bool): When set to `True`, the weights and biases will not be updated. rng (numpy.random.RandomState): Random generator for Initializer. with_bias (bool): Specify whether to include the bias term. quantize_w (bool): Quantize weights if `True`. quantize_bias (bool): Quantize bias if `True`. sign_w (bool): Use signed quantization if `True`. n_w (int): Bit width used for weight. delta_w (float): Step size for weight. ste_fine_grained_w (bool): STE is fine-grained if `True`. quantize_b (bool): Quantize bias if `True`. n_b (int): Bit width used for bias. delta_w (float): Step size for bias. ste_fine_grained_b (bool): STE is fine-grained if `True`. Returns: :class:`~nnabla.Variable`: N-D array.
Below is the the instruction that describes the task: ### Input: Fixed-Point Quantized Convolution. Fixed-Point Quantized Convolution is the convolution function, except the definition of the inner product is modified. The input-output relation of this function is as follows: .. math:: y_{n, a, b} = \sum_{m} \sum_{i} \sum_{j} Q(w_{n, m, i, j}) x_{m, a + i, b + j}, where :math:`Q(w_{n, m, i, j})` is the fixed-point quantization function. .. note:: 1) if you would like to share weights between some layers, please make sure to share the standard, floating value weights (`weight`) and not the quantized weights (`quantized weight`) 2) The weights and the quantized weights become synced only after :func:`~nnabla._variable.Variable.forward` is called, and not after a call to :func:`~nnabla._variable.Variable.backward`. To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the float weights and the quantized weights will not be in sync. 3) CPU and GPU implementations now use float value for `quantized weight`, since this function is only for simulation purposes. Args: inp (~nnabla.Variable): N-D array. outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16. kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5). pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions. stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions. dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions. group (int): Number of groups of channels. This makes connections across channels more sparse by grouping connections along map direction. w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. 
By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`. base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions. fix_parameters (bool): When set to `True`, the weights and biases will not be updated. rng (numpy.random.RandomState): Random generator for Initializer. with_bias (bool): Specify whether to include the bias term. quantize_w (bool): Quantize weights if `True`. quantize_bias (bool): Quantize bias if `True`. sign_w (bool): Use signed quantization if `True`. n_w (int): Bit width used for weight. delta_w (float): Step size for weight. ste_fine_grained_w (bool): STE is fine-grained if `True`. quantize_b (bool): Quantize bias if `True`. n_b (int): Bit width used for bias. delta_w (float): Step size for bias. ste_fine_grained_b (bool): STE is fine-grained if `True`. Returns: :class:`~nnabla.Variable`: N-D array. ### Response: def fixed_point_quantized_convolution(inp, outmaps, kernel, pad=None, stride=None, dilation=None, group=1, w_init=None, b_init=None, base_axis=1, fix_parameters=False, rng=None, with_bias=True, quantize_w=True, sign_w=True, n_w=8, delta_w=2**-4, ste_fine_grained_w=True, quantize_b=True, sign_b=True, n_b=8, delta_b=2**-4, ste_fine_grained_b=True,): """Fixed-Point Quantized Convolution. Fixed-Point Quantized Convolution is the convolution function, except the definition of the inner product is modified. The input-output relation of this function is as follows: .. math:: y_{n, a, b} = \sum_{m} \sum_{i} \sum_{j} Q(w_{n, m, i, j}) x_{m, a + i, b + j}, where :math:`Q(w_{n, m, i, j})` is the fixed-point quantization function. .. 
note:: 1) if you would like to share weights between some layers, please make sure to share the standard, floating value weights (`weight`) and not the quantized weights (`quantized weight`) 2) The weights and the quantized weights become synced only after :func:`~nnabla._variable.Variable.forward` is called, and not after a call to :func:`~nnabla._variable.Variable.backward`. To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the float weights and the quantized weights will not be in sync. 3) CPU and GPU implementations now use float value for `quantized weight`, since this function is only for simulation purposes. Args: inp (~nnabla.Variable): N-D array. outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16. kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5). pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions. stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions. dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions. group (int): Number of groups of channels. This makes connections across channels more sparse by grouping connections along map direction. w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`. base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions. 
fix_parameters (bool): When set to `True`, the weights and biases will not be updated. rng (numpy.random.RandomState): Random generator for Initializer. with_bias (bool): Specify whether to include the bias term. quantize_w (bool): Quantize weights if `True`. quantize_bias (bool): Quantize bias if `True`. sign_w (bool): Use signed quantization if `True`. n_w (int): Bit width used for weight. delta_w (float): Step size for weight. ste_fine_grained_w (bool): STE is fine-grained if `True`. quantize_b (bool): Quantize bias if `True`. n_b (int): Bit width used for bias. delta_w (float): Step size for bias. ste_fine_grained_b (bool): STE is fine-grained if `True`. Returns: :class:`~nnabla.Variable`: N-D array. """ if w_init is None: w_init = UniformInitializer( calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng) if with_bias and b_init is None: b_init = ConstantInitializer() # Floating Weight w = get_parameter_or_create( "W", (outmaps, inp.shape[base_axis] // group) + tuple(kernel), w_init, True, not fix_parameters) # Quantized Weight if quantize_w: w_q = get_parameter_or_create( "W_q", (outmaps, inp.shape[base_axis] // group) + tuple(kernel), w_init, False) # Link computation graph real_w_q = F.fixed_point_quantize(w, quantize=quantize_w, sign=sign_w, n=n_w, delta=delta_w, ste_fine_grained=ste_fine_grained_w, outputs=[w_q.data]) real_w_q.persistent = True else: real_w_q = w # Bias # Floating b = None b_q = None real_b_q = None if with_bias: b = get_parameter_or_create( "b", (outmaps,), b_init, True, not fix_parameters) if quantize_b: b_q = get_parameter_or_create( "b_q", (outmaps,), b_init, False) # Link computation graph real_b_q = F.fixed_point_quantize(b, quantize=quantize_b, sign=sign_b, n=n_b, delta=delta_b, ste_fine_grained=ste_fine_grained_b, outputs=[b_q.data]) real_b_q.persistent = True else: real_b_q = b return F.convolution(inp, real_w_q, real_b_q, base_axis, pad, stride, dilation, group)
def get_nts(self): """Read csv/tsv file and return specified data in a list of lists.""" data = [] nt_obj = None with open(self.fin) as fin_stream: for lnum, line in enumerate(fin_stream, 1): try: line = line.rstrip('\r\n') # chomp # Obtain Data if headers have been collected from the first line if nt_obj is not None: flds = re.split(self.sep, line) self.convert_ints_floats(flds) flds[6] = [s.strip() for s in flds[6].split(',')] ntdata = nt_obj._make(flds) data.append(ntdata) # Obtain the header else: nt_obj = self._init_nt_hdr(line) except RuntimeError: # Print headers #if nt_obj is not None: # sys.stdout.write("{HDRS}\n".format(HDRS='\n'.join(nt_obj._fields))) flds = re.split(self.sep, line) print(len(flds), "FIELDS") print(flds) #raise Exception("{FIN}({LNUM}): {LINE}\n".format( # FIN=self.fin, LNUM=lnum, LINE=line)) # JUST SKIP LINES WITH INCOMPLETE DATA, BUT PRINT ERROR MESSAGE sys.stdout.write("**ERROR: {FIN}({LNUM}): {LINE}\n".format( FIN=self.fin, LNUM=lnum, LINE=line)) if self.log is not None: self.log.write(" {:9} lines READ: {}\n".format(len(data), self.fin)) return data
Read csv/tsv file and return specified data in a list of lists.
Below is the the instruction that describes the task: ### Input: Read csv/tsv file and return specified data in a list of lists. ### Response: def get_nts(self): """Read csv/tsv file and return specified data in a list of lists.""" data = [] nt_obj = None with open(self.fin) as fin_stream: for lnum, line in enumerate(fin_stream, 1): try: line = line.rstrip('\r\n') # chomp # Obtain Data if headers have been collected from the first line if nt_obj is not None: flds = re.split(self.sep, line) self.convert_ints_floats(flds) flds[6] = [s.strip() for s in flds[6].split(',')] ntdata = nt_obj._make(flds) data.append(ntdata) # Obtain the header else: nt_obj = self._init_nt_hdr(line) except RuntimeError: # Print headers #if nt_obj is not None: # sys.stdout.write("{HDRS}\n".format(HDRS='\n'.join(nt_obj._fields))) flds = re.split(self.sep, line) print(len(flds), "FIELDS") print(flds) #raise Exception("{FIN}({LNUM}): {LINE}\n".format( # FIN=self.fin, LNUM=lnum, LINE=line)) # JUST SKIP LINES WITH INCOMPLETE DATA, BUT PRINT ERROR MESSAGE sys.stdout.write("**ERROR: {FIN}({LNUM}): {LINE}\n".format( FIN=self.fin, LNUM=lnum, LINE=line)) if self.log is not None: self.log.write(" {:9} lines READ: {}\n".format(len(data), self.fin)) return data
def p_expression_binop(t): '''expression : expression PLUS expression | expression MINUS expression | expression TIMES expression | expression DIVIDE expression''' if t[2] == '+' : t[0] = t[1] + t[3] elif t[2] == '-': t[0] = t[1] - t[3] elif t[2] == '*': t[0] = t[1] * t[3] elif t[2] == '/': t[0] = t[1] / t[3]
expression : expression PLUS expression | expression MINUS expression | expression TIMES expression | expression DIVIDE expression
Below is the the instruction that describes the task: ### Input: expression : expression PLUS expression | expression MINUS expression | expression TIMES expression | expression DIVIDE expression ### Response: def p_expression_binop(t): '''expression : expression PLUS expression | expression MINUS expression | expression TIMES expression | expression DIVIDE expression''' if t[2] == '+' : t[0] = t[1] + t[3] elif t[2] == '-': t[0] = t[1] - t[3] elif t[2] == '*': t[0] = t[1] * t[3] elif t[2] == '/': t[0] = t[1] / t[3]
async def create(self, query, *, dc=None): """Creates a new prepared query Parameters: Query (Object): Query definition dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. Returns: Object: New query ID The create operation expects a body that defines the prepared query, like this example:: { "Name": "my-query", "Session": "adf4238a-882b-9ddc-4a9d-5b6758e4159e", "Token": "", "Near": "node1", "Service": { "Service": "redis", "Failover": { "NearestN": 3, "Datacenters": ["dc1", "dc2"] }, "OnlyPassing": False, "Tags": ["master", "!experimental"] }, "DNS": { "TTL": timedelta(seconds=10) } } Only the **Service** field inside the **Service** structure is mandatory, all other fields will take their default values if they are not included. **Name** is an optional friendly name that can be used to execute a query instead of using its ID. **Session** provides a way to automatically remove a prepared query when the given session is invalidated. This is optional, and if not given the prepared query must be manually removed when no longer needed. **Token**, if specified, is a captured ACL Token that is reused as the ACL Token every time the query is executed. This allows queries to be executed by clients with lesser or even no ACL Token, so this should be used with care. The token itself can only be seen by clients with a management token. If the **Token** field is left blank or omitted, the client's ACL Token will be used to determine if they have access to the service being queried. If the client does not supply an ACL Token, the anonymous token will be used. **Near** allows specifying a particular node to sort near based on distance sorting using Network Coordinates. The nearest instance to the specified node will be returned first, and subsequent nodes in the response will be sorted in ascending order of estimated round-trip times. If the node given does not exist, the nodes in the response will be shuffled. 
Using the magic **_agent** value is supported, and will automatically return results nearest the agent servicing the request. If unspecified, the response will be shuffled by default. The set of fields inside the **Service** structure define the query's behavior. **Service** is the name of the service to query. This is required. **Failover** contains two fields, both of which are optional, and determine what happens if no healthy nodes are available in the local datacenter when the query is executed. It allows the use of nodes in other datacenters with very little configuration. If **NearestN** is set to a value greater than zero, then the query will be forwarded to up to **NearestN** other datacenters based on their estimated network round trip time using Network Coordinates from the WAN gossip pool. The median round trip time from the server handling the query to the servers in the remote datacenter is used to determine the priority. The default value is zero. All Consul servers must be running version 0.6.0 or above in order for this feature to work correctly. If any servers are not running the required version of Consul they will be considered last since they won't have any available network coordinate information. **Datacenters** contains a fixed list of remote datacenters to forward the query to if there are no healthy nodes in the local datacenter. Datacenters are queried in the order given in the list. If this option is combined with **NearestN**, then the **NearestN** queries will be performed first, followed by the list given by **Datacenters**. A given datacenter will only be queried one time during a failover, even if it is selected by both **NearestN** and is listed in **Datacenters**. The default value is an empty list. **OnlyPassing** controls the behavior of the query's health check filtering. If this is set to false, the results will include nodes with checks in the passing as well as the warning states. 
If this is set to true, only nodes with checks in the passing state will be returned. The default value is False. **Tags** provides a list of service tags to filter the query results. For a service to pass the tag filter it must have all of the required tags, and none of the excluded tags (prefixed with ``!``). The default value is an empty list, which does no tag filtering. **TTL** in the **DNS** structure is a duration string that can use "s" as a suffix for seconds. It controls how the TTL is set when query results are served over DNS. If this isn't specified, then the Consul agent configuration for the given service will be used (see DNS Caching). If this is specified, it will take precedence over any Consul agent-specific configuration. If no TTL is specified here or at the Consul agent level, then the TTL will default to 0. It returns the ID of the created query:: { "ID": "8f246b77-f3e1-ff88-5b48-8ec93abf3e05" } """ if "Token" in query: # in case of a full token object... query["Token"] = extract_attr(query["Token"], keys=["ID"]) response = await self._api.post("/v1/query", params={"dc": dc}, data=query) return response.body
Creates a new prepared query Parameters: Query (Object): Query definition dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. Returns: Object: New query ID The create operation expects a body that defines the prepared query, like this example:: { "Name": "my-query", "Session": "adf4238a-882b-9ddc-4a9d-5b6758e4159e", "Token": "", "Near": "node1", "Service": { "Service": "redis", "Failover": { "NearestN": 3, "Datacenters": ["dc1", "dc2"] }, "OnlyPassing": False, "Tags": ["master", "!experimental"] }, "DNS": { "TTL": timedelta(seconds=10) } } Only the **Service** field inside the **Service** structure is mandatory, all other fields will take their default values if they are not included. **Name** is an optional friendly name that can be used to execute a query instead of using its ID. **Session** provides a way to automatically remove a prepared query when the given session is invalidated. This is optional, and if not given the prepared query must be manually removed when no longer needed. **Token**, if specified, is a captured ACL Token that is reused as the ACL Token every time the query is executed. This allows queries to be executed by clients with lesser or even no ACL Token, so this should be used with care. The token itself can only be seen by clients with a management token. If the **Token** field is left blank or omitted, the client's ACL Token will be used to determine if they have access to the service being queried. If the client does not supply an ACL Token, the anonymous token will be used. **Near** allows specifying a particular node to sort near based on distance sorting using Network Coordinates. The nearest instance to the specified node will be returned first, and subsequent nodes in the response will be sorted in ascending order of estimated round-trip times. If the node given does not exist, the nodes in the response will be shuffled. 
Using the magic **_agent** value is supported, and will automatically return results nearest the agent servicing the request. If unspecified, the response will be shuffled by default. The set of fields inside the **Service** structure define the query's behavior. **Service** is the name of the service to query. This is required. **Failover** contains two fields, both of which are optional, and determine what happens if no healthy nodes are available in the local datacenter when the query is executed. It allows the use of nodes in other datacenters with very little configuration. If **NearestN** is set to a value greater than zero, then the query will be forwarded to up to **NearestN** other datacenters based on their estimated network round trip time using Network Coordinates from the WAN gossip pool. The median round trip time from the server handling the query to the servers in the remote datacenter is used to determine the priority. The default value is zero. All Consul servers must be running version 0.6.0 or above in order for this feature to work correctly. If any servers are not running the required version of Consul they will be considered last since they won't have any available network coordinate information. **Datacenters** contains a fixed list of remote datacenters to forward the query to if there are no healthy nodes in the local datacenter. Datacenters are queried in the order given in the list. If this option is combined with **NearestN**, then the **NearestN** queries will be performed first, followed by the list given by **Datacenters**. A given datacenter will only be queried one time during a failover, even if it is selected by both **NearestN** and is listed in **Datacenters**. The default value is an empty list. **OnlyPassing** controls the behavior of the query's health check filtering. If this is set to false, the results will include nodes with checks in the passing as well as the warning states. 
If this is set to true, only nodes with checks in the passing state will be returned. The default value is False. **Tags** provides a list of service tags to filter the query results. For a service to pass the tag filter it must have all of the required tags, and none of the excluded tags (prefixed with ``!``). The default value is an empty list, which does no tag filtering. **TTL** in the **DNS** structure is a duration string that can use "s" as a suffix for seconds. It controls how the TTL is set when query results are served over DNS. If this isn't specified, then the Consul agent configuration for the given service will be used (see DNS Caching). If this is specified, it will take precedence over any Consul agent-specific configuration. If no TTL is specified here or at the Consul agent level, then the TTL will default to 0. It returns the ID of the created query:: { "ID": "8f246b77-f3e1-ff88-5b48-8ec93abf3e05" }
Below is the the instruction that describes the task: ### Input: Creates a new prepared query Parameters: Query (Object): Query definition dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. Returns: Object: New query ID The create operation expects a body that defines the prepared query, like this example:: { "Name": "my-query", "Session": "adf4238a-882b-9ddc-4a9d-5b6758e4159e", "Token": "", "Near": "node1", "Service": { "Service": "redis", "Failover": { "NearestN": 3, "Datacenters": ["dc1", "dc2"] }, "OnlyPassing": False, "Tags": ["master", "!experimental"] }, "DNS": { "TTL": timedelta(seconds=10) } } Only the **Service** field inside the **Service** structure is mandatory, all other fields will take their default values if they are not included. **Name** is an optional friendly name that can be used to execute a query instead of using its ID. **Session** provides a way to automatically remove a prepared query when the given session is invalidated. This is optional, and if not given the prepared query must be manually removed when no longer needed. **Token**, if specified, is a captured ACL Token that is reused as the ACL Token every time the query is executed. This allows queries to be executed by clients with lesser or even no ACL Token, so this should be used with care. The token itself can only be seen by clients with a management token. If the **Token** field is left blank or omitted, the client's ACL Token will be used to determine if they have access to the service being queried. If the client does not supply an ACL Token, the anonymous token will be used. **Near** allows specifying a particular node to sort near based on distance sorting using Network Coordinates. The nearest instance to the specified node will be returned first, and subsequent nodes in the response will be sorted in ascending order of estimated round-trip times. If the node given does not exist, the nodes in the response will be shuffled. 
Using the magic **_agent** value is supported, and will automatically return results nearest the agent servicing the request. If unspecified, the response will be shuffled by default. The set of fields inside the **Service** structure define the query's behavior. **Service** is the name of the service to query. This is required. **Failover** contains two fields, both of which are optional, and determine what happens if no healthy nodes are available in the local datacenter when the query is executed. It allows the use of nodes in other datacenters with very little configuration. If **NearestN** is set to a value greater than zero, then the query will be forwarded to up to **NearestN** other datacenters based on their estimated network round trip time using Network Coordinates from the WAN gossip pool. The median round trip time from the server handling the query to the servers in the remote datacenter is used to determine the priority. The default value is zero. All Consul servers must be running version 0.6.0 or above in order for this feature to work correctly. If any servers are not running the required version of Consul they will be considered last since they won't have any available network coordinate information. **Datacenters** contains a fixed list of remote datacenters to forward the query to if there are no healthy nodes in the local datacenter. Datacenters are queried in the order given in the list. If this option is combined with **NearestN**, then the **NearestN** queries will be performed first, followed by the list given by **Datacenters**. A given datacenter will only be queried one time during a failover, even if it is selected by both **NearestN** and is listed in **Datacenters**. The default value is an empty list. **OnlyPassing** controls the behavior of the query's health check filtering. If this is set to false, the results will include nodes with checks in the passing as well as the warning states. 
If this is set to true, only nodes with checks in the passing state will be returned. The default value is False. **Tags** provides a list of service tags to filter the query results. For a service to pass the tag filter it must have all of the required tags, and none of the excluded tags (prefixed with ``!``). The default value is an empty list, which does no tag filtering. **TTL** in the **DNS** structure is a duration string that can use "s" as a suffix for seconds. It controls how the TTL is set when query results are served over DNS. If this isn't specified, then the Consul agent configuration for the given service will be used (see DNS Caching). If this is specified, it will take precedence over any Consul agent-specific configuration. If no TTL is specified here or at the Consul agent level, then the TTL will default to 0. It returns the ID of the created query:: { "ID": "8f246b77-f3e1-ff88-5b48-8ec93abf3e05" } ### Response: async def create(self, query, *, dc=None): """Creates a new prepared query Parameters: Query (Object): Query definition dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. Returns: Object: New query ID The create operation expects a body that defines the prepared query, like this example:: { "Name": "my-query", "Session": "adf4238a-882b-9ddc-4a9d-5b6758e4159e", "Token": "", "Near": "node1", "Service": { "Service": "redis", "Failover": { "NearestN": 3, "Datacenters": ["dc1", "dc2"] }, "OnlyPassing": False, "Tags": ["master", "!experimental"] }, "DNS": { "TTL": timedelta(seconds=10) } } Only the **Service** field inside the **Service** structure is mandatory, all other fields will take their default values if they are not included. **Name** is an optional friendly name that can be used to execute a query instead of using its ID. **Session** provides a way to automatically remove a prepared query when the given session is invalidated. 
This is optional, and if not given the prepared query must be manually removed when no longer needed. **Token**, if specified, is a captured ACL Token that is reused as the ACL Token every time the query is executed. This allows queries to be executed by clients with lesser or even no ACL Token, so this should be used with care. The token itself can only be seen by clients with a management token. If the **Token** field is left blank or omitted, the client's ACL Token will be used to determine if they have access to the service being queried. If the client does not supply an ACL Token, the anonymous token will be used. **Near** allows specifying a particular node to sort near based on distance sorting using Network Coordinates. The nearest instance to the specified node will be returned first, and subsequent nodes in the response will be sorted in ascending order of estimated round-trip times. If the node given does not exist, the nodes in the response will be shuffled. Using the magic **_agent** value is supported, and will automatically return results nearest the agent servicing the request. If unspecified, the response will be shuffled by default. The set of fields inside the **Service** structure define the query's behavior. **Service** is the name of the service to query. This is required. **Failover** contains two fields, both of which are optional, and determine what happens if no healthy nodes are available in the local datacenter when the query is executed. It allows the use of nodes in other datacenters with very little configuration. If **NearestN** is set to a value greater than zero, then the query will be forwarded to up to **NearestN** other datacenters based on their estimated network round trip time using Network Coordinates from the WAN gossip pool. The median round trip time from the server handling the query to the servers in the remote datacenter is used to determine the priority. The default value is zero. 
All Consul servers must be running version 0.6.0 or above in order for this feature to work correctly. If any servers are not running the required version of Consul they will be considered last since they won't have any available network coordinate information. **Datacenters** contains a fixed list of remote datacenters to forward the query to if there are no healthy nodes in the local datacenter. Datacenters are queried in the order given in the list. If this option is combined with **NearestN**, then the **NearestN** queries will be performed first, followed by the list given by **Datacenters**. A given datacenter will only be queried one time during a failover, even if it is selected by both **NearestN** and is listed in **Datacenters**. The default value is an empty list. **OnlyPassing** controls the behavior of the query's health check filtering. If this is set to false, the results will include nodes with checks in the passing as well as the warning states. If this is set to true, only nodes with checks in the passing state will be returned. The default value is False. **Tags** provides a list of service tags to filter the query results. For a service to pass the tag filter it must have all of the required tags, and none of the excluded tags (prefixed with ``!``). The default value is an empty list, which does no tag filtering. **TTL** in the **DNS** structure is a duration string that can use "s" as a suffix for seconds. It controls how the TTL is set when query results are served over DNS. If this isn't specified, then the Consul agent configuration for the given service will be used (see DNS Caching). If this is specified, it will take precedence over any Consul agent-specific configuration. If no TTL is specified here or at the Consul agent level, then the TTL will default to 0. It returns the ID of the created query:: { "ID": "8f246b77-f3e1-ff88-5b48-8ec93abf3e05" } """ if "Token" in query: # in case of a full token object... 
query["Token"] = extract_attr(query["Token"], keys=["ID"]) response = await self._api.post("/v1/query", params={"dc": dc}, data=query) return response.body
def main(sample_id, assembly_file, coverage_file, coverage_bp_file, bam_file, opts, gsize): """Main executor of the process_assembly_mapping template. Parameters ---------- sample_id : str Sample Identification string. assembly_file : str Path to assembly file in Fasta format. coverage_file : str Path to TSV file with coverage information for each contig. coverage_bp_file : str Path to TSV file with coverage information for each base. bam_file : str Path to BAM file. opts : list List of options for processing assembly mapping. gsize : int Expected genome size """ min_assembly_coverage, max_contigs = opts logger.info("Starting assembly mapping processing") # Get coverage info, total size and total coverage from the assembly logger.info("Parsing coverage table") coverage_info, a_cov = parse_coverage_table(coverage_file) a_size, contig_size = get_assembly_size(assembly_file) logger.info("Assembly processed with a total size of '{}' and coverage" " of '{}'".format(a_size, a_cov)) # Get number of assembled bp after filters logger.info("Parsing coverage per bp table") coverage_bp_data = get_coverage_from_file(coverage_bp_file) # Assess the minimum assembly coverage min_coverage = evaluate_min_coverage(min_assembly_coverage, a_cov, a_size) # Check if filtering the assembly using the provided min_coverage will # reduce the final bp number to less than 80% of the estimated genome # size. # If the check below passes with True, then the filtered assembly # is above the 80% genome size threshold. filtered_assembly = "{}_filt.fasta".format( os.path.splitext(assembly_file)[0]) filtered_bam = "filtered.bam" logger.info("Checking filtered assembly") if check_filtered_assembly(coverage_info, coverage_bp_data, min_coverage, gsize, contig_size, int(max_contigs), sample_id): # Filter assembly contigs based on the minimum coverage. 
logger.info("Filtered assembly passed minimum size threshold") logger.info("Writting filtered assembly") filter_assembly(assembly_file, min_coverage, coverage_info, filtered_assembly) logger.info("Filtering BAM file according to saved contigs") filter_bam(coverage_info, bam_file, min_coverage, filtered_bam) # Could not filter the assembly as it would drop below acceptable # length levels. Copy the original assembly to the output assembly file # for compliance with the output channel else: shutil.copy(assembly_file, filtered_assembly) shutil.copy(bam_file, filtered_bam) shutil.copy(bam_file + ".bai", filtered_bam + ".bai") with open(".status", "w") as status_fh: status_fh.write("pass")
Main executor of the process_assembly_mapping template. Parameters ---------- sample_id : str Sample Identification string. assembly_file : str Path to assembly file in Fasta format. coverage_file : str Path to TSV file with coverage information for each contig. coverage_bp_file : str Path to TSV file with coverage information for each base. bam_file : str Path to BAM file. opts : list List of options for processing assembly mapping. gsize : int Expected genome size
Below is the the instruction that describes the task: ### Input: Main executor of the process_assembly_mapping template. Parameters ---------- sample_id : str Sample Identification string. assembly_file : str Path to assembly file in Fasta format. coverage_file : str Path to TSV file with coverage information for each contig. coverage_bp_file : str Path to TSV file with coverage information for each base. bam_file : str Path to BAM file. opts : list List of options for processing assembly mapping. gsize : int Expected genome size ### Response: def main(sample_id, assembly_file, coverage_file, coverage_bp_file, bam_file, opts, gsize): """Main executor of the process_assembly_mapping template. Parameters ---------- sample_id : str Sample Identification string. assembly_file : str Path to assembly file in Fasta format. coverage_file : str Path to TSV file with coverage information for each contig. coverage_bp_file : str Path to TSV file with coverage information for each base. bam_file : str Path to BAM file. opts : list List of options for processing assembly mapping. gsize : int Expected genome size """ min_assembly_coverage, max_contigs = opts logger.info("Starting assembly mapping processing") # Get coverage info, total size and total coverage from the assembly logger.info("Parsing coverage table") coverage_info, a_cov = parse_coverage_table(coverage_file) a_size, contig_size = get_assembly_size(assembly_file) logger.info("Assembly processed with a total size of '{}' and coverage" " of '{}'".format(a_size, a_cov)) # Get number of assembled bp after filters logger.info("Parsing coverage per bp table") coverage_bp_data = get_coverage_from_file(coverage_bp_file) # Assess the minimum assembly coverage min_coverage = evaluate_min_coverage(min_assembly_coverage, a_cov, a_size) # Check if filtering the assembly using the provided min_coverage will # reduce the final bp number to less than 80% of the estimated genome # size. 
# If the check below passes with True, then the filtered assembly # is above the 80% genome size threshold. filtered_assembly = "{}_filt.fasta".format( os.path.splitext(assembly_file)[0]) filtered_bam = "filtered.bam" logger.info("Checking filtered assembly") if check_filtered_assembly(coverage_info, coverage_bp_data, min_coverage, gsize, contig_size, int(max_contigs), sample_id): # Filter assembly contigs based on the minimum coverage. logger.info("Filtered assembly passed minimum size threshold") logger.info("Writting filtered assembly") filter_assembly(assembly_file, min_coverage, coverage_info, filtered_assembly) logger.info("Filtering BAM file according to saved contigs") filter_bam(coverage_info, bam_file, min_coverage, filtered_bam) # Could not filter the assembly as it would drop below acceptable # length levels. Copy the original assembly to the output assembly file # for compliance with the output channel else: shutil.copy(assembly_file, filtered_assembly) shutil.copy(bam_file, filtered_bam) shutil.copy(bam_file + ".bai", filtered_bam + ".bai") with open(".status", "w") as status_fh: status_fh.write("pass")
def valuecounter(table, *field, **kwargs): """ Find distinct values for the given field and count the number of occurrences. Returns a :class:`dict` mapping values to counts. E.g.:: >>> import petl as etl >>> table = [['foo', 'bar'], ... ['a', True], ... ['b'], ... ['b', True], ... ['c', False]] >>> etl.valuecounter(table, 'foo') Counter({'b': 2, 'a': 1, 'c': 1}) The `field` argument can be a single field name or index (starting from zero) or a tuple of field names and/or indexes. """ missing = kwargs.get('missing', None) counter = Counter() for v in values(table, field, missing=missing): try: counter[v] += 1 except IndexError: pass # short row return counter
Find distinct values for the given field and count the number of occurrences. Returns a :class:`dict` mapping values to counts. E.g.:: >>> import petl as etl >>> table = [['foo', 'bar'], ... ['a', True], ... ['b'], ... ['b', True], ... ['c', False]] >>> etl.valuecounter(table, 'foo') Counter({'b': 2, 'a': 1, 'c': 1}) The `field` argument can be a single field name or index (starting from zero) or a tuple of field names and/or indexes.
Below is the the instruction that describes the task: ### Input: Find distinct values for the given field and count the number of occurrences. Returns a :class:`dict` mapping values to counts. E.g.:: >>> import petl as etl >>> table = [['foo', 'bar'], ... ['a', True], ... ['b'], ... ['b', True], ... ['c', False]] >>> etl.valuecounter(table, 'foo') Counter({'b': 2, 'a': 1, 'c': 1}) The `field` argument can be a single field name or index (starting from zero) or a tuple of field names and/or indexes. ### Response: def valuecounter(table, *field, **kwargs): """ Find distinct values for the given field and count the number of occurrences. Returns a :class:`dict` mapping values to counts. E.g.:: >>> import petl as etl >>> table = [['foo', 'bar'], ... ['a', True], ... ['b'], ... ['b', True], ... ['c', False]] >>> etl.valuecounter(table, 'foo') Counter({'b': 2, 'a': 1, 'c': 1}) The `field` argument can be a single field name or index (starting from zero) or a tuple of field names and/or indexes. """ missing = kwargs.get('missing', None) counter = Counter() for v in values(table, field, missing=missing): try: counter[v] += 1 except IndexError: pass # short row return counter
def write_wonambi(data, filename, subj_id='', dtype='float64'): """Write file in simple Wonambi format. Parameters ---------- data : instance of ChanTime data with only one trial filename : path to file file to export to (the extensions .won and .dat will be added) subj_id : str subject id dtype : str numpy dtype in which you want to save the data Notes ----- Wonambi format creates two files, one .won with the dataset info as json file and one .dat with the memmap recordings. It will happily overwrite any existing file with the same name. Memory-mapped matrices are column-major, Fortran-style, to be compatible with Matlab. """ filename = Path(filename) json_file = filename.with_suffix('.won') memmap_file = filename.with_suffix('.dat') start_time = data.start_time + timedelta(seconds=data.axis['time'][0][0]) start_time_str = start_time.strftime('%Y-%m-%d %H:%M:%S.%f') dataset = {'subj_id': subj_id, 'start_time': start_time_str, 's_freq': data.s_freq, 'chan_name': list(data.axis['chan'][0]), 'n_samples': int(data.number_of('time')[0]), 'dtype': dtype, } with json_file.open('w') as f: dump(dataset, f, sort_keys=True, indent=4) memshape = (len(dataset['chan_name']), dataset['n_samples']) mem = memmap(str(memmap_file), dtype, mode='w+', shape=memshape, order='F') mem[:, :] = data.data[0] mem.flush()
Write file in simple Wonambi format. Parameters ---------- data : instance of ChanTime data with only one trial filename : path to file file to export to (the extensions .won and .dat will be added) subj_id : str subject id dtype : str numpy dtype in which you want to save the data Notes ----- Wonambi format creates two files, one .won with the dataset info as json file and one .dat with the memmap recordings. It will happily overwrite any existing file with the same name. Memory-mapped matrices are column-major, Fortran-style, to be compatible with Matlab.
Below is the the instruction that describes the task: ### Input: Write file in simple Wonambi format. Parameters ---------- data : instance of ChanTime data with only one trial filename : path to file file to export to (the extensions .won and .dat will be added) subj_id : str subject id dtype : str numpy dtype in which you want to save the data Notes ----- Wonambi format creates two files, one .won with the dataset info as json file and one .dat with the memmap recordings. It will happily overwrite any existing file with the same name. Memory-mapped matrices are column-major, Fortran-style, to be compatible with Matlab. ### Response: def write_wonambi(data, filename, subj_id='', dtype='float64'): """Write file in simple Wonambi format. Parameters ---------- data : instance of ChanTime data with only one trial filename : path to file file to export to (the extensions .won and .dat will be added) subj_id : str subject id dtype : str numpy dtype in which you want to save the data Notes ----- Wonambi format creates two files, one .won with the dataset info as json file and one .dat with the memmap recordings. It will happily overwrite any existing file with the same name. Memory-mapped matrices are column-major, Fortran-style, to be compatible with Matlab. """ filename = Path(filename) json_file = filename.with_suffix('.won') memmap_file = filename.with_suffix('.dat') start_time = data.start_time + timedelta(seconds=data.axis['time'][0][0]) start_time_str = start_time.strftime('%Y-%m-%d %H:%M:%S.%f') dataset = {'subj_id': subj_id, 'start_time': start_time_str, 's_freq': data.s_freq, 'chan_name': list(data.axis['chan'][0]), 'n_samples': int(data.number_of('time')[0]), 'dtype': dtype, } with json_file.open('w') as f: dump(dataset, f, sort_keys=True, indent=4) memshape = (len(dataset['chan_name']), dataset['n_samples']) mem = memmap(str(memmap_file), dtype, mode='w+', shape=memshape, order='F') mem[:, :] = data.data[0] mem.flush()
def steps(current, target, max_steps):
    """
    Return the number of discrete steps between two normalized values.

    :param current: Current value (0.0-1.0).
    :param target: Target value (0.0-1.0).
    :param max_steps: Maximum number of steps (the step resolution).
    :return: Absolute number of steps between ``current`` and ``target``.
    :raises ValueError: If either value is outside the 0.0-1.0 range.
    """
    # Bug fix: the original passed the offending value as a *second*
    # positional argument to ValueError, so the %s placeholder was never
    # interpolated into the message. Format the message explicitly.
    if current < 0 or current > 1.0:
        raise ValueError("current value %s is out of bounds (0.0-1.0)" % current)
    if target < 0 or target > 1.0:
        raise ValueError("target value %s is out of bounds (0.0-1.0)" % target)
    # Project both values onto the step scale and return their distance.
    return int(abs((current * max_steps) - (target * max_steps)))
Steps between two values. :param current: Current value (0.0-1.0). :param target: Target value (0.0-1.0). :param max_steps: Maximum number of steps.
Below is the the instruction that describes the task: ### Input: Steps between two values. :param current: Current value (0.0-1.0). :param target: Target value (0.0-1.0). :param max_steps: Maximum number of steps. ### Response: def steps(current, target, max_steps): """ Steps between two values. :param current: Current value (0.0-1.0). :param target: Target value (0.0-1.0). :param max_steps: Maximum number of steps. """ if current < 0 or current > 1.0: raise ValueError("current value %s is out of bounds (0.0-1.0)", current) if target < 0 or target > 1.0: raise ValueError("target value %s is out of bounds (0.0-1.0)", target) return int(abs((current * max_steps) - (target * max_steps)))
def _validate_compute_chunk_params( self, dates, symbols, initial_workspace): """ Verify that the values passed to compute_chunk are well-formed. """ root = self._root_mask_term clsname = type(self).__name__ # Writing this out explicitly so this errors in testing if we change # the name without updating this line. compute_chunk_name = self.compute_chunk.__name__ if root not in initial_workspace: raise AssertionError( "root_mask values not supplied to {cls}.{method}".format( cls=clsname, method=compute_chunk_name, ) ) shape = initial_workspace[root].shape implied_shape = len(dates), len(symbols) if shape != implied_shape: raise AssertionError( "root_mask shape is {shape}, but received dates/symbols " "imply that shape should be {implied}".format( shape=shape, implied=implied_shape, ) )
Verify that the values passed to compute_chunk are well-formed.
Below is the the instruction that describes the task: ### Input: Verify that the values passed to compute_chunk are well-formed. ### Response: def _validate_compute_chunk_params( self, dates, symbols, initial_workspace): """ Verify that the values passed to compute_chunk are well-formed. """ root = self._root_mask_term clsname = type(self).__name__ # Writing this out explicitly so this errors in testing if we change # the name without updating this line. compute_chunk_name = self.compute_chunk.__name__ if root not in initial_workspace: raise AssertionError( "root_mask values not supplied to {cls}.{method}".format( cls=clsname, method=compute_chunk_name, ) ) shape = initial_workspace[root].shape implied_shape = len(dates), len(symbols) if shape != implied_shape: raise AssertionError( "root_mask shape is {shape}, but received dates/symbols " "imply that shape should be {implied}".format( shape=shape, implied=implied_shape, ) )
def convert_conf_for_unreachable(params):
    """Translate legacy Nagios 'u' (UNREACHABLE) state codes to 'x'.

    The 'u' state was rewritten as 'x' in these host options:
    * flap_detection_options
    * notification_options
    * snapshot_criteria
    * stalking_options

    The ``params`` dict is modified in place so configuration files
    written for Nagios keep working.

    :param params: parameters of the host before put in properties
    :type params: dict
    :return: None
    """
    if params is None:
        return

    # Options that hold lists of state letters: rewrite every 'u' to 'x'.
    list_options = ('flap_detection_options', 'notification_options',
                    'snapshot_criteria', 'stalking_options')
    for option in list_options:
        if option in params:
            params[option] = [value.replace('u', 'x')
                              for value in params[option]]

    # Scalar state options may appear either as 'u' or as ['u'].
    for state_option in ('initial_state', 'freshness_state'):
        if params.get(state_option) in ('u', ['u']):
            params[state_option] = 'x'
The 'u' state for UNREACHABLE has been rewritten in 'x' in: * flap_detection_options * notification_options * snapshot_criteria So convert value from config file to keep compatibility with Nagios :param params: parameters of the host before put in properties :type params: dict :return: None
Below is the the instruction that describes the task: ### Input: The 'u' state for UNREACHABLE has been rewritten in 'x' in: * flap_detection_options * notification_options * snapshot_criteria So convert value from config file to keep compatibility with Nagios :param params: parameters of the host before put in properties :type params: dict :return: None ### Response: def convert_conf_for_unreachable(params): """ The 'u' state for UNREACHABLE has been rewritten in 'x' in: * flap_detection_options * notification_options * snapshot_criteria So convert value from config file to keep compatibility with Nagios :param params: parameters of the host before put in properties :type params: dict :return: None """ if params is None: return for prop in ['flap_detection_options', 'notification_options', 'snapshot_criteria', 'stalking_options']: if prop in params: params[prop] = [p.replace('u', 'x') for p in params[prop]] if 'initial_state' in params and \ (params['initial_state'] == 'u' or params['initial_state'] == ['u']): params['initial_state'] = 'x' if 'freshness_state' in params and \ (params['freshness_state'] == 'u' or params['freshness_state'] == ['u']): params['freshness_state'] = 'x'
def create_stoichiometric_matrix(model, array_type='dense', dtype=None):
    """Return a stoichiometric array representation of the given model.

    Columns represent reactions and rows represent metabolites, so
    S[i, j] holds the quantity of metabolite ``i`` produced (negative
    for consumed) by reaction ``j``.

    Parameters
    ----------
    model : cobra.Model
        The cobra model to construct the matrix for.
    array_type : string
        'dense' returns a standard numpy.array; 'dok' or 'lil' build the
        corresponding scipy sparse matrix; 'DataFrame' returns a pandas
        `DataFrame` with metabolite ids as index and reaction ids as
        columns.
    dtype : data-type
        The desired data-type for the array. Defaults to float.

    Returns
    -------
    matrix of class `dtype`
        The stoichiometric matrix for the given model.
    """
    # dok_matrix is falsy (None) when scipy could not be imported.
    if array_type not in ('DataFrame', 'dense') and not dok_matrix:
        raise ValueError('Sparse matrices require scipy')

    if dtype is None:
        dtype = np.float64

    constructors = {
        'dense': np.zeros,
        'dok': dok_matrix,
        'lil': lil_matrix,
        'DataFrame': np.zeros,
    }
    shape = (len(model.metabolites), len(model.reactions))
    array = constructors[array_type](shape, dtype=dtype)

    met_index = model.metabolites.index
    rxn_index = model.reactions.index
    for rxn in model.reactions:
        for met, coefficient in iteritems(rxn.metabolites):
            array[met_index(met), rxn_index(rxn)] = coefficient

    if array_type != 'DataFrame':
        return array
    return pd.DataFrame(
        array,
        index=[met.id for met in model.metabolites],
        columns=[rxn.id for rxn in model.reactions],
    )
Return a stoichiometric array representation of the given model. The the columns represent the reactions and rows represent metabolites. S[i,j] therefore contains the quantity of metabolite `i` produced (negative for consumed) by reaction `j`. Parameters ---------- model : cobra.Model The cobra model to construct the matrix for. array_type : string The type of array to construct. if 'dense', return a standard numpy.array, 'dok', or 'lil' will construct a sparse array using scipy of the corresponding type and 'DataFrame' will give a pandas `DataFrame` with metabolite indices and reaction columns dtype : data-type The desired data-type for the array. If not given, defaults to float. Returns ------- matrix of class `dtype` The stoichiometric matrix for the given model.
Below is the the instruction that describes the task: ### Input: Return a stoichiometric array representation of the given model. The the columns represent the reactions and rows represent metabolites. S[i,j] therefore contains the quantity of metabolite `i` produced (negative for consumed) by reaction `j`. Parameters ---------- model : cobra.Model The cobra model to construct the matrix for. array_type : string The type of array to construct. if 'dense', return a standard numpy.array, 'dok', or 'lil' will construct a sparse array using scipy of the corresponding type and 'DataFrame' will give a pandas `DataFrame` with metabolite indices and reaction columns dtype : data-type The desired data-type for the array. If not given, defaults to float. Returns ------- matrix of class `dtype` The stoichiometric matrix for the given model. ### Response: def create_stoichiometric_matrix(model, array_type='dense', dtype=None): """Return a stoichiometric array representation of the given model. The the columns represent the reactions and rows represent metabolites. S[i,j] therefore contains the quantity of metabolite `i` produced (negative for consumed) by reaction `j`. Parameters ---------- model : cobra.Model The cobra model to construct the matrix for. array_type : string The type of array to construct. if 'dense', return a standard numpy.array, 'dok', or 'lil' will construct a sparse array using scipy of the corresponding type and 'DataFrame' will give a pandas `DataFrame` with metabolite indices and reaction columns dtype : data-type The desired data-type for the array. If not given, defaults to float. Returns ------- matrix of class `dtype` The stoichiometric matrix for the given model. 
""" if array_type not in ('DataFrame', 'dense') and not dok_matrix: raise ValueError('Sparse matrices require scipy') if dtype is None: dtype = np.float64 array_constructor = { 'dense': np.zeros, 'dok': dok_matrix, 'lil': lil_matrix, 'DataFrame': np.zeros, } n_metabolites = len(model.metabolites) n_reactions = len(model.reactions) array = array_constructor[array_type]((n_metabolites, n_reactions), dtype=dtype) m_ind = model.metabolites.index r_ind = model.reactions.index for reaction in model.reactions: for metabolite, stoich in iteritems(reaction.metabolites): array[m_ind(metabolite), r_ind(reaction)] = stoich if array_type == 'DataFrame': metabolite_ids = [met.id for met in model.metabolites] reaction_ids = [rxn.id for rxn in model.reactions] return pd.DataFrame(array, index=metabolite_ids, columns=reaction_ids) else: return array
def grant_permission_to_users(self, permission, **kwargs):  # noqa: E501
    """Grants a specific user permission to multiple users  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.grant_permission_to_users(permission, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str permission: Permission to grant to the users.  Please note that 'host_tag_management' is the equivalent of the 'Source Tag Management' permission (required)
    :param list[str] body: list of users which should be revoked by specified permission
    :return: UserModel
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always ask the transport layer for the bare response body.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous call: hand back the request thread immediately.
        return self.grant_permission_to_users_with_http_info(permission, **kwargs)  # noqa: E501
    # Synchronous call: unwrap and return the response data.
    (data) = self.grant_permission_to_users_with_http_info(permission, **kwargs)  # noqa: E501
    return data
Grants a specific user permission to multiple users # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.grant_permission_to_users(permission, async_req=True) >>> result = thread.get() :param async_req bool :param str permission: Permission to grant to the users. Please note that 'host_tag_management' is the equivalent of the 'Source Tag Management' permission (required) :param list[str] body: list of users which should be revoked by specified permission :return: UserModel If the method is called asynchronously, returns the request thread.
Below is the the instruction that describes the task: ### Input: Grants a specific user permission to multiple users # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.grant_permission_to_users(permission, async_req=True) >>> result = thread.get() :param async_req bool :param str permission: Permission to grant to the users. Please note that 'host_tag_management' is the equivalent of the 'Source Tag Management' permission (required) :param list[str] body: list of users which should be revoked by specified permission :return: UserModel If the method is called asynchronously, returns the request thread. ### Response: def grant_permission_to_users(self, permission, **kwargs): # noqa: E501 """Grants a specific user permission to multiple users # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.grant_permission_to_users(permission, async_req=True) >>> result = thread.get() :param async_req bool :param str permission: Permission to grant to the users. Please note that 'host_tag_management' is the equivalent of the 'Source Tag Management' permission (required) :param list[str] body: list of users which should be revoked by specified permission :return: UserModel If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.grant_permission_to_users_with_http_info(permission, **kwargs) # noqa: E501 else: (data) = self.grant_permission_to_users_with_http_info(permission, **kwargs) # noqa: E501 return data
def create_template(self, s, provider_name=None):
    """Creates a template from the given string based on the specified
    provider or the provider with highest precedence.

    Args:
        s: The string to convert to a template.
        provider_name: The name of the provider to use to create the
            template. Defaults to the highest-precedence provider.
    """
    # Fall back to the first (highest-precedence) supported provider.
    target = self.supported_providers[0] if provider_name is None else provider_name
    # Keep provider lookup *inside* the lambda so that any error it
    # raises is routed through template_exception_handler as well.
    return template_exception_handler(
        lambda: self.get_provider(target).create_template(s),
        self.error_context
    )
Creates a template from the given string based on the specified provider or the provider with highest precedence. Args: s: The string to convert to a template. provider_name: The name of the provider to use to create the template.
Below is the the instruction that describes the task: ### Input: Creates a template from the given string based on the specified provider or the provider with highest precedence. Args: s: The string to convert to a template. provider_name: The name of the provider to use to create the template. ### Response: def create_template(self, s, provider_name=None): """Creates a template from the given string based on the specified provider or the provider with highest precedence. Args: s: The string to convert to a template. provider_name: The name of the provider to use to create the template. """ if provider_name is None: provider_name = self.supported_providers[0] return template_exception_handler( lambda: self.get_provider(provider_name).create_template(s), self.error_context )
def _checkMode(self, ax_args):
    """Raise an exception if the mode in the attribute exchange
    arguments does not match what is expected for this class.

    @raises NotAXMessage: When there is no mode value in ax_args at
        all.

    @raises AXError: When mode does not match.
    """
    mode = ax_args.get('mode')
    # Matching mode: nothing to do.
    if mode == self.mode:
        return
    # Missing/empty mode means this is not an AX message at all.
    if not mode:
        raise NotAXMessage()
    raise AXError(
        'Expected mode %r; got %r' % (self.mode, mode))
Raise an exception if the mode in the attribute exchange arguments does not match what is expected for this class. @raises NotAXMessage: When there is no mode value in ax_args at all. @raises AXError: When mode does not match.
Below is the the instruction that describes the task: ### Input: Raise an exception if the mode in the attribute exchange arguments does not match what is expected for this class. @raises NotAXMessage: When there is no mode value in ax_args at all. @raises AXError: When mode does not match. ### Response: def _checkMode(self, ax_args): """Raise an exception if the mode in the attribute exchange arguments does not match what is expected for this class. @raises NotAXMessage: When there is no mode value in ax_args at all. @raises AXError: When mode does not match. """ mode = ax_args.get('mode') if mode != self.mode: if not mode: raise NotAXMessage() else: raise AXError( 'Expected mode %r; got %r' % (self.mode, mode))
def check(self, feature):
    """Check that fit can be called on reference data"""
    # Build the mapper for this feature and make sure it can be fitted
    # against the stored reference X/y without raising.
    feature.as_dataframe_mapper().fit(self.X, y=self.y)
Check that fit can be called on reference data
Below is the the instruction that describes the task: ### Input: Check that fit can be called on reference data ### Response: def check(self, feature): """Check that fit can be called on reference data""" mapper = feature.as_dataframe_mapper() mapper.fit(self.X, y=self.y)
def get_circulations(elements: T) -> Iterable[T]:
    """Yield every rotation (circulation) of an ordered collection
    (tuple or list), starting with the collection itself.

    Example:

    >>> list(get_circulations([1, 2, 3]))
    [[1, 2, 3], [2, 3, 1], [3, 1, 2]]
    """
    # Rotate by every possible offset: tail + head.
    for offset, _ in enumerate(elements):
        yield elements[offset:] + elements[:offset]
Iterate over all possible circulations of an ordered collection (tuple or list). Example: >>> list(get_circulations([1, 2, 3])) [[1, 2, 3], [2, 3, 1], [3, 1, 2]]
Below is the the instruction that describes the task: ### Input: Iterate over all possible circulations of an ordered collection (tuple or list). Example: >>> list(get_circulations([1, 2, 3])) [[1, 2, 3], [2, 3, 1], [3, 1, 2]] ### Response: def get_circulations(elements: T) -> Iterable[T]: """Iterate over all possible circulations of an ordered collection (tuple or list). Example: >>> list(get_circulations([1, 2, 3])) [[1, 2, 3], [2, 3, 1], [3, 1, 2]] """ for i in range(len(elements)): yield elements[i:] + elements[:i]
async def _pb_request(self, endpoint, request_pb, response_pb):
    """Send a Protocol Buffer formatted chat API request.

    Args:
        endpoint (str): The chat API endpoint to use.
        request_pb: The request body as a Protocol Buffer message.
        response_pb: The response body as a Protocol Buffer message,
            populated in place from the server response.

    Raises:
        NetworkError: If the request fails.
    """
    logger.debug('Sending Protocol Buffer request %s:\n%s', endpoint,
                 request_pb)
    url = 'https://clients6.google.com/chat/v1/{}'.format(endpoint)
    res = await self._base_request(
        url,
        'application/x-protobuf',  # Request body is Protocol Buffer.
        'proto',  # Response body is Protocol Buffer.
        request_pb.SerializeToString()
    )
    # The response body is base64-wrapped; decode it first, then parse
    # the protobuf payload. Each step has its own failure mode.
    try:
        raw = base64.b64decode(res.body)
    except binascii.Error as e:
        raise exceptions.NetworkError(
            'Failed to decode base64 response: {}'.format(e)
        )
    try:
        response_pb.ParseFromString(raw)
    except google.protobuf.message.DecodeError as e:
        raise exceptions.NetworkError(
            'Failed to decode Protocol Buffer response: {}'.format(e)
        )
    logger.debug('Received Protocol Buffer response:\n%s', response_pb)
    # A well-formed response may still carry an application-level error.
    status = response_pb.response_header.status
    if status != hangouts_pb2.RESPONSE_STATUS_OK:
        description = response_pb.response_header.error_description
        raise exceptions.NetworkError(
            'Request failed with status {}: \'{}\''
            .format(status, description)
        )
Send a Protocol Buffer formatted chat API request. Args: endpoint (str): The chat API endpoint to use. request_pb: The request body as a Protocol Buffer message. response_pb: The response body as a Protocol Buffer message. Raises: NetworkError: If the request fails.
Below is the the instruction that describes the task: ### Input: Send a Protocol Buffer formatted chat API request. Args: endpoint (str): The chat API endpoint to use. request_pb: The request body as a Protocol Buffer message. response_pb: The response body as a Protocol Buffer message. Raises: NetworkError: If the request fails. ### Response: async def _pb_request(self, endpoint, request_pb, response_pb): """Send a Protocol Buffer formatted chat API request. Args: endpoint (str): The chat API endpoint to use. request_pb: The request body as a Protocol Buffer message. response_pb: The response body as a Protocol Buffer message. Raises: NetworkError: If the request fails. """ logger.debug('Sending Protocol Buffer request %s:\n%s', endpoint, request_pb) res = await self._base_request( 'https://clients6.google.com/chat/v1/{}'.format(endpoint), 'application/x-protobuf', # Request body is Protocol Buffer. 'proto', # Response body is Protocol Buffer. request_pb.SerializeToString() ) try: response_pb.ParseFromString(base64.b64decode(res.body)) except binascii.Error as e: raise exceptions.NetworkError( 'Failed to decode base64 response: {}'.format(e) ) except google.protobuf.message.DecodeError as e: raise exceptions.NetworkError( 'Failed to decode Protocol Buffer response: {}'.format(e) ) logger.debug('Received Protocol Buffer response:\n%s', response_pb) status = response_pb.response_header.status if status != hangouts_pb2.RESPONSE_STATUS_OK: description = response_pb.response_header.error_description raise exceptions.NetworkError( 'Request failed with status {}: \'{}\'' .format(status, description) )
def dynamic_content_item_show(self, id, **kwargs):
    """Show a single dynamic content item by id.

    https://developer.zendesk.com/rest_api/docs/core/dynamic_content#show-item
    """
    api_path = "/api/v2/dynamic_content/items/{id}.json".format(id=id)
    return self.call(api_path, **kwargs)
https://developer.zendesk.com/rest_api/docs/core/dynamic_content#show-item
Below is the the instruction that describes the task: ### Input: https://developer.zendesk.com/rest_api/docs/core/dynamic_content#show-item ### Response: def dynamic_content_item_show(self, id, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/dynamic_content#show-item" api_path = "/api/v2/dynamic_content/items/{id}.json" api_path = api_path.format(id=id) return self.call(api_path, **kwargs)
def update_module_types():
    """
    Download the repositories for all of the firmware_module_type records
    and update them using the `module.json` files from the repositories
    themselves. Currently only works for git repositories.
    """
    local_url = config["local_server"]["url"]
    server = Server(local_url)
    db = server[FIRMWARE_MODULE_TYPE]
    temp_folder = mkdtemp()
    try:
        for _id in db:
            # Skip CouchDB design/internal documents ("_design/...", etc.).
            if _id.startswith("_"):
                continue
            obj = db[_id]
            new_obj = update_record(FirmwareModuleType(obj), temp_folder)
            # Carry the revision over so the write doesn't conflict.
            new_obj["_rev"] = obj["_rev"]
            # Only write back when something actually changed.
            if new_obj != obj:
                db[_id] = new_obj
    finally:
        # Bug fix: always remove the scratch checkout directory, even when
        # a repository download or record update raises part-way through
        # (previously the temp directory leaked on any error).
        rmtree(temp_folder)
Download the repositories for all of the firmware_module_type records and update them using the `module.json` files from the repositories themselves. Currently only works for git repositories.
Below is the the instruction that describes the task: ### Input: Download the repositories for all of the firmware_module_type records and update them using the `module.json` files from the repositories themselves. Currently only works for git repositories. ### Response: def update_module_types(): """ Download the repositories for all of the firmware_module_type records and update them using the `module.json` files from the repositories themselves. Currently only works for git repositories. """ local_url = config["local_server"]["url"] server = Server(local_url) db = server[FIRMWARE_MODULE_TYPE] temp_folder = mkdtemp() for _id in db: if _id.startswith("_"): continue obj = db[_id] new_obj = update_record(FirmwareModuleType(obj), temp_folder) new_obj["_rev"] = obj["_rev"] if new_obj != obj: db[_id] = new_obj rmtree(temp_folder)
def update_from_dict(self, d):
    """
    Apply options from a serialized body to the current instance.
    Modifies the object in-place. Used mostly by ``from_dict``.
    """
    # Work on a copy so the caller's dict is never mutated.
    remaining = d.copy()
    if 'query' in remaining:
        self.query._proxied = Q(remaining.pop('query'))
    if 'script' in remaining:
        self._script = remaining.pop('script')
    # Whatever wasn't consumed above becomes extra request options.
    self._extra = remaining
    return self
Apply options from a serialized body to the current instance. Modifies the object in-place. Used mostly by ``from_dict``.
Below is the the instruction that describes the task: ### Input: Apply options from a serialized body to the current instance. Modifies the object in-place. Used mostly by ``from_dict``. ### Response: def update_from_dict(self, d): """ Apply options from a serialized body to the current instance. Modifies the object in-place. Used mostly by ``from_dict``. """ d = d.copy() if 'query' in d: self.query._proxied = Q(d.pop('query')) if 'script' in d: self._script = d.pop('script') self._extra = d return self
def insert(self, cache_key, paths, overwrite=False):
    """Cache the output of a build.

    By default, checks cache.has(key) first, only proceeding to create and
    insert an artifact if it is not already in the cache (though `overwrite`
    can be used to skip the check and unconditionally insert).

    :param CacheKey cache_key: A CacheKey object.
    :param list<str> paths: List of absolute paths to generated dirs/files.
           These must be under the artifact_root.
    :param bool overwrite: Skip check for existing, insert even if already in cache.
    :return: True if the artifact was inserted, False otherwise.
    """
    # Every path must exist before we try to archive it.
    missing_files = [p for p in paths if not os.path.exists(p)]
    if missing_files:
        raise ArtifactCacheError('Tried to cache nonexistent files {0}'.format(missing_files))

    # Unless the caller explicitly wants to clobber, bail out when the
    # artifact is already cached.
    if not overwrite and self.has(cache_key):
        logger.debug('Skipping insert of existing artifact: {0}'.format(cache_key))
        return False

    try:
        self.try_insert(cache_key, paths)
    except NonfatalArtifactCacheError as e:
        # Cache-write failures are non-fatal for the build: log and move on.
        logger.error('Error while writing to artifact cache: {0}'.format(e))
        return False
    return True
Cache the output of a build. By default, checks cache.has(key) first, only proceeding to create and insert an artifact if it is not already in the cache (though `overwrite` can be used to skip the check and unconditionally insert). :param CacheKey cache_key: A CacheKey object. :param list<str> paths: List of absolute paths to generated dirs/files. These must be under the artifact_root. :param bool overwrite: Skip check for existing, insert even if already in cache.
Below is the the instruction that describes the task: ### Input: Cache the output of a build. By default, checks cache.has(key) first, only proceeding to create and insert an artifact if it is not already in the cache (though `overwrite` can be used to skip the check and unconditionally insert). :param CacheKey cache_key: A CacheKey object. :param list<str> paths: List of absolute paths to generated dirs/files. These must be under the artifact_root. :param bool overwrite: Skip check for existing, insert even if already in cache. ### Response: def insert(self, cache_key, paths, overwrite=False): """Cache the output of a build. By default, checks cache.has(key) first, only proceeding to create and insert an artifact if it is not already in the cache (though `overwrite` can be used to skip the check and unconditionally insert). :param CacheKey cache_key: A CacheKey object. :param list<str> paths: List of absolute paths to generated dirs/files. These must be under the artifact_root. :param bool overwrite: Skip check for existing, insert even if already in cache. """ missing_files = [f for f in paths if not os.path.exists(f)] if missing_files: raise ArtifactCacheError('Tried to cache nonexistent files {0}'.format(missing_files)) if not overwrite: if self.has(cache_key): logger.debug('Skipping insert of existing artifact: {0}'.format(cache_key)) return False try: self.try_insert(cache_key, paths) return True except NonfatalArtifactCacheError as e: logger.error('Error while writing to artifact cache: {0}'.format(e)) return False
def resolve_url(self, resource_name):
    """Return a URL to a local copy of a resource, suitable for get_generator()"""
    if self.target_format == 'csv' and self.target_file != DEFAULT_METATAB_FILE:
        # CSV packages publish absolute web URLs in their metadata, so the
        # resource URL must be read from the opened package rather than
        # derived from the location of the metadata file.
        package = self.get_resource()
        resource = package.doc.resource(resource_name)
        return parse_app_url(resource.url)

    # Other package types: join against the target and resolve it.
    joined = self.join_target(resource_name)
    return joined.get_resource().get_target()
Return a URL to a local copy of a resource, suitable for get_generator()
Below is the the instruction that describes the task: ### Input: Return a URL to a local copy of a resource, suitable for get_generator() ### Response: def resolve_url(self, resource_name): """Return a URL to a local copy of a resource, suitable for get_generator()""" if self.target_format == 'csv' and self.target_file != DEFAULT_METATAB_FILE: # For CSV packages, need to get the package and open it to get the resoruce URL, becuase # they are always absolute web URLs and may not be related to the location of the metadata. s = self.get_resource() rs = s.doc.resource(resource_name) return parse_app_url(rs.url) else: jt = self.join_target(resource_name) rs = jt.get_resource() t = rs.get_target() return t
def from_rfc3339(cls, stamp):
    """Parse RFC 3339-compliant timestamp, preserving nanoseconds.

    Args:
        stamp (str): RFC 3339 stamp, with up to nanosecond precision

    Returns:
        :class:`DatetimeWithNanoseconds`:
            an instance matching the timestamp string

    Raises:
        ValueError: if `stamp` does not match the expected format
    """
    match = _RFC3339_NANOS.match(stamp)
    if match is None:
        raise ValueError(
            "Timestamp: {}, does not match pattern: {}".format(
                stamp, _RFC3339_NANOS.pattern
            )
        )
    # Parse the whole-second part with the fraction stripped off.
    bare = datetime.datetime.strptime(
        match.group("no_fraction"), _RFC3339_NO_FRACTION
    )
    # Right-pad the fractional digits out to nanosecond precision.
    fraction = match.group("nanos")
    if fraction is None:
        nanos = 0
    else:
        nanos = int(fraction) * (10 ** (9 - len(fraction)))
    return cls(
        bare.year,
        bare.month,
        bare.day,
        bare.hour,
        bare.minute,
        bare.second,
        nanosecond=nanos,
        tzinfo=pytz.UTC,
    )
Parse RFC 3339-compliant timestamp, preserving nanoseconds. Args: stamp (str): RFC 3339 stamp, with up to nanosecond precision Returns: :class:`DatetimeWithNanoseconds`: an instance matching the timestamp string Raises: ValueError: if `stamp` does not match the expected format
Below is the the instruction that describes the task: ### Input: Parse RFC 3339-compliant timestamp, preserving nanoseconds. Args: stamp (str): RFC 3339 stamp, with up to nanosecond precision Returns: :class:`DatetimeWithNanoseconds`: an instance matching the timestamp string Raises: ValueError: if `stamp` does not match the expected format ### Response: def from_rfc3339(cls, stamp): """Parse RFC 3339-compliant timestamp, preserving nanoseconds. Args: stamp (str): RFC 3339 stamp, with up to nanosecond precision Returns: :class:`DatetimeWithNanoseconds`: an instance matching the timestamp string Raises: ValueError: if `stamp` does not match the expected format """ with_nanos = _RFC3339_NANOS.match(stamp) if with_nanos is None: raise ValueError( "Timestamp: {}, does not match pattern: {}".format( stamp, _RFC3339_NANOS.pattern ) ) bare = datetime.datetime.strptime( with_nanos.group("no_fraction"), _RFC3339_NO_FRACTION ) fraction = with_nanos.group("nanos") if fraction is None: nanos = 0 else: scale = 9 - len(fraction) nanos = int(fraction) * (10 ** scale) return cls( bare.year, bare.month, bare.day, bare.hour, bare.minute, bare.second, nanosecond=nanos, tzinfo=pytz.UTC, )