code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def fill_traversals(traversals, edges, edges_hash=None): """ Convert a traversal of a list of edges into a sequence of traversals where every pair of consecutive node indexes is an edge in a passed edge list Parameters ------------- traversals : sequence of (m,) int Node indexes of traversals of a graph edges : (n, 2) int Pairs of connected node indexes edges_hash : None, or (n,) int Edges sorted along axis 1 then hashed using grouping.hashable_rows Returns -------------- splits : sequence of (p,) int Node indexes of connected traversals """ # make sure edges are correct type edges = np.asanyarray(edges, dtype=np.int64) # make sure edges are sorted edges.sort(axis=1) # if there are no traversals just return edges if len(traversals) == 0: return edges.copy() # hash edges for contains checks if edges_hash is None: edges_hash = grouping.hashable_rows(edges) splits = [] for nodes in traversals: # split traversals to remove edges # that don't actually exist splits.extend(split_traversal( traversal=nodes, edges=edges, edges_hash=edges_hash)) # turn the split traversals back into (n,2) edges included = util.vstack_empty([np.column_stack((i[:-1], i[1:])) for i in splits]) if len(included) > 0: # sort included edges in place included.sort(axis=1) # make sure any edges not included in split traversals # are just added as a length 2 traversal splits.extend(grouping.boolean_rows( edges, included, operation=np.setdiff1d)) else: # no edges were included, so our filled traversal # is just the original edges copied over splits = edges.copy() return splits
Convert a traversal of a list of edges into a sequence of traversals where every pair of consecutive node indexes is an edge in a passed edge list Parameters ------------- traversals : sequence of (m,) int Node indexes of traversals of a graph edges : (n, 2) int Pairs of connected node indexes edges_hash : None, or (n,) int Edges sorted along axis 1 then hashed using grouping.hashable_rows Returns -------------- splits : sequence of (p,) int Node indexes of connected traversals
Below is the the instruction that describes the task: ### Input: Convert a traversal of a list of edges into a sequence of traversals where every pair of consecutive node indexes is an edge in a passed edge list Parameters ------------- traversals : sequence of (m,) int Node indexes of traversals of a graph edges : (n, 2) int Pairs of connected node indexes edges_hash : None, or (n,) int Edges sorted along axis 1 then hashed using grouping.hashable_rows Returns -------------- splits : sequence of (p,) int Node indexes of connected traversals ### Response: def fill_traversals(traversals, edges, edges_hash=None): """ Convert a traversal of a list of edges into a sequence of traversals where every pair of consecutive node indexes is an edge in a passed edge list Parameters ------------- traversals : sequence of (m,) int Node indexes of traversals of a graph edges : (n, 2) int Pairs of connected node indexes edges_hash : None, or (n,) int Edges sorted along axis 1 then hashed using grouping.hashable_rows Returns -------------- splits : sequence of (p,) int Node indexes of connected traversals """ # make sure edges are correct type edges = np.asanyarray(edges, dtype=np.int64) # make sure edges are sorted edges.sort(axis=1) # if there are no traversals just return edges if len(traversals) == 0: return edges.copy() # hash edges for contains checks if edges_hash is None: edges_hash = grouping.hashable_rows(edges) splits = [] for nodes in traversals: # split traversals to remove edges # that don't actually exist splits.extend(split_traversal( traversal=nodes, edges=edges, edges_hash=edges_hash)) # turn the split traversals back into (n,2) edges included = util.vstack_empty([np.column_stack((i[:-1], i[1:])) for i in splits]) if len(included) > 0: # sort included edges in place included.sort(axis=1) # make sure any edges not included in split traversals # are just added as a length 2 traversal splits.extend(grouping.boolean_rows( edges, included, operation=np.setdiff1d)) 
else: # no edges were included, so our filled traversal # is just the original edges copied over splits = edges.copy() return splits
def _call(self, method, params):
    """Call a Conduit method.

    :param method: method to call
    :param params: dict with the HTTP parameters needed to call
        the given method

    :raises ConduitError: when an error is returned by the server
    """
    endpoint = self.URL % {'base': self.base_url, 'method': method}

    # Attach the Conduit authentication token to the method parameters
    params['__conduit__'] = {'token': self.api_token}
    post_data = {
        'params': json.dumps(params, sort_keys=True),
        'output': 'json',
        '__conduit__': True
    }

    logger.debug("Phabricator Conduit client requests: %s params: %s",
                 method, str(post_data))

    response = self.fetch(endpoint, payload=post_data,
                          method=HttpClient.POST, verify=False)

    # Conduit reports failures inside the JSON body rather than
    # through HTTP status codes, so inspect it explicitly.
    decoded = response.json()
    if decoded['error_code']:
        raise ConduitError(error=decoded['error_info'],
                           code=decoded['error_code'])

    return response.text
Call a method. :param method: method to call :param params: dict with the HTTP parameters needed to call the given method :raises ConduitError: when an error is returned by the server
Below is the the instruction that describes the task: ### Input: Call a method. :param method: method to call :param params: dict with the HTTP parameters needed to call the given method :raises ConduitError: when an error is returned by the server ### Response: def _call(self, method, params): """Call a method. :param method: method to call :param params: dict with the HTTP parameters needed to call the given method :raises ConduitError: when an error is returned by the server """ url = self.URL % {'base': self.base_url, 'method': method} # Conduit and POST parameters params['__conduit__'] = {'token': self.api_token} data = { 'params': json.dumps(params, sort_keys=True), 'output': 'json', '__conduit__': True } logger.debug("Phabricator Conduit client requests: %s params: %s", method, str(data)) r = self.fetch(url, payload=data, method=HttpClient.POST, verify=False) # Check for possible Conduit API errors result = r.json() if result['error_code']: raise ConduitError(error=result['error_info'], code=result['error_code']) return r.text
def parse_proteins(self, OrganismDB):
    """
    Iterate through all the proteins in the DB, creating a
    hit_dataframe for each protein that has HMM hits.

    Proteins whose ``hmm_hit_list`` is empty are skipped.  A
    ``ValueError`` raised while parsing a single protein is reported
    and parsing continues with the next protein.

    :param OrganismDB: database object exposing ``organisms``, each of
        which exposes ``proteins``.
    """
    for org in OrganismDB.organisms:
        for prot in org.proteins:
            if len(prot.hmm_hit_list) > 0:
                try:
                    prot.hit_dataframe = prot.parse_hmm_hit_list(prot.hmm_hit_list)
                # BUGFIX: `except ValueError, e` and the `print` statement
                # are Python-2-only syntax (SyntaxError on Python 3).
                except ValueError as e:
                    print('error for', org.name, prot.accession, str(e))
Iterate through all the proteins in the DB, creates a hit_dataframe for each protein.
Below is the the instruction that describes the task: ### Input: Iterate through all the proteins in the DB, creates a hit_dataframe for each protein. ### Response: def parse_proteins(self,OrganismDB): ''' Iterate through all the proteins in the DB, creates a hit_dataframe for each protein. ''' for org in OrganismDB.organisms: for prot in org.proteins: if len(prot.hmm_hit_list) > 0: try: prot.hit_dataframe = prot.parse_hmm_hit_list(prot.hmm_hit_list) except ValueError,e: print 'error for', org.name, prot.accession, str(e)
def _determine_rotated_logfile(self):
    """
    We suspect the logfile has been rotated, so try to guess what the
    rotated filename is, and return it (or None if no candidate fits).
    """
    candidate = self._check_rotated_filename_candidates()
    if not (candidate and exists(candidate)):
        return None

    # The candidate's inode matching our stored offset inode means the
    # old file was renamed to the candidate path.
    if stat(candidate).st_ino == self._offset_file_inode:
        return candidate

    # If the inode of the *current* file is unchanged, the file shrank
    # in place; this is expected with copytruncate, otherwise warn.
    if stat(self.filename).st_ino == self._offset_file_inode:
        if self.copytruncate:
            return candidate
        sys.stderr.write(
            "[pygtail] [WARN] file size of %s shrank, and copytruncate support is "
            "disabled (expected at least %d bytes, was %d bytes).\n" %
            (self.filename, self._offset, stat(self.filename).st_size))

    return None
We suspect the logfile has been rotated, so try to guess what the rotated filename is, and return it.
Below is the the instruction that describes the task: ### Input: We suspect the logfile has been rotated, so try to guess what the rotated filename is, and return it. ### Response: def _determine_rotated_logfile(self): """ We suspect the logfile has been rotated, so try to guess what the rotated filename is, and return it. """ rotated_filename = self._check_rotated_filename_candidates() if rotated_filename and exists(rotated_filename): if stat(rotated_filename).st_ino == self._offset_file_inode: return rotated_filename # if the inode hasn't changed, then the file shrank; this is expected with copytruncate, # otherwise print a warning if stat(self.filename).st_ino == self._offset_file_inode: if self.copytruncate: return rotated_filename else: sys.stderr.write( "[pygtail] [WARN] file size of %s shrank, and copytruncate support is " "disabled (expected at least %d bytes, was %d bytes).\n" % (self.filename, self._offset, stat(self.filename).st_size)) return None
def close_connection(self):
    """Close an opened url connection, ignoring any close errors."""
    connection = self.url_connection
    if connection is None:
        # no connection is open; nothing to do
        return
    try:
        connection.close()
    except Exception:
        # closing is best-effort: swallow any error
        pass
    self.url_connection = None
Close an opened url connection.
Below is the the instruction that describes the task: ### Input: Close an opened url connection. ### Response: def close_connection (self): """ Close an opened url connection. """ if self.url_connection is None: # no connection is open return try: self.url_connection.close() except Exception: # ignore close errors pass self.url_connection = None
async def send_media_group(self, chat_id: typing.Union[base.Integer, base.String], media: typing.Union[types.MediaGroup, typing.List], disable_notification: typing.Union[base.Boolean, None] = None, reply_to_message_id: typing.Union[base.Integer, None] = None) -> typing.List[types.Message]: """ Use this method to send a group of photos or videos as an album. Source: https://core.telegram.org/bots/api#sendmediagroup :param chat_id: Unique identifier for the target chat or username of the target channel :type chat_id: :obj:`typing.Union[base.Integer, base.String]` :param media: A JSON-serialized array describing photos and videos to be sent :type media: :obj:`typing.Union[types.MediaGroup, typing.List]` :param disable_notification: Sends the message silently. Users will receive a notification with no sound :type disable_notification: :obj:`typing.Union[base.Boolean, None]` :param reply_to_message_id: If the message is a reply, ID of the original message :type reply_to_message_id: :obj:`typing.Union[base.Integer, None]` :return: On success, an array of the sent Messages is returned :rtype: typing.List[types.Message] """ # Convert list to MediaGroup if isinstance(media, list): media = types.MediaGroup(media) files = dict(media.get_files()) media = prepare_arg(media) payload = generate_payload(**locals(), exclude=['files']) result = await self.request(api.Methods.SEND_MEDIA_GROUP, payload, files) return [types.Message(**message) for message in result]
Use this method to send a group of photos or videos as an album. Source: https://core.telegram.org/bots/api#sendmediagroup :param chat_id: Unique identifier for the target chat or username of the target channel :type chat_id: :obj:`typing.Union[base.Integer, base.String]` :param media: A JSON-serialized array describing photos and videos to be sent :type media: :obj:`typing.Union[types.MediaGroup, typing.List]` :param disable_notification: Sends the message silently. Users will receive a notification with no sound :type disable_notification: :obj:`typing.Union[base.Boolean, None]` :param reply_to_message_id: If the message is a reply, ID of the original message :type reply_to_message_id: :obj:`typing.Union[base.Integer, None]` :return: On success, an array of the sent Messages is returned :rtype: typing.List[types.Message]
Below is the the instruction that describes the task: ### Input: Use this method to send a group of photos or videos as an album. Source: https://core.telegram.org/bots/api#sendmediagroup :param chat_id: Unique identifier for the target chat or username of the target channel :type chat_id: :obj:`typing.Union[base.Integer, base.String]` :param media: A JSON-serialized array describing photos and videos to be sent :type media: :obj:`typing.Union[types.MediaGroup, typing.List]` :param disable_notification: Sends the message silently. Users will receive a notification with no sound :type disable_notification: :obj:`typing.Union[base.Boolean, None]` :param reply_to_message_id: If the message is a reply, ID of the original message :type reply_to_message_id: :obj:`typing.Union[base.Integer, None]` :return: On success, an array of the sent Messages is returned :rtype: typing.List[types.Message] ### Response: async def send_media_group(self, chat_id: typing.Union[base.Integer, base.String], media: typing.Union[types.MediaGroup, typing.List], disable_notification: typing.Union[base.Boolean, None] = None, reply_to_message_id: typing.Union[base.Integer, None] = None) -> typing.List[types.Message]: """ Use this method to send a group of photos or videos as an album. Source: https://core.telegram.org/bots/api#sendmediagroup :param chat_id: Unique identifier for the target chat or username of the target channel :type chat_id: :obj:`typing.Union[base.Integer, base.String]` :param media: A JSON-serialized array describing photos and videos to be sent :type media: :obj:`typing.Union[types.MediaGroup, typing.List]` :param disable_notification: Sends the message silently. 
Users will receive a notification with no sound :type disable_notification: :obj:`typing.Union[base.Boolean, None]` :param reply_to_message_id: If the message is a reply, ID of the original message :type reply_to_message_id: :obj:`typing.Union[base.Integer, None]` :return: On success, an array of the sent Messages is returned :rtype: typing.List[types.Message] """ # Convert list to MediaGroup if isinstance(media, list): media = types.MediaGroup(media) files = dict(media.get_files()) media = prepare_arg(media) payload = generate_payload(**locals(), exclude=['files']) result = await self.request(api.Methods.SEND_MEDIA_GROUP, payload, files) return [types.Message(**message) for message in result]
def complete_import(self, text, line, begidx, endidx):
    """Tab-completion for the serialize/import command.

    Returns all import options when ``text`` is empty, otherwise only
    the options starting with ``text``.
    """
    candidates = self.IMPORT_OPTS
    if text:
        candidates = [option for option in candidates
                      if option.startswith(text)]
    return candidates
completion for serialize command
Below is the the instruction that describes the task: ### Input: completion for serialize command ### Response: def complete_import(self, text, line, begidx, endidx): """completion for serialize command""" opts = self.IMPORT_OPTS if not text: completions = opts else: completions = [f for f in opts if f.startswith(text) ] return completions
def alloc_vlan(self, net_id):
    """Allocates the vlan ID.

    Delegates to the shared VLAN pool, tagging the request as
    firewall-owned via ``fw_const.FW_CONST``.
    """
    return self.service_vlans.allocate_segmentation_id(
        net_id, source=fw_const.FW_CONST)
Allocates the vlan ID.
Below is the the instruction that describes the task: ### Input: Allocates the vlan ID. ### Response: def alloc_vlan(self, net_id): """Allocates the vlan ID. """ vlan_id = self.service_vlans.allocate_segmentation_id( net_id, source=fw_const.FW_CONST) return vlan_id
def is_valid_callsign(self, callsign, timestamp=timestamp_now):
    """
    Checks if a callsign is valid

    Args:
        callsign (str): Amateur Radio callsign
        timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)

    Returns:
        bool: True / False

    Example:
       The following checks if "DH1TW" is a valid callsign

       >>> from pyhamtools import LookupLib, Callinfo
       >>> my_lookuplib = LookupLib(lookuptype="countryfile")
       >>> cic = Callinfo(my_lookuplib)
       >>> cic.is_valid_callsign("DH1TW")
       True
    """
    try:
        # BUGFIX: the original fell through and returned None (not False)
        # when get_all() returned a falsy value; always return a bool as
        # documented.  get_all() raises KeyError for unknown callsigns.
        return bool(self.get_all(callsign, timestamp))
    except KeyError:
        return False
Checks if a callsign is valid Args: callsign (str): Amateur Radio callsign timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC) Returns: bool: True / False Example: The following checks if "DH1TW" is a valid callsign >>> from pyhamtools import LookupLib, Callinfo >>> my_lookuplib = LookupLib(lookuptype="countryfile") >>> cic = Callinfo(my_lookuplib) >>> cic.is_valid_callsign("DH1TW") True
Below is the the instruction that describes the task: ### Input: Checks if a callsign is valid Args: callsign (str): Amateur Radio callsign timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC) Returns: bool: True / False Example: The following checks if "DH1TW" is a valid callsign >>> from pyhamtools import LookupLib, Callinfo >>> my_lookuplib = LookupLib(lookuptype="countryfile") >>> cic = Callinfo(my_lookuplib) >>> cic.is_valid_callsign("DH1TW") True ### Response: def is_valid_callsign(self, callsign, timestamp=timestamp_now): """ Checks if a callsign is valid Args: callsign (str): Amateur Radio callsign timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC) Returns: bool: True / False Example: The following checks if "DH1TW" is a valid callsign >>> from pyhamtools import LookupLib, Callinfo >>> my_lookuplib = LookupLib(lookuptype="countryfile") >>> cic = Callinfo(my_lookuplib) >>> cic.is_valid_callsign("DH1TW") True """ try: if self.get_all(callsign, timestamp): return True except KeyError: return False
def mask_plane(data, wcs, region, negate=False):
    """
    Mask a 2d image (data) such that pixels within 'region' are set to nan.

    Parameters
    ----------
    data : 2d-array
        Image array.

    wcs : astropy.wcs.WCS
        WCS for the image in question.

    region : :class:`AegeanTools.regions.Region`
        A region within which the image pixels will be masked.

    negate : bool
        If True then pixels *outside* the region are masked.
        Default = False.

    Returns
    -------
    masked : 2d-array
        The original array (masked in place), returned for convenience.
    """
    # Build an (N, 2) array of (x, y) pixel indices covering the image.
    # PERF: meshgrid + ravel replaces the original Python-level loop over
    # rows; row-major ravel with 'xy' indexing reproduces the original
    # ordering exactly (x varies fastest within each row y).
    ny, nx = data.shape
    cols, rows = np.meshgrid(np.arange(nx), np.arange(ny))
    indexes = np.column_stack((cols.ravel(), rows.ravel()))

    # put ALL the pixels into our vectorized functions and minimise overheads
    ra, dec = wcs.wcs_pix2world(indexes, 1).transpose()
    bigmask = region.sky_within(ra, dec, degin=True)
    if not negate:
        # mask the pixels that are *not* within the region
        bigmask = np.bitwise_not(bigmask)
    # rework our 1d mask into a 2d array
    bigmask = bigmask.reshape(data.shape)
    # and apply the mask in place
    data[bigmask] = np.nan
    return data
Mask a 2d image (data) such that pixels within 'region' are set to nan. Parameters ---------- data : 2d-array Image array. wcs : astropy.wcs.WCS WCS for the image in question. region : :class:`AegeanTools.regions.Region` A region within which the image pixels will be masked. negate : bool If True then pixels *outside* the region are masked. Default = False. Returns ------- masked : 2d-array The original array, but masked as required.
Below is the the instruction that describes the task: ### Input: Mask a 2d image (data) such that pixels within 'region' are set to nan. Parameters ---------- data : 2d-array Image array. wcs : astropy.wcs.WCS WCS for the image in question. region : :class:`AegeanTools.regions.Region` A region within which the image pixels will be masked. negate : bool If True then pixels *outside* the region are masked. Default = False. Returns ------- masked : 2d-array The original array, but masked as required. ### Response: def mask_plane(data, wcs, region, negate=False): """ Mask a 2d image (data) such that pixels within 'region' are set to nan. Parameters ---------- data : 2d-array Image array. wcs : astropy.wcs.WCS WCS for the image in question. region : :class:`AegeanTools.regions.Region` A region within which the image pixels will be masked. negate : bool If True then pixels *outside* the region are masked. Default = False. Returns ------- masked : 2d-array The original array, but masked as required. """ # create an array but don't set the values (they are random) indexes = np.empty((data.shape[0]*data.shape[1], 2), dtype=int) # since I know exactly what the index array needs to look like i can construct # it faster than list comprehension would allow # we do this only once and then recycle it idx = np.array([(j, 0) for j in range(data.shape[1])]) j = data.shape[1] for i in range(data.shape[0]): idx[:, 1] = i indexes[i*j:(i+1)*j] = idx # put ALL the pixles into our vectorized functions and minimise our overheads ra, dec = wcs.wcs_pix2world(indexes, 1).transpose() bigmask = region.sky_within(ra, dec, degin=True) if not negate: bigmask = np.bitwise_not(bigmask) # rework our 1d list into a 2d array bigmask = bigmask.reshape(data.shape) # and apply the mask data[bigmask] = np.nan return data
def get_encodings_in_range(rmin, rmax, ranges=None):
    """
    Returns the valid encodings for a given encoding range.

    The encoding ranges are stored in the :py:data:`RANGES` dictionary, with
    the encoding name as a string and a list as a value containing the
    phred score and a tuple with the encoding range. For a given encoding
    range provided via the two first arguments, this function will return
    all possible encodings and phred scores.

    Parameters
    ----------
    rmin : int
        Minimum Unicode code in range.
    rmax : int
        Maximum Unicode code in range.
    ranges : dict, optional
        Mapping of encoding name to ``(phred, (emin, emax))``.  Defaults to
        the module-level :py:data:`RANGES` table; passed explicitly this
        generalizes the lookup to alternative encoding tables.

    Returns
    -------
    valid_encodings : list
        List of all possible encodings for the provided range.
    valid_phred : list
        List of all possible phred scores.
    """
    if ranges is None:
        ranges = RANGES
    valid_encodings = []
    valid_phred = []
    for encoding, (phred, (emin, emax)) in ranges.items():
        # an encoding is valid when the observed range fits inside it
        if rmin >= emin and rmax <= emax:
            valid_encodings.append(encoding)
            valid_phred.append(phred)

    return valid_encodings, valid_phred
Returns the valid encodings for a given encoding range. The encoding ranges are stored in the :py:data:`RANGES` dictionary, with the encoding name as a string and a list as a value containing the phred score and a tuple with the encoding range. For a given encoding range provided via the two first arguments, this function will return all possible encodings and phred scores. Parameters ---------- rmin : int Minimum Unicode code in range. rmax : int Maximum Unicode code in range. Returns ------- valid_encodings : list List of all possible encodings for the provided range. valid_phred : list List of all possible phred scores.
Below is the the instruction that describes the task: ### Input: Returns the valid encodings for a given encoding range. The encoding ranges are stored in the :py:data:`RANGES` dictionary, with the encoding name as a string and a list as a value containing the phred score and a tuple with the encoding range. For a given encoding range provided via the two first arguments, this function will return all possible encodings and phred scores. Parameters ---------- rmin : int Minimum Unicode code in range. rmax : int Maximum Unicode code in range. Returns ------- valid_encodings : list List of all possible encodings for the provided range. valid_phred : list List of all possible phred scores. ### Response: def get_encodings_in_range(rmin, rmax): """ Returns the valid encodings for a given encoding range. The encoding ranges are stored in the :py:data:`RANGES` dictionary, with the encoding name as a string and a list as a value containing the phred score and a tuple with the encoding range. For a given encoding range provided via the two first arguments, this function will return all possible encodings and phred scores. Parameters ---------- rmin : int Minimum Unicode code in range. rmax : int Maximum Unicode code in range. Returns ------- valid_encodings : list List of all possible encodings for the provided range. valid_phred : list List of all possible phred scores. """ valid_encodings = [] valid_phred = [] for encoding, (phred, (emin, emax)) in RANGES.items(): if rmin >= emin and rmax <= emax: valid_encodings.append(encoding) valid_phred.append(phred) return valid_encodings, valid_phred
def load_configuration(app_name):
    '''
    Creates a new configuration and loads the appropriate files.

    Looks for ``<app_name>.yml`` in the current directory, the user's home
    directory and the system/prefix config directory (first match wins),
    then resolves the data directory from the config, the ``APP_NAME``
    environment variable, or the share directory, in that order.

    :param app_name: application name used for config filenames and the
        environment-variable fallback.
    :return: an ``AppContext`` built from the loaded config and data dir.
    '''
    if sys.prefix == '/usr':
        conf_dir = '/etc'
        share_dir = '/usr/share'
    else:
        conf_dir = os.path.join(sys.prefix, 'etc')
        share_dir = os.path.join(sys.prefix, 'share')

    # Step 1: try to locate <app_name>.yml
    yml_config = {}
    for fname in ['%s.yml' % (app_name,),
                  os.path.expanduser('~/.%s.yml' % (app_name,)),
                  os.path.join(conf_dir, '%s.yml' % (app_name,))]:
        if os.path.exists(fname):
            # SECURITY/BUGFIX: use safe_load instead of yaml.load -- the
            # latter can construct arbitrary Python objects from untrusted
            # config.  The context manager also closes the handle, which
            # the original leaked.  `or {}` guards against an empty file,
            # where safe_load returns None and the subscript below would
            # raise TypeError instead of falling back.
            with open(fname) as fh:
                yml_config = yaml.safe_load(fh) or {}
            break

    try:
        data_dir = yml_config['paths']['data_dir']
    except KeyError:
        try:
            data_dir = os.environ[app_name.upper()]
        except KeyError:
            data_dir = os.path.join(share_dir, app_name)

    return AppContext(yml_config, data_dir)
creates a new configuration and loads the appropriate files.
Below is the the instruction that describes the task: ### Input: creates a new configuration and loads the appropriate files. ### Response: def load_configuration(app_name): ''' creates a new configuration and loads the appropriate files. ''' if sys.prefix == '/usr': conf_dir = '/etc' share_dir = '/usr/share' else: conf_dir = os.path.join(sys.prefix, 'etc') share_dir = os.path.join(sys.prefix, 'share') # Step 1: try to locate pynlp.yml yml_config = {} for fname in [ '%s.yml'%(app_name,), os.path.expanduser('~/.%s.yml'%(app_name,)), os.path.join(conf_dir, '%s.yml'%(app_name,))]: if os.path.exists(fname): yml_config = yaml.load(open(fname)) break try: data_dir = yml_config['paths']['data_dir'] except KeyError: try: data_dir = os.environ[app_name.upper()] except KeyError: data_dir = os.path.join(share_dir, app_name) return AppContext(yml_config, data_dir)
def get_doc(additional_doc=False, field_prefix='$', field_suffix=':',
            indent=4):
    """Return a formatted string containing documentation about the audio
    fields.

    :param additional_doc: optional dict of extra field descriptions merged
        over the module-level ``fields`` table (falsy to use ``fields``
        unchanged).
    :param field_prefix: string printed before each field name.
    :param field_suffix: string printed after each field name.
    :param indent: number of spaces each field line is indented by.
    """
    if additional_doc:
        f = fields.copy()
        f.update(additional_doc)
    else:
        f = fields
    field_length = get_max_field_length(f)
    field_length = field_length + len(field_prefix) + len(field_suffix) + 4
    description_indent = ' ' * (indent + field_length)
    output = ''
    for field, description in sorted(f.items()):
        description = description['description']
        # BUGFIX: the suffix was hard-coded to ':' so the field_suffix
        # parameter was silently ignored (only its length was counted
        # in field_length above).
        field = ' ' * indent + field_prefix + field + field_suffix
        # fill with the full indent, then slice it off the first line so
        # the description starts right after the padded field label
        output += field.ljust(field_length) + \
            textwrap.fill(
                description,
                width=78,
                initial_indent=description_indent,
                subsequent_indent=description_indent
            )[field_length:] + '\n\n\n'
    return output
Return a formated string containing documentation about the audio fields.
Below is the the instruction that describes the task: ### Input: Return a formated string containing documentation about the audio fields. ### Response: def get_doc(additional_doc=False, field_prefix='$', field_suffix=':', indent=4): """Return a formated string containing documentation about the audio fields. """ if additional_doc: f = fields.copy() f.update(additional_doc) else: f = fields field_length = get_max_field_length(f) field_length = field_length + len(field_prefix) + len(field_suffix) + 4 description_indent = ' ' * (indent + field_length) output = '' for field, description in sorted(f.items()): description = description['description'] field = ' ' * indent + field_prefix + field + ':' output += field.ljust(field_length) + \ textwrap.fill( description, width=78, initial_indent=description_indent, subsequent_indent=description_indent )[field_length:] + '\n\n\n' return output
def valid_host(host):
    """Check that every dot-separated label of ``host`` is a valid
    hostname part."""
    return all(_valid_host_part.match(part) for part in host.split("."))
check valid hostname
Below is the the instruction that describes the task: ### Input: check valid hostname ### Response: def valid_host(host): """ check valid hostname """ for part in host.split("."): if not _valid_host_part.match(part): return False return True
def feed(self, text=None, source=None):
    """
    Feed some text to the database, either from a string (``text``)
    or a file (``source``).

    >>> db = TrigramsDB()
    >>> db.feed("This is my text")
    >>> db.feed(source="some/file.txt")
    """
    if text is not None:
        tokens = re.split(r'\s+', text)
        # slide a window of three consecutive words over the token stream
        for start in range(len(tokens) - 2):
            self._insert(tokens[start:start + 3])
    if source is not None:
        with open(source, 'r') as fh:
            self.feed(fh.read())
Feed some text to the database, either from a string (``text``) or a file (``source``). >>> db = TrigramsDB() >>> db.feed("This is my text") >>> db.feed(source="some/file.txt")
Below is the the instruction that describes the task: ### Input: Feed some text to the database, either from a string (``text``) or a file (``source``). >>> db = TrigramsDB() >>> db.feed("This is my text") >>> db.feed(source="some/file.txt") ### Response: def feed(self, text=None, source=None): """ Feed some text to the database, either from a string (``text``) or a file (``source``). >>> db = TrigramsDB() >>> db.feed("This is my text") >>> db.feed(source="some/file.txt") """ if text is not None: words = re.split(r'\s+', text) wlen = len(words) for i in range(wlen - 2): self._insert(words[i:i+3]) if source is not None: with open(source, 'r') as f: self.feed(f.read())
def ensure_rng(rng, impl='numpy'):
    """
    Coerce ``rng`` into a random number generator of the requested flavour.

    ``rng`` may be None (use the module-level generator), an integer seed,
    or an existing generator.  A python ``random.Random`` is converted to a
    numpy ``RandomState`` (and vice versa) by copying its internal state,
    so both produce the same stream of numbers.

    :param rng: None, int seed, ``random.Random`` or
        ``numpy.random.RandomState``.
    :param impl: 'numpy' or 'python' -- which generator type to return.
    :raises KeyError: if ``impl`` is not one of the known implementations.
    """
    if impl == 'numpy':
        if rng is None:
            return np.random
        if isinstance(rng, int):
            return np.random.RandomState(seed=rng)
        if isinstance(rng, random.Random):
            # mirror the python generator's state onto a numpy one
            npstate = _pystate_to_npstate(rng.getstate())
            converted = np.random.RandomState(seed=0)
            converted.set_state(npstate)
            return converted
        return rng
    if impl == 'python':
        if rng is None:
            return random
        if isinstance(rng, int):
            return random.Random(rng)
        if isinstance(rng, np.random.RandomState):
            # mirror the numpy generator's state onto a python one
            pystate = _npstate_to_pystate(rng.get_state())
            converted = random.Random(0)
            converted.setstate(pystate)
            return converted
        return rng
    raise KeyError('unknown rng impl={}'.format(impl))
Returns a random number generator Example: >>> # ENABLE_DOCTEST >>> from utool.util_numpy import * # NOQA >>> import utool as ut >>> import numpy as np >>> num = 4 >>> print('--- Python as PYTHON ---') >>> py_rng = random.Random(0) >>> pp_nums = [py_rng.random() for _ in range(num)] >>> print(pp_nums) >>> print('--- Numpy as PYTHON ---') >>> np_rng = ut.ensure_rng(random.Random(0), impl='numpy') >>> np_nums = [np_rng.rand() for _ in range(num)] >>> print(np_nums) >>> print('--- Numpy as NUMPY---') >>> np_rng = np.random.RandomState(seed=0) >>> nn_nums = [np_rng.rand() for _ in range(num)] >>> print(nn_nums) >>> print('--- Python as NUMPY---') >>> py_rng = ut.ensure_rng(np.random.RandomState(seed=0), impl='python') >>> pn_nums = [py_rng.random() for _ in range(num)] >>> print(pn_nums) >>> assert np_nums == pp_nums >>> assert pn_nums == nn_nums
Below is the the instruction that describes the task: ### Input: Returns a random number generator Example: >>> # ENABLE_DOCTEST >>> from utool.util_numpy import * # NOQA >>> import utool as ut >>> import numpy as np >>> num = 4 >>> print('--- Python as PYTHON ---') >>> py_rng = random.Random(0) >>> pp_nums = [py_rng.random() for _ in range(num)] >>> print(pp_nums) >>> print('--- Numpy as PYTHON ---') >>> np_rng = ut.ensure_rng(random.Random(0), impl='numpy') >>> np_nums = [np_rng.rand() for _ in range(num)] >>> print(np_nums) >>> print('--- Numpy as NUMPY---') >>> np_rng = np.random.RandomState(seed=0) >>> nn_nums = [np_rng.rand() for _ in range(num)] >>> print(nn_nums) >>> print('--- Python as NUMPY---') >>> py_rng = ut.ensure_rng(np.random.RandomState(seed=0), impl='python') >>> pn_nums = [py_rng.random() for _ in range(num)] >>> print(pn_nums) >>> assert np_nums == pp_nums >>> assert pn_nums == nn_nums ### Response: def ensure_rng(rng, impl='numpy'): """ Returns a random number generator Example: >>> # ENABLE_DOCTEST >>> from utool.util_numpy import * # NOQA >>> import utool as ut >>> import numpy as np >>> num = 4 >>> print('--- Python as PYTHON ---') >>> py_rng = random.Random(0) >>> pp_nums = [py_rng.random() for _ in range(num)] >>> print(pp_nums) >>> print('--- Numpy as PYTHON ---') >>> np_rng = ut.ensure_rng(random.Random(0), impl='numpy') >>> np_nums = [np_rng.rand() for _ in range(num)] >>> print(np_nums) >>> print('--- Numpy as NUMPY---') >>> np_rng = np.random.RandomState(seed=0) >>> nn_nums = [np_rng.rand() for _ in range(num)] >>> print(nn_nums) >>> print('--- Python as NUMPY---') >>> py_rng = ut.ensure_rng(np.random.RandomState(seed=0), impl='python') >>> pn_nums = [py_rng.random() for _ in range(num)] >>> print(pn_nums) >>> assert np_nums == pp_nums >>> assert pn_nums == nn_nums """ if impl == 'numpy': if rng is None: rng = np.random elif isinstance(rng, int): rng = np.random.RandomState(seed=rng) elif isinstance(rng, random.Random): # Convert 
python to numpy random state py_rng = rng pystate = py_rng.getstate() npstate = _pystate_to_npstate(pystate) rng = np_rng = np.random.RandomState(seed=0) np_rng.set_state(npstate) elif impl == 'python': if rng is None: rng = random elif isinstance(rng, int): rng = random.Random(rng) elif isinstance(rng, np.random.RandomState): # Convert numpy to python random state np_rng = rng npstate = np_rng.get_state() pystate = _npstate_to_pystate(npstate) rng = py_rng = random.Random(0) py_rng.setstate(pystate) else: raise KeyError('unknown rng impl={}'.format(impl)) return rng
def expand_defaults(schema, features):
    """Add to features any default transformations.

    Not every column in the schema has an explicit feature transformation
    listed in the features file. For these columns, add a default
    transformation based on the schema's type. The features dict is modified
    in place; afterwards every schema column is used by a feature and every
    feature uses a schema column.

    Args:
      schema: schema list
      features: features dict

    Raises:
      ValueError: if transform cannot be applied given schema type.
    """
    schema_names = [entry['name'] for entry in schema]

    # Default each transform's source column to the feature's own name.
    for feature_name, transform in six.iteritems(features):
        if 'source_column' not in transform:
            transform['source_column'] = feature_name

    # Validate source columns against the schema and record which are used.
    used_schema_columns = []
    for feature_name, transform in six.iteritems(features):
        source = transform['source_column']
        if source not in schema_names:
            raise ValueError('source column %s is not in the schema for transform %s'
                             % (source, feature_name))
        used_schema_columns.append(source)

    # Give every unused schema column a type-appropriate default transform.
    supported_types = constant.NUMERIC_SCHEMA + [constant.STRING_SCHEMA]
    for col_schema in schema:
        schema_name = col_schema['name']
        schema_type = col_schema['type'].lower()

        if schema_type not in supported_types:
            raise ValueError(('Only the following schema types are supported: %s'
                              % ' '.join(supported_types)))

        if schema_name in used_schema_columns:
            continue

        if schema_type in constant.NUMERIC_SCHEMA:
            features[schema_name] = {
                'transform': constant.DEFAULT_NUMERIC_TRANSFORM,
                'source_column': schema_name}
        elif schema_type == constant.STRING_SCHEMA:
            features[schema_name] = {
                'transform': constant.DEFAULT_CATEGORICAL_TRANSFORM,
                'source_column': schema_name}
        else:
            raise NotImplementedError('Unknown type %s' % schema_type)
Add to features any default transformations. Not every column in the schema has an explicit feature transformation listed in the featurs file. For these columns, add a default transformation based on the schema's type. The features dict is modified by this function call. After this function call, every column in schema is used in a feature, and every feature uses a column in the schema. Args: schema: schema list features: features dict Raises: ValueError: if transform cannot be applied given schema type.
Below is the the instruction that describes the task: ### Input: Add to features any default transformations. Not every column in the schema has an explicit feature transformation listed in the featurs file. For these columns, add a default transformation based on the schema's type. The features dict is modified by this function call. After this function call, every column in schema is used in a feature, and every feature uses a column in the schema. Args: schema: schema list features: features dict Raises: ValueError: if transform cannot be applied given schema type. ### Response: def expand_defaults(schema, features): """Add to features any default transformations. Not every column in the schema has an explicit feature transformation listed in the featurs file. For these columns, add a default transformation based on the schema's type. The features dict is modified by this function call. After this function call, every column in schema is used in a feature, and every feature uses a column in the schema. Args: schema: schema list features: features dict Raises: ValueError: if transform cannot be applied given schema type. """ schema_names = [x['name'] for x in schema] # Add missing source columns for name, transform in six.iteritems(features): if 'source_column' not in transform: transform['source_column'] = name # Check source columns are in the schema and collect which are used. used_schema_columns = [] for name, transform in six.iteritems(features): if transform['source_column'] not in schema_names: raise ValueError('source column %s is not in the schema for transform %s' % (transform['source_column'], name)) used_schema_columns.append(transform['source_column']) # Update default transformation based on schema. 
for col_schema in schema: schema_name = col_schema['name'] schema_type = col_schema['type'].lower() if schema_type not in constant.NUMERIC_SCHEMA + [constant.STRING_SCHEMA]: raise ValueError(('Only the following schema types are supported: %s' % ' '.join(constant.NUMERIC_SCHEMA + [constant.STRING_SCHEMA]))) if schema_name not in used_schema_columns: # add the default transform to the features if schema_type in constant.NUMERIC_SCHEMA: features[schema_name] = { 'transform': constant.DEFAULT_NUMERIC_TRANSFORM, 'source_column': schema_name} elif schema_type == constant.STRING_SCHEMA: features[schema_name] = { 'transform': constant.DEFAULT_CATEGORICAL_TRANSFORM, 'source_column': schema_name} else: raise NotImplementedError('Unknown type %s' % schema_type)
def filter_list(lst, pattern):
    """
    Filters the lst using pattern.

    If pattern starts with '(' it will be considered a re regular expression,
    otherwise it will use fnmatch filter.

    :param lst: list of strings
    :param pattern: string
    :return: list of strings
        Filtered (and sorted, when non-empty) list of strings
    """
    use_fnmatch = is_fnmatch_regex(pattern) and not is_regex(pattern)
    if use_fnmatch:
        # Shell-style wildcard matching.
        log.info('Using fnmatch for {0}'.format(pattern))
        filtered = fnmatch.filter(lst, pattern)
    else:
        # Full regular-expression matching.
        log.info('Using regex match for {0}'.format(pattern))
        filtered = match_list(lst, pattern)

    if filtered:
        filtered.sort()
    return filtered
Filters the lst using pattern. If pattern starts with '(' it will be considered a re regular expression, otherwise it will use fnmatch filter. :param lst: list of strings :param pattern: string :return: list of strings Filtered list of strings
Below is the the instruction that describes the task: ### Input: Filters the lst using pattern. If pattern starts with '(' it will be considered a re regular expression, otherwise it will use fnmatch filter. :param lst: list of strings :param pattern: string :return: list of strings Filtered list of strings ### Response: def filter_list(lst, pattern): """ Filters the lst using pattern. If pattern starts with '(' it will be considered a re regular expression, otherwise it will use fnmatch filter. :param lst: list of strings :param pattern: string :return: list of strings Filtered list of strings """ if is_fnmatch_regex(pattern) and not is_regex(pattern): #use fnmatch log.info('Using fnmatch for {0}'.format(pattern)) filst = fnmatch.filter(lst, pattern) else: #use re log.info('Using regex match for {0}'.format(pattern)) filst = match_list(lst, pattern) if filst: filst.sort() return filst
def persist_uncles(self, uncles: Tuple[BlockHeader]) -> Hash32:
    """
    Persists the list of uncles to the database.

    Returns the uncles hash.

    :param uncles: tuple of block headers to store as uncles
    :return: hash identifying the stored uncle list
    """
    # Delegates to the class-level helper so the same logic can be reused
    # with an explicit database handle.
    return self._persist_uncles(self.db, uncles)
Persists the list of uncles to the database. Returns the uncles hash.
Below is the the instruction that describes the task: ### Input: Persists the list of uncles to the database. Returns the uncles hash. ### Response: def persist_uncles(self, uncles: Tuple[BlockHeader]) -> Hash32: """ Persists the list of uncles to the database. Returns the uncles hash. """ return self._persist_uncles(self.db, uncles)
def isotopePattern(sum_formula, threshold=1e-4, rel_threshold=True, desired_prob=None):
    """
    Calculates isotopic peaks for a sum formula.

    :param sum_formula: text representation of an atomic composition
    :type sum_formula: str
    :param threshold: minimum peak abundance
    :type threshold: float
    :param rel_threshold: if True, threshold is relative to the highest peak,
        otherwise it is a probability
    :type rel_threshold: bool
    :param desired_prob: total probability covered by the result;
        if set, threshold parameter is ignored
    :type desired_prob: float | None
    """
    assert 0 <= threshold < 1
    assert desired_prob is None or 0 < desired_prob <= 1

    formula_bytes = sum_formula.encode('ascii')
    if desired_prob:
        # Coverage-driven computation: keep peaks until the requested total
        # probability is reached.
        spectrum = ims.spectrum_new_from_sf(formula_bytes, desired_prob)
    else:
        # Threshold-driven computation (relative or absolute abundance).
        spectrum = ims.spectrum_new_from_sf_thr(formula_bytes, threshold, rel_threshold)
    return _new_spectrum(TheoreticalSpectrum, spectrum)
Calculates isotopic peaks for a sum formula. :param sum_formula: text representation of an atomic composition :type sum_formula: str :param threshold: minimum peak abundance :type threshold: float :param rel_threshold: if True, threshold is relative to the highest peak, otherwise it is a probability :type rel_threshold: bool :param desired_prob: total probability covered by the result; if set, threshold parameter is ignored :type desired_prob: float | None
Below is the the instruction that describes the task: ### Input: Calculates isotopic peaks for a sum formula. :param sum_formula: text representation of an atomic composition :type sum_formula: str :param threshold: minimum peak abundance :type threshold: float :param rel_threshold: if True, threshold is relative to the highest peak, otherwise it is a probability :type rel_threshold: bool :param desired_prob: total probability covered by the result; if set, threshold parameter is ignored :type desired_prob: float | None ### Response: def isotopePattern(sum_formula, threshold=1e-4, rel_threshold=True, desired_prob=None): """ Calculates isotopic peaks for a sum formula. :param sum_formula: text representation of an atomic composition :type sum_formula: str :param threshold: minimum peak abundance :type threshold: float :param rel_threshold: if True, threshold is relative to the highest peak, otherwise it is a probability :type rel_threshold: bool :param desired_prob: total probability covered by the result; if set, threshold parameter is ignored :type desired_prob: float | None """ assert threshold >= 0 and threshold < 1 assert desired_prob is None or (desired_prob > 0 and desired_prob <= 1) if desired_prob: s = ims.spectrum_new_from_sf(sum_formula.encode('ascii'), desired_prob) else: s = ims.spectrum_new_from_sf_thr(sum_formula.encode('ascii'), threshold, rel_threshold) return _new_spectrum(TheoreticalSpectrum, s)
def output_forecasts_csv(self, forecasts, mode, csv_path, run_date_format="%Y%m%d-%H%M"):
    """
    Output hail forecast values to csv files by run date and ensemble member.

    Args:
        forecasts: dict of DataFrames; the "condition" and "dist" frames are
            merged on their step/track/member/hour identifier columns.
        mode: key into ``self.data`` selecting the data partition.
        csv_path: directory where the csv files are written.
        run_date_format: strftime format for the run date in output filenames.

    Returns:
        None
    """
    merged_forecasts = pd.merge(forecasts["condition"], forecasts["dist"],
                                on=["Step_ID", "Track_ID", "Ensemble_Member", "Forecast_Hour"])
    all_members = self.data[mode]["combo"]["Ensemble_Member"]
    members = np.unique(all_members)
    all_run_dates = pd.DatetimeIndex(self.data[mode]["combo"]["Run_Date"])
    run_dates = pd.DatetimeIndex(np.unique(all_run_dates))
    # Removed a leftover debug print of run_dates that polluted stdout.
    for member in members:
        for run_date in run_dates:
            # Rows belonging to this (member, run date) pair.
            # NOTE(review): this boolean mask is built over the raw combo
            # frame but applied to the merged frame — assumes both have the
            # same row order/length; confirm upstream.
            mem_run_index = (all_run_dates == run_date) & (all_members == member)
            member_forecast = merged_forecasts.loc[mem_run_index]
            member_forecast.to_csv(join(csv_path,
                                        "hail_forecasts_{0}_{1}_{2}.csv".format(
                                            self.ensemble_name,
                                            member,
                                            run_date.strftime(run_date_format))))
    return
Output hail forecast values to csv files by run date and ensemble member. Args: forecasts: mode: csv_path: Returns:
Below is the the instruction that describes the task: ### Input: Output hail forecast values to csv files by run date and ensemble member. Args: forecasts: mode: csv_path: Returns: ### Response: def output_forecasts_csv(self, forecasts, mode, csv_path, run_date_format="%Y%m%d-%H%M"): """ Output hail forecast values to csv files by run date and ensemble member. Args: forecasts: mode: csv_path: Returns: """ merged_forecasts = pd.merge(forecasts["condition"], forecasts["dist"], on=["Step_ID","Track_ID","Ensemble_Member","Forecast_Hour"]) all_members = self.data[mode]["combo"]["Ensemble_Member"] members = np.unique(all_members) all_run_dates = pd.DatetimeIndex(self.data[mode]["combo"]["Run_Date"]) run_dates = pd.DatetimeIndex(np.unique(all_run_dates)) print(run_dates) for member in members: for run_date in run_dates: mem_run_index = (all_run_dates == run_date) & (all_members == member) member_forecast = merged_forecasts.loc[mem_run_index] member_forecast.to_csv(join(csv_path, "hail_forecasts_{0}_{1}_{2}.csv".format(self.ensemble_name, member, run_date.strftime (run_date_format)))) return
def _parse_input_node(cls, node): """ :param node: xml node :return: dict """ data = {} child = node.getchildren() if not child and node.get('name'): val = node.text elif child: # if tag = "{http://activiti.org/bpmn}script" then data_typ = 'script' data_typ = child[0].tag.split('}')[1] val = getattr(cls, '_parse_%s' % data_typ)(child[0]) data[node.get('name')] = val return data
:param node: xml node :return: dict
Below is the the instruction that describes the task: ### Input: :param node: xml node :return: dict ### Response: def _parse_input_node(cls, node): """ :param node: xml node :return: dict """ data = {} child = node.getchildren() if not child and node.get('name'): val = node.text elif child: # if tag = "{http://activiti.org/bpmn}script" then data_typ = 'script' data_typ = child[0].tag.split('}')[1] val = getattr(cls, '_parse_%s' % data_typ)(child[0]) data[node.get('name')] = val return data
def next_frame_l2():
    """Basic conv model with L2 modality."""
    # Start from the deterministic baseline and only swap the video loss.
    hparams = next_frame_basic_deterministic()
    hparams.loss["targets"] = modalities.video_l2_loss
    # NOTE(review): top stays the L1 top even though the loss is L2 — confirm
    # this pairing is intentional.
    hparams.top["targets"] = modalities.video_l1_top
    hparams.video_modality_loss_cutoff = 2.4
    return hparams
Basic conv model with L2 modality.
Below is the the instruction that describes the task: ### Input: Basic conv model with L2 modality. ### Response: def next_frame_l2(): """Basic conv model with L2 modality.""" hparams = next_frame_basic_deterministic() hparams.loss["targets"] = modalities.video_l2_loss hparams.top["targets"] = modalities.video_l1_top hparams.video_modality_loss_cutoff = 2.4 return hparams
def visit_For(self, node):
    """
    Handle iterator variable in for loops.

    Iterate variable may be the correct one at the end of the loop.
    """
    loop_body = node.body
    if node.target.id in self.naming:
        # Target already known: model the rebinding with an explicit
        # assignment prepended to the loop body.
        assign = ast.Assign(targets=[node.target], value=node.iter)
        self.visit_any_conditionnal([assign] + loop_body, node.orelse)
    else:
        # First sighting: the target depends on the iterable expression.
        self.naming[node.target.id] = self.visit(node.iter)
        self.visit_any_conditionnal(loop_body, loop_body + node.orelse)
Handle iterator variable in for loops. Iterate variable may be the correct one at the end of the loop.
Below is the the instruction that describes the task: ### Input: Handle iterator variable in for loops. Iterate variable may be the correct one at the end of the loop. ### Response: def visit_For(self, node): """ Handle iterator variable in for loops. Iterate variable may be the correct one at the end of the loop. """ body = node.body if node.target.id in self.naming: body = [ast.Assign(targets=[node.target], value=node.iter)] + body self.visit_any_conditionnal(body, node.orelse) else: iter_dep = self.visit(node.iter) self.naming[node.target.id] = iter_dep self.visit_any_conditionnal(body, body + node.orelse)
def get(cls):  # type: () -> Shell
    """
    Retrieve the current shell.

    The detected shell is cached on the class, so detection runs at most
    once per process.

    :raises RuntimeError: when the current shell cannot be detected.
    """
    if cls._shell is not None:
        return cls._shell

    try:
        name, path = detect_shell(os.getpid())
    except (RuntimeError, ShellDetectionFailure):
        # Normalize both detection failure modes into a single error type.
        raise RuntimeError("Unable to detect the current shell.")

    cls._shell = cls(name, path)

    return cls._shell
Retrieve the current shell.
Below is the the instruction that describes the task: ### Input: Retrieve the current shell. ### Response: def get(cls): # type: () -> Shell """ Retrieve the current shell. """ if cls._shell is not None: return cls._shell try: name, path = detect_shell(os.getpid()) except (RuntimeError, ShellDetectionFailure): raise RuntimeError("Unable to detect the current shell.") cls._shell = cls(name, path) return cls._shell
def read_notmuch_config(self, path):
    """
    parse notmuch's config file

    :param path: path to notmuch's config file
    :type path: str
    """
    # Validate the file against the bundled notmuch spec.
    specfile = os.path.join(DEFAULTSPATH, 'notmuch.rc.spec')
    self._notmuchconfig = read_config(path, specfile)
parse notmuch's config file :param path: path to notmuch's config file :type path: str
Below is the the instruction that describes the task: ### Input: parse notmuch's config file :param path: path to notmuch's config file :type path: str ### Response: def read_notmuch_config(self, path): """ parse notmuch's config file :param path: path to notmuch's config file :type path: str """ spec = os.path.join(DEFAULTSPATH, 'notmuch.rc.spec') self._notmuchconfig = read_config(path, spec)
def load(self):
    """Fetches the MAL user page and sets the current user's attributes.

    :rtype: :class:`.User`
    :return: Current user object.
    """
    # NOTE(review): plain-HTTP endpoint; MAL now serves HTTPS — confirm the
    # session follows the redirect.
    user_profile = self.session.session.get(u'http://myanimelist.net/profile/' + utilities.urlencode(self.username)).text
    # Parse the cleaned DOM and copy the resulting attributes onto self.
    self.set(self.parse(utilities.get_clean_dom(user_profile)))
    return self
Fetches the MAL user page and sets the current user's attributes. :rtype: :class:`.User` :return: Current user object.
Below is the the instruction that describes the task: ### Input: Fetches the MAL user page and sets the current user's attributes. :rtype: :class:`.User` :return: Current user object. ### Response: def load(self): """Fetches the MAL user page and sets the current user's attributes. :rtype: :class:`.User` :return: Current user object. """ user_profile = self.session.session.get(u'http://myanimelist.net/profile/' + utilities.urlencode(self.username)).text self.set(self.parse(utilities.get_clean_dom(user_profile))) return self
def process_rules(rules, fixed_text, cur=0, cur_end=1):
    """Process rules matched in pattern and returns suitable replacement.

    If any rule's condition is satisfied, output that rule's "replace",
    else output None.
    """
    for rule in rules:
        # A rule fires only when it has at least one match and every match
        # succeeds; process_match short-circuits on the first failure.
        candidates = rule['matches']
        if candidates and all(process_match(match, fixed_text, cur, cur_end)
                              for match in candidates):
            return rule['replace']
    return None
Process rules matched in pattern and returns suitable replacement If any rule's condition is satisfied, output the rules "replace", else output None
Below is the the instruction that describes the task: ### Input: Process rules matched in pattern and returns suitable replacement If any rule's condition is satisfied, output the rules "replace", else output None ### Response: def process_rules(rules, fixed_text, cur = 0, cur_end = 1): """Process rules matched in pattern and returns suitable replacement If any rule's condition is satisfied, output the rules "replace", else output None """ replaced = '' # iterate through rules for rule in rules: matched = False # iterate through matches for match in rule['matches']: matched = process_match(match, fixed_text, cur, cur_end) # Break out of loop if we dont' have a match. Here we are # trusting avrodict to have listed matches sequentially if not matched: break # If a match is found, stop looping through rules any further if matched: replaced = rule['replace'] break # if any match has been found return replace value if matched: return replaced else: return None
def get_column(self, column_name):
    """
    Returns a column as a Series.

    Parameters
    ----------
    column_name : str

    Returns
    -------
    column : pandas.Series
    """
    # Materialize the frame, then reuse DataFrameWrapper's column logic.
    result_frame = self._call_func()
    wrapper = DataFrameWrapper(self.name, result_frame, copy_col=self.copy_col)
    return wrapper.get_column(column_name)
Returns a column as a Series. Parameters ---------- column_name : str Returns ------- column : pandas.Series
Below is the the instruction that describes the task: ### Input: Returns a column as a Series. Parameters ---------- column_name : str Returns ------- column : pandas.Series ### Response: def get_column(self, column_name): """ Returns a column as a Series. Parameters ---------- column_name : str Returns ------- column : pandas.Series """ frame = self._call_func() return DataFrameWrapper(self.name, frame, copy_col=self.copy_col).get_column(column_name)
def save(self, *args, **kwargs):
    """ Fill 'created' and 'modified' attributes on first create """
    if self.created is None:
        self.created = tz_now()
    # 'modified' mirrors 'created' (which may have just been set above) so a
    # freshly created thread does not look modified right after creation.
    if self.modified is None:
        self.modified = self.created
    super(Thread, self).save(*args, **kwargs)
Fill 'created' and 'modified' attributes on first create
Below is the the instruction that describes the task: ### Input: Fill 'created' and 'modified' attributes on first create ### Response: def save(self, *args, **kwargs): """ Fill 'created' and 'modified' attributes on first create """ if self.created is None: self.created = tz_now() if self.modified is None: self.modified = self.created super(Thread, self).save(*args, **kwargs)
def keyPressEvent(self, event):
    "press ESCAPE to quit the application"
    # Qt key-event hook; all other keys fall through untouched.
    key = event.key()
    if key == Qt.Key_Escape:
        self.app.quit()
press ESCAPE to quit the application
Below is the the instruction that describes the task: ### Input: press ESCAPE to quit the application ### Response: def keyPressEvent(self, event): "press ESCAPE to quit the application" key = event.key() if key == Qt.Key_Escape: self.app.quit()
def setCmd(self, cmd):
    """Validate and store a STOMP command.

    The command is upper-cased before checking; FrameError is raised when it
    is not one of VALID_COMMANDS.
    """
    normalized = cmd.upper()
    if normalized not in VALID_COMMANDS:
        raise FrameError("The cmd '%s' is not valid! It must be one of '%s' (STOMP v%s)." % (
            normalized, VALID_COMMANDS, STOMP_VERSION)
        )
    self._cmd = normalized
Check the cmd is valid, FrameError will be raised if its not.
Below is the the instruction that describes the task: ### Input: Check the cmd is valid, FrameError will be raised if its not. ### Response: def setCmd(self, cmd): """Check the cmd is valid, FrameError will be raised if its not.""" cmd = cmd.upper() if cmd not in VALID_COMMANDS: raise FrameError("The cmd '%s' is not valid! It must be one of '%s' (STOMP v%s)." % ( cmd, VALID_COMMANDS, STOMP_VERSION) ) else: self._cmd = cmd
def score(ID, sign, lon):
    """ Returns the score of an object on a sign and longitude. """
    # Sum the score of every dignity the object holds at this position.
    info = getInfo(sign, lon)
    return sum(SCORES[dign]
               for (dign, objID) in info.items()
               if objID == ID)
Returns the score of an object on a sign and longitude.
Below is the the instruction that describes the task: ### Input: Returns the score of an object on a sign and longitude. ### Response: def score(ID, sign, lon): """ Returns the score of an object on a sign and longitude. """ info = getInfo(sign, lon) dignities = [dign for (dign, objID) in info.items() if objID == ID] return sum([SCORES[dign] for dign in dignities])
def siblings(self, **kwargs):
    """Retrieve the other activities that also belong to the parent.

    It returns a combination of Tasks (a.o. UserTasks) and Subprocesses on
    the level of the current task, including itself. This also works if the
    activity is of type `ActivityType.PROCESS`.

    :param kwargs: Additional search arguments, check :func:`pykechain.Client.activities` for additional info
    :type kwargs: dict or None
    :return: list of :class:`Activity2`
    :raises NotFoundError: when it is a task in the top level of a project

    Example
    -------
    >>> task = project.activity('Some Task')
    >>> siblings = task.siblings()

    Example for siblings containing certain words in the task name
    >>> task = project.activity('Some Task')
    >>> siblings = task.siblings(name__contains='Another Task')
    """
    # Top-level activities have no parent and therefore no siblings.
    parent_id = self._json_data.get('parent_id')
    if parent_id is None:
        raise NotFoundError("Cannot find subprocess for this task '{}', "
                            "as this task exist on top level.".format(self.name))
    # Query all activities sharing the same parent (includes self).
    return self._client.activities(parent_id=parent_id, scope=self.scope_id, **kwargs)
Retrieve the other activities that also belong to the parent. It returns a combination of Tasks (a.o. UserTasks) and Subprocesses on the level of the current task, including itself. This also works if the activity is of type `ActivityType.PROCESS`. :param kwargs: Additional search arguments, check :func:`pykechain.Client.activities` for additional info :type kwargs: dict or None :return: list of :class:`Activity2` :raises NotFoundError: when it is a task in the top level of a project Example ------- >>> task = project.activity('Some Task') >>> siblings = task.siblings() Example for siblings containing certain words in the task name >>> task = project.activity('Some Task') >>> siblings = task.siblings(name__contains='Another Task')
Below is the the instruction that describes the task: ### Input: Retrieve the other activities that also belong to the parent. It returns a combination of Tasks (a.o. UserTasks) and Subprocesses on the level of the current task, including itself. This also works if the activity is of type `ActivityType.PROCESS`. :param kwargs: Additional search arguments, check :func:`pykechain.Client.activities` for additional info :type kwargs: dict or None :return: list of :class:`Activity2` :raises NotFoundError: when it is a task in the top level of a project Example ------- >>> task = project.activity('Some Task') >>> siblings = task.siblings() Example for siblings containing certain words in the task name >>> task = project.activity('Some Task') >>> siblings = task.siblings(name__contains='Another Task') ### Response: def siblings(self, **kwargs): """Retrieve the other activities that also belong to the parent. It returns a combination of Tasks (a.o. UserTasks) and Subprocesses on the level of the current task, including itself. This also works if the activity is of type `ActivityType.PROCESS`. :param kwargs: Additional search arguments, check :func:`pykechain.Client.activities` for additional info :type kwargs: dict or None :return: list of :class:`Activity2` :raises NotFoundError: when it is a task in the top level of a project Example ------- >>> task = project.activity('Some Task') >>> siblings = task.siblings() Example for siblings containing certain words in the task name >>> task = project.activity('Some Task') >>> siblings = task.siblings(name__contains='Another Task') """ parent_id = self._json_data.get('parent_id') if parent_id is None: raise NotFoundError("Cannot find subprocess for this task '{}', " "as this task exist on top level.".format(self.name)) return self._client.activities(parent_id=parent_id, scope=self.scope_id, **kwargs)
def _read_stdin(): """ Generator for reading from standard input in nonblocking mode. Other ways of reading from ``stdin`` in python waits, until the buffer is big enough, or until EOF character is sent. This functions yields immediately after each line. """ line = sys.stdin.readline() while line: yield line line = sys.stdin.readline()
Generator for reading from standard input in nonblocking mode. Other ways of reading from ``stdin`` in python waits, until the buffer is big enough, or until EOF character is sent. This functions yields immediately after each line.
Below is the the instruction that describes the task: ### Input: Generator for reading from standard input in nonblocking mode. Other ways of reading from ``stdin`` in python waits, until the buffer is big enough, or until EOF character is sent. This functions yields immediately after each line. ### Response: def _read_stdin(): """ Generator for reading from standard input in nonblocking mode. Other ways of reading from ``stdin`` in python waits, until the buffer is big enough, or until EOF character is sent. This functions yields immediately after each line. """ line = sys.stdin.readline() while line: yield line line = sys.stdin.readline()
def CredibleInterval(self, percentage=90):
    """Computes the central credible interval.

    If percentage=90, computes the 90% CI.

    Args:
        percentage: float between 0 and 100

    Returns:
        sequence of two floats, low and high
    """
    # Split the excluded probability mass equally between the two tails.
    tail_prob = (1 - percentage / 100.0) / 2
    return self.Value(tail_prob), self.Value(1 - tail_prob)
Computes the central credible interval. If percentage=90, computes the 90% CI. Args: percentage: float between 0 and 100 Returns: sequence of two floats, low and high
Below is the the instruction that describes the task: ### Input: Computes the central credible interval. If percentage=90, computes the 90% CI. Args: percentage: float between 0 and 100 Returns: sequence of two floats, low and high ### Response: def CredibleInterval(self, percentage=90): """Computes the central credible interval. If percentage=90, computes the 90% CI. Args: percentage: float between 0 and 100 Returns: sequence of two floats, low and high """ prob = (1 - percentage / 100.0) / 2 interval = self.Value(prob), self.Value(1 - prob) return interval
async def generate_credentials(self):
    """Create new credentials for authentication.

    Credentials that have been authenticated shall be saved and loaded
    with load_credentials before playing anything. If credentials are
    lost, authentication must be performed again.

    :return: credentials string of the form ``identifier:SEED`` with the
        seed upper-cased hex.
    """
    identifier, seed = new_credentials()
    # Seed is bytes; decode and upper-case it for the textual form.
    return '{0}:{1}'.format(identifier, seed.decode().upper())
Create new credentials for authentication. Credentials that have been authenticated shall be saved and loaded with load_credentials before playing anything. If credentials are lost, authentication must be performed again.
Below is the the instruction that describes the task: ### Input: Create new credentials for authentication. Credentials that have been authenticated shall be saved and loaded with load_credentials before playing anything. If credentials are lost, authentication must be performed again. ### Response: async def generate_credentials(self): """Create new credentials for authentication. Credentials that have been authenticated shall be saved and loaded with load_credentials before playing anything. If credentials are lost, authentication must be performed again. """ identifier, seed = new_credentials() return '{0}:{1}'.format(identifier, seed.decode().upper())
def cvxEDA(eda, sampling_rate=1000, tau0=2., tau1=0.7, delta_knot=10., alpha=8e-4, gamma=1e-2, solver=None, verbose=False, options={'reltol':1e-9}): """ A convex optimization approach to electrodermal activity processing (CVXEDA). This function implements the cvxEDA algorithm described in "cvxEDA: a Convex Optimization Approach to Electrodermal Activity Processing" (Greco et al., 2015). Parameters ---------- eda : list or array raw EDA signal array. sampling_rate : int Sampling rate (samples/second). tau0 : float Slow time constant of the Bateman function. tau1 : float Fast time constant of the Bateman function. delta_knot : float Time between knots of the tonic spline function. alpha : float Penalization for the sparse SMNA driver. gamma : float Penalization for the tonic spline coefficients. solver : bool Sparse QP solver to be used, see cvxopt.solvers.qp verbose : bool Print progress? options : dict Solver options, see http://cvxopt.org/userguide/coneprog.html#algorithm-parameters Returns ---------- phasic : numpy.array The phasic component. Notes ---------- *Authors* - Luca Citi (https://github.com/lciti) - Alberto Greco *Dependencies* - cvxopt - numpy *See Also* - cvxEDA: https://github.com/lciti/cvxEDA References ----------- - Greco, A., Valenza, G., & Scilingo, E. P. (2016). Evaluation of CDA and CvxEDA Models. In Advances in Electrodermal Activity Processing with Applications for Mental Health (pp. 35-43). Springer International Publishing. - Greco, A., Valenza, G., Lanata, A., Scilingo, E. P., & Citi, L. (2016). cvxEDA: A convex optimization approach to electrodermal activity processing. IEEE Transactions on Biomedical Engineering, 63(4), 797-804. """ frequency = 1/sampling_rate # Normalizing signal eda = z_score(eda) eda = np.array(eda)[:,0] n = len(eda) eda = eda.astype('double') eda = cv.matrix(eda) # bateman ARMA model a1 = 1./min(tau1, tau0) # a1 > a0 a0 = 1./max(tau1, tau0) ar = np.array([(a1*frequency + 2.) 
* (a0*frequency + 2.), 2.*a1*a0*frequency**2 - 8., (a1*frequency - 2.) * (a0*frequency - 2.)]) / ((a1 - a0) * frequency**2) ma = np.array([1., 2., 1.]) # matrices for ARMA model i = np.arange(2, n) A = cv.spmatrix(np.tile(ar, (n-2,1)), np.c_[i,i,i], np.c_[i,i-1,i-2], (n,n)) M = cv.spmatrix(np.tile(ma, (n-2,1)), np.c_[i,i,i], np.c_[i,i-1,i-2], (n,n)) # spline delta_knot_s = int(round(delta_knot / frequency)) spl = np.r_[np.arange(1.,delta_knot_s), np.arange(delta_knot_s, 0., -1.)] # order 1 spl = np.convolve(spl, spl, 'full') spl /= max(spl) # matrix of spline regressors i = np.c_[np.arange(-(len(spl)//2), (len(spl)+1)//2)] + np.r_[np.arange(0, n, delta_knot_s)] nB = i.shape[1] j = np.tile(np.arange(nB), (len(spl),1)) p = np.tile(spl, (nB,1)).T valid = (i >= 0) & (i < n) B = cv.spmatrix(p[valid], i[valid], j[valid]) # trend C = cv.matrix(np.c_[np.ones(n), np.arange(1., n+1.)/n]) nC = C.size[1] # Solve the problem: # .5*(M*q + B*l + C*d - eda)^2 + alpha*sum(A,1)*p + .5*gamma*l'*l # s.t. A*q >= 0 if verbose is False: options["show_progress"] = False old_options = cv.solvers.options.copy() cv.solvers.options.clear() cv.solvers.options.update(options) if solver == 'conelp': # Use conelp z = lambda m,n: cv.spmatrix([],[],[],(m,n)) G = cv.sparse([[-A,z(2,n),M,z(nB+2,n)],[z(n+2,nC),C,z(nB+2,nC)], [z(n,1),-1,1,z(n+nB+2,1)],[z(2*n+2,1),-1,1,z(nB,1)], [z(n+2,nB),B,z(2,nB),cv.spmatrix(1.0, range(nB), range(nB))]]) h = cv.matrix([z(n,1),.5,.5,eda,.5,.5,z(nB,1)]) c = cv.matrix([(cv.matrix(alpha, (1,n)) * A).T,z(nC,1),1,gamma,z(nB,1)]) res = cv.solvers.conelp(c, G, h, dims={'l':n,'q':[n+2,nB+2],'s':[]}) obj = res['primal objective'] else: # Use qp Mt, Ct, Bt = M.T, C.T, B.T H = cv.sparse([[Mt*M, Ct*M, Bt*M], [Mt*C, Ct*C, Bt*C], [Mt*B, Ct*B, Bt*B+gamma*cv.spmatrix(1.0, range(nB), range(nB))]]) f = cv.matrix([(cv.matrix(alpha, (1,n)) * A).T - Mt*eda, -(Ct*eda), -(Bt*eda)]) res = cv.solvers.qp(H, f, cv.spmatrix(-A.V, A.I, A.J, (n,len(f))), cv.matrix(0., (n,1)), solver=solver) obj = 
res['primal objective'] + .5 * (eda.T * eda) cv.solvers.options.clear() cv.solvers.options.update(old_options) l = res['x'][-nB:] d = res['x'][n:n+nC] tonic = B*l + C*d q = res['x'][:n] p = A * q phasic = M * q e = eda - phasic - tonic phasic = np.array(phasic)[:,0] # results = (np.array(a).ravel() for a in (r, t, p, l, d, e, obj)) return(tonic, phasic)
A convex optimization approach to electrodermal activity processing (CVXEDA). This function implements the cvxEDA algorithm described in "cvxEDA: a Convex Optimization Approach to Electrodermal Activity Processing" (Greco et al., 2015). Parameters ---------- eda : list or array raw EDA signal array. sampling_rate : int Sampling rate (samples/second). tau0 : float Slow time constant of the Bateman function. tau1 : float Fast time constant of the Bateman function. delta_knot : float Time between knots of the tonic spline function. alpha : float Penalization for the sparse SMNA driver. gamma : float Penalization for the tonic spline coefficients. solver : bool Sparse QP solver to be used, see cvxopt.solvers.qp verbose : bool Print progress? options : dict Solver options, see http://cvxopt.org/userguide/coneprog.html#algorithm-parameters Returns ---------- phasic : numpy.array The phasic component. Notes ---------- *Authors* - Luca Citi (https://github.com/lciti) - Alberto Greco *Dependencies* - cvxopt - numpy *See Also* - cvxEDA: https://github.com/lciti/cvxEDA References ----------- - Greco, A., Valenza, G., & Scilingo, E. P. (2016). Evaluation of CDA and CvxEDA Models. In Advances in Electrodermal Activity Processing with Applications for Mental Health (pp. 35-43). Springer International Publishing. - Greco, A., Valenza, G., Lanata, A., Scilingo, E. P., & Citi, L. (2016). cvxEDA: A convex optimization approach to electrodermal activity processing. IEEE Transactions on Biomedical Engineering, 63(4), 797-804.
Below is the the instruction that describes the task: ### Input: A convex optimization approach to electrodermal activity processing (CVXEDA). This function implements the cvxEDA algorithm described in "cvxEDA: a Convex Optimization Approach to Electrodermal Activity Processing" (Greco et al., 2015). Parameters ---------- eda : list or array raw EDA signal array. sampling_rate : int Sampling rate (samples/second). tau0 : float Slow time constant of the Bateman function. tau1 : float Fast time constant of the Bateman function. delta_knot : float Time between knots of the tonic spline function. alpha : float Penalization for the sparse SMNA driver. gamma : float Penalization for the tonic spline coefficients. solver : bool Sparse QP solver to be used, see cvxopt.solvers.qp verbose : bool Print progress? options : dict Solver options, see http://cvxopt.org/userguide/coneprog.html#algorithm-parameters Returns ---------- phasic : numpy.array The phasic component. Notes ---------- *Authors* - Luca Citi (https://github.com/lciti) - Alberto Greco *Dependencies* - cvxopt - numpy *See Also* - cvxEDA: https://github.com/lciti/cvxEDA References ----------- - Greco, A., Valenza, G., & Scilingo, E. P. (2016). Evaluation of CDA and CvxEDA Models. In Advances in Electrodermal Activity Processing with Applications for Mental Health (pp. 35-43). Springer International Publishing. - Greco, A., Valenza, G., Lanata, A., Scilingo, E. P., & Citi, L. (2016). cvxEDA: A convex optimization approach to electrodermal activity processing. IEEE Transactions on Biomedical Engineering, 63(4), 797-804. ### Response: def cvxEDA(eda, sampling_rate=1000, tau0=2., tau1=0.7, delta_knot=10., alpha=8e-4, gamma=1e-2, solver=None, verbose=False, options={'reltol':1e-9}): """ A convex optimization approach to electrodermal activity processing (CVXEDA). 
This function implements the cvxEDA algorithm described in "cvxEDA: a Convex Optimization Approach to Electrodermal Activity Processing" (Greco et al., 2015). Parameters ---------- eda : list or array raw EDA signal array. sampling_rate : int Sampling rate (samples/second). tau0 : float Slow time constant of the Bateman function. tau1 : float Fast time constant of the Bateman function. delta_knot : float Time between knots of the tonic spline function. alpha : float Penalization for the sparse SMNA driver. gamma : float Penalization for the tonic spline coefficients. solver : bool Sparse QP solver to be used, see cvxopt.solvers.qp verbose : bool Print progress? options : dict Solver options, see http://cvxopt.org/userguide/coneprog.html#algorithm-parameters Returns ---------- phasic : numpy.array The phasic component. Notes ---------- *Authors* - Luca Citi (https://github.com/lciti) - Alberto Greco *Dependencies* - cvxopt - numpy *See Also* - cvxEDA: https://github.com/lciti/cvxEDA References ----------- - Greco, A., Valenza, G., & Scilingo, E. P. (2016). Evaluation of CDA and CvxEDA Models. In Advances in Electrodermal Activity Processing with Applications for Mental Health (pp. 35-43). Springer International Publishing. - Greco, A., Valenza, G., Lanata, A., Scilingo, E. P., & Citi, L. (2016). cvxEDA: A convex optimization approach to electrodermal activity processing. IEEE Transactions on Biomedical Engineering, 63(4), 797-804. """ frequency = 1/sampling_rate # Normalizing signal eda = z_score(eda) eda = np.array(eda)[:,0] n = len(eda) eda = eda.astype('double') eda = cv.matrix(eda) # bateman ARMA model a1 = 1./min(tau1, tau0) # a1 > a0 a0 = 1./max(tau1, tau0) ar = np.array([(a1*frequency + 2.) * (a0*frequency + 2.), 2.*a1*a0*frequency**2 - 8., (a1*frequency - 2.) 
* (a0*frequency - 2.)]) / ((a1 - a0) * frequency**2) ma = np.array([1., 2., 1.]) # matrices for ARMA model i = np.arange(2, n) A = cv.spmatrix(np.tile(ar, (n-2,1)), np.c_[i,i,i], np.c_[i,i-1,i-2], (n,n)) M = cv.spmatrix(np.tile(ma, (n-2,1)), np.c_[i,i,i], np.c_[i,i-1,i-2], (n,n)) # spline delta_knot_s = int(round(delta_knot / frequency)) spl = np.r_[np.arange(1.,delta_knot_s), np.arange(delta_knot_s, 0., -1.)] # order 1 spl = np.convolve(spl, spl, 'full') spl /= max(spl) # matrix of spline regressors i = np.c_[np.arange(-(len(spl)//2), (len(spl)+1)//2)] + np.r_[np.arange(0, n, delta_knot_s)] nB = i.shape[1] j = np.tile(np.arange(nB), (len(spl),1)) p = np.tile(spl, (nB,1)).T valid = (i >= 0) & (i < n) B = cv.spmatrix(p[valid], i[valid], j[valid]) # trend C = cv.matrix(np.c_[np.ones(n), np.arange(1., n+1.)/n]) nC = C.size[1] # Solve the problem: # .5*(M*q + B*l + C*d - eda)^2 + alpha*sum(A,1)*p + .5*gamma*l'*l # s.t. A*q >= 0 if verbose is False: options["show_progress"] = False old_options = cv.solvers.options.copy() cv.solvers.options.clear() cv.solvers.options.update(options) if solver == 'conelp': # Use conelp z = lambda m,n: cv.spmatrix([],[],[],(m,n)) G = cv.sparse([[-A,z(2,n),M,z(nB+2,n)],[z(n+2,nC),C,z(nB+2,nC)], [z(n,1),-1,1,z(n+nB+2,1)],[z(2*n+2,1),-1,1,z(nB,1)], [z(n+2,nB),B,z(2,nB),cv.spmatrix(1.0, range(nB), range(nB))]]) h = cv.matrix([z(n,1),.5,.5,eda,.5,.5,z(nB,1)]) c = cv.matrix([(cv.matrix(alpha, (1,n)) * A).T,z(nC,1),1,gamma,z(nB,1)]) res = cv.solvers.conelp(c, G, h, dims={'l':n,'q':[n+2,nB+2],'s':[]}) obj = res['primal objective'] else: # Use qp Mt, Ct, Bt = M.T, C.T, B.T H = cv.sparse([[Mt*M, Ct*M, Bt*M], [Mt*C, Ct*C, Bt*C], [Mt*B, Ct*B, Bt*B+gamma*cv.spmatrix(1.0, range(nB), range(nB))]]) f = cv.matrix([(cv.matrix(alpha, (1,n)) * A).T - Mt*eda, -(Ct*eda), -(Bt*eda)]) res = cv.solvers.qp(H, f, cv.spmatrix(-A.V, A.I, A.J, (n,len(f))), cv.matrix(0., (n,1)), solver=solver) obj = res['primal objective'] + .5 * (eda.T * eda) cv.solvers.options.clear() 
cv.solvers.options.update(old_options) l = res['x'][-nB:] d = res['x'][n:n+nC] tonic = B*l + C*d q = res['x'][:n] p = A * q phasic = M * q e = eda - phasic - tonic phasic = np.array(phasic)[:,0] # results = (np.array(a).ravel() for a in (r, t, p, l, d, e, obj)) return(tonic, phasic)
def convert_to_consumable_types (self, project, name, prop_set, sources, only_one=False): """ Attempts to convert 'source' to the types that this generator can handle. The intention is to produce the set of targets can should be used when generator is run. only_one: convert 'source' to only one of source types if there's more that one possibility, report an error. Returns a pair: consumed: all targets that can be consumed. """ if __debug__: from .targets import ProjectTarget assert isinstance(name, basestring) or name is None assert isinstance(project, ProjectTarget) assert isinstance(prop_set, property_set.PropertySet) assert is_iterable_typed(sources, virtual_target.VirtualTarget) assert isinstance(only_one, bool) consumed = [] missing_types = [] if len (sources) > 1: # Don't know how to handle several sources yet. Just try # to pass the request to other generator missing_types = self.source_types_ else: (c, m) = self.consume_directly (sources [0]) consumed += c missing_types += m # No need to search for transformation if # some source type has consumed source and # no more source types are needed. if only_one and consumed: missing_types = [] #TODO: we should check that only one source type #if create of 'only_one' is true. # TODO: consider if consuned/bypassed separation should # be done by 'construct_types'. if missing_types: transformed = construct_types (project, name, missing_types, prop_set, sources) # Add targets of right type to 'consumed'. Add others to # 'bypassed'. The 'generators.construct' rule has done # its best to convert everything to the required type. # There's no need to rerun it on targets of different types. # NOTE: ignoring usage requirements for t in transformed[1]: if t.type() in missing_types: consumed.append(t) consumed = unique(consumed) return consumed
Attempts to convert 'source' to the types that this generator can handle. The intention is to produce the set of targets can should be used when generator is run. only_one: convert 'source' to only one of source types if there's more that one possibility, report an error. Returns a pair: consumed: all targets that can be consumed.
Below is the the instruction that describes the task: ### Input: Attempts to convert 'source' to the types that this generator can handle. The intention is to produce the set of targets can should be used when generator is run. only_one: convert 'source' to only one of source types if there's more that one possibility, report an error. Returns a pair: consumed: all targets that can be consumed. ### Response: def convert_to_consumable_types (self, project, name, prop_set, sources, only_one=False): """ Attempts to convert 'source' to the types that this generator can handle. The intention is to produce the set of targets can should be used when generator is run. only_one: convert 'source' to only one of source types if there's more that one possibility, report an error. Returns a pair: consumed: all targets that can be consumed. """ if __debug__: from .targets import ProjectTarget assert isinstance(name, basestring) or name is None assert isinstance(project, ProjectTarget) assert isinstance(prop_set, property_set.PropertySet) assert is_iterable_typed(sources, virtual_target.VirtualTarget) assert isinstance(only_one, bool) consumed = [] missing_types = [] if len (sources) > 1: # Don't know how to handle several sources yet. Just try # to pass the request to other generator missing_types = self.source_types_ else: (c, m) = self.consume_directly (sources [0]) consumed += c missing_types += m # No need to search for transformation if # some source type has consumed source and # no more source types are needed. if only_one and consumed: missing_types = [] #TODO: we should check that only one source type #if create of 'only_one' is true. # TODO: consider if consuned/bypassed separation should # be done by 'construct_types'. if missing_types: transformed = construct_types (project, name, missing_types, prop_set, sources) # Add targets of right type to 'consumed'. Add others to # 'bypassed'. 
The 'generators.construct' rule has done # its best to convert everything to the required type. # There's no need to rerun it on targets of different types. # NOTE: ignoring usage requirements for t in transformed[1]: if t.type() in missing_types: consumed.append(t) consumed = unique(consumed) return consumed
def _set_menu_toggles(self): """Enable menu bar view item checkmarks""" toggles = [ (self.main_toolbar, "main_window_toolbar", _("Main toolbar")), (self.macro_toolbar, "macro_toolbar", _("Macro toolbar")), (self.macro_panel, "macro_panel", _("Macro panel")), (self.attributes_toolbar, "attributes_toolbar", _("Format toolbar")), (self.find_toolbar, "find_toolbar", _("Find toolbar")), (self.widget_toolbar, "widget_toolbar", _("Widget toolbar")), (self.entry_line_panel, "entry_line_panel", _("Entry line")), (self.table_list_panel, "table_list_panel", _("Table list")), ] for toolbar, pane_name, toggle_label in toggles: # Get pane from aui manager pane = self._mgr.GetPane(pane_name) # Get menu item to toggle toggle_id = self.menubar.FindMenuItem(_("View"), toggle_label) if toggle_id != -1: # Check may fail if translation is incomplete toggle_item = self.menubar.FindItemById(toggle_id) # Adjust toggle to pane visibility toggle_item.Check(pane.IsShown())
Enable menu bar view item checkmarks
Below is the the instruction that describes the task: ### Input: Enable menu bar view item checkmarks ### Response: def _set_menu_toggles(self): """Enable menu bar view item checkmarks""" toggles = [ (self.main_toolbar, "main_window_toolbar", _("Main toolbar")), (self.macro_toolbar, "macro_toolbar", _("Macro toolbar")), (self.macro_panel, "macro_panel", _("Macro panel")), (self.attributes_toolbar, "attributes_toolbar", _("Format toolbar")), (self.find_toolbar, "find_toolbar", _("Find toolbar")), (self.widget_toolbar, "widget_toolbar", _("Widget toolbar")), (self.entry_line_panel, "entry_line_panel", _("Entry line")), (self.table_list_panel, "table_list_panel", _("Table list")), ] for toolbar, pane_name, toggle_label in toggles: # Get pane from aui manager pane = self._mgr.GetPane(pane_name) # Get menu item to toggle toggle_id = self.menubar.FindMenuItem(_("View"), toggle_label) if toggle_id != -1: # Check may fail if translation is incomplete toggle_item = self.menubar.FindItemById(toggle_id) # Adjust toggle to pane visibility toggle_item.Check(pane.IsShown())
def properties(self, name=None, pk=None, category=Category.INSTANCE, **kwargs): # type: (Optional[str], Optional[str], Optional[str], **Any) -> List[Property] """Retrieve properties. If additional `keyword=value` arguments are provided, these are added to the request parameters. Please refer to the documentation of the KE-chain API for additional query parameters. :param name: name to limit the search for. :type name: basestring or None :param pk: primary key or id (UUID) of the property to search for :type pk: basestring or None :param category: filter the properties by category. Defaults to INSTANCE. Other options MODEL or None :type category: basestring or None :param kwargs: (optional) additional search keyword arguments :type kwargs: dict or None :return: list of :class:`models.Property` :raises NotFoundError: When no `Property` is found """ request_params = { 'name': name, 'id': pk, 'category': category } if kwargs: request_params.update(**kwargs) response = self._request('GET', self._build_url('properties'), params=request_params) if response.status_code != requests.codes.ok: # pragma: no cover raise NotFoundError("Could not retrieve properties") data = response.json() return [Property.create(p, client=self) for p in data['results']]
Retrieve properties. If additional `keyword=value` arguments are provided, these are added to the request parameters. Please refer to the documentation of the KE-chain API for additional query parameters. :param name: name to limit the search for. :type name: basestring or None :param pk: primary key or id (UUID) of the property to search for :type pk: basestring or None :param category: filter the properties by category. Defaults to INSTANCE. Other options MODEL or None :type category: basestring or None :param kwargs: (optional) additional search keyword arguments :type kwargs: dict or None :return: list of :class:`models.Property` :raises NotFoundError: When no `Property` is found
Below is the the instruction that describes the task: ### Input: Retrieve properties. If additional `keyword=value` arguments are provided, these are added to the request parameters. Please refer to the documentation of the KE-chain API for additional query parameters. :param name: name to limit the search for. :type name: basestring or None :param pk: primary key or id (UUID) of the property to search for :type pk: basestring or None :param category: filter the properties by category. Defaults to INSTANCE. Other options MODEL or None :type category: basestring or None :param kwargs: (optional) additional search keyword arguments :type kwargs: dict or None :return: list of :class:`models.Property` :raises NotFoundError: When no `Property` is found ### Response: def properties(self, name=None, pk=None, category=Category.INSTANCE, **kwargs): # type: (Optional[str], Optional[str], Optional[str], **Any) -> List[Property] """Retrieve properties. If additional `keyword=value` arguments are provided, these are added to the request parameters. Please refer to the documentation of the KE-chain API for additional query parameters. :param name: name to limit the search for. :type name: basestring or None :param pk: primary key or id (UUID) of the property to search for :type pk: basestring or None :param category: filter the properties by category. Defaults to INSTANCE. 
Other options MODEL or None :type category: basestring or None :param kwargs: (optional) additional search keyword arguments :type kwargs: dict or None :return: list of :class:`models.Property` :raises NotFoundError: When no `Property` is found """ request_params = { 'name': name, 'id': pk, 'category': category } if kwargs: request_params.update(**kwargs) response = self._request('GET', self._build_url('properties'), params=request_params) if response.status_code != requests.codes.ok: # pragma: no cover raise NotFoundError("Could not retrieve properties") data = response.json() return [Property.create(p, client=self) for p in data['results']]
def transitively_reduce(self): """ Performs a transitive reduction on the graph. """ removals = set() for from_node, neighbors in self._edges.items(): childpairs = \ [(c1, c2) for c1 in neighbors for c2 in neighbors if c1 != c2] for child1, child2 in childpairs: if self.has_path(child1, child2) \ and not self.has_path(child1, from_node): removals.add((from_node, child2)) for edge in removals: self.remove_edge(edge[0], edge[1])
Performs a transitive reduction on the graph.
Below is the the instruction that describes the task: ### Input: Performs a transitive reduction on the graph. ### Response: def transitively_reduce(self): """ Performs a transitive reduction on the graph. """ removals = set() for from_node, neighbors in self._edges.items(): childpairs = \ [(c1, c2) for c1 in neighbors for c2 in neighbors if c1 != c2] for child1, child2 in childpairs: if self.has_path(child1, child2) \ and not self.has_path(child1, from_node): removals.add((from_node, child2)) for edge in removals: self.remove_edge(edge[0], edge[1])
def get_downsampled_scatter(self, xax="area_um", yax="deform", downsample=0, xscale="linear", yscale="linear"): """Downsampling by removing points at dense locations Parameters ---------- xax: str Identifier for x axis (e.g. "area_um", "aspect", "deform") yax: str Identifier for y axis downsample: int Number of points to draw in the down-sampled plot. This number is either - >=1: exactly downsample to this number by randomly adding or removing points - 0 : do not perform downsampling xscale: str If set to "log", take the logarithm of the x-values before performing downsampling. This is useful when data are are displayed on a log-scale. Defaults to "linear". yscale: str See `xscale`. Returns ------- xnew, xnew: filtered x and y """ if downsample < 0: raise ValueError("`downsample` must be zero or positive!") downsample = int(downsample) xax = xax.lower() yax = yax.lower() # Get data x = self[xax][self.filter.all] y = self[yax][self.filter.all] # Apply scale (no change for linear scale) xs = self._apply_scale(x, xscale, xax) ys = self._apply_scale(y, yscale, yax) _, _, idx = downsampling.downsample_grid(xs, ys, samples=downsample, ret_idx=True) self._plot_filter = idx return x[idx], y[idx]
Downsampling by removing points at dense locations Parameters ---------- xax: str Identifier for x axis (e.g. "area_um", "aspect", "deform") yax: str Identifier for y axis downsample: int Number of points to draw in the down-sampled plot. This number is either - >=1: exactly downsample to this number by randomly adding or removing points - 0 : do not perform downsampling xscale: str If set to "log", take the logarithm of the x-values before performing downsampling. This is useful when data are are displayed on a log-scale. Defaults to "linear". yscale: str See `xscale`. Returns ------- xnew, xnew: filtered x and y
Below is the the instruction that describes the task: ### Input: Downsampling by removing points at dense locations Parameters ---------- xax: str Identifier for x axis (e.g. "area_um", "aspect", "deform") yax: str Identifier for y axis downsample: int Number of points to draw in the down-sampled plot. This number is either - >=1: exactly downsample to this number by randomly adding or removing points - 0 : do not perform downsampling xscale: str If set to "log", take the logarithm of the x-values before performing downsampling. This is useful when data are are displayed on a log-scale. Defaults to "linear". yscale: str See `xscale`. Returns ------- xnew, xnew: filtered x and y ### Response: def get_downsampled_scatter(self, xax="area_um", yax="deform", downsample=0, xscale="linear", yscale="linear"): """Downsampling by removing points at dense locations Parameters ---------- xax: str Identifier for x axis (e.g. "area_um", "aspect", "deform") yax: str Identifier for y axis downsample: int Number of points to draw in the down-sampled plot. This number is either - >=1: exactly downsample to this number by randomly adding or removing points - 0 : do not perform downsampling xscale: str If set to "log", take the logarithm of the x-values before performing downsampling. This is useful when data are are displayed on a log-scale. Defaults to "linear". yscale: str See `xscale`. Returns ------- xnew, xnew: filtered x and y """ if downsample < 0: raise ValueError("`downsample` must be zero or positive!") downsample = int(downsample) xax = xax.lower() yax = yax.lower() # Get data x = self[xax][self.filter.all] y = self[yax][self.filter.all] # Apply scale (no change for linear scale) xs = self._apply_scale(x, xscale, xax) ys = self._apply_scale(y, yscale, yax) _, _, idx = downsampling.downsample_grid(xs, ys, samples=downsample, ret_idx=True) self._plot_filter = idx return x[idx], y[idx]
def get_arguments(self): """ Extracts the specific arguments of this CLI """ ApiCli.get_arguments(self) if self.args.hostGroupId is not None: self.hostGroupId = self.args.hostGroupId if self.args.force is not None: self.force = self.args.force if self.force: self.url_parameters = {"forceRemove": True} self.path = "v1/hostgroup/{0}".format(str(self.hostGroupId))
Extracts the specific arguments of this CLI
Below is the the instruction that describes the task: ### Input: Extracts the specific arguments of this CLI ### Response: def get_arguments(self): """ Extracts the specific arguments of this CLI """ ApiCli.get_arguments(self) if self.args.hostGroupId is not None: self.hostGroupId = self.args.hostGroupId if self.args.force is not None: self.force = self.args.force if self.force: self.url_parameters = {"forceRemove": True} self.path = "v1/hostgroup/{0}".format(str(self.hostGroupId))
def rules(self): """ Returns a sorted list of firewall rules. Returns: list """ list_of_rules = [] for main_row in self.dict_rules: if 'rules' in main_row: for rule_row in main_row['rules']: if 'grants' in rule_row: for grant_row in rule_row['grants']: if 'group_id' in grant_row: # Set a var to not go over 80 chars group_id = grant_row['group_id'] # Some VPC grants don't specify a name if 'name' in grant_row: row_name = grant_row['name'] else: row_name = None fr = FirewallRule( main_row['id'], main_row['name'], main_row['description'], rules_direction=rule_row['direction'], rules_ip_protocol=rule_row['ip_protocol'], rules_from_port=rule_row['from_port'], rules_to_port=rule_row['to_port'], rules_grants_group_id=group_id, rules_grants_name=row_name, rules_description=grant_row['description']) list_of_rules.append(fr) elif 'cidr_ip' in grant_row: fr = FirewallRule( main_row['id'], main_row['name'], main_row['description'], rules_direction=rule_row['direction'], rules_ip_protocol=rule_row['ip_protocol'], rules_from_port=rule_row['from_port'], rules_to_port=rule_row['to_port'], rules_grants_cidr_ip=grant_row['cidr_ip'], rules_description=grant_row['description']) list_of_rules.append(fr) else: raise ValueError("Unsupported grant:", grant_row) else: fr = FirewallRule( main_row['id'], main_row['name'], main_row['description'], rules_direction=rule_row['direction'], rules_ip_protocol=rule_row['ip_protocol'], rules_from_port=rule_row['from_port'], rules_to_port=rule_row['to_port']) list_of_rules.append(fr) else: fr = FirewallRule(main_row['id'], main_row['name'], main_row['description']) list_of_rules.append(fr) # Sort the data in order to get a consistent output sorted_list = sorted(list_of_rules, key=lambda fr: (str(fr.id), str(fr.name), str(fr.description), str(fr.rules_direction), str(fr.rules_ip_protocol), str(fr.rules_from_port), str(fr.rules_to_port), str(fr.rules_grants_group_id), str(fr.rules_grants_name), str(fr.rules_grants_cidr_ip))) return sorted_list
Returns a sorted list of firewall rules. Returns: list
Below is the the instruction that describes the task: ### Input: Returns a sorted list of firewall rules. Returns: list ### Response: def rules(self): """ Returns a sorted list of firewall rules. Returns: list """ list_of_rules = [] for main_row in self.dict_rules: if 'rules' in main_row: for rule_row in main_row['rules']: if 'grants' in rule_row: for grant_row in rule_row['grants']: if 'group_id' in grant_row: # Set a var to not go over 80 chars group_id = grant_row['group_id'] # Some VPC grants don't specify a name if 'name' in grant_row: row_name = grant_row['name'] else: row_name = None fr = FirewallRule( main_row['id'], main_row['name'], main_row['description'], rules_direction=rule_row['direction'], rules_ip_protocol=rule_row['ip_protocol'], rules_from_port=rule_row['from_port'], rules_to_port=rule_row['to_port'], rules_grants_group_id=group_id, rules_grants_name=row_name, rules_description=grant_row['description']) list_of_rules.append(fr) elif 'cidr_ip' in grant_row: fr = FirewallRule( main_row['id'], main_row['name'], main_row['description'], rules_direction=rule_row['direction'], rules_ip_protocol=rule_row['ip_protocol'], rules_from_port=rule_row['from_port'], rules_to_port=rule_row['to_port'], rules_grants_cidr_ip=grant_row['cidr_ip'], rules_description=grant_row['description']) list_of_rules.append(fr) else: raise ValueError("Unsupported grant:", grant_row) else: fr = FirewallRule( main_row['id'], main_row['name'], main_row['description'], rules_direction=rule_row['direction'], rules_ip_protocol=rule_row['ip_protocol'], rules_from_port=rule_row['from_port'], rules_to_port=rule_row['to_port']) list_of_rules.append(fr) else: fr = FirewallRule(main_row['id'], main_row['name'], main_row['description']) list_of_rules.append(fr) # Sort the data in order to get a consistent output sorted_list = sorted(list_of_rules, key=lambda fr: (str(fr.id), str(fr.name), str(fr.description), str(fr.rules_direction), str(fr.rules_ip_protocol), str(fr.rules_from_port), 
str(fr.rules_to_port), str(fr.rules_grants_group_id), str(fr.rules_grants_name), str(fr.rules_grants_cidr_ip))) return sorted_list
def get_sampler(sample_mode: str): """Return a sampler constructor >>> get_sampler('all') <class 'cr8.metrics.All'> >>> get_sampler('reservoir') <class 'cr8.metrics.UniformReservoir'> >>> get_sampler('reservoir:100') functools.partial(<class 'cr8.metrics.UniformReservoir'>, size=100) """ if sample_mode == 'all': return All mode = sample_mode.split(':') if mode[0] == 'reservoir': if len(mode) == 2: return partial(UniformReservoir, size=int(mode[1])) else: return UniformReservoir raise TypeError(f'Invalid sample_mode: {sample_mode}')
Return a sampler constructor >>> get_sampler('all') <class 'cr8.metrics.All'> >>> get_sampler('reservoir') <class 'cr8.metrics.UniformReservoir'> >>> get_sampler('reservoir:100') functools.partial(<class 'cr8.metrics.UniformReservoir'>, size=100)
Below is the the instruction that describes the task: ### Input: Return a sampler constructor >>> get_sampler('all') <class 'cr8.metrics.All'> >>> get_sampler('reservoir') <class 'cr8.metrics.UniformReservoir'> >>> get_sampler('reservoir:100') functools.partial(<class 'cr8.metrics.UniformReservoir'>, size=100) ### Response: def get_sampler(sample_mode: str): """Return a sampler constructor >>> get_sampler('all') <class 'cr8.metrics.All'> >>> get_sampler('reservoir') <class 'cr8.metrics.UniformReservoir'> >>> get_sampler('reservoir:100') functools.partial(<class 'cr8.metrics.UniformReservoir'>, size=100) """ if sample_mode == 'all': return All mode = sample_mode.split(':') if mode[0] == 'reservoir': if len(mode) == 2: return partial(UniformReservoir, size=int(mode[1])) else: return UniformReservoir raise TypeError(f'Invalid sample_mode: {sample_mode}')
def __detect_branching_haghverdi16(self, Dseg, tips): """Detect branching on given segment. Compute point that maximizes kendall tau correlation of the sequences of distances to the second and the third tip, respectively, when 'moving away' from the first tip: tips[0]. 'Moving away' means moving in the direction of increasing distance from the first tip. Parameters ---------- Dseg : np.ndarray Dchosen distance matrix restricted to segment. tips : np.ndarray The three tip points. They form a 'triangle' that contains the data. Returns ------- ssegs : list of np.ndarray List of segments obtained from "splitting away the first tip cell". """ # sort distance from first tip point # then the sequence of distances Dseg[tips[0]][idcs] increases idcs = np.argsort(Dseg[tips[0]]) # consider now the sequence of distances from the other # two tip points, which only increase when being close to `tips[0]` # where they become correlated # at the point where this happens, we define a branching point if True: imax = self.kendall_tau_split(Dseg[tips[1]][idcs], Dseg[tips[2]][idcs]) if False: # if we were in euclidian space, the following should work # as well, but here, it doesn't because the scales in Dseg are # highly different, one would need to write the following equation # in terms of an ordering, such as exploited by the kendall # correlation method above imax = np.argmin(Dseg[tips[0]][idcs] + Dseg[tips[1]][idcs] + Dseg[tips[2]][idcs]) # init list to store new segments ssegs = [] # first new segment: all points until, but excluding the branching point # increasing the following slightly from imax is a more conservative choice # as the criterion based on normalized distances, which follows below, # is less stable if imax > 0.95 * len(idcs) and self.allow_kendall_tau_shift: # if "everything" is correlated (very large value of imax), a more # conservative choice amounts to reducing this logg.warn('shifting branching point away from maximal kendall-tau correlation (suppress this 
with `allow_kendall_tau_shift=False`)') ibranch = int(0.95 * imax) else: # otherwise, a more conservative choice is the following ibranch = imax + 1 return idcs[:ibranch]
Detect branching on given segment. Compute point that maximizes kendall tau correlation of the sequences of distances to the second and the third tip, respectively, when 'moving away' from the first tip: tips[0]. 'Moving away' means moving in the direction of increasing distance from the first tip. Parameters ---------- Dseg : np.ndarray Dchosen distance matrix restricted to segment. tips : np.ndarray The three tip points. They form a 'triangle' that contains the data. Returns ------- ssegs : list of np.ndarray List of segments obtained from "splitting away the first tip cell".
Below is the the instruction that describes the task: ### Input: Detect branching on given segment. Compute point that maximizes kendall tau correlation of the sequences of distances to the second and the third tip, respectively, when 'moving away' from the first tip: tips[0]. 'Moving away' means moving in the direction of increasing distance from the first tip. Parameters ---------- Dseg : np.ndarray Dchosen distance matrix restricted to segment. tips : np.ndarray The three tip points. They form a 'triangle' that contains the data. Returns ------- ssegs : list of np.ndarray List of segments obtained from "splitting away the first tip cell". ### Response: def __detect_branching_haghverdi16(self, Dseg, tips): """Detect branching on given segment. Compute point that maximizes kendall tau correlation of the sequences of distances to the second and the third tip, respectively, when 'moving away' from the first tip: tips[0]. 'Moving away' means moving in the direction of increasing distance from the first tip. Parameters ---------- Dseg : np.ndarray Dchosen distance matrix restricted to segment. tips : np.ndarray The three tip points. They form a 'triangle' that contains the data. Returns ------- ssegs : list of np.ndarray List of segments obtained from "splitting away the first tip cell". 
""" # sort distance from first tip point # then the sequence of distances Dseg[tips[0]][idcs] increases idcs = np.argsort(Dseg[tips[0]]) # consider now the sequence of distances from the other # two tip points, which only increase when being close to `tips[0]` # where they become correlated # at the point where this happens, we define a branching point if True: imax = self.kendall_tau_split(Dseg[tips[1]][idcs], Dseg[tips[2]][idcs]) if False: # if we were in euclidian space, the following should work # as well, but here, it doesn't because the scales in Dseg are # highly different, one would need to write the following equation # in terms of an ordering, such as exploited by the kendall # correlation method above imax = np.argmin(Dseg[tips[0]][idcs] + Dseg[tips[1]][idcs] + Dseg[tips[2]][idcs]) # init list to store new segments ssegs = [] # first new segment: all points until, but excluding the branching point # increasing the following slightly from imax is a more conservative choice # as the criterion based on normalized distances, which follows below, # is less stable if imax > 0.95 * len(idcs) and self.allow_kendall_tau_shift: # if "everything" is correlated (very large value of imax), a more # conservative choice amounts to reducing this logg.warn('shifting branching point away from maximal kendall-tau correlation (suppress this with `allow_kendall_tau_shift=False`)') ibranch = int(0.95 * imax) else: # otherwise, a more conservative choice is the following ibranch = imax + 1 return idcs[:ibranch]
def cric__random_forest(): """ Random Forest """ model = sklearn.ensemble.RandomForestClassifier(100, random_state=0) # we want to explain the raw probability outputs of the trees model.predict = lambda X: model.predict_proba(X)[:,1] return model
Random Forest
Below is the the instruction that describes the task: ### Input: Random Forest ### Response: def cric__random_forest(): """ Random Forest """ model = sklearn.ensemble.RandomForestClassifier(100, random_state=0) # we want to explain the raw probability outputs of the trees model.predict = lambda X: model.predict_proba(X)[:,1] return model
def make_catalog_comp_info_dict(self, catalog_sources): """ Make the information about the catalog components Parameters ---------- catalog_sources : dict Dictionary with catalog source defintions Returns ------- catalog_ret_dict : dict Dictionary mapping catalog_name to `model_component.CatalogInfo` split_ret_dict : dict Dictionary mapping sourcekey to `model_component.ModelComponentInfo` """ catalog_ret_dict = {} split_ret_dict = {} for key, value in catalog_sources.items(): if value is None: continue if value['model_type'] != 'catalog': continue versions = value['versions'] for version in versions: ver_key = "%s_%s" % (key, version) source_dict = self.read_catalog_info_yaml(ver_key) try: full_cat_info = catalog_ret_dict[key] except KeyError: full_cat_info = self.build_catalog_info(source_dict) catalog_ret_dict[key] = full_cat_info try: all_sources = [x.strip() for x in full_cat_info.catalog_table[ 'Source_Name'].astype(str).tolist()] except KeyError: print(full_cat_info.catalog_table.colnames) used_sources = [] rules_dict = source_dict['rules_dict'] split_dict = {} for rule_key, rule_val in rules_dict.items(): # full_key =\ # self._name_factory.merged_sourcekey(catalog=ver_key, # rulekey=rule_key) sources = select_sources( full_cat_info.catalog_table, rule_val['cuts']) used_sources.extend(sources) split_dict[rule_key] = self.make_catalog_comp_info( full_cat_info, version, rule_key, rule_val, sources) # Now deal with the remainder for source in used_sources: try: all_sources.remove(source) except ValueError: continue rule_val = dict(cuts=[], merge=source_dict['remainder'].get('merge', False)) split_dict['remain'] = self.make_catalog_comp_info( full_cat_info, version, 'remain', rule_val, all_sources) # Merge in the info for this version of splits split_ret_dict[ver_key] = split_dict self._catalog_comp_info_dicts.update(catalog_ret_dict) self._split_comp_info_dicts.update(split_ret_dict) return (catalog_ret_dict, split_ret_dict)
Make the information about the catalog components Parameters ---------- catalog_sources : dict Dictionary with catalog source defintions Returns ------- catalog_ret_dict : dict Dictionary mapping catalog_name to `model_component.CatalogInfo` split_ret_dict : dict Dictionary mapping sourcekey to `model_component.ModelComponentInfo`
Below is the the instruction that describes the task: ### Input: Make the information about the catalog components Parameters ---------- catalog_sources : dict Dictionary with catalog source defintions Returns ------- catalog_ret_dict : dict Dictionary mapping catalog_name to `model_component.CatalogInfo` split_ret_dict : dict Dictionary mapping sourcekey to `model_component.ModelComponentInfo` ### Response: def make_catalog_comp_info_dict(self, catalog_sources): """ Make the information about the catalog components Parameters ---------- catalog_sources : dict Dictionary with catalog source defintions Returns ------- catalog_ret_dict : dict Dictionary mapping catalog_name to `model_component.CatalogInfo` split_ret_dict : dict Dictionary mapping sourcekey to `model_component.ModelComponentInfo` """ catalog_ret_dict = {} split_ret_dict = {} for key, value in catalog_sources.items(): if value is None: continue if value['model_type'] != 'catalog': continue versions = value['versions'] for version in versions: ver_key = "%s_%s" % (key, version) source_dict = self.read_catalog_info_yaml(ver_key) try: full_cat_info = catalog_ret_dict[key] except KeyError: full_cat_info = self.build_catalog_info(source_dict) catalog_ret_dict[key] = full_cat_info try: all_sources = [x.strip() for x in full_cat_info.catalog_table[ 'Source_Name'].astype(str).tolist()] except KeyError: print(full_cat_info.catalog_table.colnames) used_sources = [] rules_dict = source_dict['rules_dict'] split_dict = {} for rule_key, rule_val in rules_dict.items(): # full_key =\ # self._name_factory.merged_sourcekey(catalog=ver_key, # rulekey=rule_key) sources = select_sources( full_cat_info.catalog_table, rule_val['cuts']) used_sources.extend(sources) split_dict[rule_key] = self.make_catalog_comp_info( full_cat_info, version, rule_key, rule_val, sources) # Now deal with the remainder for source in used_sources: try: all_sources.remove(source) except ValueError: continue rule_val = dict(cuts=[], 
merge=source_dict['remainder'].get('merge', False)) split_dict['remain'] = self.make_catalog_comp_info( full_cat_info, version, 'remain', rule_val, all_sources) # Merge in the info for this version of splits split_ret_dict[ver_key] = split_dict self._catalog_comp_info_dicts.update(catalog_ret_dict) self._split_comp_info_dicts.update(split_ret_dict) return (catalog_ret_dict, split_ret_dict)
def male_breeding_location_type(self): """This attribute defines whether a breeding male's current location is the same as the breeding cage. This attribute is used to color breeding table entries such that male mice which are currently in a different cage can quickly be identified.""" if int(self.Male.all()[0].Cage) == int(self.Cage): type = "resident breeder" else: type = "non-resident breeder" return type
This attribute defines whether a breeding male's current location is the same as the breeding cage. This attribute is used to color breeding table entries such that male mice which are currently in a different cage can quickly be identified.
Below is the the instruction that describes the task: ### Input: This attribute defines whether a breeding male's current location is the same as the breeding cage. This attribute is used to color breeding table entries such that male mice which are currently in a different cage can quickly be identified. ### Response: def male_breeding_location_type(self): """This attribute defines whether a breeding male's current location is the same as the breeding cage. This attribute is used to color breeding table entries such that male mice which are currently in a different cage can quickly be identified.""" if int(self.Male.all()[0].Cage) == int(self.Cage): type = "resident breeder" else: type = "non-resident breeder" return type
def unique_everseen(iterable): """List unique elements, preserving order. Remember all elements ever seen.""" # unique_everseen('AAAABBBCCDAABBB') --> A B C D seen = set() seen_add = seen.add for element in six.moves.filterfalse(seen.__contains__, iterable): seen_add(element) yield element
List unique elements, preserving order. Remember all elements ever seen.
Below is the the instruction that describes the task: ### Input: List unique elements, preserving order. Remember all elements ever seen. ### Response: def unique_everseen(iterable): """List unique elements, preserving order. Remember all elements ever seen.""" # unique_everseen('AAAABBBCCDAABBB') --> A B C D seen = set() seen_add = seen.add for element in six.moves.filterfalse(seen.__contains__, iterable): seen_add(element) yield element
def uniform(name, shape, scale=0.05, dtype=tf.sg_floatx, summary=True, regularizer=None, trainable=True): r"""Creates a tensor variable of which initial values are random numbers based on uniform distribution. Note that the default value of `scale` (=0.05) is different from the min/max values (=0.0, 1.0) of tf.random_uniform_initializer. Args: name: The name of the new variable. shape: A tuple/list of integers or an integer. If shape is an integer, it's converted to a list. scale: A Python scalar. All initial values should be in range `[-scale, scale)`. Default is .05. dtype: The data type. Only floating point types are supported. Default is float32. summary: If True, add this constant to tensor board summary. regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection tf.GraphKeys.REGULARIZATION_LOSSES and can be used for regularization trainable: If True, add this constant to trainable collection. Default is True. Returns: A `Variable`. """ shape = shape if isinstance(shape, (tuple, list)) else [shape] x = tf.get_variable(name, shape, dtype=dtype, initializer=tf.random_uniform_initializer(minval=-scale, maxval=scale), regularizer=regularizer, trainable=trainable) # add summary if summary: tf.sg_summary_param(x) return x
r"""Creates a tensor variable of which initial values are random numbers based on uniform distribution. Note that the default value of `scale` (=0.05) is different from the min/max values (=0.0, 1.0) of tf.random_uniform_initializer. Args: name: The name of the new variable. shape: A tuple/list of integers or an integer. If shape is an integer, it's converted to a list. scale: A Python scalar. All initial values should be in range `[-scale, scale)`. Default is .05. dtype: The data type. Only floating point types are supported. Default is float32. summary: If True, add this constant to tensor board summary. regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection tf.GraphKeys.REGULARIZATION_LOSSES and can be used for regularization trainable: If True, add this constant to trainable collection. Default is True. Returns: A `Variable`.
Below is the the instruction that describes the task: ### Input: r"""Creates a tensor variable of which initial values are random numbers based on uniform distribution. Note that the default value of `scale` (=0.05) is different from the min/max values (=0.0, 1.0) of tf.random_uniform_initializer. Args: name: The name of the new variable. shape: A tuple/list of integers or an integer. If shape is an integer, it's converted to a list. scale: A Python scalar. All initial values should be in range `[-scale, scale)`. Default is .05. dtype: The data type. Only floating point types are supported. Default is float32. summary: If True, add this constant to tensor board summary. regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection tf.GraphKeys.REGULARIZATION_LOSSES and can be used for regularization trainable: If True, add this constant to trainable collection. Default is True. Returns: A `Variable`. ### Response: def uniform(name, shape, scale=0.05, dtype=tf.sg_floatx, summary=True, regularizer=None, trainable=True): r"""Creates a tensor variable of which initial values are random numbers based on uniform distribution. Note that the default value of `scale` (=0.05) is different from the min/max values (=0.0, 1.0) of tf.random_uniform_initializer. Args: name: The name of the new variable. shape: A tuple/list of integers or an integer. If shape is an integer, it's converted to a list. scale: A Python scalar. All initial values should be in range `[-scale, scale)`. Default is .05. dtype: The data type. Only floating point types are supported. Default is float32. summary: If True, add this constant to tensor board summary. regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection tf.GraphKeys.REGULARIZATION_LOSSES and can be used for regularization trainable: If True, add this constant to trainable collection. 
Default is True. Returns: A `Variable`. """ shape = shape if isinstance(shape, (tuple, list)) else [shape] x = tf.get_variable(name, shape, dtype=dtype, initializer=tf.random_uniform_initializer(minval=-scale, maxval=scale), regularizer=regularizer, trainable=trainable) # add summary if summary: tf.sg_summary_param(x) return x
def fast_gradient_method(model_fn, x, eps, ord, clip_min=None, clip_max=None, y=None, targeted=False, sanity_checks=False): """ PyTorch implementation of the Fast Gradient Method. :param model_fn: a callable that takes an input tensor and returns the model logits. :param x: input tensor. :param eps: epsilon (input variation parameter); see https://arxiv.org/abs/1412.6572. :param ord: Order of the norm (mimics NumPy). Possible values: np.inf, 1 or 2. :param clip_min: (optional) float. Minimum float value for adversarial example components. :param clip_max: (optional) float. Maximum float value for adversarial example components. :param y: (optional) Tensor with true labels. If targeted is true, then provide the target label. Otherwise, only provide this parameter if you'd like to use true labels when crafting adversarial samples. Otherwise, model predictions are used as labels to avoid the "label leaking" effect (explained in this paper: https://arxiv.org/abs/1611.01236). Default is None. :param targeted: (optional) bool. Is the attack targeted or untargeted? Untargeted, the default, will try to make the label incorrect. Targeted will instead try to move in the direction of being more like y. 
:param sanity_checks: bool, if True, include asserts (Turn them off to use less runtime / memory or for unit tests that intentionally pass strange input) :return: a tensor for the adversarial example """ if ord not in [np.inf, 1, 2]: raise ValueError("Norm order must be either np.inf, 1, or 2.") asserts = [] # If a data range was specified, check that the input was in that range if clip_min is not None: assert_ge = torch.all(torch.ge(x, torch.tensor(clip_min, device=x.device, dtype=x.dtype))) asserts.append(assert_ge) if clip_max is not None: assert_le = torch.all(torch.le(x, torch.tensor(clip_max, device=x.device, dtype=x.dtype))) asserts.append(assert_le) # x needs to be a leaf variable, of floating point type and have requires_grad being True for # its grad to be computed and stored properly in a backward call x = x.clone().detach().to(torch.float).requires_grad_(True) if y is None: # Using model predictions as ground truth to avoid label leaking _, y = torch.max(model_fn(x), 1) # Compute loss loss_fn = torch.nn.CrossEntropyLoss() loss = loss_fn(model_fn(x), y) # If attack is targeted, minimize loss of target label rather than maximize loss of correct label if targeted: loss = -loss # Define gradient of loss wrt input loss.backward() optimal_perturbation = optimize_linear(x.grad, eps, ord) # Add perturbation to original example to obtain adversarial example adv_x = x + optimal_perturbation # If clipping is needed, reset all values outside of [clip_min, clip_max] if (clip_min is not None) or (clip_max is not None): # We don't currently support one-sided clipping assert clip_min is not None and clip_max is not None adv_x = torch.clamp(adv_x, clip_min, clip_max) if sanity_checks: assert np.all(asserts) return adv_x
PyTorch implementation of the Fast Gradient Method. :param model_fn: a callable that takes an input tensor and returns the model logits. :param x: input tensor. :param eps: epsilon (input variation parameter); see https://arxiv.org/abs/1412.6572. :param ord: Order of the norm (mimics NumPy). Possible values: np.inf, 1 or 2. :param clip_min: (optional) float. Minimum float value for adversarial example components. :param clip_max: (optional) float. Maximum float value for adversarial example components. :param y: (optional) Tensor with true labels. If targeted is true, then provide the target label. Otherwise, only provide this parameter if you'd like to use true labels when crafting adversarial samples. Otherwise, model predictions are used as labels to avoid the "label leaking" effect (explained in this paper: https://arxiv.org/abs/1611.01236). Default is None. :param targeted: (optional) bool. Is the attack targeted or untargeted? Untargeted, the default, will try to make the label incorrect. Targeted will instead try to move in the direction of being more like y. :param sanity_checks: bool, if True, include asserts (Turn them off to use less runtime / memory or for unit tests that intentionally pass strange input) :return: a tensor for the adversarial example
Below is the the instruction that describes the task: ### Input: PyTorch implementation of the Fast Gradient Method. :param model_fn: a callable that takes an input tensor and returns the model logits. :param x: input tensor. :param eps: epsilon (input variation parameter); see https://arxiv.org/abs/1412.6572. :param ord: Order of the norm (mimics NumPy). Possible values: np.inf, 1 or 2. :param clip_min: (optional) float. Minimum float value for adversarial example components. :param clip_max: (optional) float. Maximum float value for adversarial example components. :param y: (optional) Tensor with true labels. If targeted is true, then provide the target label. Otherwise, only provide this parameter if you'd like to use true labels when crafting adversarial samples. Otherwise, model predictions are used as labels to avoid the "label leaking" effect (explained in this paper: https://arxiv.org/abs/1611.01236). Default is None. :param targeted: (optional) bool. Is the attack targeted or untargeted? Untargeted, the default, will try to make the label incorrect. Targeted will instead try to move in the direction of being more like y. :param sanity_checks: bool, if True, include asserts (Turn them off to use less runtime / memory or for unit tests that intentionally pass strange input) :return: a tensor for the adversarial example ### Response: def fast_gradient_method(model_fn, x, eps, ord, clip_min=None, clip_max=None, y=None, targeted=False, sanity_checks=False): """ PyTorch implementation of the Fast Gradient Method. :param model_fn: a callable that takes an input tensor and returns the model logits. :param x: input tensor. :param eps: epsilon (input variation parameter); see https://arxiv.org/abs/1412.6572. :param ord: Order of the norm (mimics NumPy). Possible values: np.inf, 1 or 2. :param clip_min: (optional) float. Minimum float value for adversarial example components. :param clip_max: (optional) float. Maximum float value for adversarial example components. 
:param y: (optional) Tensor with true labels. If targeted is true, then provide the target label. Otherwise, only provide this parameter if you'd like to use true labels when crafting adversarial samples. Otherwise, model predictions are used as labels to avoid the "label leaking" effect (explained in this paper: https://arxiv.org/abs/1611.01236). Default is None. :param targeted: (optional) bool. Is the attack targeted or untargeted? Untargeted, the default, will try to make the label incorrect. Targeted will instead try to move in the direction of being more like y. :param sanity_checks: bool, if True, include asserts (Turn them off to use less runtime / memory or for unit tests that intentionally pass strange input) :return: a tensor for the adversarial example """ if ord not in [np.inf, 1, 2]: raise ValueError("Norm order must be either np.inf, 1, or 2.") asserts = [] # If a data range was specified, check that the input was in that range if clip_min is not None: assert_ge = torch.all(torch.ge(x, torch.tensor(clip_min, device=x.device, dtype=x.dtype))) asserts.append(assert_ge) if clip_max is not None: assert_le = torch.all(torch.le(x, torch.tensor(clip_max, device=x.device, dtype=x.dtype))) asserts.append(assert_le) # x needs to be a leaf variable, of floating point type and have requires_grad being True for # its grad to be computed and stored properly in a backward call x = x.clone().detach().to(torch.float).requires_grad_(True) if y is None: # Using model predictions as ground truth to avoid label leaking _, y = torch.max(model_fn(x), 1) # Compute loss loss_fn = torch.nn.CrossEntropyLoss() loss = loss_fn(model_fn(x), y) # If attack is targeted, minimize loss of target label rather than maximize loss of correct label if targeted: loss = -loss # Define gradient of loss wrt input loss.backward() optimal_perturbation = optimize_linear(x.grad, eps, ord) # Add perturbation to original example to obtain adversarial example adv_x = x + optimal_perturbation # If 
clipping is needed, reset all values outside of [clip_min, clip_max] if (clip_min is not None) or (clip_max is not None): # We don't currently support one-sided clipping assert clip_min is not None and clip_max is not None adv_x = torch.clamp(adv_x, clip_min, clip_max) if sanity_checks: assert np.all(asserts) return adv_x
def describe_list_indices(full_list): """ Parameters ---------- full_list : list The list of items to order and Returns ------- unique_elements : list A list of the unique elements of the list, in the order in which they first appear. element_indices : dict A dictionary of lists for each unique element, giving all the indices in which they appear in the original list. """ unique_elements = [] element_indices = {} for i in range(len(full_list)): item = full_list[i] # new item if item not in unique_elements: unique_elements.append(item) element_indices[item] = [i] # previously seen item else: element_indices[item].append(i) return unique_elements, element_indices
Parameters ---------- full_list : list The list of items to order and Returns ------- unique_elements : list A list of the unique elements of the list, in the order in which they first appear. element_indices : dict A dictionary of lists for each unique element, giving all the indices in which they appear in the original list.
Below is the the instruction that describes the task: ### Input: Parameters ---------- full_list : list The list of items to order and Returns ------- unique_elements : list A list of the unique elements of the list, in the order in which they first appear. element_indices : dict A dictionary of lists for each unique element, giving all the indices in which they appear in the original list. ### Response: def describe_list_indices(full_list): """ Parameters ---------- full_list : list The list of items to order and Returns ------- unique_elements : list A list of the unique elements of the list, in the order in which they first appear. element_indices : dict A dictionary of lists for each unique element, giving all the indices in which they appear in the original list. """ unique_elements = [] element_indices = {} for i in range(len(full_list)): item = full_list[i] # new item if item not in unique_elements: unique_elements.append(item) element_indices[item] = [i] # previously seen item else: element_indices[item].append(i) return unique_elements, element_indices
def sectionWalker(section,callback,*args,walkTrace=tuple(),**kwargs): """ callback needs to be a function that handles different Section elements appropriately walkTrace needs to be a tuple, indicate the route to the section, e.g. (1,2,0) """ callback(section,*args,walkTrace=walkTrace,case='sectionmain',**kwargs) c = count(1) for f in section.figs.items(): callback(section,*args,walkTrace=walkTrace,case='figure',element=f,**kwargs) c = count(1) for t in section.tabs.items(): callback(section,*args,walkTrace=walkTrace,case='table',element=t,**kwargs) c = count(1) for s in section.subs: Section.sectionWalker(s,callback,*args,walkTrace=walkTrace+(next(c),),**kwargs)
callback needs to be a function that handles different Section elements appropriately walkTrace needs to be a tuple, indicate the route to the section, e.g. (1,2,0)
Below is the the instruction that describes the task: ### Input: callback needs to be a function that handles different Section elements appropriately walkTrace needs to be a tuple, indicate the route to the section, e.g. (1,2,0) ### Response: def sectionWalker(section,callback,*args,walkTrace=tuple(),**kwargs): """ callback needs to be a function that handles different Section elements appropriately walkTrace needs to be a tuple, indicate the route to the section, e.g. (1,2,0) """ callback(section,*args,walkTrace=walkTrace,case='sectionmain',**kwargs) c = count(1) for f in section.figs.items(): callback(section,*args,walkTrace=walkTrace,case='figure',element=f,**kwargs) c = count(1) for t in section.tabs.items(): callback(section,*args,walkTrace=walkTrace,case='table',element=t,**kwargs) c = count(1) for s in section.subs: Section.sectionWalker(s,callback,*args,walkTrace=walkTrace+(next(c),),**kwargs)
def from_json(cls, json): """Inherit doc.""" key_range_iter_cls = _KEY_RANGE_ITERATORS[json["key_range_iter_cls"]] obj = cls(key_ranges.KeyRangesFactory.from_json(json["key_ranges"]), model.QuerySpec.from_json(json["query_spec"]), key_range_iter_cls) current_iter = None if json["current_iter"]: current_iter = key_range_iter_cls.from_json(json["current_iter"]) # pylint: disable=protected-access obj._current_iter = current_iter return obj
Inherit doc.
Below is the the instruction that describes the task: ### Input: Inherit doc. ### Response: def from_json(cls, json): """Inherit doc.""" key_range_iter_cls = _KEY_RANGE_ITERATORS[json["key_range_iter_cls"]] obj = cls(key_ranges.KeyRangesFactory.from_json(json["key_ranges"]), model.QuerySpec.from_json(json["query_spec"]), key_range_iter_cls) current_iter = None if json["current_iter"]: current_iter = key_range_iter_cls.from_json(json["current_iter"]) # pylint: disable=protected-access obj._current_iter = current_iter return obj
def start(self): """Start discovering and listing to connections.""" if self._state == CLOSED: raise NSQException('producer already closed') if self.is_running: self.logger.warn('producer already started') return self.logger.debug('starting producer...') self._state = RUNNING for address in self.nsqd_tcp_addresses: address, port = address.split(':') self.connect_to_nsqd(address, int(port))
Start discovering and listing to connections.
Below is the the instruction that describes the task: ### Input: Start discovering and listing to connections. ### Response: def start(self): """Start discovering and listing to connections.""" if self._state == CLOSED: raise NSQException('producer already closed') if self.is_running: self.logger.warn('producer already started') return self.logger.debug('starting producer...') self._state = RUNNING for address in self.nsqd_tcp_addresses: address, port = address.split(':') self.connect_to_nsqd(address, int(port))
def chshell(name, shell): ''' Change the default shell of the user CLI Example: .. code-block:: bash salt '*' user.chshell foo /bin/zsh ''' pre_info = info(name) if not pre_info: raise CommandExecutionError('User \'{0}\' does not exist'.format(name)) if shell == pre_info['shell']: return True _dscl( ['/Users/{0}'.format(name), 'UserShell', pre_info['shell'], shell], ctype='change' ) # dscl buffers changes, sleep 1 second before checking if new value # matches desired value time.sleep(1) return info(name).get('shell') == shell
Change the default shell of the user CLI Example: .. code-block:: bash salt '*' user.chshell foo /bin/zsh
Below is the the instruction that describes the task: ### Input: Change the default shell of the user CLI Example: .. code-block:: bash salt '*' user.chshell foo /bin/zsh ### Response: def chshell(name, shell): ''' Change the default shell of the user CLI Example: .. code-block:: bash salt '*' user.chshell foo /bin/zsh ''' pre_info = info(name) if not pre_info: raise CommandExecutionError('User \'{0}\' does not exist'.format(name)) if shell == pre_info['shell']: return True _dscl( ['/Users/{0}'.format(name), 'UserShell', pre_info['shell'], shell], ctype='change' ) # dscl buffers changes, sleep 1 second before checking if new value # matches desired value time.sleep(1) return info(name).get('shell') == shell
def particle_number_concentration(ConcMat, material): """Return the number of particles in suspension. :param ConcMat: Concentration of the material :type ConcMat: float :param material: The material in solution :type material: floc_model.Material """ return ConcMat.to(material.Density.units) / ((material.Density * np.pi * material.Diameter**3) / 6)
Return the number of particles in suspension. :param ConcMat: Concentration of the material :type ConcMat: float :param material: The material in solution :type material: floc_model.Material
Below is the the instruction that describes the task: ### Input: Return the number of particles in suspension. :param ConcMat: Concentration of the material :type ConcMat: float :param material: The material in solution :type material: floc_model.Material ### Response: def particle_number_concentration(ConcMat, material): """Return the number of particles in suspension. :param ConcMat: Concentration of the material :type ConcMat: float :param material: The material in solution :type material: floc_model.Material """ return ConcMat.to(material.Density.units) / ((material.Density * np.pi * material.Diameter**3) / 6)
def poplist(self, key, default=_absent): """ If <key> is in the dictionary, pop it and return its list of values. If <key> is not in the dictionary, return <default>. KeyError is raised if <default> is not provided and <key> is not in the dictionary. Example: omd = omdict([(1,1), (1,11), (1,111), (2,2), (3,3)]) omd.poplist(1) == [1, 11, 111] omd.allitems() == [(2,2), (3,3)] omd.poplist(2) == [2] omd.allitems() == [(3,3)] Raises: KeyError if <key> isn't in the dictionary and <default> isn't provided. Returns: List of <key>'s values. """ if key in self: values = self.getlist(key) del self._map[key] for node, nodekey, nodevalue in self._items: if nodekey == key: self._items.removenode(node) return values elif key not in self._map and default is not _absent: return default raise KeyError(key)
If <key> is in the dictionary, pop it and return its list of values. If <key> is not in the dictionary, return <default>. KeyError is raised if <default> is not provided and <key> is not in the dictionary. Example: omd = omdict([(1,1), (1,11), (1,111), (2,2), (3,3)]) omd.poplist(1) == [1, 11, 111] omd.allitems() == [(2,2), (3,3)] omd.poplist(2) == [2] omd.allitems() == [(3,3)] Raises: KeyError if <key> isn't in the dictionary and <default> isn't provided. Returns: List of <key>'s values.
Below is the the instruction that describes the task: ### Input: If <key> is in the dictionary, pop it and return its list of values. If <key> is not in the dictionary, return <default>. KeyError is raised if <default> is not provided and <key> is not in the dictionary. Example: omd = omdict([(1,1), (1,11), (1,111), (2,2), (3,3)]) omd.poplist(1) == [1, 11, 111] omd.allitems() == [(2,2), (3,3)] omd.poplist(2) == [2] omd.allitems() == [(3,3)] Raises: KeyError if <key> isn't in the dictionary and <default> isn't provided. Returns: List of <key>'s values. ### Response: def poplist(self, key, default=_absent): """ If <key> is in the dictionary, pop it and return its list of values. If <key> is not in the dictionary, return <default>. KeyError is raised if <default> is not provided and <key> is not in the dictionary. Example: omd = omdict([(1,1), (1,11), (1,111), (2,2), (3,3)]) omd.poplist(1) == [1, 11, 111] omd.allitems() == [(2,2), (3,3)] omd.poplist(2) == [2] omd.allitems() == [(3,3)] Raises: KeyError if <key> isn't in the dictionary and <default> isn't provided. Returns: List of <key>'s values. """ if key in self: values = self.getlist(key) del self._map[key] for node, nodekey, nodevalue in self._items: if nodekey == key: self._items.removenode(node) return values elif key not in self._map and default is not _absent: return default raise KeyError(key)
def train(self, traindata: np.ndarray) -> None: """ Trains on dataset """ self.clf.fit(traindata[:, 1:5], traindata[:, 5])
Trains on dataset
Below is the the instruction that describes the task: ### Input: Trains on dataset ### Response: def train(self, traindata: np.ndarray) -> None: """ Trains on dataset """ self.clf.fit(traindata[:, 1:5], traindata[:, 5])
def _http_request(self, method, url_path, headers=None, query_params=None, body_params=None, files=None, **kwargs): """ Method to do http requests. :param method: :param url_path: :param headers: :param body_params: :param query_params: :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload. ``file-tuple`` can be a 1-tuple ``('filepath')``, 2-tuple ``('filepath', 'content_type')`` or a 3-tuple ``('filepath', 'content_type', custom_headers)``, where ``'content-type'`` is a string defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers to add for the file. :param update_content_type: (bool) Update headers before performig the request, adding the Content-Type value according to the rendered body. By default: True. :return: """ host = kwargs.get('host', self.host) proxy = kwargs.get('proxy', self.proxy) renderer = kwargs.get('renderer', MultiPartRenderer() if files else self.default_renderer) prefix_url_path = kwargs.get('prefix_url_path', self.prefix_url_path) authentication_instances = kwargs.get('authentication_instances', self.authentication_instances) url_path_format = kwargs.get('url_path_format', self.url_path_format) update_content_type = kwargs.get('update_content_type', True) redirect = kwargs.get('redirect', False) if headers is None: headers = self.default_headers() context = HttpRequestContext( host=host, proxy=proxy, method=method, prefix_url_path=prefix_url_path, url_path=url_path, url_path_params=self.url_path_params, url_path_format=url_path_format, headers=headers, query_params=query_params, body_params=body_params, files=files, renderer=renderer, response_class=self.response_class, authentication_instances=authentication_instances, update_content_type=update_content_type, redirect=redirect ) res = self.http_request_from_context(context) self.cookie.update(res.cookie) return res
Method to do http requests. :param method: :param url_path: :param headers: :param body_params: :param query_params: :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload. ``file-tuple`` can be a 1-tuple ``('filepath')``, 2-tuple ``('filepath', 'content_type')`` or a 3-tuple ``('filepath', 'content_type', custom_headers)``, where ``'content-type'`` is a string defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers to add for the file. :param update_content_type: (bool) Update headers before performig the request, adding the Content-Type value according to the rendered body. By default: True. :return:
Below is the the instruction that describes the task: ### Input: Method to do http requests. :param method: :param url_path: :param headers: :param body_params: :param query_params: :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload. ``file-tuple`` can be a 1-tuple ``('filepath')``, 2-tuple ``('filepath', 'content_type')`` or a 3-tuple ``('filepath', 'content_type', custom_headers)``, where ``'content-type'`` is a string defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers to add for the file. :param update_content_type: (bool) Update headers before performig the request, adding the Content-Type value according to the rendered body. By default: True. :return: ### Response: def _http_request(self, method, url_path, headers=None, query_params=None, body_params=None, files=None, **kwargs): """ Method to do http requests. :param method: :param url_path: :param headers: :param body_params: :param query_params: :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload. ``file-tuple`` can be a 1-tuple ``('filepath')``, 2-tuple ``('filepath', 'content_type')`` or a 3-tuple ``('filepath', 'content_type', custom_headers)``, where ``'content-type'`` is a string defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers to add for the file. :param update_content_type: (bool) Update headers before performig the request, adding the Content-Type value according to the rendered body. By default: True. 
:return: """ host = kwargs.get('host', self.host) proxy = kwargs.get('proxy', self.proxy) renderer = kwargs.get('renderer', MultiPartRenderer() if files else self.default_renderer) prefix_url_path = kwargs.get('prefix_url_path', self.prefix_url_path) authentication_instances = kwargs.get('authentication_instances', self.authentication_instances) url_path_format = kwargs.get('url_path_format', self.url_path_format) update_content_type = kwargs.get('update_content_type', True) redirect = kwargs.get('redirect', False) if headers is None: headers = self.default_headers() context = HttpRequestContext( host=host, proxy=proxy, method=method, prefix_url_path=prefix_url_path, url_path=url_path, url_path_params=self.url_path_params, url_path_format=url_path_format, headers=headers, query_params=query_params, body_params=body_params, files=files, renderer=renderer, response_class=self.response_class, authentication_instances=authentication_instances, update_content_type=update_content_type, redirect=redirect ) res = self.http_request_from_context(context) self.cookie.update(res.cookie) return res
def _escape_parameters(self, char): """ Parse parameters in an escape sequence. Parameters are a list of numbers in ascii (e.g. '12', '4', '42', etc) separated by a semicolon (e.g. "12;4;42"). See the [vt102 user guide](http://vt100.net/docs/vt102-ug/) for more details on the formatting of escape parameters. """ if char == ";": self.params.append(int(self.current_param)) self.current_param = "" elif char == "?": self.state = "mode" elif not char.isdigit(): if len(self.current_param) > 0: self.params.append(int(self.current_param)) # If we're in parameter parsing mode, but we see a non-numeric # value, it must be the end of the control sequence. self._end_escape_sequence(char) else: self.current_param += char
Parse parameters in an escape sequence. Parameters are a list of numbers in ascii (e.g. '12', '4', '42', etc) separated by a semicolon (e.g. "12;4;42"). See the [vt102 user guide](http://vt100.net/docs/vt102-ug/) for more details on the formatting of escape parameters.
Below is the the instruction that describes the task: ### Input: Parse parameters in an escape sequence. Parameters are a list of numbers in ascii (e.g. '12', '4', '42', etc) separated by a semicolon (e.g. "12;4;42"). See the [vt102 user guide](http://vt100.net/docs/vt102-ug/) for more details on the formatting of escape parameters. ### Response: def _escape_parameters(self, char): """ Parse parameters in an escape sequence. Parameters are a list of numbers in ascii (e.g. '12', '4', '42', etc) separated by a semicolon (e.g. "12;4;42"). See the [vt102 user guide](http://vt100.net/docs/vt102-ug/) for more details on the formatting of escape parameters. """ if char == ";": self.params.append(int(self.current_param)) self.current_param = "" elif char == "?": self.state = "mode" elif not char.isdigit(): if len(self.current_param) > 0: self.params.append(int(self.current_param)) # If we're in parameter parsing mode, but we see a non-numeric # value, it must be the end of the control sequence. self._end_escape_sequence(char) else: self.current_param += char
def bind(self, model, *, skip_table_setup=False): """Create backing tables for a model and its non-abstract subclasses. :param model: Base model to bind. Can be abstract. :param skip_table_setup: Don't create or verify the table in DynamoDB. Default is False. :raises bloop.exceptions.InvalidModel: if ``model`` is not a subclass of :class:`~bloop.models.BaseModel`. """ # Make sure we're looking at models validate_is_model(model) concrete = set(filter(lambda m: not m.Meta.abstract, walk_subclasses(model))) if not model.Meta.abstract: concrete.add(model) logger.debug("binding non-abstract models {}".format( sorted(c.__name__ for c in concrete) )) # create_table doesn't block until ACTIVE or validate. # It also doesn't throw when the table already exists, making it safe # to call multiple times for the same unbound model. if skip_table_setup: logger.info("skip_table_setup is True; not trying to create tables or validate models during bind") else: self.session.clear_cache() is_creating = {} for model in concrete: table_name = self._compute_table_name(model) before_create_table.send(self, engine=self, model=model) if not skip_table_setup: if table_name in is_creating: continue creating = self.session.create_table(table_name, model) is_creating[table_name] = creating for model in concrete: if not skip_table_setup: table_name = self._compute_table_name(model) if is_creating[table_name]: # polls until table is active self.session.describe_table(table_name) if model.Meta.ttl: self.session.enable_ttl(table_name, model) if model.Meta.backups and model.Meta.backups["enabled"]: self.session.enable_backups(table_name, model) self.session.validate_table(table_name, model) model_validated.send(self, engine=self, model=model) model_bound.send(self, engine=self, model=model) logger.info("successfully bound {} models to the engine".format(len(concrete)))
Create backing tables for a model and its non-abstract subclasses. :param model: Base model to bind. Can be abstract. :param skip_table_setup: Don't create or verify the table in DynamoDB. Default is False. :raises bloop.exceptions.InvalidModel: if ``model`` is not a subclass of :class:`~bloop.models.BaseModel`.
Below is the the instruction that describes the task: ### Input: Create backing tables for a model and its non-abstract subclasses. :param model: Base model to bind. Can be abstract. :param skip_table_setup: Don't create or verify the table in DynamoDB. Default is False. :raises bloop.exceptions.InvalidModel: if ``model`` is not a subclass of :class:`~bloop.models.BaseModel`. ### Response: def bind(self, model, *, skip_table_setup=False): """Create backing tables for a model and its non-abstract subclasses. :param model: Base model to bind. Can be abstract. :param skip_table_setup: Don't create or verify the table in DynamoDB. Default is False. :raises bloop.exceptions.InvalidModel: if ``model`` is not a subclass of :class:`~bloop.models.BaseModel`. """ # Make sure we're looking at models validate_is_model(model) concrete = set(filter(lambda m: not m.Meta.abstract, walk_subclasses(model))) if not model.Meta.abstract: concrete.add(model) logger.debug("binding non-abstract models {}".format( sorted(c.__name__ for c in concrete) )) # create_table doesn't block until ACTIVE or validate. # It also doesn't throw when the table already exists, making it safe # to call multiple times for the same unbound model. 
if skip_table_setup: logger.info("skip_table_setup is True; not trying to create tables or validate models during bind") else: self.session.clear_cache() is_creating = {} for model in concrete: table_name = self._compute_table_name(model) before_create_table.send(self, engine=self, model=model) if not skip_table_setup: if table_name in is_creating: continue creating = self.session.create_table(table_name, model) is_creating[table_name] = creating for model in concrete: if not skip_table_setup: table_name = self._compute_table_name(model) if is_creating[table_name]: # polls until table is active self.session.describe_table(table_name) if model.Meta.ttl: self.session.enable_ttl(table_name, model) if model.Meta.backups and model.Meta.backups["enabled"]: self.session.enable_backups(table_name, model) self.session.validate_table(table_name, model) model_validated.send(self, engine=self, model=model) model_bound.send(self, engine=self, model=model) logger.info("successfully bound {} models to the engine".format(len(concrete)))
def convert_adatpers(adapter_fasta): """Generates an adapter file for FastQC from a fasta file. The provided adapters file is assumed to be a simple fasta file with the adapter's name as header and the corresponding sequence:: >TruSeq_Universal_Adapter AATGATACGGCGACCACCGAGATCTACACTCTTTCCCTACACGACGCTCTTCCGATCT >TruSeq_Adapter_Index 1 GATCGGAAGAGCACACGTCTGAACTCCAGTCACATCACGATCTCGTATGCCGTCTTCTGCTTG Parameters ---------- adapter_fasta : str Path to Fasta file with adapter sequences. Returns ------- adapter_out : str or None The path to the reformatted adapter file. Returns ``None`` if the adapters file does not exist or the path is incorrect. """ adapter_out = "fastqc_adapters.tab" logger.debug("Setting output adapters file to: {}".format(adapter_out)) try: with open(adapter_fasta) as fh, \ open(adapter_out, "w") as adap_fh: for line in fh: if line.startswith(">"): head = line[1:].strip() # Get the next line with the sequence string sequence = next(fh).strip() adap_fh.write("{}\\t{}\\n".format(head, sequence)) logger.info("Converted adapters file") return adapter_out # If an invalid adapters file is provided, return None. except FileNotFoundError: logger.warning("Could not find the provided adapters file: {}".format( adapter_fasta)) return
Generates an adapter file for FastQC from a fasta file. The provided adapters file is assumed to be a simple fasta file with the adapter's name as header and the corresponding sequence:: >TruSeq_Universal_Adapter AATGATACGGCGACCACCGAGATCTACACTCTTTCCCTACACGACGCTCTTCCGATCT >TruSeq_Adapter_Index 1 GATCGGAAGAGCACACGTCTGAACTCCAGTCACATCACGATCTCGTATGCCGTCTTCTGCTTG Parameters ---------- adapter_fasta : str Path to Fasta file with adapter sequences. Returns ------- adapter_out : str or None The path to the reformatted adapter file. Returns ``None`` if the adapters file does not exist or the path is incorrect.
Below is the the instruction that describes the task: ### Input: Generates an adapter file for FastQC from a fasta file. The provided adapters file is assumed to be a simple fasta file with the adapter's name as header and the corresponding sequence:: >TruSeq_Universal_Adapter AATGATACGGCGACCACCGAGATCTACACTCTTTCCCTACACGACGCTCTTCCGATCT >TruSeq_Adapter_Index 1 GATCGGAAGAGCACACGTCTGAACTCCAGTCACATCACGATCTCGTATGCCGTCTTCTGCTTG Parameters ---------- adapter_fasta : str Path to Fasta file with adapter sequences. Returns ------- adapter_out : str or None The path to the reformatted adapter file. Returns ``None`` if the adapters file does not exist or the path is incorrect. ### Response: def convert_adatpers(adapter_fasta): """Generates an adapter file for FastQC from a fasta file. The provided adapters file is assumed to be a simple fasta file with the adapter's name as header and the corresponding sequence:: >TruSeq_Universal_Adapter AATGATACGGCGACCACCGAGATCTACACTCTTTCCCTACACGACGCTCTTCCGATCT >TruSeq_Adapter_Index 1 GATCGGAAGAGCACACGTCTGAACTCCAGTCACATCACGATCTCGTATGCCGTCTTCTGCTTG Parameters ---------- adapter_fasta : str Path to Fasta file with adapter sequences. Returns ------- adapter_out : str or None The path to the reformatted adapter file. Returns ``None`` if the adapters file does not exist or the path is incorrect. """ adapter_out = "fastqc_adapters.tab" logger.debug("Setting output adapters file to: {}".format(adapter_out)) try: with open(adapter_fasta) as fh, \ open(adapter_out, "w") as adap_fh: for line in fh: if line.startswith(">"): head = line[1:].strip() # Get the next line with the sequence string sequence = next(fh).strip() adap_fh.write("{}\\t{}\\n".format(head, sequence)) logger.info("Converted adapters file") return adapter_out # If an invalid adapters file is provided, return None. except FileNotFoundError: logger.warning("Could not find the provided adapters file: {}".format( adapter_fasta)) return
def onlyOnce(fn): 'Set up FN to only run once within an interpreter instance' def wrap(*args, **kwargs): if hasattr(fn, 'called'): return fn.called = 1 return fn(*args, **kwargs) util.mergeFunctionMetadata(fn, wrap) return wrap
Set up FN to only run once within an interpreter instance
Below is the the instruction that describes the task: ### Input: Set up FN to only run once within an interpreter instance ### Response: def onlyOnce(fn): 'Set up FN to only run once within an interpreter instance' def wrap(*args, **kwargs): if hasattr(fn, 'called'): return fn.called = 1 return fn(*args, **kwargs) util.mergeFunctionMetadata(fn, wrap) return wrap
def insert(self, idx, w, comment=''): '''insert a waypoint''' if idx >= self.count(): self.add(w, comment) return if idx < 0: return w = copy.copy(w) if comment: w.comment = comment w.seq = idx self.wpoints.insert(idx, w) self.last_change = time.time() self.reindex()
insert a waypoint
Below is the the instruction that describes the task: ### Input: insert a waypoint ### Response: def insert(self, idx, w, comment=''): '''insert a waypoint''' if idx >= self.count(): self.add(w, comment) return if idx < 0: return w = copy.copy(w) if comment: w.comment = comment w.seq = idx self.wpoints.insert(idx, w) self.last_change = time.time() self.reindex()
def keplerian_sheared_field_locations(ax, kbos, date, ras, decs, names, elongation=False, plot=False): """ Shift fields from the discovery set to the requested date by the average motion of L7 kbos in the discovery field. :param ras: :param decs: :param plot: :param ax: :param kbos: precomputed at the discovery date for that block. e.g. Oct new moon for 13B :param date: :param names: :param elongation: """ seps = {'dra': 0., 'ddec': 0.} for kbo in kbos: ra = kbo.ra dec = kbo.dec kbo.compute(date) seps['dra'] += kbo.ra - ra seps['ddec'] += kbo.dec - dec seps['dra'] /= float(len(kbos)) seps['ddec'] /= float(len(kbos)) print date, seps, len(kbos) for idx in range(len(ras)): name = names[idx] ra = ras[idx] + seps['dra'] dec = decs[idx] + seps['ddec'] if plot: ax.add_artist(Rectangle(xy=(math.degrees(ra) - camera_dimen / 2.0, math.degrees(dec) - camera_dimen / 2.0), height=camera_dimen, width=camera_dimen, edgecolor='b', facecolor='b', lw=0.5, fill=True, alpha=0.2)) if elongation: # For each field centre, plot the elongation onto the field at that date. elong = field_elongation(ephem.degrees(ra), ephem.degrees(dec), date) ax.annotate(name, (math.degrees(ra) + camera_dimen / 2., math.degrees(dec)), size=3) ax.annotate("%0.1f" % elong, (math.degrees(ra) + camera_dimen / 4., math.degrees(dec) - camera_dimen / 4.), size=5) return ax
Shift fields from the discovery set to the requested date by the average motion of L7 kbos in the discovery field. :param ras: :param decs: :param plot: :param ax: :param kbos: precomputed at the discovery date for that block. e.g. Oct new moon for 13B :param date: :param names: :param elongation:
Below is the the instruction that describes the task: ### Input: Shift fields from the discovery set to the requested date by the average motion of L7 kbos in the discovery field. :param ras: :param decs: :param plot: :param ax: :param kbos: precomputed at the discovery date for that block. e.g. Oct new moon for 13B :param date: :param names: :param elongation: ### Response: def keplerian_sheared_field_locations(ax, kbos, date, ras, decs, names, elongation=False, plot=False): """ Shift fields from the discovery set to the requested date by the average motion of L7 kbos in the discovery field. :param ras: :param decs: :param plot: :param ax: :param kbos: precomputed at the discovery date for that block. e.g. Oct new moon for 13B :param date: :param names: :param elongation: """ seps = {'dra': 0., 'ddec': 0.} for kbo in kbos: ra = kbo.ra dec = kbo.dec kbo.compute(date) seps['dra'] += kbo.ra - ra seps['ddec'] += kbo.dec - dec seps['dra'] /= float(len(kbos)) seps['ddec'] /= float(len(kbos)) print date, seps, len(kbos) for idx in range(len(ras)): name = names[idx] ra = ras[idx] + seps['dra'] dec = decs[idx] + seps['ddec'] if plot: ax.add_artist(Rectangle(xy=(math.degrees(ra) - camera_dimen / 2.0, math.degrees(dec) - camera_dimen / 2.0), height=camera_dimen, width=camera_dimen, edgecolor='b', facecolor='b', lw=0.5, fill=True, alpha=0.2)) if elongation: # For each field centre, plot the elongation onto the field at that date. elong = field_elongation(ephem.degrees(ra), ephem.degrees(dec), date) ax.annotate(name, (math.degrees(ra) + camera_dimen / 2., math.degrees(dec)), size=3) ax.annotate("%0.1f" % elong, (math.degrees(ra) + camera_dimen / 4., math.degrees(dec) - camera_dimen / 4.), size=5) return ax
def iri_to_str(self, iri_: ShExDocParser.IriContext) -> str: """ iri: IRIREF | prefixedName """ if iri_.IRIREF(): return self.iriref_to_str(iri_.IRIREF()) else: return self.prefixedname_to_str(iri_.prefixedName())
iri: IRIREF | prefixedName
Below is the the instruction that describes the task: ### Input: iri: IRIREF | prefixedName ### Response: def iri_to_str(self, iri_: ShExDocParser.IriContext) -> str: """ iri: IRIREF | prefixedName """ if iri_.IRIREF(): return self.iriref_to_str(iri_.IRIREF()) else: return self.prefixedname_to_str(iri_.prefixedName())
def partition_found(partition, description): """ returns True, if the partition (--partition) is in the description we received from the host """ # if we want to have a linux partition (/) we use the full path (startswith "/" would result in / /var /dev etc). # if we start with something else, we use the startswith function if "/" in partition: use_fullcompare = True else: use_fullcompare = False if use_fullcompare and (partition == description): return True elif not use_fullcompare and description.startswith(partition): return True else: return False
returns True, if the partition (--partition) is in the description we received from the host
Below is the the instruction that describes the task: ### Input: returns True, if the partition (--partition) is in the description we received from the host ### Response: def partition_found(partition, description): """ returns True, if the partition (--partition) is in the description we received from the host """ # if we want to have a linux partition (/) we use the full path (startswith "/" would result in / /var /dev etc). # if we start with something else, we use the startswith function if "/" in partition: use_fullcompare = True else: use_fullcompare = False if use_fullcompare and (partition == description): return True elif not use_fullcompare and description.startswith(partition): return True else: return False
def post_attachment(self, bugid, attachment): '''http://bugzilla.readthedocs.org/en/latest/api/core/v1/attachment.html#create-attachment''' assert type(attachment) is DotDict assert 'data' in attachment assert 'file_name' in attachment assert 'summary' in attachment if (not 'content_type' in attachment): attachment.content_type = 'text/plain' attachment.ids = bugid attachment.data = base64.standard_b64encode(bytearray(attachment.data, 'ascii')).decode('ascii') return self._post('bug/{bugid}/attachment'.format(bugid=bugid), json.dumps(attachment))
http://bugzilla.readthedocs.org/en/latest/api/core/v1/attachment.html#create-attachment
Below is the the instruction that describes the task: ### Input: http://bugzilla.readthedocs.org/en/latest/api/core/v1/attachment.html#create-attachment ### Response: def post_attachment(self, bugid, attachment): '''http://bugzilla.readthedocs.org/en/latest/api/core/v1/attachment.html#create-attachment''' assert type(attachment) is DotDict assert 'data' in attachment assert 'file_name' in attachment assert 'summary' in attachment if (not 'content_type' in attachment): attachment.content_type = 'text/plain' attachment.ids = bugid attachment.data = base64.standard_b64encode(bytearray(attachment.data, 'ascii')).decode('ascii') return self._post('bug/{bugid}/attachment'.format(bugid=bugid), json.dumps(attachment))
def save_filelist(self, opFile, opFormat, delim=',', qu='"'): """ uses a List of files and collects meta data on them and saves to an text file as a list or with metadata depending on opFormat. """ op_folder = os.path.dirname(opFile) if op_folder is not None: # short filename passed if not os.path.exists(op_folder): os.makedirs(op_folder) with open(opFile,'w') as fout: fout.write("fullFilename" + delim) for colHeading in opFormat: fout.write(colHeading + delim) fout.write('\n') for f in self.filelist: line = qu + f + qu + delim try: for fld in opFormat: if fld == "name": line = line + qu + os.path.basename(f) + qu + delim if fld == "date": line = line + qu + self.GetDateAsString(f) + qu + delim if fld == "size": line = line + qu + str(os.path.getsize(f)) + qu + delim if fld == "path": line = line + qu + os.path.dirname(f) + qu + delim except IOError: line += '\n' # no metadata try: fout.write (str(line.encode('ascii', 'ignore').decode('utf-8'))) fout.write ('\n') except IOError: #print("Cant print line - cls_filelist line 304") pass
uses a List of files and collects meta data on them and saves to an text file as a list or with metadata depending on opFormat.
Below is the the instruction that describes the task: ### Input: uses a List of files and collects meta data on them and saves to an text file as a list or with metadata depending on opFormat. ### Response: def save_filelist(self, opFile, opFormat, delim=',', qu='"'): """ uses a List of files and collects meta data on them and saves to an text file as a list or with metadata depending on opFormat. """ op_folder = os.path.dirname(opFile) if op_folder is not None: # short filename passed if not os.path.exists(op_folder): os.makedirs(op_folder) with open(opFile,'w') as fout: fout.write("fullFilename" + delim) for colHeading in opFormat: fout.write(colHeading + delim) fout.write('\n') for f in self.filelist: line = qu + f + qu + delim try: for fld in opFormat: if fld == "name": line = line + qu + os.path.basename(f) + qu + delim if fld == "date": line = line + qu + self.GetDateAsString(f) + qu + delim if fld == "size": line = line + qu + str(os.path.getsize(f)) + qu + delim if fld == "path": line = line + qu + os.path.dirname(f) + qu + delim except IOError: line += '\n' # no metadata try: fout.write (str(line.encode('ascii', 'ignore').decode('utf-8'))) fout.write ('\n') except IOError: #print("Cant print line - cls_filelist line 304") pass
def update(self, outcome, expparams, check_for_resample=True): """ Given an experiment and an outcome of that experiment, updates the posterior distribution to reflect knowledge of that experiment. After updating, resamples the posterior distribution if necessary. :param int outcome: Label for the outcome that was observed, as defined by the :class:`~qinfer.abstract_model.Model` instance under study. :param expparams: Parameters describing the experiment that was performed. :type expparams: :class:`~numpy.ndarray` of dtype given by the :attr:`~qinfer.abstract_model.Model.expparams_dtype` property of the underlying model :param bool check_for_resample: If :obj:`True`, after performing the update, the effective sample size condition will be checked and a resampling step may be performed. """ # First, record the outcome. # TODO: record the experiment as well. self._data_record.append(outcome) self._just_resampled = False # Perform the update. weights, norm = self.hypothetical_update(outcome, expparams, return_normalization=True) # Check for negative weights before applying the update. if not np.all(weights >= 0): warnings.warn("Negative weights occured in particle approximation. Smallest weight observed == {}. Clipping weights.".format(np.min(weights)), ApproximationWarning) np.clip(weights, 0, 1, out=weights) # Next, check if we have caused the weights to go to zero, as can # happen if the likelihood is identically zero for all particles, # or if the previous clip step choked on a NaN. if np.sum(weights) <= self._zero_weight_thresh: if self._zero_weight_policy == 'ignore': pass elif self._zero_weight_policy == 'skip': return elif self._zero_weight_policy == 'warn': warnings.warn("All particle weights are zero. This will very likely fail quite badly.", ApproximationWarning) elif self._zero_weight_policy == 'error': raise RuntimeError("All particle weights are zero.") elif self._zero_weight_policy == 'reset': warnings.warn("All particle weights are zero. 
Resetting from initial prior.", ApproximationWarning) self.reset() else: raise ValueError("Invalid zero-weight policy {} encountered.".format(self._zero_weight_policy)) # Since hypothetical_update returns an array indexed by # [outcome, experiment, particle], we need to strip off those two # indices first. self.particle_weights[:] = weights[0,0,:] # Record the normalization self._normalization_record.append(norm[0][0]) # Update the particle locations according to the model's timestep. self.particle_locations = self.model.update_timestep( self.particle_locations, expparams )[:, :, 0] # Check if we need to update our min_n_ess attribute. if self.n_ess <= self._min_n_ess: self._min_n_ess = self.n_ess # Resample if needed. if check_for_resample: self._maybe_resample()
Given an experiment and an outcome of that experiment, updates the posterior distribution to reflect knowledge of that experiment. After updating, resamples the posterior distribution if necessary. :param int outcome: Label for the outcome that was observed, as defined by the :class:`~qinfer.abstract_model.Model` instance under study. :param expparams: Parameters describing the experiment that was performed. :type expparams: :class:`~numpy.ndarray` of dtype given by the :attr:`~qinfer.abstract_model.Model.expparams_dtype` property of the underlying model :param bool check_for_resample: If :obj:`True`, after performing the update, the effective sample size condition will be checked and a resampling step may be performed.
Below is the the instruction that describes the task: ### Input: Given an experiment and an outcome of that experiment, updates the posterior distribution to reflect knowledge of that experiment. After updating, resamples the posterior distribution if necessary. :param int outcome: Label for the outcome that was observed, as defined by the :class:`~qinfer.abstract_model.Model` instance under study. :param expparams: Parameters describing the experiment that was performed. :type expparams: :class:`~numpy.ndarray` of dtype given by the :attr:`~qinfer.abstract_model.Model.expparams_dtype` property of the underlying model :param bool check_for_resample: If :obj:`True`, after performing the update, the effective sample size condition will be checked and a resampling step may be performed. ### Response: def update(self, outcome, expparams, check_for_resample=True): """ Given an experiment and an outcome of that experiment, updates the posterior distribution to reflect knowledge of that experiment. After updating, resamples the posterior distribution if necessary. :param int outcome: Label for the outcome that was observed, as defined by the :class:`~qinfer.abstract_model.Model` instance under study. :param expparams: Parameters describing the experiment that was performed. :type expparams: :class:`~numpy.ndarray` of dtype given by the :attr:`~qinfer.abstract_model.Model.expparams_dtype` property of the underlying model :param bool check_for_resample: If :obj:`True`, after performing the update, the effective sample size condition will be checked and a resampling step may be performed. """ # First, record the outcome. # TODO: record the experiment as well. self._data_record.append(outcome) self._just_resampled = False # Perform the update. weights, norm = self.hypothetical_update(outcome, expparams, return_normalization=True) # Check for negative weights before applying the update. 
if not np.all(weights >= 0): warnings.warn("Negative weights occured in particle approximation. Smallest weight observed == {}. Clipping weights.".format(np.min(weights)), ApproximationWarning) np.clip(weights, 0, 1, out=weights) # Next, check if we have caused the weights to go to zero, as can # happen if the likelihood is identically zero for all particles, # or if the previous clip step choked on a NaN. if np.sum(weights) <= self._zero_weight_thresh: if self._zero_weight_policy == 'ignore': pass elif self._zero_weight_policy == 'skip': return elif self._zero_weight_policy == 'warn': warnings.warn("All particle weights are zero. This will very likely fail quite badly.", ApproximationWarning) elif self._zero_weight_policy == 'error': raise RuntimeError("All particle weights are zero.") elif self._zero_weight_policy == 'reset': warnings.warn("All particle weights are zero. Resetting from initial prior.", ApproximationWarning) self.reset() else: raise ValueError("Invalid zero-weight policy {} encountered.".format(self._zero_weight_policy)) # Since hypothetical_update returns an array indexed by # [outcome, experiment, particle], we need to strip off those two # indices first. self.particle_weights[:] = weights[0,0,:] # Record the normalization self._normalization_record.append(norm[0][0]) # Update the particle locations according to the model's timestep. self.particle_locations = self.model.update_timestep( self.particle_locations, expparams )[:, :, 0] # Check if we need to update our min_n_ess attribute. if self.n_ess <= self._min_n_ess: self._min_n_ess = self.n_ess # Resample if needed. if check_for_resample: self._maybe_resample()
def get_cached_translated_field(instance, field_name, language_code=None, use_fallback=False): """ Fetch an cached field. """ if language_code is None: language_code = instance.get_current_language() # In django-parler 1.1 the order of the arguments was fixed, It used to be language_code, field_name # This serves as detection against backwards incompatibility issues. if len(field_name) <= 5 and len(language_code) > 5: raise RuntimeError("Unexpected language code, did you swap field_name, language_code?") translated_model = instance._parler_meta.get_model_by_field(field_name) values = _get_cached_values(instance, translated_model, language_code, use_fallback) if not values: return None # Allow older cached versions where the field didn't exist yet. return values.get(field_name, None)
Fetch an cached field.
Below is the the instruction that describes the task: ### Input: Fetch an cached field. ### Response: def get_cached_translated_field(instance, field_name, language_code=None, use_fallback=False): """ Fetch an cached field. """ if language_code is None: language_code = instance.get_current_language() # In django-parler 1.1 the order of the arguments was fixed, It used to be language_code, field_name # This serves as detection against backwards incompatibility issues. if len(field_name) <= 5 and len(language_code) > 5: raise RuntimeError("Unexpected language code, did you swap field_name, language_code?") translated_model = instance._parler_meta.get_model_by_field(field_name) values = _get_cached_values(instance, translated_model, language_code, use_fallback) if not values: return None # Allow older cached versions where the field didn't exist yet. return values.get(field_name, None)
def create_masked_lm_predictions(tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng): """Creates the predictions for the masked LM objective.""" cand_indexes = [] for (i, token) in enumerate(tokens): if token in ['[CLS]', '[SEP]']: continue cand_indexes.append(i) rng.shuffle(cand_indexes) output_tokens = list(tokens) num_to_predict = min(max_predictions_per_seq, max(1, int(round(len(tokens) * masked_lm_prob)))) masked_lms = [] covered_indexes = set() for index in cand_indexes: if len(masked_lms) >= num_to_predict: break if index in covered_indexes: continue covered_indexes.add(index) masked_token = None # 80% of the time, replace with [MASK] if rng.random() < 0.8: masked_token = '[MASK]' else: # 10% of the time, keep original if rng.random() < 0.5: masked_token = tokens[index] # 10% of the time, replace with random word else: masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)] output_tokens[index] = masked_token masked_lms.append(MaskedLmInstance(index=index, label=tokens[index])) masked_lms = sorted(masked_lms, key=lambda x: x.index) masked_lm_positions = [] masked_lm_labels = [] for p in masked_lms: masked_lm_positions.append(p.index) masked_lm_labels.append(p.label) return (output_tokens, masked_lm_positions, masked_lm_labels)
Creates the predictions for the masked LM objective.
Below is the the instruction that describes the task: ### Input: Creates the predictions for the masked LM objective. ### Response: def create_masked_lm_predictions(tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng): """Creates the predictions for the masked LM objective.""" cand_indexes = [] for (i, token) in enumerate(tokens): if token in ['[CLS]', '[SEP]']: continue cand_indexes.append(i) rng.shuffle(cand_indexes) output_tokens = list(tokens) num_to_predict = min(max_predictions_per_seq, max(1, int(round(len(tokens) * masked_lm_prob)))) masked_lms = [] covered_indexes = set() for index in cand_indexes: if len(masked_lms) >= num_to_predict: break if index in covered_indexes: continue covered_indexes.add(index) masked_token = None # 80% of the time, replace with [MASK] if rng.random() < 0.8: masked_token = '[MASK]' else: # 10% of the time, keep original if rng.random() < 0.5: masked_token = tokens[index] # 10% of the time, replace with random word else: masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)] output_tokens[index] = masked_token masked_lms.append(MaskedLmInstance(index=index, label=tokens[index])) masked_lms = sorted(masked_lms, key=lambda x: x.index) masked_lm_positions = [] masked_lm_labels = [] for p in masked_lms: masked_lm_positions.append(p.index) masked_lm_labels.append(p.label) return (output_tokens, masked_lm_positions, masked_lm_labels)
def bulk_upsert(self, conflict_target: List, rows: List[Dict], index_predicate: str=None): """Creates a set of new records or updates the existing ones with the specified data. Arguments: conflict_target: Fields to pass into the ON CONFLICT clause. index_predicate: The index predicate to satisfy an arbiter partial index. rows: Rows to upsert. """ return self.get_queryset().bulk_upsert(conflict_target, rows, index_predicate)
Creates a set of new records or updates the existing ones with the specified data. Arguments: conflict_target: Fields to pass into the ON CONFLICT clause. index_predicate: The index predicate to satisfy an arbiter partial index. rows: Rows to upsert.
Below is the the instruction that describes the task: ### Input: Creates a set of new records or updates the existing ones with the specified data. Arguments: conflict_target: Fields to pass into the ON CONFLICT clause. index_predicate: The index predicate to satisfy an arbiter partial index. rows: Rows to upsert. ### Response: def bulk_upsert(self, conflict_target: List, rows: List[Dict], index_predicate: str=None): """Creates a set of new records or updates the existing ones with the specified data. Arguments: conflict_target: Fields to pass into the ON CONFLICT clause. index_predicate: The index predicate to satisfy an arbiter partial index. rows: Rows to upsert. """ return self.get_queryset().bulk_upsert(conflict_target, rows, index_predicate)
def remove_hook(self, repo_id, name): """Remove repository hook.""" ghrepo = self.api.repository_with_id(repo_id) if ghrepo: hooks = (h for h in ghrepo.hooks() if h.config.get('url', '') == self.webhook_url) hook = next(hooks, None) if not hook or hook.delete(): Repository.disable(user_id=self.user_id, github_id=repo_id, name=name) return True return False
Remove repository hook.
Below is the the instruction that describes the task: ### Input: Remove repository hook. ### Response: def remove_hook(self, repo_id, name): """Remove repository hook.""" ghrepo = self.api.repository_with_id(repo_id) if ghrepo: hooks = (h for h in ghrepo.hooks() if h.config.get('url', '') == self.webhook_url) hook = next(hooks, None) if not hook or hook.delete(): Repository.disable(user_id=self.user_id, github_id=repo_id, name=name) return True return False
def refresh(self, module=None): """Recompute the salience values of the Activations on the Agenda and then reorder the agenda. The Python equivalent of the CLIPS refresh-agenda command. If no Module is specified, the current one is used. """ module = module._mdl if module is not None else ffi.NULL lib.EnvRefreshAgenda(self._env, module)
Recompute the salience values of the Activations on the Agenda and then reorder the agenda. The Python equivalent of the CLIPS refresh-agenda command. If no Module is specified, the current one is used.
Below is the the instruction that describes the task: ### Input: Recompute the salience values of the Activations on the Agenda and then reorder the agenda. The Python equivalent of the CLIPS refresh-agenda command. If no Module is specified, the current one is used. ### Response: def refresh(self, module=None): """Recompute the salience values of the Activations on the Agenda and then reorder the agenda. The Python equivalent of the CLIPS refresh-agenda command. If no Module is specified, the current one is used. """ module = module._mdl if module is not None else ffi.NULL lib.EnvRefreshAgenda(self._env, module)
def from_dict(cls, data): """Create a new Measurement subclass instance using the given dict. If Measurement.name_from_class was previously called with this data's associated Measurement sub-class in Python, the returned object will be an instance of that sub-class. If the measurement name in ``data`` is unrecognized, the returned object will be of the generic ``Measurement`` type. Args: data (dict): the data for the new measurement, including at least a name and value. """ args = [] if 'id' in data and 'data' in data: measurement_class = CanMessage args.append("Bus %s: 0x%x" % (data.get('bus', '?'), data['id'])) args.append(data['data']) # TODO grab bus else: measurement_class = cls._class_from_name(data['name']) if measurement_class == Measurement: args.append(data['name']) args.append(data['value']) return measurement_class(*args, event=data.get('event', None), override_unit=True)
Create a new Measurement subclass instance using the given dict. If Measurement.name_from_class was previously called with this data's associated Measurement sub-class in Python, the returned object will be an instance of that sub-class. If the measurement name in ``data`` is unrecognized, the returned object will be of the generic ``Measurement`` type. Args: data (dict): the data for the new measurement, including at least a name and value.
Below is the the instruction that describes the task: ### Input: Create a new Measurement subclass instance using the given dict. If Measurement.name_from_class was previously called with this data's associated Measurement sub-class in Python, the returned object will be an instance of that sub-class. If the measurement name in ``data`` is unrecognized, the returned object will be of the generic ``Measurement`` type. Args: data (dict): the data for the new measurement, including at least a name and value. ### Response: def from_dict(cls, data): """Create a new Measurement subclass instance using the given dict. If Measurement.name_from_class was previously called with this data's associated Measurement sub-class in Python, the returned object will be an instance of that sub-class. If the measurement name in ``data`` is unrecognized, the returned object will be of the generic ``Measurement`` type. Args: data (dict): the data for the new measurement, including at least a name and value. """ args = [] if 'id' in data and 'data' in data: measurement_class = CanMessage args.append("Bus %s: 0x%x" % (data.get('bus', '?'), data['id'])) args.append(data['data']) # TODO grab bus else: measurement_class = cls._class_from_name(data['name']) if measurement_class == Measurement: args.append(data['name']) args.append(data['value']) return measurement_class(*args, event=data.get('event', None), override_unit=True)
def create_css(self, rules): """ Generate the final CSS string """ style = rules[0].legacy_compiler_options.get( 'style', self.compiler.output_style) debug_info = self.compiler.generate_source_map if style == 'legacy': sc, sp, tb, nst, srnl, nl, rnl, lnl, dbg = True, ' ', ' ', False, '', '\n', '\n', '\n', debug_info elif style == 'compressed': sc, sp, tb, nst, srnl, nl, rnl, lnl, dbg = False, '', '', False, '', '', '', '', False elif style == 'compact': sc, sp, tb, nst, srnl, nl, rnl, lnl, dbg = True, ' ', '', False, '\n', ' ', '\n', ' ', debug_info elif style == 'expanded': sc, sp, tb, nst, srnl, nl, rnl, lnl, dbg = True, ' ', ' ', False, '\n', '\n', '\n', '\n', debug_info else: # if style == 'nested': sc, sp, tb, nst, srnl, nl, rnl, lnl, dbg = True, ' ', ' ', True, '\n', '\n', '\n', ' ', debug_info return self._create_css(rules, sc, sp, tb, nst, srnl, nl, rnl, lnl, dbg)
Generate the final CSS string
Below is the the instruction that describes the task: ### Input: Generate the final CSS string ### Response: def create_css(self, rules): """ Generate the final CSS string """ style = rules[0].legacy_compiler_options.get( 'style', self.compiler.output_style) debug_info = self.compiler.generate_source_map if style == 'legacy': sc, sp, tb, nst, srnl, nl, rnl, lnl, dbg = True, ' ', ' ', False, '', '\n', '\n', '\n', debug_info elif style == 'compressed': sc, sp, tb, nst, srnl, nl, rnl, lnl, dbg = False, '', '', False, '', '', '', '', False elif style == 'compact': sc, sp, tb, nst, srnl, nl, rnl, lnl, dbg = True, ' ', '', False, '\n', ' ', '\n', ' ', debug_info elif style == 'expanded': sc, sp, tb, nst, srnl, nl, rnl, lnl, dbg = True, ' ', ' ', False, '\n', '\n', '\n', '\n', debug_info else: # if style == 'nested': sc, sp, tb, nst, srnl, nl, rnl, lnl, dbg = True, ' ', ' ', True, '\n', '\n', '\n', ' ', debug_info return self._create_css(rules, sc, sp, tb, nst, srnl, nl, rnl, lnl, dbg)
def _read_single(parser, filepath): """Reads a single config file into the parser, silently failing if the file does not exist. Args: parser (ConfigParser): parser to read the file into. filepath (str): full path to the config file. """ from os import path global packages if path.isfile(filepath): parser.readfp(open(filepath))
Reads a single config file into the parser, silently failing if the file does not exist. Args: parser (ConfigParser): parser to read the file into. filepath (str): full path to the config file.
Below is the the instruction that describes the task: ### Input: Reads a single config file into the parser, silently failing if the file does not exist. Args: parser (ConfigParser): parser to read the file into. filepath (str): full path to the config file. ### Response: def _read_single(parser, filepath): """Reads a single config file into the parser, silently failing if the file does not exist. Args: parser (ConfigParser): parser to read the file into. filepath (str): full path to the config file. """ from os import path global packages if path.isfile(filepath): parser.readfp(open(filepath))
def residuals(self, pars, x, y, order): """ Residual of Fourier Series. Parameters ---------- pars : array_like Fourier series parameters. x : array_like An array of date. y : array_like An array of true values to fit. order : int An order of Fourier Series. """ return y - self.fourier_series(pars, x, order)
Residual of Fourier Series. Parameters ---------- pars : array_like Fourier series parameters. x : array_like An array of date. y : array_like An array of true values to fit. order : int An order of Fourier Series.
Below is the the instruction that describes the task: ### Input: Residual of Fourier Series. Parameters ---------- pars : array_like Fourier series parameters. x : array_like An array of date. y : array_like An array of true values to fit. order : int An order of Fourier Series. ### Response: def residuals(self, pars, x, y, order): """ Residual of Fourier Series. Parameters ---------- pars : array_like Fourier series parameters. x : array_like An array of date. y : array_like An array of true values to fit. order : int An order of Fourier Series. """ return y - self.fourier_series(pars, x, order)
def ___replace_adjective_maybe(sentence, counts): """Lets find and replace all instances of #ADJECTIVE_MAYBE :param _sentence: :param counts: """ random_decision = random.randint(0, 1) if sentence is not None: while sentence.find('#ADJECTIVE_MAYBE') != -1: if random_decision % 2 == 0: sentence = sentence.replace('#ADJECTIVE_MAYBE', ' ' + str(__get_adjective(counts)), 1) elif random_decision % 2 != 0: sentence = sentence.replace('#ADJECTIVE_MAYBE', '', 1) if sentence.find('#ADJECTIVE_MAYBE') == -1: return sentence return sentence else: return sentence
Lets find and replace all instances of #ADJECTIVE_MAYBE :param _sentence: :param counts:
Below is the the instruction that describes the task: ### Input: Lets find and replace all instances of #ADJECTIVE_MAYBE :param _sentence: :param counts: ### Response: def ___replace_adjective_maybe(sentence, counts): """Lets find and replace all instances of #ADJECTIVE_MAYBE :param _sentence: :param counts: """ random_decision = random.randint(0, 1) if sentence is not None: while sentence.find('#ADJECTIVE_MAYBE') != -1: if random_decision % 2 == 0: sentence = sentence.replace('#ADJECTIVE_MAYBE', ' ' + str(__get_adjective(counts)), 1) elif random_decision % 2 != 0: sentence = sentence.replace('#ADJECTIVE_MAYBE', '', 1) if sentence.find('#ADJECTIVE_MAYBE') == -1: return sentence return sentence else: return sentence
def checkASN(filename): """ Determine if the filename provided to the function belongs to an association. Parameters ---------- filename: string Returns ------- validASN : boolean value """ # Extract the file extn type: extnType = filename[filename.rfind('_')+1:filename.rfind('.')] # Determine if this extn name is valid for an assocation file if isValidAssocExtn(extnType): return True else: return False
Determine if the filename provided to the function belongs to an association. Parameters ---------- filename: string Returns ------- validASN : boolean value
Below is the the instruction that describes the task: ### Input: Determine if the filename provided to the function belongs to an association. Parameters ---------- filename: string Returns ------- validASN : boolean value ### Response: def checkASN(filename): """ Determine if the filename provided to the function belongs to an association. Parameters ---------- filename: string Returns ------- validASN : boolean value """ # Extract the file extn type: extnType = filename[filename.rfind('_')+1:filename.rfind('.')] # Determine if this extn name is valid for an assocation file if isValidAssocExtn(extnType): return True else: return False
def vertex_colors(self, values): """ Set the colors for each vertex of a mesh This will apply these colors and delete any previously specified color information. Parameters ------------ colors: (len(mesh.vertices), 3), set each face to the color (len(mesh.vertices), 4), set each face to the color (3,) int, set the whole mesh this color (4,) int, set the whole mesh this color """ if values is None: if 'vertex_colors' in self._data: self._data.data.pop('vertex_colors') return # make sure passed values are numpy array values = np.asanyarray(values) # Ensure the color shape is sane if (self.mesh is not None and not (values.shape == (len(self.mesh.vertices), 3) or values.shape == (len(self.mesh.vertices), 4) or values.shape == (3,) or values.shape == (4,))): return colors = to_rgba(values) if (self.mesh is not None and colors.shape == (4,)): count = len(self.mesh.vertices) colors = np.tile(colors, (count, 1)) # if we set any color information, clear the others self._data.clear() self._data['vertex_colors'] = colors self._cache.verify()
Set the colors for each vertex of a mesh This will apply these colors and delete any previously specified color information. Parameters ------------ colors: (len(mesh.vertices), 3), set each face to the color (len(mesh.vertices), 4), set each face to the color (3,) int, set the whole mesh this color (4,) int, set the whole mesh this color
Below is the the instruction that describes the task: ### Input: Set the colors for each vertex of a mesh This will apply these colors and delete any previously specified color information. Parameters ------------ colors: (len(mesh.vertices), 3), set each face to the color (len(mesh.vertices), 4), set each face to the color (3,) int, set the whole mesh this color (4,) int, set the whole mesh this color ### Response: def vertex_colors(self, values): """ Set the colors for each vertex of a mesh This will apply these colors and delete any previously specified color information. Parameters ------------ colors: (len(mesh.vertices), 3), set each face to the color (len(mesh.vertices), 4), set each face to the color (3,) int, set the whole mesh this color (4,) int, set the whole mesh this color """ if values is None: if 'vertex_colors' in self._data: self._data.data.pop('vertex_colors') return # make sure passed values are numpy array values = np.asanyarray(values) # Ensure the color shape is sane if (self.mesh is not None and not (values.shape == (len(self.mesh.vertices), 3) or values.shape == (len(self.mesh.vertices), 4) or values.shape == (3,) or values.shape == (4,))): return colors = to_rgba(values) if (self.mesh is not None and colors.shape == (4,)): count = len(self.mesh.vertices) colors = np.tile(colors, (count, 1)) # if we set any color information, clear the others self._data.clear() self._data['vertex_colors'] = colors self._cache.verify()
def load_resfile(directory, resfile_path=None): """ Return a list of tuples indicating the start and end points of the loops that were sampled in the given directory. """ if resfile_path is None: workspace = workspace_from_dir(directory) resfile_path = workspace.resfile_path from klab.rosetta.input_files import Resfile return Resfile(resfile_path)
Return a list of tuples indicating the start and end points of the loops that were sampled in the given directory.
Below is the the instruction that describes the task: ### Input: Return a list of tuples indicating the start and end points of the loops that were sampled in the given directory. ### Response: def load_resfile(directory, resfile_path=None): """ Return a list of tuples indicating the start and end points of the loops that were sampled in the given directory. """ if resfile_path is None: workspace = workspace_from_dir(directory) resfile_path = workspace.resfile_path from klab.rosetta.input_files import Resfile return Resfile(resfile_path)
def _get_by_id(self, style_id, style_type): """ Return the style of *style_type* matching *style_id*. Returns the default for *style_type* if *style_id* is not found or if the style having *style_id* is not of *style_type*. """ style = self._element.get_by_id(style_id) if style is None or style.type != style_type: return self.default(style_type) return StyleFactory(style)
Return the style of *style_type* matching *style_id*. Returns the default for *style_type* if *style_id* is not found or if the style having *style_id* is not of *style_type*.
Below is the the instruction that describes the task: ### Input: Return the style of *style_type* matching *style_id*. Returns the default for *style_type* if *style_id* is not found or if the style having *style_id* is not of *style_type*. ### Response: def _get_by_id(self, style_id, style_type): """ Return the style of *style_type* matching *style_id*. Returns the default for *style_type* if *style_id* is not found or if the style having *style_id* is not of *style_type*. """ style = self._element.get_by_id(style_id) if style is None or style.type != style_type: return self.default(style_type) return StyleFactory(style)
def writeout_cache(self, conn=None): """Write any entries in the cache to the database.""" if conn is None: conn = self.db with self.db_input_cache_lock: try: self._writeout_input_cache(conn) except sqlite3.IntegrityError: self.new_session(conn) print("ERROR! Session/line number was not unique in", "database. History logging moved to new session", self.session_number) try: # Try writing to the new session. If this fails, don't # recurse self._writeout_input_cache(conn) except sqlite3.IntegrityError: pass finally: self.db_input_cache = [] with self.db_output_cache_lock: try: self._writeout_output_cache(conn) except sqlite3.IntegrityError: print("!! Session/line number for output was not unique", "in database. Output will not be stored.") finally: self.db_output_cache = []
Write any entries in the cache to the database.
Below is the the instruction that describes the task: ### Input: Write any entries in the cache to the database. ### Response: def writeout_cache(self, conn=None): """Write any entries in the cache to the database.""" if conn is None: conn = self.db with self.db_input_cache_lock: try: self._writeout_input_cache(conn) except sqlite3.IntegrityError: self.new_session(conn) print("ERROR! Session/line number was not unique in", "database. History logging moved to new session", self.session_number) try: # Try writing to the new session. If this fails, don't # recurse self._writeout_input_cache(conn) except sqlite3.IntegrityError: pass finally: self.db_input_cache = [] with self.db_output_cache_lock: try: self._writeout_output_cache(conn) except sqlite3.IntegrityError: print("!! Session/line number for output was not unique", "in database. Output will not be stored.") finally: self.db_output_cache = []
def netconf_session_end_session_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") netconf_session_end = ET.SubElement(config, "netconf-session-end", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-notifications") session_id = ET.SubElement(netconf_session_end, "session-id") session_id.text = kwargs.pop('session_id') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
Below is the the instruction that describes the task: ### Input: Auto Generated Code ### Response: def netconf_session_end_session_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") netconf_session_end = ET.SubElement(config, "netconf-session-end", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-notifications") session_id = ET.SubElement(netconf_session_end, "session-id") session_id.text = kwargs.pop('session_id') callback = kwargs.pop('callback', self._callback) return callback(config)
def get_athlete_friends(self, athlete_id=None, limit=None): """ Gets friends for current (or specified) athlete. http://strava.github.io/api/v3/follow/#friends :param: athlete_id :type: athlete_id: int :param limit: Maximum number of athletes to return (default unlimited). :type limit: int :return: An iterator of :class:`stravalib.model.Athlete` objects. :rtype: :class:`BatchedResultsIterator` """ if athlete_id is None: result_fetcher = functools.partial(self.protocol.get, '/athlete/friends') else: raise NotImplementedError("The /athletes/{id}/friends endpoint was removed by Strava. " "See https://developers.strava.com/docs/january-2018-update/") # result_fetcher = functools.partial(self.protocol.get, # '/athletes/{id}/friends', # id=athlete_id) return BatchedResultsIterator(entity=model.Athlete, bind_client=self, result_fetcher=result_fetcher, limit=limit)
Gets friends for current (or specified) athlete. http://strava.github.io/api/v3/follow/#friends :param: athlete_id :type: athlete_id: int :param limit: Maximum number of athletes to return (default unlimited). :type limit: int :return: An iterator of :class:`stravalib.model.Athlete` objects. :rtype: :class:`BatchedResultsIterator`
Below is the the instruction that describes the task: ### Input: Gets friends for current (or specified) athlete. http://strava.github.io/api/v3/follow/#friends :param: athlete_id :type: athlete_id: int :param limit: Maximum number of athletes to return (default unlimited). :type limit: int :return: An iterator of :class:`stravalib.model.Athlete` objects. :rtype: :class:`BatchedResultsIterator` ### Response: def get_athlete_friends(self, athlete_id=None, limit=None): """ Gets friends for current (or specified) athlete. http://strava.github.io/api/v3/follow/#friends :param: athlete_id :type: athlete_id: int :param limit: Maximum number of athletes to return (default unlimited). :type limit: int :return: An iterator of :class:`stravalib.model.Athlete` objects. :rtype: :class:`BatchedResultsIterator` """ if athlete_id is None: result_fetcher = functools.partial(self.protocol.get, '/athlete/friends') else: raise NotImplementedError("The /athletes/{id}/friends endpoint was removed by Strava. " "See https://developers.strava.com/docs/january-2018-update/") # result_fetcher = functools.partial(self.protocol.get, # '/athletes/{id}/friends', # id=athlete_id) return BatchedResultsIterator(entity=model.Athlete, bind_client=self, result_fetcher=result_fetcher, limit=limit)
def basis_comparison_report(bs1, bs2, uncontract_general=False): ''' Compares two basis set dictionaries and prints a report about their differences ''' all_bs1 = list(bs1['elements'].keys()) if uncontract_general: bs1 = manip.uncontract_general(bs1) bs2 = manip.uncontract_general(bs2) not_in_bs1 = [] # Found in bs2, not in bs1 not_in_bs2 = all_bs1.copy() # Found in bs1, not in bs2 no_diff = [] # Elements for which there is no difference some_diff = [] # Elements that are different big_diff = [] # Elements that are substantially different for k, v in bs2['elements'].items(): if k not in all_bs1: not_in_bs1.append(k) continue print() print("-------------------------------------") print(" Element ", k) bs1_el = bs1['elements'][k] max_rdiff_el = 0.0 max_rdiff_ecp = 0.0 # Check to make sure that neither or both have ecp/electron shells if 'electron_shells' in v and 'electron_shells' not in bs1_el: print("bs2 has electron_shells, but bs1 does not") max_rdiff_el = float('inf') if 'electron_shells' in bs1_el and 'electron_shells' not in v: print("bs1 has electron_shells, but bs2 does not") max_rdiff_el = float('inf') if 'ecp_potentials' in v and 'ecp_potentials' not in bs1_el: print("bs2 has ecp_potentials, but bs1 does not") max_rdiff_ecp = float('inf') if 'ecp_potentials' in bs1_el and 'ecp_potentials' not in v: print("bs1 has ecp_potentials, but bs2 does not") max_rdiff_ecp = float('inf') if 'electron_shells' in v and 'electron_shells' in bs1_el: max_rdiff_el = max(max_rdiff_el, shells_difference(v['electron_shells'], bs1_el['electron_shells'])) if 'ecp_potentials' in v and 'ecp_potentials' in bs1_el: nel1 = v['ecp_electrons'] nel2 = bs1_el['ecp_electrons'] if int(nel1) != int(nel2): print('Different number of electrons replaced by ECP ({} vs {})'.format(nel1, nel2)) max_rdiff_ecp = float('inf') else: max_rdiff_ecp = max(max_rdiff_ecp, potentials_difference(v['ecp_potentials'], bs1_el['ecp_potentials'])) max_rdiff = max(max_rdiff_el, max_rdiff_ecp) # Handle some 
differences if max_rdiff == float('inf'): big_diff.append(k) elif max_rdiff == 0.0: no_diff.append(k) else: some_diff.append(k) not_in_bs2.remove(k) print() print(" Not in bs1: ", _print_list(not_in_bs1)) print(" Not in bs2: ", _print_list(not_in_bs2)) print(" No difference: ", _print_list(no_diff)) print("Some difference: ", _print_list(some_diff)) print(" BIG difference: ", _print_list(big_diff)) print() return (len(not_in_bs1) == 0 and len(not_in_bs2) == 0 and len(some_diff) == 0 and len(big_diff) == 0)
Compares two basis set dictionaries and prints a report about their differences
Below is the the instruction that describes the task: ### Input: Compares two basis set dictionaries and prints a report about their differences ### Response: def basis_comparison_report(bs1, bs2, uncontract_general=False): ''' Compares two basis set dictionaries and prints a report about their differences ''' all_bs1 = list(bs1['elements'].keys()) if uncontract_general: bs1 = manip.uncontract_general(bs1) bs2 = manip.uncontract_general(bs2) not_in_bs1 = [] # Found in bs2, not in bs1 not_in_bs2 = all_bs1.copy() # Found in bs1, not in bs2 no_diff = [] # Elements for which there is no difference some_diff = [] # Elements that are different big_diff = [] # Elements that are substantially different for k, v in bs2['elements'].items(): if k not in all_bs1: not_in_bs1.append(k) continue print() print("-------------------------------------") print(" Element ", k) bs1_el = bs1['elements'][k] max_rdiff_el = 0.0 max_rdiff_ecp = 0.0 # Check to make sure that neither or both have ecp/electron shells if 'electron_shells' in v and 'electron_shells' not in bs1_el: print("bs2 has electron_shells, but bs1 does not") max_rdiff_el = float('inf') if 'electron_shells' in bs1_el and 'electron_shells' not in v: print("bs1 has electron_shells, but bs2 does not") max_rdiff_el = float('inf') if 'ecp_potentials' in v and 'ecp_potentials' not in bs1_el: print("bs2 has ecp_potentials, but bs1 does not") max_rdiff_ecp = float('inf') if 'ecp_potentials' in bs1_el and 'ecp_potentials' not in v: print("bs1 has ecp_potentials, but bs2 does not") max_rdiff_ecp = float('inf') if 'electron_shells' in v and 'electron_shells' in bs1_el: max_rdiff_el = max(max_rdiff_el, shells_difference(v['electron_shells'], bs1_el['electron_shells'])) if 'ecp_potentials' in v and 'ecp_potentials' in bs1_el: nel1 = v['ecp_electrons'] nel2 = bs1_el['ecp_electrons'] if int(nel1) != int(nel2): print('Different number of electrons replaced by ECP ({} vs {})'.format(nel1, nel2)) max_rdiff_ecp = float('inf') else: 
max_rdiff_ecp = max(max_rdiff_ecp, potentials_difference(v['ecp_potentials'], bs1_el['ecp_potentials'])) max_rdiff = max(max_rdiff_el, max_rdiff_ecp) # Handle some differences if max_rdiff == float('inf'): big_diff.append(k) elif max_rdiff == 0.0: no_diff.append(k) else: some_diff.append(k) not_in_bs2.remove(k) print() print(" Not in bs1: ", _print_list(not_in_bs1)) print(" Not in bs2: ", _print_list(not_in_bs2)) print(" No difference: ", _print_list(no_diff)) print("Some difference: ", _print_list(some_diff)) print(" BIG difference: ", _print_list(big_diff)) print() return (len(not_in_bs1) == 0 and len(not_in_bs2) == 0 and len(some_diff) == 0 and len(big_diff) == 0)
def upload_attachments(self, attachments, parentid=None, basedir=None): """Upload files to the already created (but never uploaded) attachments""" return Zupload(self, attachments, parentid, basedir=basedir).upload()
Upload files to the already created (but never uploaded) attachments
Below is the the instruction that describes the task: ### Input: Upload files to the already created (but never uploaded) attachments ### Response: def upload_attachments(self, attachments, parentid=None, basedir=None): """Upload files to the already created (but never uploaded) attachments""" return Zupload(self, attachments, parentid, basedir=basedir).upload()
def scheduling_linear_ordering(J,p,d,w): """ scheduling_linear_ordering: model for the one machine total weighted tardiness problem Model for the one machine total weighted tardiness problem using the linear ordering formulation Parameters: - J: set of jobs - p[j]: processing time of job j - d[j]: latest non-tardy time for job j - w[j]: weighted of job j; the objective is the sum of the weighted completion time Returns a model, ready to be solved. """ model = Model("scheduling: linear ordering") T,x = {},{} # tardiness variable; x[j,k] =1 if job j precedes job k, =0 otherwise for j in J: T[j] = model.addVar(vtype="C", name="T(%s)"%(j)) for k in J: if j != k: x[j,k] = model.addVar(vtype="B", name="x(%s,%s)"%(j,k)) for j in J: model.addCons(quicksum(p[k]*x[k,j] for k in J if k != j) - T[j] <= d[j]-p[j], "Tardiness(%r)"%(j)) for k in J: if k <= j: continue model.addCons(x[j,k] + x[k,j] == 1, "Disjunctive(%s,%s)"%(j,k)) for ell in J: if ell > k: model.addCons(x[j,k] + x[k,ell] + x[ell,j] <= 2, "Triangle(%s,%s,%s)"%(j,k,ell)) model.setObjective(quicksum(w[j]*T[j] for j in J), "minimize") model.data = x,T return model
scheduling_linear_ordering: model for the one machine total weighted tardiness problem Model for the one machine total weighted tardiness problem using the linear ordering formulation Parameters: - J: set of jobs - p[j]: processing time of job j - d[j]: latest non-tardy time for job j - w[j]: weighted of job j; the objective is the sum of the weighted completion time Returns a model, ready to be solved.
Below is the the instruction that describes the task: ### Input: scheduling_linear_ordering: model for the one machine total weighted tardiness problem Model for the one machine total weighted tardiness problem using the linear ordering formulation Parameters: - J: set of jobs - p[j]: processing time of job j - d[j]: latest non-tardy time for job j - w[j]: weighted of job j; the objective is the sum of the weighted completion time Returns a model, ready to be solved. ### Response: def scheduling_linear_ordering(J,p,d,w): """ scheduling_linear_ordering: model for the one machine total weighted tardiness problem Model for the one machine total weighted tardiness problem using the linear ordering formulation Parameters: - J: set of jobs - p[j]: processing time of job j - d[j]: latest non-tardy time for job j - w[j]: weighted of job j; the objective is the sum of the weighted completion time Returns a model, ready to be solved. """ model = Model("scheduling: linear ordering") T,x = {},{} # tardiness variable; x[j,k] =1 if job j precedes job k, =0 otherwise for j in J: T[j] = model.addVar(vtype="C", name="T(%s)"%(j)) for k in J: if j != k: x[j,k] = model.addVar(vtype="B", name="x(%s,%s)"%(j,k)) for j in J: model.addCons(quicksum(p[k]*x[k,j] for k in J if k != j) - T[j] <= d[j]-p[j], "Tardiness(%r)"%(j)) for k in J: if k <= j: continue model.addCons(x[j,k] + x[k,j] == 1, "Disjunctive(%s,%s)"%(j,k)) for ell in J: if ell > k: model.addCons(x[j,k] + x[k,ell] + x[ell,j] <= 2, "Triangle(%s,%s,%s)"%(j,k,ell)) model.setObjective(quicksum(w[j]*T[j] for j in J), "minimize") model.data = x,T return model
def _after_flush_handler(session, _flush_context): """Archive all new/updated/deleted data""" dialect = get_dialect(session) handlers = [ (_versioned_delete, session.deleted), (_versioned_insert, session.new), (_versioned_update, session.dirty), ] for handler, rows in handlers: # TODO: Bulk archive insert statements for row in rows: if not isinstance(row, SavageModelMixin): continue if not hasattr(row, 'ArchiveTable'): raise LogTableCreationError('Need to register Savage tables!!') user_id = getattr(row, '_updated_by', None) handler(row, session, user_id, dialect)
Archive all new/updated/deleted data
Below is the the instruction that describes the task: ### Input: Archive all new/updated/deleted data ### Response: def _after_flush_handler(session, _flush_context): """Archive all new/updated/deleted data""" dialect = get_dialect(session) handlers = [ (_versioned_delete, session.deleted), (_versioned_insert, session.new), (_versioned_update, session.dirty), ] for handler, rows in handlers: # TODO: Bulk archive insert statements for row in rows: if not isinstance(row, SavageModelMixin): continue if not hasattr(row, 'ArchiveTable'): raise LogTableCreationError('Need to register Savage tables!!') user_id = getattr(row, '_updated_by', None) handler(row, session, user_id, dialect)
def pool_process(func, iterable, cpus=cpu_count(), return_vals=False, cpu_reduction=0, progress_bar=False): """ Multiprocessing helper function for performing looped operation using multiple processors. :param func: Function to call :param iterable: Iterable object to perform each function on :param cpus: Number of cpu cores, defaults to system's cpu count :param return_vals: Bool, returns output values when True :param cpu_reduction: Number of cpu core's to not use :param progress_bar: Display text based progress bar :return: """ with Pool(cpus - abs(cpu_reduction)) as pool: # Return values returned by 'func' if return_vals: # Show progress bar if progress_bar: vals = [v for v in tqdm(pool.imap_unordered(func, iterable), total=len(iterable))] # No progress bar else: vals = pool.map(func, iterable) # Close pool and return values pool.close() # pool.join() return vals # Don't capture values returned by 'func' else: pool.map(func, iterable) pool.close() return True
Multiprocessing helper function for performing looped operation using multiple processors. :param func: Function to call :param iterable: Iterable object to perform each function on :param cpus: Number of cpu cores, defaults to system's cpu count :param return_vals: Bool, returns output values when True :param cpu_reduction: Number of cpu core's to not use :param progress_bar: Display text based progress bar :return:
Below is the the instruction that describes the task: ### Input: Multiprocessing helper function for performing looped operation using multiple processors. :param func: Function to call :param iterable: Iterable object to perform each function on :param cpus: Number of cpu cores, defaults to system's cpu count :param return_vals: Bool, returns output values when True :param cpu_reduction: Number of cpu core's to not use :param progress_bar: Display text based progress bar :return: ### Response: def pool_process(func, iterable, cpus=cpu_count(), return_vals=False, cpu_reduction=0, progress_bar=False): """ Multiprocessing helper function for performing looped operation using multiple processors. :param func: Function to call :param iterable: Iterable object to perform each function on :param cpus: Number of cpu cores, defaults to system's cpu count :param return_vals: Bool, returns output values when True :param cpu_reduction: Number of cpu core's to not use :param progress_bar: Display text based progress bar :return: """ with Pool(cpus - abs(cpu_reduction)) as pool: # Return values returned by 'func' if return_vals: # Show progress bar if progress_bar: vals = [v for v in tqdm(pool.imap_unordered(func, iterable), total=len(iterable))] # No progress bar else: vals = pool.map(func, iterable) # Close pool and return values pool.close() # pool.join() return vals # Don't capture values returned by 'func' else: pool.map(func, iterable) pool.close() return True
def emitTriggered(self, action): """ Emits the triggered action for this widget. :param action | <QAction> """ self.currentActionChanged.emit(action) self.currentIndexChanged.emit(self.indexOf(action)) if not self.signalsBlocked(): self.triggered.emit(action)
Emits the triggered action for this widget. :param action | <QAction>
Below is the the instruction that describes the task: ### Input: Emits the triggered action for this widget. :param action | <QAction> ### Response: def emitTriggered(self, action): """ Emits the triggered action for this widget. :param action | <QAction> """ self.currentActionChanged.emit(action) self.currentIndexChanged.emit(self.indexOf(action)) if not self.signalsBlocked(): self.triggered.emit(action)
def hydrate_bundles(bundles_field, glob_match_error_behavior): """Given a BundlesField, request Snapshots for each of its filesets and create BundleAdaptors.""" path_globs_with_match_errors = [ pg.copy(glob_match_error_behavior=glob_match_error_behavior) for pg in bundles_field.path_globs_list ] snapshot_list = yield [Get(Snapshot, PathGlobs, pg) for pg in path_globs_with_match_errors] spec_path = bundles_field.address.spec_path bundles = [] zipped = zip(bundles_field.bundles, bundles_field.filespecs_list, snapshot_list) for bundle, filespecs, snapshot in zipped: rel_spec_path = getattr(bundle, 'rel_path', spec_path) kwargs = bundle.kwargs() # NB: We `include_dirs=True` because bundle filesets frequently specify directories in order # to trigger a (deprecated) default inclusion of their recursive contents. See the related # deprecation in `pants.backend.jvm.tasks.bundle_create`. kwargs['fileset'] = _eager_fileset_with_spec(rel_spec_path, filespecs, snapshot, include_dirs=True) bundles.append(BundleAdaptor(**kwargs)) yield HydratedField('bundles', bundles)
Given a BundlesField, request Snapshots for each of its filesets and create BundleAdaptors.
Below is the the instruction that describes the task: ### Input: Given a BundlesField, request Snapshots for each of its filesets and create BundleAdaptors. ### Response: def hydrate_bundles(bundles_field, glob_match_error_behavior): """Given a BundlesField, request Snapshots for each of its filesets and create BundleAdaptors.""" path_globs_with_match_errors = [ pg.copy(glob_match_error_behavior=glob_match_error_behavior) for pg in bundles_field.path_globs_list ] snapshot_list = yield [Get(Snapshot, PathGlobs, pg) for pg in path_globs_with_match_errors] spec_path = bundles_field.address.spec_path bundles = [] zipped = zip(bundles_field.bundles, bundles_field.filespecs_list, snapshot_list) for bundle, filespecs, snapshot in zipped: rel_spec_path = getattr(bundle, 'rel_path', spec_path) kwargs = bundle.kwargs() # NB: We `include_dirs=True` because bundle filesets frequently specify directories in order # to trigger a (deprecated) default inclusion of their recursive contents. See the related # deprecation in `pants.backend.jvm.tasks.bundle_create`. kwargs['fileset'] = _eager_fileset_with_spec(rel_spec_path, filespecs, snapshot, include_dirs=True) bundles.append(BundleAdaptor(**kwargs)) yield HydratedField('bundles', bundles)
def add_model(self, model, force=False): """ Add a model. The model will be asssigned to a class attribute with the YANG name of the model. Args: model (PybindBase): Model to add. force (bool): If not set, verify the model is in SUPPORTED_MODELS Examples: >>> import napalm_yang >>> config = napalm_yang.base.Root() >>> config.add_model(napalm_yang.models.openconfig_interfaces) >>> config.interfaces <pyangbind.lib.yangtypes.YANGBaseClass object at 0x10bef6680> """ if isinstance(model, str): self._load_model(model) return try: model = model() except Exception: pass if model._yang_name not in [a[0] for a in SUPPORTED_MODELS] and not force: raise ValueError( "Only models in SUPPORTED_MODELS can be added without `force=True`" ) for k, v in model: self._elements[k] = v setattr(self, k, v)
Add a model. The model will be asssigned to a class attribute with the YANG name of the model. Args: model (PybindBase): Model to add. force (bool): If not set, verify the model is in SUPPORTED_MODELS Examples: >>> import napalm_yang >>> config = napalm_yang.base.Root() >>> config.add_model(napalm_yang.models.openconfig_interfaces) >>> config.interfaces <pyangbind.lib.yangtypes.YANGBaseClass object at 0x10bef6680>
Below is the the instruction that describes the task: ### Input: Add a model. The model will be asssigned to a class attribute with the YANG name of the model. Args: model (PybindBase): Model to add. force (bool): If not set, verify the model is in SUPPORTED_MODELS Examples: >>> import napalm_yang >>> config = napalm_yang.base.Root() >>> config.add_model(napalm_yang.models.openconfig_interfaces) >>> config.interfaces <pyangbind.lib.yangtypes.YANGBaseClass object at 0x10bef6680> ### Response: def add_model(self, model, force=False): """ Add a model. The model will be asssigned to a class attribute with the YANG name of the model. Args: model (PybindBase): Model to add. force (bool): If not set, verify the model is in SUPPORTED_MODELS Examples: >>> import napalm_yang >>> config = napalm_yang.base.Root() >>> config.add_model(napalm_yang.models.openconfig_interfaces) >>> config.interfaces <pyangbind.lib.yangtypes.YANGBaseClass object at 0x10bef6680> """ if isinstance(model, str): self._load_model(model) return try: model = model() except Exception: pass if model._yang_name not in [a[0] for a in SUPPORTED_MODELS] and not force: raise ValueError( "Only models in SUPPORTED_MODELS can be added without `force=True`" ) for k, v in model: self._elements[k] = v setattr(self, k, v)
def sourceWatchdog(self): """Watchdog timer function. Recreates sources which have not generated events in 10*interval if they have watchdog set to true in their configuration """ for i, source in enumerate(self.sources): if not source.config.get('watchdog', False): continue sn = repr(source) last = self.lastEvents.get(source, None) if last: try: if last < (time.time()-(source.inter*10)): log.msg("Trying to restart stale source %s: %ss" % ( sn, int(time.time() - last) )) s = self.sources.pop(i) try: s.t.stop() except Exception as e: log.msg("Could not stop timer for %s: %s" % ( sn, e)) config = copy.deepcopy(s.config) del self.lastEvents[source] del s, source source = self.createSource(config) reactor.callLater(0, self._startSource, source) except Exception as e: log.msg("Could not reset source %s: %s" % ( sn, e))
Watchdog timer function. Recreates sources which have not generated events in 10*interval if they have watchdog set to true in their configuration
Below is the the instruction that describes the task: ### Input: Watchdog timer function. Recreates sources which have not generated events in 10*interval if they have watchdog set to true in their configuration ### Response: def sourceWatchdog(self): """Watchdog timer function. Recreates sources which have not generated events in 10*interval if they have watchdog set to true in their configuration """ for i, source in enumerate(self.sources): if not source.config.get('watchdog', False): continue sn = repr(source) last = self.lastEvents.get(source, None) if last: try: if last < (time.time()-(source.inter*10)): log.msg("Trying to restart stale source %s: %ss" % ( sn, int(time.time() - last) )) s = self.sources.pop(i) try: s.t.stop() except Exception as e: log.msg("Could not stop timer for %s: %s" % ( sn, e)) config = copy.deepcopy(s.config) del self.lastEvents[source] del s, source source = self.createSource(config) reactor.callLater(0, self._startSource, source) except Exception as e: log.msg("Could not reset source %s: %s" % ( sn, e))
def get_imr_length(approx, **kwds): """Call through to pnutils to obtain IMR waveform durations """ m1 = float(kwds['mass1']) m2 = float(kwds['mass2']) s1z = float(kwds['spin1z']) s2z = float(kwds['spin2z']) f_low = float(kwds['f_lower']) # 10% margin of error is incorporated in the pnutils function return pnutils.get_imr_duration(m1, m2, s1z, s2z, f_low, approximant=approx)
Call through to pnutils to obtain IMR waveform durations
Below is the the instruction that describes the task: ### Input: Call through to pnutils to obtain IMR waveform durations ### Response: def get_imr_length(approx, **kwds): """Call through to pnutils to obtain IMR waveform durations """ m1 = float(kwds['mass1']) m2 = float(kwds['mass2']) s1z = float(kwds['spin1z']) s2z = float(kwds['spin2z']) f_low = float(kwds['f_lower']) # 10% margin of error is incorporated in the pnutils function return pnutils.get_imr_duration(m1, m2, s1z, s2z, f_low, approximant=approx)