Dataset columns: code (string, lengths 75–104k), docstring (string, lengths 1–46.9k), text (string, lengths 164–112k).
def _data(self, copy=False):
    """
    Get data kwargs of the container (i.e. dataframe and series objects).
    """
    data = {}
    for key, obj in vars(self).items():
        if isinstance(obj, (pd.Series, pd.DataFrame, pd.SparseSeries, pd.SparseDataFrame)):
            if copy:
                data[key] = obj.copy(deep=True)
            else:
                data[key] = obj
    return data
Get data kwargs of the container (i.e. dataframe and series objects).
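A trimmed, runnable sketch of the same pattern (pandas 1.0 removed SparseSeries and SparseDataFrame, so this check keeps only the two surviving types; the class and attribute names are invented):

import pandas as pd

class Container:
    def __init__(self):
        self.frame = pd.DataFrame({'a': [1, 2]})
        self.series = pd.Series([3, 4])
        self.label = 'metadata'  # skipped: not a pandas object

    def _data(self, copy=False):
        # Trimmed isinstance check for modern pandas.
        data = {}
        for key, obj in vars(self).items():
            if isinstance(obj, (pd.Series, pd.DataFrame)):
                data[key] = obj.copy(deep=True) if copy else obj
        return data

print(sorted(Container()._data(copy=True)))  # ['frame', 'series']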
def change_ssh_port():
    """
    For security woven changes the default ssh port.
    """
    host = normalize(env.host_string)[1]
    after = env.port
    before = str(env.DEFAULT_SSH_PORT)
    host_string = join_host_strings(env.user, host, before)
    with settings(host_string=host_string, user=env.user):
        if env.verbosity:
            print env.host, "CHANGING SSH PORT TO: " + str(after)
        sed('/etc/ssh/sshd_config', 'Port ' + str(before), 'Port ' + str(after), use_sudo=True)
        if env.verbosity:
            print env.host, "RESTARTING SSH on", after
        sudo('/etc/init.d/ssh restart')
    return True
For security woven changes the default ssh port.
def start(self):
    """
    Try to remove the old mock logs first.
    """
    if self.resultdir:
        for lname in self.mock_logfiles:
            self.logfiles[lname] = self.build.path_module.join(self.resultdir,
                                                               lname)
    else:
        for lname in self.mock_logfiles:
            self.logfiles[lname] = lname
    self.addLogObserver('state.log', MockStateObserver())

    cmd = remotecommand.RemoteCommand(
        'rmdir',
        {'dir': [self.build.path_module.join('build', self.logfiles[l])
                 for l in self.mock_logfiles]})
    d = self.runCommand(cmd)

    # must resolve super() outside of the callback context.
    super_ = super()

    @d.addCallback
    def removeDone(cmd):
        super_.start()

    d.addErrback(self.failed)
Try to remove the old mock logs first.
def map_pol_to_crt(aryTht, aryRad):
    """Remap coordinates from polar to cartesian

    Parameters
    ----------
    aryTht : 1D numpy array
        Angle of coordinates
    aryRad : 1D numpy array
        Radius of coordinates.

    Returns
    -------
    aryXCrds : 1D numpy array
        Array with x coordinate values.
    aryYrds : 1D numpy array
        Array with y coordinate values.
    """
    aryXCrds = aryRad * np.cos(aryTht)
    aryYrds = aryRad * np.sin(aryTht)
    return aryXCrds, aryYrds
Remap coordinates from polar to cartesian Parameters ---------- aryTht : 1D numpy array Angle of coordinates aryRad : 1D numpy array Radius of coordinates. Returns ------- aryXCrds : 1D numpy array Array with x coordinate values. aryYrds : 1D numpy array Array with y coordinate values.
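A quick usage sketch, assuming numpy is imported as np: a point at radius 1 and angle pi/2 lands at (0, 1) up to floating-point error.

import numpy as np

aryTht = np.array([0.0, np.pi / 2])
aryRad = np.array([1.0, 1.0])
aryX, aryY = map_pol_to_crt(aryTht, aryRad)
# aryX ~ [1, 0], aryY ~ [0, 1]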
def as_device(self):
    """Convert to a `Device` object.

    `node_type` must be `NodeType.DEVICE`.
    """
    if self.node_type != NodeType.DEVICE:
        raise ValueError(
            'Cannot convert to `Device`, node_type is not a device.')
    return self._origin.Device(self._data)
Convert to a `Device` object. `node_type` must be `NodeType.DEVICE`.
def get_password(self, host=None):
    """
    If host=None, return the current server list (dict).
    Else, return the host's password (or the default one if defined or None)
    """
    if host is None:
        return self._password_dict
    else:
        try:
            return self._password_dict[host]
        except (KeyError, TypeError):
            try:
                return self._password_dict['default']
            except (KeyError, TypeError):
                return None
If host=None, return the current server list (dict). Else, return the host's password (or the default one if defined or None)
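Treating the method as a plain function (passing the container as self), a toy lookup; the Store class and password values are invented for illustration:

class Store:
    def __init__(self, password_dict):
        self._password_dict = password_dict

store = Store({'db1.example.com': 's3cret', 'default': 'fallback'})
print(get_password(store, 'db1.example.com'))  # s3cret
print(get_password(store, 'unknown-host'))     # fallback (the 'default' entry)
print(get_password(store))                     # the whole dict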
def _sync_table(self, columns):
    """Lazy load, create or adapt the table structure in the database."""
    if self._table is None:
        # Load an existing table from the database.
        self._reflect_table()
    if self._table is None:
        # Create the table with an initial set of columns.
        if not self._auto_create:
            raise DatasetException("Table does not exist: %s" % self.name)
        # Keep the lock scope small because this is run very often.
        with self.db.lock:
            self._threading_warn()
            self._table = SQLATable(self.name,
                                    self.db.metadata,
                                    schema=self.db.schema)
            if self._primary_id is not False:
                # This can go wrong on DBMS like MySQL and SQLite where
                # tables cannot have no columns.
                primary_id = self._primary_id or self.PRIMARY_DEFAULT
                primary_type = self._primary_type or Types.integer
                increment = primary_type in [Types.integer, Types.bigint]
                column = Column(primary_id, primary_type,
                                primary_key=True,
                                autoincrement=increment)
                self._table.append_column(column)
            for column in columns:
                if not column.name == self._primary_id:
                    self._table.append_column(column)
            self._table.create(self.db.executable, checkfirst=True)
    elif len(columns):
        with self.db.lock:
            self._reflect_table()
            self._threading_warn()
            for column in columns:
                if not self.has_column(column.name):
                    self.db.op.add_column(self.name, column, self.db.schema)
            self._reflect_table()
Lazy load, create or adapt the table structure in the database.
def extract_tags(cls, obj):
    """
    Extract tags from the given object

    :param Any obj: Object to use as context
    :return: Tags to add on span
    :rtype: dict
    """
    return dict(
        [("request.{}".format(attr), obj.get(attr, None)) for attr in cls.TAGS]
    )
Extract tags from the given object :param Any obj: Object to use as context :return: Tags to add on span :rtype: dict
def return_hdr(self):
    """Return the header for further use.

    Returns
    -------
    subj_id : str
        subject identification code
    start_time : datetime
        start time of the dataset
    s_freq : float
        sampling frequency
    chan_name : list of str
        list of all the channels
    n_samples : int
        number of samples in the dataset
    orig : dict
        additional information taken directly from the header

    Notes
    -----
    the time is probably in "local" Unix Time, which is in the local time
    zone, so we read it as "UTC" (meaning, do not apply timezone
    transformation) and then remove timezone info.

    The only doubt I have is how to interpret the "SystemOffset" time. I
    assume it's in s, and that would fix most of the time zone problems, but
    it does not take into account DST. Or maybe "SystemOffset" is in micros
    and we need to apply the correct time zone to TimeStamp Unix time. This
    needs to be tested with a Moberg system.
    """
    subj_id = str()
    patient = parse(join(self.filename, 'patient.info'))
    for patientname in ['PatientFirstName', 'PatientLastName']:
        subj_id += patient.findall(patientname)[0].text.strip()

    unix_time = int(patient.findall('TimeStamp')[0].text.strip()) / 1e6
    system_offset = int(patient.findall('SystemOffset')[0].text.strip())
    start_time = (datetime.fromtimestamp(unix_time, TIMEZONE) +
                  timedelta(seconds=system_offset)).replace(tzinfo=None)

    s_freq = 256  # could not find it in the text files

    montage = parse(join(self.filename, 'Montage.xml'))
    mont = montage.find('Montage')
    chan_name = [chan.get('lead') for chan in mont.findall('Channel')
                 if chan.get('role') == 'REFERENTIAL_INPUT']

    data_size = getsize(join(self.filename, EEG_FILE))
    n_samples = int(data_size / DATA_PRECISION / len(chan_name))
    self.n_smp = n_samples
    self.n_chan = len(chan_name)

    settings = parse(join(self.filename, SETTINGS_FILE))
    conversion = settings.findall('SampleConversion')[0].text.strip()
    dig_min, dig_max, anl_min, anl_max = [int(x) for x in conversion.split(',')]

    if dig_max == -dig_min and anl_max == -anl_min:
        self.convertion = lambda dat: dat / dig_max * anl_max
    else:  # pragma: no cover
        self.convertion = lambda dat: ((dat + dig_min) /
                                       (dig_max - dig_min) *
                                       (anl_max - anl_min) + anl_min)

    orig = {'patient': patient,
            'montage': montage,
            'settings': settings,
            }

    return subj_id, start_time, s_freq, chan_name, n_samples, orig
Return the header for further use. Returns ------- subj_id : str subject identification code start_time : datetime start time of the dataset s_freq : float sampling frequency chan_name : list of str list of all the channels n_samples : int number of samples in the dataset orig : dict additional information taken directly from the header Notes ----- the time is probably in "local" Unix Time, which is in the local time zone, so we read it as "UTC" (meaning, do not apply timezone transformation) and then remove timezone info. The only doubt I have is how to interpret the "SystemOffset" time. I assume it's in s, and that would fix most of the time zone problems, but it does not take into account DST. Or maybe "SystemOffset" is in micros and we need to apply the correct time zone to TimeStamp Unix time. This needs to be tested with a Moberg system.
def _get_annotations(self, text, language=''):
    """Returns the list of annotations retrieved from the given text.

    Args:
      text (str): Input text.
      language (:obj:`str`, optional): Language code.

    Returns:
      Results in a dictionary. :code:`tokens` contains the list of annotations
      and :code:`language` contains the inferred language from the input.
    """
    body = {
        'document': {
            'type': 'PLAIN_TEXT',
            'content': text,
        },
        'features': {
            'extract_syntax': True,
        },
        'encodingType': 'UTF32',
    }
    if language:
        body['document']['language'] = language

    request = self.service.documents().annotateText(body=body)
    response = request.execute()
    tokens = response.get('tokens', [])
    language = response.get('language')

    return {'tokens': tokens, 'language': language}
Returns the list of annotations retrieved from the given text. Args: text (str): Input text. language (:obj:`str`, optional): Language code. Returns: Results in a dictionary. :code:`tokens` contains the list of annotations and :code:`language` contains the inferred language from the input.
def serialize(self, value, **kwargs):
    """
    pre-serialize value
    """
    if self._serialize is not None:
        return self._serialize(value, **kwargs)
    else:
        return value
pre-serialize value
def intersect(a, b):
    """ Check if two rectangles intersect """
    if a[x0] == a[x1] or a[y0] == a[y1]:
        return False
    if b[x0] == b[x1] or b[y0] == b[y1]:
        return False
    return a[x0] <= b[x1] and b[x0] <= a[x1] and a[y0] <= b[y1] and b[y0] <= a[y1]
Check if two rectangles intersect
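The rectangles are indexed with module-level constants x0, y0, x1, y1 that this row does not show; a sketch assuming the conventional values 0 to 3 and (left, top, right, bottom) tuples:

x0, y0, x1, y1 = 0, 1, 2, 3  # assumed index constants

a = (0, 0, 10, 10)
b = (5, 5, 15, 15)
c = (20, 20, 30, 30)
print(intersect(a, b))  # True: the rectangles overlap
print(intersect(a, c))  # False: disjoint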
def variable_summaries(vars_, groups=None, scope='weights'):
    """Create histogram summaries for the provided variables.

    Summaries can be grouped via regexes matching variables names.

    Args:
      vars_: List of variables to summarize.
      groups: Mapping of name to regex for grouping summaries.
      scope: Name scope for this operation.

    Returns:
      Summary tensor.
    """
    groups = groups or {r'all': r'.*'}
    grouped = collections.defaultdict(list)
    for var in vars_:
        for name, pattern in groups.items():
            if re.match(pattern, var.name):
                name = re.sub(pattern, name, var.name)
                grouped[name].append(var)
    for name in groups:
        if name not in grouped:
            tf.logging.warn("No variables matching '{}' group.".format(name))
    summaries = []
    # pylint: disable=redefined-argument-from-local
    for name, vars_ in grouped.items():
        vars_ = [tf.reshape(var, [-1]) for var in vars_]
        vars_ = tf.concat(vars_, 0)
        summaries.append(tf.summary.histogram(scope + '/' + name, vars_))
    return tf.summary.merge(summaries)
Create histogram summaries for the provided variables. Summaries can be grouped via regexes matching variables names. Args: vars_: List of variables to summarize. groups: Mapping of name to regex for grouping summaries. scope: Name scope for this operation. Returns: Summary tensor.
def search(name):
    '''
    Search for matches in the ports tree. Globs are supported, and the
    category is optional

    CLI Examples:

    .. code-block:: bash

        salt '*' ports.search 'security/*'
        salt '*' ports.search 'security/n*'
        salt '*' ports.search nmap

    .. warning::

        Takes a while to run
    '''
    name = six.text_type(name)
    all_ports = list_all()
    if '/' in name:
        if name.count('/') > 1:
            raise SaltInvocationError(
                'Invalid search string \'{0}\'. Port names cannot have more '
                'than one slash'.format(name)
            )
        else:
            return fnmatch.filter(all_ports, name)
    else:
        ret = []
        for port in all_ports:
            if fnmatch.fnmatch(port.rsplit('/')[-1], name):
                ret.append(port)
        return ret
Search for matches in the ports tree. Globs are supported, and the category is optional CLI Examples: .. code-block:: bash salt '*' ports.search 'security/*' salt '*' ports.search 'security/n*' salt '*' ports.search nmap .. warning:: Takes a while to run
def do_cd(self, arg):
    """Imitates the bash shell 'cd' command."""
    from os import chdir, path
    fullpath = path.abspath(path.expanduser(arg))
    if path.isdir(fullpath):
        chdir(fullpath)
    else:
        msg.err("'{}' is not a valid directory.".format(arg))
Imitates the bash shell 'cd' command.
def get_all_intervals(self):
    """Returns the true list of intervals including the empty intervals."""
    ints = sorted(self.get_intervals(True))
    if self.tier_type == 'IntervalTier':
        if not ints:
            ints.append((self.xmin, self.xmax, ''))
        else:
            if ints[0][0] > self.xmin:
                ints.insert(0, (self.xmin, ints[0][0], ''))
            if ints[-1][1] < self.xmax:
                ints.append((ints[-1][1], self.xmax, ''))
            p = ints[-1]
            for index, i in reversed(list(enumerate(ints[:-1], 1))):
                if p[0] - i[1] != 0:
                    ints.insert(index, (i[1], p[0], ''))
                p = i
    return ints
Returns the true list of intervals including the empty intervals.
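The gap-filling loop is easiest to see standalone; a hypothetical helper (not part of the original class) padding (start, end, text) intervals over a [0, 10] range:

def fill_gaps(ints, xmin, xmax):
    # Pad the ends, then walk backwards inserting empty intervals between gaps.
    if ints[0][0] > xmin:
        ints.insert(0, (xmin, ints[0][0], ''))
    if ints[-1][1] < xmax:
        ints.append((ints[-1][1], xmax, ''))
    p = ints[-1]
    for index, i in reversed(list(enumerate(ints[:-1], 1))):
        if p[0] - i[1] != 0:
            ints.insert(index, (i[1], p[0], ''))
        p = i
    return ints

print(fill_gaps([(1, 2, 'a'), (4, 5, 'b')], 0, 10))
# [(0, 1, ''), (1, 2, 'a'), (2, 4, ''), (4, 5, 'b'), (5, 10, '')]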
def restrictCatalogToObservableSpaceMMD(self, catalog):
    """
    Retain only the catalog objects which fall within the observable
    (i.e., unmasked) space.

    Parameters:
    catalog: a Catalog object

    Returns:
    sel : boolean selection array where True means the object would be
    observable (i.e., unmasked).

    ADW: Careful, this function is fragile! The selection here should
    be the same as isochrone.observableFraction space. However, for
    technical reasons it is faster to do the calculation with
    broadcasting there.
    """
    # ADW: This creates a slope in color-magnitude space near the magnitude limit
    # i.e., if color=g-r then you can't have an object with g-r=1 and mag_r > mask_r-1
    # Depending on which is the detection band, this slope will appear at blue
    # or red colors. When it occurs at blue colors, it affects very few objects.
    # However, when occurring for red objects it can cut many objects. It is
    # unclear that this is being correctly accounted for in the likelihood

    catalog.spatialBin(self.roi)
    sel_roi = (catalog.pixel_roi_index >= 0)  # Objects outside ROI have pixel_roi_index of -1
    sel_mag_1 = catalog.mag_1 < self.mask_1.mask_roi_sparse[catalog.pixel_roi_index]
    sel_mag_2 = catalog.mag_2 < self.mask_2.mask_roi_sparse[catalog.pixel_roi_index]

    # and are located in the region of mag-mag space where background can be estimated
    sel_mmd = ugali.utils.binning.take2D(self.solid_angle_mmd,
                                         catalog.mag_2, catalog.mag_1,
                                         self.roi.bins_mag, self.roi.bins_mag) > 0.

    sel = np.all([sel_roi, sel_mag_1, sel_mag_2, sel_mmd], axis=0)
    return sel
Retain only the catalog objects which fall within the observable (i.e., unmasked) space. Parameters: catalog: a Catalog object Returns: sel : boolean selection array where True means the object would be observable (i.e., unmasked). ADW: Careful, this function is fragile! The selection here should be the same as isochrone.observableFraction space. However, for technical reasons it is faster to do the calculation with broadcasting there.
def _urlendecode(url, func):
    """Encode or decode ``url`` by applying ``func`` to all of its
    URL-encodable parts.
    """
    parsed = urlparse(url)
    for attr in URL_ENCODABLE_PARTS:
        parsed = parsed._replace(**{attr: func(getattr(parsed, attr))})
    return urlunparse(parsed)
Encode or decode ``url`` by applying ``func`` to all of its URL-encodable parts.
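A usage sketch, assuming URL_ENCODABLE_PARTS names ParseResult fields such as path, query and fragment (the actual constant is not shown in this row):

from urllib.parse import urlparse, urlunparse, quote, unquote

URL_ENCODABLE_PARTS = ('path', 'query', 'fragment')  # assumed value

url = 'http://example.com/a b?q=x y'
encoded = _urlendecode(url, quote)
print(encoded)                         # http://example.com/a%20b?q%3Dx%20y
print(_urlendecode(encoded, unquote))  # round-trips to the original URL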
def get_timedelta_str(timedelta, exclude_zeros=False):
    """
    get_timedelta_str

    Returns:
        str: timedelta_str, formatted time string

    References:
        http://stackoverflow.com/questions/8906926/formatting-python-timedelta-objects

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_time import *  # NOQA
        >>> timedelta = get_unix_timedelta(10)
        >>> timedelta_str = get_timedelta_str(timedelta)
        >>> result = (timedelta_str)
        >>> print(result)
        10 seconds
    """
    if timedelta == datetime.timedelta(0):
        return '0 seconds'
    days = timedelta.days
    hours, rem = divmod(timedelta.seconds, 3600)
    minutes, seconds = divmod(rem, 60)
    fmtstr_list = []
    fmtdict = {}

    def append_cases(unit, fmtlbl):
        if not exclude_zeros or unit != 0:
            if unit == 1:
                fmtstr_list.append('{%s} %s' % (fmtlbl, fmtlbl))
            else:
                fmtstr_list.append('{%s} %ss' % (fmtlbl, fmtlbl))
            fmtdict[fmtlbl] = unit

    if abs(days) > 0:
        append_cases(days, 'day')
    if len(fmtstr_list) > 0 or abs(hours) > 0:
        append_cases(hours, 'hour')
    if len(fmtstr_list) > 0 or abs(minutes) > 0:
        append_cases(minutes, 'minute')
    if len(fmtstr_list) > 0 or abs(seconds) > 0:
        append_cases(seconds, 'second')
    fmtstr = ' '.join(fmtstr_list)
    timedelta_str = fmtstr.format(**fmtdict)
    return timedelta_str
get_timedelta_str Returns: str: timedelta_str, formatted time string References: http://stackoverflow.com/questions/8906926/formatting-python-timedelta-objects Example: >>> # ENABLE_DOCTEST >>> from utool.util_time import * # NOQA >>> timedelta = get_unix_timedelta(10) >>> timedelta_str = get_timedelta_str(timedelta) >>> result = (timedelta_str) >>> print(result) 10 seconds
def add_widgets(self, **kwargs):
    """
    Called by the initializer to add all widgets.

    Widgets are discovered by searching through the :py:attr:`WIDGETS` class
    attribute.

    If a key in :py:attr:`WIDGETS` is also found in the keyword arguments and
    not none, the function with the name given in the value of the key will be
    called with its only argument being the value of the keyword argument.

    For more complex usage scenarios, it is also possible to override this
    method in a subclass, but the original method should always be called to
    ensure compatibility with classes relying on this feature.
    """
    for name, fname in self.WIDGETS.items():
        if name in kwargs and kwargs[name] is not None:
            assert hasattr(self, fname)
            assert callable(getattr(self, fname))
            getattr(self, fname)(kwargs[name])
Called by the initializer to add all widgets. Widgets are discovered by searching through the :py:attr:`WIDGETS` class attribute. If a key in :py:attr:`WIDGETS` is also found in the keyword arguments and not none, the function with the name given in the value of the key will be called with its only argument being the value of the keyword argument. For more complex usage scenarios, it is also possible to override this method in a subclass, but the original method should always be called to ensure compatibility with classes relying on this feature.
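A minimal, hypothetical sketch of the dispatch pattern; the class, keys and handler names are invented:

class Dialog:
    WIDGETS = {'title': 'add_title', 'body': 'add_body'}  # invented mapping

    def add_title(self, text):
        print('title widget:', text)

    def add_body(self, text):
        print('body widget:', text)

    def add_widgets(self, **kwargs):
        # Same dispatch as above, minus the asserts.
        for name, fname in self.WIDGETS.items():
            if name in kwargs and kwargs[name] is not None:
                getattr(self, fname)(kwargs[name])

Dialog().add_widgets(title='Hello', body=None)  # only the title handler fires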
def info(self):
    '''Return a nested dictionary of information related to the actor
    status and performance. The dictionary contains the following entries:

    * ``actor`` a dictionary containing information regarding the type of
      actor and its status.
    * ``events`` a dictionary of information about the :ref:`event loop
      <asyncio-event-loop>` running the actor.
    * ``extra`` the :attr:`extra` attribute (you can use it to add stuff).
    * ``system`` system info.

    This method is invoked when you run the
    :ref:`info command <actor_info_command>` from another actor.
    '''
    if not self.started():
        return
    isp = self.is_process()
    actor = {'name': self.name,
             'state': self.info_state,
             'actor_id': self.aid,
             'uptime': self._loop.time() - self._started,
             'thread_id': self.tid,
             'process_id': self.pid,
             'is_process': isp,
             'age': self.concurrency.age}
    data = {'actor': actor,
            'extra': self.extra}
    if isp:
        data['system'] = system.process_info(self.pid)
    self.event('on_info').fire(data=data)
    return data
Return a nested dictionary of information related to the actor status and performance. The dictionary contains the following entries: * ``actor`` a dictionary containing information regarding the type of actor and its status. * ``events`` a dictionary of information about the :ref:`event loop <asyncio-event-loop>` running the actor. * ``extra`` the :attr:`extra` attribute (you can use it to add stuff). * ``system`` system info. This method is invoked when you run the :ref:`info command <actor_info_command>` from another actor.
def addJunctionPos(shape, fromPos, toPos):
    """Extends shape with the given positions in case they differ from the
    existing endpoints. assumes that shape and positions have the same
    dimensionality"""
    result = list(shape)
    if fromPos != shape[0]:
        result = [fromPos] + result
    if toPos != shape[-1]:
        result.append(toPos)
    return result
Extends shape with the given positions in case they differ from the existing endpoints. assumes that shape and positions have the same dimensionality
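For example, with 2D positions, only an endpoint that differs from the existing shape is added:

shape = [(0.0, 0.0), (5.0, 0.0)]
print(addJunctionPos(shape, (0.0, 0.0), (6.0, 0.0)))
# [(0.0, 0.0), (5.0, 0.0), (6.0, 0.0)]  (fromPos already matches shape[0])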
def traceroute(target, dport=80, minttl=1, maxttl=30, sport=RandShort(), l4=None, filter=None, timeout=2, verbose=None, **kargs):  # noqa: E501
    """Instant TCP traceroute

    traceroute(target, [maxttl=30,] [dport=80,] [sport=80,] [verbose=conf.verb]) -> None  # noqa: E501
    """
    if verbose is None:
        verbose = conf.verb
    if filter is None:
        # we only consider ICMP error packets and TCP packets with at
        # least the ACK flag set *and* either the SYN or the RST flag
        # set
        filter = "(icmp and (icmp[0]=3 or icmp[0]=4 or icmp[0]=5 or icmp[0]=11 or icmp[0]=12)) or (tcp and (tcp[13] & 0x16 > 0x10))"  # noqa: E501
    if l4 is None:
        a, b = sr(IP(dst=target, id=RandShort(), ttl=(minttl, maxttl)) /
                  TCP(seq=RandInt(), sport=sport, dport=dport),
                  timeout=timeout, filter=filter, verbose=verbose, **kargs)
    else:
        # this should always work
        filter = "ip"
        a, b = sr(IP(dst=target, id=RandShort(), ttl=(minttl, maxttl)) / l4,
                  timeout=timeout, filter=filter, verbose=verbose, **kargs)

    a = TracerouteResult(a.res)
    if verbose:
        a.show()
    return a, b
Instant TCP traceroute traceroute(target, [maxttl=30,] [dport=80,] [sport=80,] [verbose=conf.verb]) -> None # noqa: E501
def getSecurityHkey(self, s):
    '''
    returns the necessary string value for an HKEY for the win32security module
    '''
    try:
        return self.hkeys_security[s]
    except KeyError:
        raise CommandExecutionError((
            'No HKEY named "{0}". It should be one of the following: {1}'
        ).format(s, ', '.join(self.hkeys_security)))
returns the necessary string value for an HKEY for the win32security module
def error_handler(exception_cls, response):
    """Handle HTTP errors by formatting into strings."""
    # Responses are sent as html. Split on the newlines and give us
    # the <p> text back.
    error = convert_response_to_text(response)
    exception = exception_cls("Response Code: %s\tResponse: %s" %
                              (response.status_code, error))
    exception.status_code = response.status_code
    raise exception
Handle HTTP errors by formatting into strings.
def to_graph(alnfname, weight_func):
    """Create a NetworkX graph from a sequence alignment.

    Nodes are string sequence IDs; edge weights are the output of weight_func
    between each pair, by default the absolute identity (# identical chars).
    """
    import networkx
    G = networkx.Graph()
    aln = AlignIO.read(alnfname, 'fasta')
    for i, arec in enumerate(aln):
        for brec in aln[i+1:]:
            ident = weight_func(str(arec.seq), str(brec.seq))
            G.add_edge(arec.id, brec.id, weight=ident)
    return G
Create a NetworkX graph from a sequence alignment. Nodes are string sequence IDs; edge weights are the output of weight_func between each pair, by default the absolute identity (# identical chars).
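A weight_func matching the docstring's "absolute identity" (count of positions where both sequences carry the same character) might look like this sketch:

def absolute_identity(seq_a, seq_b):
    # Count aligned positions where the two sequences agree.
    return sum(a == b for a, b in zip(seq_a, seq_b))

# G = to_graph('alignment.fasta', absolute_identity)  # hypothetical file name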
def screenshot(url, *args, **kwargs):
    """
    Call PhantomJS with the specified flags and options.
    """
    phantomscript = os.path.join(os.path.dirname(__file__),
                                 'take_screenshot.js')
    directory = kwargs.get('save_dir', '/tmp')
    image_name = kwargs.get('image_name', None) or _image_name_from_url(url)
    ext = kwargs.get('format', 'png').lower()
    save_path = os.path.join(directory, image_name) + '.' + ext
    crop_to_visible = kwargs.get('crop_to_visible', False)

    cmd_args = [
        'phantomjs',
        '--ssl-protocol=any',
        phantomscript,
        url,
        '--width', str(kwargs['width']),
        '--height', str(kwargs['height']),
        '--useragent', str(kwargs['user_agent']),
        '--dir', directory,
        '--ext', ext,
        '--name', str(image_name),
    ]
    if crop_to_visible:
        cmd_args.append('--croptovisible')

    # TODO:
    # - quality
    # - renderafter
    # - maxexecutiontime
    # - resourcetimeout

    output = subprocess.Popen(cmd_args,
                              stdout=subprocess.PIPE).communicate()[0]
    return Screenshot(save_path, directory, image_name + '.' + ext, ext)
Call PhantomJS with the specified flags and options.
def nodes_with_role(rolename):
    """Configures a list of nodes that have the given role in their run list"""
    nodes = [n['name'] for n in
             lib.get_nodes_with_role(rolename, env.chef_environment)]
    if not len(nodes):
        print("No nodes found with role '{0}'".format(rolename))
        sys.exit(0)
    return node(*nodes)
Configures a list of nodes that have the given role in their run list
def _snapshot_to_data(snapshot):
    '''
    Returns snapshot data from a D-Bus response.

    A snapshot D-Bus response is a dbus.Struct containing the information
    related to a snapshot:

    [id, type, pre_snapshot, timestamp, user, description,
     cleanup_algorithm, userdata]

    id: dbus.UInt32
    type: dbus.UInt16
    pre_snapshot: dbus.UInt32
    timestamp: dbus.Int64
    user: dbus.UInt32
    description: dbus.String
    cleanup_algorithm: dbus.String
    userdata: dbus.Dictionary
    '''
    data = {}

    data['id'] = snapshot[0]
    data['type'] = ['single', 'pre', 'post'][snapshot[1]]
    if data['type'] == 'post':
        data['pre'] = snapshot[2]

    if snapshot[3] != -1:
        data['timestamp'] = snapshot[3]
    else:
        data['timestamp'] = int(time.time())

    data['user'] = getpwuid(snapshot[4])[0]
    data['description'] = snapshot[5]
    data['cleanup'] = snapshot[6]

    data['userdata'] = {}
    for key, value in snapshot[7].items():
        data['userdata'][key] = value

    return data
Returns snapshot data from a D-Bus response. A snapshot D-Bus response is a dbus.Struct containing the information related to a snapshot: [id, type, pre_snapshot, timestamp, user, description, cleanup_algorithm, userdata] id: dbus.UInt32 type: dbus.UInt16 pre_snapshot: dbus.UInt32 timestamp: dbus.Int64 user: dbus.UInt32 description: dbus.String cleanup_algorithm: dbus.String userdata: dbus.Dictionary
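A toy invocation with a plain tuple standing in for the dbus.Struct, using the field order documented above (getpwuid needs a Unix system; values invented):

snap = (42, 1, 0, -1, 0, 'before upgrade', 'number', {'important': 'yes'})
print(_snapshot_to_data(snap))
# {'id': 42, 'type': 'pre', 'timestamp': <current unix time>, 'user': 'root',
#  'description': 'before upgrade', 'cleanup': 'number',
#  'userdata': {'important': 'yes'}}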
def create_index(self, fasta_dir=None, index_dir=None):
    """Index all fasta-files in fasta_dir (one sequence per file!) and store
    the results in index_dir"""
    # Use default directories if they are not supplied
    if not fasta_dir:
        fasta_dir = self.fasta_dir
    if not index_dir:
        index_dir = self.index_dir

    # Can't continue if we still don't have an index_dir or fasta_dir
    if not fasta_dir:
        print("fasta_dir not defined!")
        sys.exit(1)
    if not index_dir:
        print("index_dir not defined!")
        sys.exit(1)

    index_dir = os.path.abspath(index_dir)
    fasta_dir = os.path.abspath(fasta_dir)
    self.index_dir = index_dir

    # Prepare index directory
    if not os.path.exists(index_dir):
        try:
            os.mkdir(index_dir)
        except OSError as e:
            if e.args[0] == 13:
                sys.stderr.write("No permission to create index directory. Superuser access needed?\n")
                sys.exit()
            else:
                sys.stderr.write(e)

    # Directories need to exist
    self._check_dir(fasta_dir)
    self._check_dir(index_dir)

    # Get all fasta-files
    fastafiles = find_by_ext(fasta_dir, FASTA_EXT)
    if not fastafiles:
        msg = "No fastafiles found in {} with extension in {}".format(
            fasta_dir, ",".join(FASTA_EXT))
        raise IOError(msg)

    # param_file will hold all the information about the location of the
    # fasta-files, indices and length of the sequences
    param_file = os.path.join(index_dir, self.param_file)
    size_file = os.path.join(index_dir, self.size_file)
    try:
        out = open(param_file, "w")
    except IOError as e:
        if e.args[0] == 13:
            sys.stderr.write("No permission to create files in index directory. Superuser access needed?\n")
            sys.exit()
        else:
            sys.stderr.write(e)
    s_out = open(size_file, "w")

    for fasta_file in fastafiles:
        # sys.stderr.write("Indexing %s\n" % fasta_file)
        f = open(fasta_file)
        line = f.readline()
        if not line.startswith(">"):
            sys.stderr.write("%s is not a valid FASTA file, expected > at first line\n" % fasta_file)
            sys.exit()
        seqname = line.strip().replace(">", "")

        line = f.readline()
        line_size = len(line.strip())
        total_size = 0
        while line:
            line = line.strip()
            if line.startswith(">"):
                sys.stderr.write("Sorry, can only index genomes with "
                                 "one sequence per FASTA file\n%s contains multiple "
                                 "sequences\n" % fasta_file)
                sys.exit()
            total_size += len(line)
            line = f.readline()

        index_file = os.path.join(index_dir, "%s.index" % seqname)
        out.write("{}\t{}\t{}\t{}\t{}\n".format(
            seqname, fasta_file, index_file, line_size, total_size))
        s_out.write("{}\t{}\n".format(seqname, total_size))
        self._make_index(fasta_file, index_file)
        f.close()

    out.close()
    s_out.close()

    # Read the index we just made so we can immediately use it
    self._read_index_file()
Index all fasta-files in fasta_dir (one sequence per file!) and store the results in index_dir
def cmp(self,range2,overlap_size=0):
    """the comparator for ranges

    * return 1 if greater than range2
    * return -1 if less than range2
    * return 0 if overlapped

    :param range2:
    :param overlap_size: allow some padding for an 'equal' comparison (default 0)
    :type range2: GenomicRange
    :type overlap_size: int
    """
    if self.overlaps(range2,padding=overlap_size): return 0
    if self.chr < range2.chr: return -1
    elif self.chr > range2.chr: return 1
    elif self.end < range2.start: return -1
    elif self.start > range2.end: return 1
    sys.stderr.write("ERROR: cmp function unexpected state\n")
    sys.exit()
    return 0
the comparator for ranges

* return 1 if greater than range2
* return -1 if less than range2
* return 0 if overlapped

:param range2:
:param overlap_size: allow some padding for an 'equal' comparison (default 0)
:type range2: GenomicRange
:type overlap_size: int
Below is the the instruction that describes the task:
### Input:
the comparator for ranges

* return 1 if greater than range2
* return -1 if less than range2
* return 0 if overlapped

:param range2:
:param overlap_size: allow some padding for an 'equal' comparison (default 0)
:type range2: GenomicRange
:type overlap_size: int
### Response:
def cmp(self,range2,overlap_size=0):
    """the comparator for ranges

    * return 1 if greater than range2
    * return -1 if less than range2
    * return 0 if overlapped

    :param range2:
    :param overlap_size: allow some padding for an 'equal' comparison (default 0)
    :type range2: GenomicRange
    :type overlap_size: int
    """
    if self.overlaps(range2,padding=overlap_size): return 0
    if self.chr < range2.chr: return -1
    elif self.chr > range2.chr: return 1
    elif self.end < range2.start: return -1
    elif self.start > range2.end: return 1
    sys.stderr.write("ERROR: cmp function unexpected state\n")
    sys.exit()
    return 0
def get_rotated(self, angle): """Rotates this vector through the given anti-clockwise angle in radians.""" ca = math.cos(angle) sa = math.sin(angle) return Point(self.x*ca-self.y*sa, self.x*sa+self.y*ca)
Rotates this vector through the given anti-clockwise angle in radians.
Below is the the instruction that describes the task: ### Input: Rotates this vector through the given anti-clockwise angle in radians. ### Response: def get_rotated(self, angle): """Rotates this vector through the given anti-clockwise angle in radians.""" ca = math.cos(angle) sa = math.sin(angle) return Point(self.x*ca-self.y*sa, self.x*sa+self.y*ca)
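A quick numeric check of the rotation formula above: a 90-degree anti-clockwise turn should map (1, 0) to (0, 1), up to floating-point noise:

import math

x, y = 1.0, 0.0
angle = math.pi / 2  # 90 degrees anti-clockwise
ca, sa = math.cos(angle), math.sin(angle)
print(round(x * ca - y * sa, 9), round(x * sa + y * ca, 9))  # 0.0 1.0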
def get_followers(self): """ https://vk.com/dev/users.getFollowers """ response = self._session.fetch_items("users.getFollowers", self.from_json, self._session, count=1000, user_id=self.id, fields=self.USER_FIELDS) return response
https://vk.com/dev/users.getFollowers
Below is the the instruction that describes the task: ### Input: https://vk.com/dev/users.getFollowers ### Response: def get_followers(self): """ https://vk.com/dev/users.getFollowers """ response = self._session.fetch_items("users.getFollowers", self.from_json, self._session, count=1000, user_id=self.id, fields=self.USER_FIELDS) return response
def logs(self): """returns an object to work with the site logs""" if self._resources is None: self.__init() if "logs" in self._resources: url = self._url + "/logs" return _logs.Log(url=url, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port, initialize=True) else: return None
returns an object to work with the site logs
Below is the the instruction that describes the task: ### Input: returns an object to work with the site logs ### Response: def logs(self): """returns an object to work with the site logs""" if self._resources is None: self.__init() if "logs" in self._resources: url = self._url + "/logs" return _logs.Log(url=url, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port, initialize=True) else: return None
def prepend_scheme_if_needed(url, new_scheme): """Given a URL that may or may not have a scheme, prepend the given scheme. Does not replace a present scheme with the one provided as an argument. :rtype: str """ scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme) # urlparse is a finicky beast, and sometimes decides that there isn't a # netloc present. Assume that it's being over-cautious, and switch netloc # and path if urlparse decided there was no netloc. if not netloc: netloc, path = path, netloc return urlunparse((scheme, netloc, path, params, query, fragment))
Given a URL that may or may not have a scheme, prepend the given scheme. Does not replace a present scheme with the one provided as an argument. :rtype: str
Below is the the instruction that describes the task: ### Input: Given a URL that may or may not have a scheme, prepend the given scheme. Does not replace a present scheme with the one provided as an argument. :rtype: str ### Response: def prepend_scheme_if_needed(url, new_scheme): """Given a URL that may or may not have a scheme, prepend the given scheme. Does not replace a present scheme with the one provided as an argument. :rtype: str """ scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme) # urlparse is a finicky beast, and sometimes decides that there isn't a # netloc present. Assume that it's being over-cautious, and switch netloc # and path if urlparse decided there was no netloc. if not netloc: netloc, path = path, netloc return urlunparse((scheme, netloc, path, params, query, fragment))
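The netloc/path quirk the comment describes is easy to reproduce with only the standard library; this sketch mirrors the swap the function performs (exact urlparse behavior can vary slightly across Python versions):

from urllib.parse import urlparse, urlunparse

scheme, netloc, path, params, query, fragment = urlparse("example.com/path", "http")
print(repr(netloc), repr(path))  # '' 'example.com/path'
if not netloc:
    netloc, path = path, netloc
print(urlunparse((scheme, netloc, path, params, query, fragment)))  # http://example.com/path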
def index(request, obj_id): """Handles a request based on method and calls the appropriate function""" if request.method == 'GET': return get(request, obj_id) elif request.method == 'PUT': getPutData(request) return put(request, obj_id)
Handles a request based on method and calls the appropriate function
Below is the the instruction that describes the task: ### Input: Handles a request based on method and calls the appropriate function ### Response: def index(request, obj_id): """Handles a request based on method and calls the appropriate function""" if request.method == 'GET': return get(request, obj_id) elif request.method == 'PUT': getPutData(request) return put(request, obj_id)
def Merge(text, message, allow_unknown_extension=False,
          allow_field_number=False):
  """Parses a text representation of a protocol message into a message.

  Like Parse(), but allows repeated values for a non-repeated field, and uses
  the last one.

  Args:
    text: Message text representation.
    message: A protocol buffer message to merge into.
    allow_unknown_extension: if True, skip over missing extensions and keep
      parsing
    allow_field_number: if True, both field number and field name are allowed.

  Returns:
    The same message passed as argument.

  Raises:
    ParseError: On text parsing problems.
  """
  return MergeLines(text.split('\n'), message, allow_unknown_extension,
                    allow_field_number)
Parses a text representation of a protocol message into a message.

Like Parse(), but allows repeated values for a non-repeated field, and uses
the last one.

Args:
  text: Message text representation.
  message: A protocol buffer message to merge into.
  allow_unknown_extension: if True, skip over missing extensions and keep
    parsing
  allow_field_number: if True, both field number and field name are allowed.

Returns:
  The same message passed as argument.

Raises:
  ParseError: On text parsing problems.
Below is the the instruction that describes the task:
### Input:
Parses a text representation of a protocol message into a message.

Like Parse(), but allows repeated values for a non-repeated field, and uses
the last one.

Args:
  text: Message text representation.
  message: A protocol buffer message to merge into.
  allow_unknown_extension: if True, skip over missing extensions and keep
    parsing
  allow_field_number: if True, both field number and field name are allowed.

Returns:
  The same message passed as argument.

Raises:
  ParseError: On text parsing problems.
### Response:
def Merge(text, message, allow_unknown_extension=False,
          allow_field_number=False):
  """Parses a text representation of a protocol message into a message.

  Like Parse(), but allows repeated values for a non-repeated field, and uses
  the last one.

  Args:
    text: Message text representation.
    message: A protocol buffer message to merge into.
    allow_unknown_extension: if True, skip over missing extensions and keep
      parsing
    allow_field_number: if True, both field number and field name are allowed.

  Returns:
    The same message passed as argument.

  Raises:
    ParseError: On text parsing problems.
  """
  return MergeLines(text.split('\n'), message, allow_unknown_extension,
                    allow_field_number)
def get_center_of_mass(image): """ Compute an image center of mass in physical space which is defined as the mean of the intensity weighted voxel coordinate system. ANTsR function: `getCenterOfMass` Arguments --------- image : ANTsImage image from which center of mass will be computed Returns ------- scalar Example ------- >>> fi = ants.image_read( ants.get_ants_data("r16")) >>> com1 = ants.get_center_of_mass( fi ) >>> fi = ants.image_read( ants.get_ants_data("r64")) >>> com2 = ants.get_center_of_mass( fi ) """ if image.pixeltype != 'float': image = image.clone('float') libfn = utils.get_lib_fn('centerOfMass%s' % image._libsuffix) com = libfn(image.pointer) return tuple(com)
Compute an image center of mass in physical space which is defined as the mean of the intensity weighted voxel coordinate system. ANTsR function: `getCenterOfMass` Arguments --------- image : ANTsImage image from which center of mass will be computed Returns ------- scalar Example ------- >>> fi = ants.image_read( ants.get_ants_data("r16")) >>> com1 = ants.get_center_of_mass( fi ) >>> fi = ants.image_read( ants.get_ants_data("r64")) >>> com2 = ants.get_center_of_mass( fi )
Below is the the instruction that describes the task: ### Input: Compute an image center of mass in physical space which is defined as the mean of the intensity weighted voxel coordinate system. ANTsR function: `getCenterOfMass` Arguments --------- image : ANTsImage image from which center of mass will be computed Returns ------- scalar Example ------- >>> fi = ants.image_read( ants.get_ants_data("r16")) >>> com1 = ants.get_center_of_mass( fi ) >>> fi = ants.image_read( ants.get_ants_data("r64")) >>> com2 = ants.get_center_of_mass( fi ) ### Response: def get_center_of_mass(image): """ Compute an image center of mass in physical space which is defined as the mean of the intensity weighted voxel coordinate system. ANTsR function: `getCenterOfMass` Arguments --------- image : ANTsImage image from which center of mass will be computed Returns ------- scalar Example ------- >>> fi = ants.image_read( ants.get_ants_data("r16")) >>> com1 = ants.get_center_of_mass( fi ) >>> fi = ants.image_read( ants.get_ants_data("r64")) >>> com2 = ants.get_center_of_mass( fi ) """ if image.pixeltype != 'float': image = image.clone('float') libfn = utils.get_lib_fn('centerOfMass%s' % image._libsuffix) com = libfn(image.pointer) return tuple(com)
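The definition in the docstring, the mean of intensity-weighted voxel coordinates, can be reproduced in plain NumPy on a toy 2-D image; this illustrates the formula only and says nothing about ANTs internals:

import numpy as np

img = np.zeros((5, 5))
img[1, 1] = 1.0
img[3, 3] = 3.0  # the brighter voxel pulls the centroid toward (3, 3)

coords = np.indices(img.shape).reshape(2, -1)  # row/col index of every voxel
weights = img.ravel()
print((coords * weights).sum(axis=1) / weights.sum())  # [2.5 2.5]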
def forwards(apps, schema_editor): """ Having added the new 'exhibition' Work type, we're going to assume that every Event of type 'museum' should actually have one Exhibition attached. So, we'll add one, with the same title as the Event. And we'll move all Creators from the Event to the Exhibition. """ Event = apps.get_model('spectator_events', 'Event') Work = apps.get_model('spectator_events', 'Work') WorkRole = apps.get_model('spectator_events', 'WorkRole') WorkSelection = apps.get_model('spectator_events', 'WorkSelection') for event in Event.objects.filter(kind='museum'): # Create a new Work based on this Event's details. work = Work.objects.create( kind='exhibition', title=event.title, title_sort=event.title_sort ) # This doesn't generate the slug field automatically because Django. # So we'll have to do it manually. Graarhhh. work.slug = generate_slug(work.pk) work.save() # Associate the new Work with the Event. WorkSelection.objects.create( event=event, work=work ) # Associate any Creators on the Event with the new Work. for role in event.roles.all(): WorkRole.objects.create( creator=role.creator, work=work, role_name=role.role_name, role_order=role.role_order ) # Remove Creators from the Event. role.delete()
Having added the new 'exhibition' Work type, we're going to assume that every Event of type 'museum' should actually have one Exhibition attached. So, we'll add one, with the same title as the Event. And we'll move all Creators from the Event to the Exhibition.
Below is the the instruction that describes the task: ### Input: Having added the new 'exhibition' Work type, we're going to assume that every Event of type 'museum' should actually have one Exhibition attached. So, we'll add one, with the same title as the Event. And we'll move all Creators from the Event to the Exhibition. ### Response: def forwards(apps, schema_editor): """ Having added the new 'exhibition' Work type, we're going to assume that every Event of type 'museum' should actually have one Exhibition attached. So, we'll add one, with the same title as the Event. And we'll move all Creators from the Event to the Exhibition. """ Event = apps.get_model('spectator_events', 'Event') Work = apps.get_model('spectator_events', 'Work') WorkRole = apps.get_model('spectator_events', 'WorkRole') WorkSelection = apps.get_model('spectator_events', 'WorkSelection') for event in Event.objects.filter(kind='museum'): # Create a new Work based on this Event's details. work = Work.objects.create( kind='exhibition', title=event.title, title_sort=event.title_sort ) # This doesn't generate the slug field automatically because Django. # So we'll have to do it manually. Graarhhh. work.slug = generate_slug(work.pk) work.save() # Associate the new Work with the Event. WorkSelection.objects.create( event=event, work=work ) # Associate any Creators on the Event with the new Work. for role in event.roles.all(): WorkRole.objects.create( creator=role.creator, work=work, role_name=role.role_name, role_order=role.role_order ) # Remove Creators from the Event. role.delete()
def read_ambient(self, timeout_sec=1): """Read the ambient light sensor and return it as an unsigned 16-bit value. """ # Clear any interrupts. self._clear_interrupt(VCNL4010_INT_ALS_READY) # Call base class read_ambient and return result. return super(VCNL4010, self).read_ambient(timeout_sec)
Read the ambient light sensor and return it as an unsigned 16-bit value.
Below is the the instruction that describes the task: ### Input: Read the ambient light sensor and return it as an unsigned 16-bit value. ### Response: def read_ambient(self, timeout_sec=1): """Read the ambient light sensor and return it as an unsigned 16-bit value. """ # Clear any interrupts. self._clear_interrupt(VCNL4010_INT_ALS_READY) # Call base class read_ambient and return result. return super(VCNL4010, self).read_ambient(timeout_sec)
def visit_Compare(self, node):
    """ Booleans are possible indices.

    >>> import gast as ast
    >>> from pythran import passmanager, backend
    >>> node = ast.parse('''
    ... def foo():
    ...     a = 2 or 3
    ...     b = 4 or 5
    ...     c = a < b
    ...     d = b < 3
    ...     e = b == 4''')
    >>> pm = passmanager.PassManager("test")
    >>> res = pm.gather(RangeValues, node)
    >>> res['c']
    Interval(low=1, high=1)
    >>> res['d']
    Interval(low=0, high=0)
    >>> res['e']
    Interval(low=0, high=1)
    """
    if any(isinstance(op, (ast.In, ast.NotIn, ast.Is, ast.IsNot))
           for op in node.ops):
        self.generic_visit(node)
        return self.add(node, Interval(0, 1))

    curr = self.visit(node.left)
    res = []
    for op, comparator in zip(node.ops, node.comparators):
        comparator = self.visit(comparator)
        fake = ast.Compare(ast.Name('x', ast.Load(), None),
                           [op],
                           [ast.Name('y', ast.Load(), None)])
        fake = ast.Expression(fake)
        ast.fix_missing_locations(fake)
        expr = compile(ast.gast_to_ast(fake), '<range_values>', 'eval')
        res.append(eval(expr, {'x': curr, 'y': comparator}))
    if all(res):
        return self.add(node, Interval(1, 1))
    elif any(r.low == r.high == 0 for r in res):
        return self.add(node, Interval(0, 0))
    else:
        return self.add(node, Interval(0, 1))
Booleans are possible indices.

>>> import gast as ast
>>> from pythran import passmanager, backend
>>> node = ast.parse('''
... def foo():
...     a = 2 or 3
...     b = 4 or 5
...     c = a < b
...     d = b < 3
...     e = b == 4''')
>>> pm = passmanager.PassManager("test")
>>> res = pm.gather(RangeValues, node)
>>> res['c']
Interval(low=1, high=1)
>>> res['d']
Interval(low=0, high=0)
>>> res['e']
Interval(low=0, high=1)
Below is the the instruction that describes the task:
### Input:
Booleans are possible indices.

>>> import gast as ast
>>> from pythran import passmanager, backend
>>> node = ast.parse('''
... def foo():
...     a = 2 or 3
...     b = 4 or 5
...     c = a < b
...     d = b < 3
...     e = b == 4''')
>>> pm = passmanager.PassManager("test")
>>> res = pm.gather(RangeValues, node)
>>> res['c']
Interval(low=1, high=1)
>>> res['d']
Interval(low=0, high=0)
>>> res['e']
Interval(low=0, high=1)
### Response:
def visit_Compare(self, node):
    """ Booleans are possible indices.

    >>> import gast as ast
    >>> from pythran import passmanager, backend
    >>> node = ast.parse('''
    ... def foo():
    ...     a = 2 or 3
    ...     b = 4 or 5
    ...     c = a < b
    ...     d = b < 3
    ...     e = b == 4''')
    >>> pm = passmanager.PassManager("test")
    >>> res = pm.gather(RangeValues, node)
    >>> res['c']
    Interval(low=1, high=1)
    >>> res['d']
    Interval(low=0, high=0)
    >>> res['e']
    Interval(low=0, high=1)
    """
    if any(isinstance(op, (ast.In, ast.NotIn, ast.Is, ast.IsNot))
           for op in node.ops):
        self.generic_visit(node)
        return self.add(node, Interval(0, 1))

    curr = self.visit(node.left)
    res = []
    for op, comparator in zip(node.ops, node.comparators):
        comparator = self.visit(comparator)
        fake = ast.Compare(ast.Name('x', ast.Load(), None),
                           [op],
                           [ast.Name('y', ast.Load(), None)])
        fake = ast.Expression(fake)
        ast.fix_missing_locations(fake)
        expr = compile(ast.gast_to_ast(fake), '<range_values>', 'eval')
        res.append(eval(expr, {'x': curr, 'y': comparator}))
    if all(res):
        return self.add(node, Interval(1, 1))
    elif any(r.low == r.high == 0 for r in res):
        return self.add(node, Interval(0, 0))
    else:
        return self.add(node, Interval(0, 1))
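The doctest above rests on interval arithmetic for comparisons; a stripped-down stand-in (not pythran's real Interval class) shows how x < y can collapse to a definite truth value or stay uncertain:

from collections import namedtuple

Interval = namedtuple("Interval", "low high")

def interval_lt(x, y):
    """Interval version of x < y: certainly true, certainly false, or unknown."""
    if x.high < y.low:
        return Interval(1, 1)  # always true
    if x.low >= y.high:
        return Interval(0, 0)  # never true
    return Interval(0, 1)      # depends on the concrete values

print(interval_lt(Interval(2, 3), Interval(4, 5)))  # Interval(low=1, high=1)
print(interval_lt(Interval(4, 5), Interval(3, 3)))  # Interval(low=0, high=0)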
def omit(self): """The test startup duration to omit in seconds.""" self._omit = self.lib.iperf_get_test_omit(self._test) return self._omit
The test startup duration to omit in seconds.
Below is the the instruction that describes the task: ### Input: The test startup duration to omit in seconds. ### Response: def omit(self): """The test startup duration to omit in seconds.""" self._omit = self.lib.iperf_get_test_omit(self._test) return self._omit
def get_sdc_by_id(self, id):
    """
    Get ScaleIO SDC object by its id
    :param id: id of SDC
    :return: ScaleIO SDC object
    :raise KeyError: No SDC with specified id found
    :rtype: SDC object
    """
    for sdc in self.sdc:
        if sdc.id == id:
            return sdc
    raise KeyError("SDC with that ID not found")
Get ScaleIO SDC object by its id
:param id: id of SDC
:return: ScaleIO SDC object
:raise KeyError: No SDC with specified id found
:rtype: SDC object
Below is the the instruction that describes the task:
### Input:
Get ScaleIO SDC object by its id
:param id: id of SDC
:return: ScaleIO SDC object
:raise KeyError: No SDC with specified id found
:rtype: SDC object
### Response:
def get_sdc_by_id(self, id):
    """
    Get ScaleIO SDC object by its id
    :param id: id of SDC
    :return: ScaleIO SDC object
    :raise KeyError: No SDC with specified id found
    :rtype: SDC object
    """
    for sdc in self.sdc:
        if sdc.id == id:
            return sdc
    raise KeyError("SDC with that ID not found")
def add(self, now, num):
    """ Add a timestamp and value to the data """
    if num == 0:
        return
    self.points.append((now, num))
Add a timestamp and value to the data
Below is the the instruction that describes the task:
### Input:
Add a timestamp and value to the data
### Response:
def add(self, now, num):
    """ Add a timestamp and value to the data """
    if num == 0:
        return
    self.points.append((now, num))
def read_settings(self): """ Read the "dsbfile" file Populates `self.settings` """ logger.debug('Reading settings from: %s', self.settings_path) self.settings = Settings.from_dsbfile(self.settings_path)
Read the "dsbfile" file Populates `self.settings`
Below is the the instruction that describes the task: ### Input: Read the "dsbfile" file Populates `self.settings` ### Response: def read_settings(self): """ Read the "dsbfile" file Populates `self.settings` """ logger.debug('Reading settings from: %s', self.settings_path) self.settings = Settings.from_dsbfile(self.settings_path)
def parse_atoms(self, pdb): """Parse the ATOM entries into the object""" atomre = re.compile("ATOM") atomlines = [line for line in pdb.lines if atomre.match(line)] chainresnums = {} for line in atomlines: chain = line[21] resname = line[17:20] resnum = line[22:27] #print resnum chainresnums.setdefault(chain, []) if resnum in chainresnums[chain]: assert self[chain][chainresnums[chain].index(resnum)] == resname else: if resnum[-1] == ' ': self.setdefault(chain, []) self[chain] += [resname] chainresnums[chain] += [resnum] return chainresnums
Parse the ATOM entries into the object
Below is the the instruction that describes the task: ### Input: Parse the ATOM entries into the object ### Response: def parse_atoms(self, pdb): """Parse the ATOM entries into the object""" atomre = re.compile("ATOM") atomlines = [line for line in pdb.lines if atomre.match(line)] chainresnums = {} for line in atomlines: chain = line[21] resname = line[17:20] resnum = line[22:27] #print resnum chainresnums.setdefault(chain, []) if resnum in chainresnums[chain]: assert self[chain][chainresnums[chain].index(resnum)] == resname else: if resnum[-1] == ' ': self.setdefault(chain, []) self[chain] += [resname] chainresnums[chain] += [resnum] return chainresnums
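The fixed-column slices in the parser follow the PDB ATOM record layout; one synthetic line (coordinates invented) makes the offsets concrete:

line = ("ATOM      1  CA  ALA A  12      11.104  13.207   2.100"
        "  1.00 20.00           C")
print(repr(line[21]))     # 'A'     chain identifier
print(repr(line[17:20]))  # 'ALA'   residue name
print(repr(line[22:27]))  # '  12 ' residue number plus insertion code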
def discharge(self): """Discharge per unit length""" Q = np.zeros(self.aq.naq) Q[self.layers] = self.parameters[:, 0] return Q
Discharge per unit length
Below is the the instruction that describes the task: ### Input: Discharge per unit length ### Response: def discharge(self): """Discharge per unit length""" Q = np.zeros(self.aq.naq) Q[self.layers] = self.parameters[:, 0] return Q
def subseq(self, start, end): '''Returns Fastq object with the same name, of the bases from start to end, but not including end''' return Fastq(self.id, self.seq[start:end], self.qual[start:end])
Returns Fastq object with the same name, of the bases from start to end, but not including end
Below is the the instruction that describes the task: ### Input: Returns Fastq object with the same name, of the bases from start to end, but not including end ### Response: def subseq(self, start, end): '''Returns Fastq object with the same name, of the bases from start to end, but not including end''' return Fastq(self.id, self.seq[start:end], self.qual[start:end])
def attr_case_name(self, name): """Returns preserved case name for case insensitive value of name. Checks first within standard attributes. If not found there, checks attributes for higher order data structures. If not found, returns supplied name as it is available for use. Intended to be used to help ensure that the same case is applied to all repetitions of a given variable name. Parameters ---------- name : str name of variable to get stored case form Returns ------- str name in proper case """ lower_name = name.lower() for i in self.attrs(): if lower_name == i.lower(): return i # check if attribute present in higher order structures for key in self.keys_nD(): for i in self[key].children.attrs(): if lower_name == i.lower(): return i # nothing was found if still here # pass name back, free to be whatever return name
Returns preserved case name for case insensitive value of name. Checks first within standard attributes. If not found there, checks attributes for higher order data structures. If not found, returns supplied name as it is available for use. Intended to be used to help ensure that the same case is applied to all repetitions of a given variable name. Parameters ---------- name : str name of variable to get stored case form Returns ------- str name in proper case
Below is the the instruction that describes the task: ### Input: Returns preserved case name for case insensitive value of name. Checks first within standard attributes. If not found there, checks attributes for higher order data structures. If not found, returns supplied name as it is available for use. Intended to be used to help ensure that the same case is applied to all repetitions of a given variable name. Parameters ---------- name : str name of variable to get stored case form Returns ------- str name in proper case ### Response: def attr_case_name(self, name): """Returns preserved case name for case insensitive value of name. Checks first within standard attributes. If not found there, checks attributes for higher order data structures. If not found, returns supplied name as it is available for use. Intended to be used to help ensure that the same case is applied to all repetitions of a given variable name. Parameters ---------- name : str name of variable to get stored case form Returns ------- str name in proper case """ lower_name = name.lower() for i in self.attrs(): if lower_name == i.lower(): return i # check if attribute present in higher order structures for key in self.keys_nD(): for i in self[key].children.attrs(): if lower_name == i.lower(): return i # nothing was found if still here # pass name back, free to be whatever return name
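The case-preserving lookup is a plain linear scan; a stand-alone version over a list of known names behaves the same way for the flat case (the sample names are invented):

def case_name(name, known):
    """Return the stored spelling of name, or name itself if unknown."""
    lower = name.lower()
    for candidate in known:
        if candidate.lower() == lower:
            return candidate
    return name

known = ["Epoch", "Units_Label"]
print(case_name("units_label", known))  # Units_Label
print(case_name("brand_new", known))    # brand_new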
def get_archives(self, title_id, language_code): """Get the archive list from a given `title_id` and `language_code`. :param int title_id: title id. :param int language_code: language code. :return: the archives. :rtype: list of :class:`LegendasTVArchive` """ logger.info('Getting archives for title %d and language %d', title_id, language_code) archives = [] page = 1 while True: # get the archive page url = self.server_url + 'util/carrega_legendas_busca_filme/{title}/{language}/-/{page}'.format( title=title_id, language=language_code, page=page) r = self.session.get(url) r.raise_for_status() # parse the results soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser']) for archive_soup in soup.select('div.list_element > article > div'): # create archive archive = LegendasTVArchive(archive_soup.a['href'].split('/')[2], archive_soup.a.text, 'pack' in archive_soup['class'], 'destaque' in archive_soup['class'], self.server_url + archive_soup.a['href'][1:]) # extract text containing downloads, rating and timestamp data_text = archive_soup.find('p', class_='data').text # match downloads archive.downloads = int(downloads_re.search(data_text).group('downloads')) # match rating match = rating_re.search(data_text) if match: archive.rating = int(match.group('rating')) # match timestamp and validate it time_data = {k: int(v) for k, v in timestamp_re.search(data_text).groupdict().items()} archive.timestamp = pytz.timezone('America/Sao_Paulo').localize(datetime(**time_data)) if archive.timestamp > datetime.utcnow().replace(tzinfo=pytz.utc): raise ProviderError('Archive timestamp is in the future') # add archive archives.append(archive) # stop on last page if soup.find('a', attrs={'class': 'load_more'}, string='carregar mais') is None: break # increment page count page += 1 logger.debug('Found %d archives', len(archives)) return archives
Get the archive list from a given `title_id` and `language_code`. :param int title_id: title id. :param int language_code: language code. :return: the archives. :rtype: list of :class:`LegendasTVArchive`
Below is the the instruction that describes the task: ### Input: Get the archive list from a given `title_id` and `language_code`. :param int title_id: title id. :param int language_code: language code. :return: the archives. :rtype: list of :class:`LegendasTVArchive` ### Response: def get_archives(self, title_id, language_code): """Get the archive list from a given `title_id` and `language_code`. :param int title_id: title id. :param int language_code: language code. :return: the archives. :rtype: list of :class:`LegendasTVArchive` """ logger.info('Getting archives for title %d and language %d', title_id, language_code) archives = [] page = 1 while True: # get the archive page url = self.server_url + 'util/carrega_legendas_busca_filme/{title}/{language}/-/{page}'.format( title=title_id, language=language_code, page=page) r = self.session.get(url) r.raise_for_status() # parse the results soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser']) for archive_soup in soup.select('div.list_element > article > div'): # create archive archive = LegendasTVArchive(archive_soup.a['href'].split('/')[2], archive_soup.a.text, 'pack' in archive_soup['class'], 'destaque' in archive_soup['class'], self.server_url + archive_soup.a['href'][1:]) # extract text containing downloads, rating and timestamp data_text = archive_soup.find('p', class_='data').text # match downloads archive.downloads = int(downloads_re.search(data_text).group('downloads')) # match rating match = rating_re.search(data_text) if match: archive.rating = int(match.group('rating')) # match timestamp and validate it time_data = {k: int(v) for k, v in timestamp_re.search(data_text).groupdict().items()} archive.timestamp = pytz.timezone('America/Sao_Paulo').localize(datetime(**time_data)) if archive.timestamp > datetime.utcnow().replace(tzinfo=pytz.utc): raise ProviderError('Archive timestamp is in the future') # add archive archives.append(archive) # stop on last page if soup.find('a', attrs={'class': 'load_more'}, string='carregar mais') is None: break # increment page count page += 1 logger.debug('Found %d archives', len(archives)) return archives
def get_bdstoken(cookie):
    '''Get bdstoken and other tokens from the /disk/home page.

    These tokens are essential for the requests that follow.
    '''
    url = const.PAN_REFERER
    req = net.urlopen(url, headers={'Cookie': cookie.header_output()})
    if req:
        return parse_bdstoken(req.data.decode())
    else:
        return None
Get bdstoken and other tokens from the /disk/home page.

These tokens are essential for the requests that follow.
Below is the the instruction that describes the task:
### Input:
Get bdstoken and other tokens from the /disk/home page.

These tokens are essential for the requests that follow.
### Response:
def get_bdstoken(cookie):
    '''Get bdstoken and other tokens from the /disk/home page.

    These tokens are essential for the requests that follow.
    '''
    url = const.PAN_REFERER
    req = net.urlopen(url, headers={'Cookie': cookie.header_output()})
    if req:
        return parse_bdstoken(req.data.decode())
    else:
        return None
def is_downstream_of(self, other): """ return a boolean indicating whether this feature is downstream of `other` taking the strand of other into account """ if self.chrom != other.chrom: return None if getattr(other, "strand", None) == "-": # other feature is on - strand, so this must have higher start return self.end <= other.start return self.start >= other.end
return a boolean indicating whether this feature is downstream of `other` taking the strand of other into account
Below is the the instruction that describes the task: ### Input: return a boolean indicating whether this feature is downstream of `other` taking the strand of other into account ### Response: def is_downstream_of(self, other): """ return a boolean indicating whether this feature is downstream of `other` taking the strand of other into account """ if self.chrom != other.chrom: return None if getattr(other, "strand", None) == "-": # other feature is on - strand, so this must have higher start return self.end <= other.start return self.start >= other.end
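A toy feature type is enough to exercise the strand logic above; on the minus strand "downstream" means lower coordinates, so the comparison flips (this mirrors the method, it is not the library's own class):

from collections import namedtuple

Feature = namedtuple("Feature", "chrom start end strand")

def is_downstream_of(a, b):
    if a.chrom != b.chrom:
        return None
    if b.strand == "-":
        return a.end <= b.start
    return a.start >= b.end

gene = Feature("chr1", 100, 200, "-")
print(is_downstream_of(Feature("chr1", 10, 90, "+"), gene))    # True
print(is_downstream_of(Feature("chr1", 250, 300, "+"), gene))  # False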
def monkey_patch_bson(bson=None): '''Patch bson in pymongo to use loads and dumps interface.''' if not bson: import bson if not hasattr(bson, 'loads'): bson.loads = lambda bsondoc: bson.BSON(bsondoc).decode() if not hasattr(bson, 'dumps'): bson.dumps = lambda document: bson.BSON.encode(document)
Patch bson in pymongo to use loads and dumps interface.
Below is the the instruction that describes the task: ### Input: Patch bson in pymongo to use loads and dumps interface. ### Response: def monkey_patch_bson(bson=None): '''Patch bson in pymongo to use loads and dumps interface.''' if not bson: import bson if not hasattr(bson, 'loads'): bson.loads = lambda bsondoc: bson.BSON(bsondoc).decode() if not hasattr(bson, 'dumps'): bson.dumps = lambda document: bson.BSON.encode(document)
def _search(self, limit, format):
    '''
    Returns a list of result objects and advances the offset used to
    build the URL for the next page of Bing search results.
    '''
    url = self.QUERY_URL.format(requests.utils.quote("'{}'".format(self.query)),
                                min(50, limit), self.current_offset, format)
    r = requests.get(url, auth=("", self.api_key))
    try:
        json_results = r.json()
    except ValueError as vE:
        if not self.safe:
            raise PyBingVideoException("Request returned with code %s, error msg: %s" % (r.status_code, r.text))
        else:
            print("[ERROR] Request returned with code %s, error msg: %s. \nContinuing in 5 seconds." % (r.status_code, r.text))
            time.sleep(5)
    packaged_results = [VideoResult(single_result_json) for single_result_json in json_results['d']['results']]
    self.current_offset += min(50, limit, len(packaged_results))
    return packaged_results
Returns a list of result objects and advances the offset used to
build the URL for the next page of Bing search results.
Below is the the instruction that describes the task:
### Input:
Returns a list of result objects and advances the offset used to
build the URL for the next page of Bing search results.
### Response:
def _search(self, limit, format):
    '''
    Returns a list of result objects and advances the offset used to
    build the URL for the next page of Bing search results.
    '''
    url = self.QUERY_URL.format(requests.utils.quote("'{}'".format(self.query)),
                                min(50, limit), self.current_offset, format)
    r = requests.get(url, auth=("", self.api_key))
    try:
        json_results = r.json()
    except ValueError as vE:
        if not self.safe:
            raise PyBingVideoException("Request returned with code %s, error msg: %s" % (r.status_code, r.text))
        else:
            print("[ERROR] Request returned with code %s, error msg: %s. \nContinuing in 5 seconds." % (r.status_code, r.text))
            time.sleep(5)
    packaged_results = [VideoResult(single_result_json) for single_result_json in json_results['d']['results']]
    self.current_offset += min(50, limit, len(packaged_results))
    return packaged_results
def start(self, tag, attrs):
    """Handle the start tag.

    Call the handler's 'stream_start' method with an empty root element
    if it is top level.

    For lower level tags use :etree:`ElementTree.TreeBuilder` to collect
    them.
    """
    if self._level == 0:
        self._root = ElementTree.Element(tag, attrs)
        self._handler.stream_start(self._root)
    if self._level < 2:
        self._builder = ElementTree.TreeBuilder()
    self._level += 1
    return self._builder.start(tag, attrs)
Handle the start tag.

Call the handler's 'stream_start' method with an empty root element
if it is top level.

For lower level tags use :etree:`ElementTree.TreeBuilder` to collect
them.
Below is the the instruction that describes the task:
### Input:
Handle the start tag.

Call the handler's 'stream_start' method with an empty root element
if it is top level.

For lower level tags use :etree:`ElementTree.TreeBuilder` to collect
them.
### Response:
def start(self, tag, attrs):
    """Handle the start tag.

    Call the handler's 'stream_start' method with an empty root element
    if it is top level.

    For lower level tags use :etree:`ElementTree.TreeBuilder` to collect
    them.
    """
    if self._level == 0:
        self._root = ElementTree.Element(tag, attrs)
        self._handler.stream_start(self._root)
    if self._level < 2:
        self._builder = ElementTree.TreeBuilder()
    self._level += 1
    return self._builder.start(tag, attrs)
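The per-stanza ElementTree.TreeBuilder that the handler creates below the root can be exercised directly with the standard library; the tag and attribute here are invented:

import xml.etree.ElementTree as ET

builder = ET.TreeBuilder()
builder.start("message", {"to": "alice"})
builder.data("hi")
builder.end("message")
stanza = builder.close()
print(stanza.tag, stanza.attrib, stanza.text)  # message {'to': 'alice'} hi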
def intraday(ticker, dt, session='', **kwargs) -> pd.DataFrame:
    """
    Bloomberg intraday bar data within market session

    Args:
        ticker: ticker
        dt: date
        session: examples include
            day_open_30, am_normal_30_30, day_close_30, allday_exact_0930_1000
        **kwargs:
            ref: reference ticker or exchange for timezone
            keep_tz: whether to keep the original timezone
                if a reference ticker / exchange is given
            start_time: start time
            end_time: end time
            typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]

    Returns:
        pd.DataFrame
    """
    from xbbg.core import intervals

    cur_data = bdib(ticker=ticker, dt=dt, typ=kwargs.get('typ', 'TRADE'))
    if cur_data.empty: return pd.DataFrame()

    fmt = '%H:%M:%S'
    ss = intervals.SessNA
    ref = kwargs.get('ref', None)
    exch = pd.Series() if ref is None else const.exch_info(ticker=ref)
    if session: ss = intervals.get_interval(
        ticker=kwargs.get('ref', ticker), session=session
    )

    start_time = kwargs.get('start_time', None)
    end_time = kwargs.get('end_time', None)
    if ss != intervals.SessNA:
        start_time = pd.Timestamp(ss.start_time).strftime(fmt)
        end_time = pd.Timestamp(ss.end_time).strftime(fmt)

    if start_time and end_time:
        kw = dict(start_time=start_time, end_time=end_time)
        if not exch.empty:
            cur_tz = cur_data.index.tz
            res = cur_data.tz_convert(exch.tz).between_time(**kw)
            if kwargs.get('keep_tz', False):
                res = res.tz_convert(cur_tz)
            return pd.DataFrame(res)
        return pd.DataFrame(cur_data.between_time(**kw))

    return cur_data
Bloomberg intraday bar data within market session

Args:
    ticker: ticker
    dt: date
    session: examples include
        day_open_30, am_normal_30_30, day_close_30, allday_exact_0930_1000
    **kwargs:
        ref: reference ticker or exchange for timezone
        keep_tz: whether to keep the original timezone
            if a reference ticker / exchange is given
        start_time: start time
        end_time: end time
        typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]

Returns:
    pd.DataFrame
Below is the the instruction that describes the task:
### Input:
Bloomberg intraday bar data within market session

Args:
    ticker: ticker
    dt: date
    session: examples include
        day_open_30, am_normal_30_30, day_close_30, allday_exact_0930_1000
    **kwargs:
        ref: reference ticker or exchange for timezone
        keep_tz: whether to keep the original timezone
            if a reference ticker / exchange is given
        start_time: start time
        end_time: end time
        typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]

Returns:
    pd.DataFrame
### Response:
def intraday(ticker, dt, session='', **kwargs) -> pd.DataFrame:
    """
    Bloomberg intraday bar data within market session

    Args:
        ticker: ticker
        dt: date
        session: examples include
            day_open_30, am_normal_30_30, day_close_30, allday_exact_0930_1000
        **kwargs:
            ref: reference ticker or exchange for timezone
            keep_tz: whether to keep the original timezone
                if a reference ticker / exchange is given
            start_time: start time
            end_time: end time
            typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]

    Returns:
        pd.DataFrame
    """
    from xbbg.core import intervals

    cur_data = bdib(ticker=ticker, dt=dt, typ=kwargs.get('typ', 'TRADE'))
    if cur_data.empty: return pd.DataFrame()

    fmt = '%H:%M:%S'
    ss = intervals.SessNA
    ref = kwargs.get('ref', None)
    exch = pd.Series() if ref is None else const.exch_info(ticker=ref)
    if session: ss = intervals.get_interval(
        ticker=kwargs.get('ref', ticker), session=session
    )

    start_time = kwargs.get('start_time', None)
    end_time = kwargs.get('end_time', None)
    if ss != intervals.SessNA:
        start_time = pd.Timestamp(ss.start_time).strftime(fmt)
        end_time = pd.Timestamp(ss.end_time).strftime(fmt)

    if start_time and end_time:
        kw = dict(start_time=start_time, end_time=end_time)
        if not exch.empty:
            cur_tz = cur_data.index.tz
            res = cur_data.tz_convert(exch.tz).between_time(**kw)
            if kwargs.get('keep_tz', False):
                res = res.tz_convert(cur_tz)
            return pd.DataFrame(res)
        return pd.DataFrame(cur_data.between_time(**kw))

    return cur_data
def get_symmetry_dataset(cell, symprec=1e-5, angle_tolerance=-1.0, hall_number=0):
    """Search symmetry dataset from an input cell.

    Args:
        cell, symprec, angle_tolerance:
            See the docstring of get_symmetry.
        hall_number: If a serial number of Hall symbol (>0) is given,
            the database corresponding to the Hall symbol is made.

    Return:
        A dictionary is returned.

        Dictionary keys:
            number (int): International space group number
            international (str): International symbol
            hall (str): Hall symbol
            choice (str): Centring, origin, basis vector setting
            transformation_matrix (3x3 float):
                Transformation matrix from input lattice to standardized
                lattice: L^original = L^standardized * Tmat
            origin shift (3 float):
                Origin shift from standardized to input origin
            rotations (3x3 int), translations (float vector):
                Rotation matrices and translation vectors. Space group
                operations are obtained by
                [(r,t) for r, t in zip(rotations, translations)]
            wyckoffs (n char): Wyckoff letters
            equivalent_atoms (n int): Symmetrically equivalent atoms
            mapping_to_primitive (n int):
                Original cell atom index mapping to primitive cell atom index
            Idealized standardized unit cell:
                std_lattice (3x3 float, row vectors),
                std_positions (Nx3 float), std_types (N int)
            std_rotation_matrix:
                Rigid rotation matrix to rotate from standardized basis
                vectors to idealized standardized basis vectors:
                L^idealized = R * L^standardized
            std_mapping_to_primitive (m int):
                std_positions index mapping to those of primitive cell atoms
            pointgroup (str): Pointgroup symbol

        If it fails, None is returned.
    """
    _set_no_error()

    lattice, positions, numbers, _ = _expand_cell(cell)
    if lattice is None:
        return None

    spg_ds = spg.dataset(lattice, positions, numbers, hall_number,
                         symprec, angle_tolerance)
    if spg_ds is None:
        _set_error_message()
        return None

    keys = ('number',
            'hall_number',
            'international',
            'hall',
            'choice',
            'transformation_matrix',
            'origin_shift',
            'rotations',
            'translations',
            'wyckoffs',
            'site_symmetry_symbols',
            'equivalent_atoms',
            'mapping_to_primitive',
            'std_lattice',
            'std_types',
            'std_positions',
            'std_rotation_matrix',
            'std_mapping_to_primitive',
            # 'pointgroup_number',
            'pointgroup')
    dataset = {}
    for key, data in zip(keys, spg_ds):
        dataset[key] = data

    dataset['international'] = dataset['international'].strip()
    dataset['hall'] = dataset['hall'].strip()
    dataset['choice'] = dataset['choice'].strip()
    dataset['transformation_matrix'] = np.array(
        dataset['transformation_matrix'], dtype='double', order='C')
    dataset['origin_shift'] = np.array(dataset['origin_shift'], dtype='double')
    dataset['rotations'] = np.array(dataset['rotations'],
                                    dtype='intc', order='C')
    dataset['translations'] = np.array(dataset['translations'],
                                       dtype='double', order='C')
    letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    dataset['wyckoffs'] = [letters[x] for x in dataset['wyckoffs']]
    dataset['site_symmetry_symbols'] = [
        s.strip() for s in dataset['site_symmetry_symbols']]
    dataset['equivalent_atoms'] = np.array(dataset['equivalent_atoms'],
                                           dtype='intc')
    dataset['mapping_to_primitive'] = np.array(dataset['mapping_to_primitive'],
                                               dtype='intc')
    dataset['std_lattice'] = np.array(np.transpose(dataset['std_lattice']),
                                      dtype='double', order='C')
    dataset['std_types'] = np.array(dataset['std_types'], dtype='intc')
    dataset['std_positions'] = np.array(dataset['std_positions'],
                                        dtype='double', order='C')
    dataset['std_rotation_matrix'] = np.array(dataset['std_rotation_matrix'],
                                              dtype='double', order='C')
    dataset['std_mapping_to_primitive'] = np.array(
        dataset['std_mapping_to_primitive'], dtype='intc')
    dataset['pointgroup'] = dataset['pointgroup'].strip()
    _set_error_message()
    return dataset
Search symmetry dataset from an input cell.

Args:
    cell, symprec, angle_tolerance:
        See the docstring of get_symmetry.
    hall_number: If a serial number of Hall symbol (>0) is given,
        the database corresponding to the Hall symbol is made.

Return:
    A dictionary is returned.

    Dictionary keys:
        number (int): International space group number
        international (str): International symbol
        hall (str): Hall symbol
        choice (str): Centring, origin, basis vector setting
        transformation_matrix (3x3 float):
            Transformation matrix from input lattice to standardized
            lattice: L^original = L^standardized * Tmat
        origin shift (3 float):
            Origin shift from standardized to input origin
        rotations (3x3 int), translations (float vector):
            Rotation matrices and translation vectors. Space group
            operations are obtained by
            [(r,t) for r, t in zip(rotations, translations)]
        wyckoffs (n char): Wyckoff letters
        equivalent_atoms (n int): Symmetrically equivalent atoms
        mapping_to_primitive (n int):
            Original cell atom index mapping to primitive cell atom index
        Idealized standardized unit cell:
            std_lattice (3x3 float, row vectors),
            std_positions (Nx3 float), std_types (N int)
        std_rotation_matrix:
            Rigid rotation matrix to rotate from standardized basis
            vectors to idealized standardized basis vectors:
            L^idealized = R * L^standardized
        std_mapping_to_primitive (m int):
            std_positions index mapping to those of primitive cell atoms
        pointgroup (str): Pointgroup symbol

    If it fails, None is returned.
Below is the the instruction that describes the task:
### Input:
Search symmetry dataset from an input cell.

Args:
    cell, symprec, angle_tolerance:
        See the docstring of get_symmetry.
    hall_number: If a serial number of Hall symbol (>0) is given,
        the database corresponding to the Hall symbol is made.

Return:
    A dictionary is returned.

    Dictionary keys:
        number (int): International space group number
        international (str): International symbol
        hall (str): Hall symbol
        choice (str): Centring, origin, basis vector setting
        transformation_matrix (3x3 float):
            Transformation matrix from input lattice to standardized
            lattice: L^original = L^standardized * Tmat
        origin shift (3 float):
            Origin shift from standardized to input origin
        rotations (3x3 int), translations (float vector):
            Rotation matrices and translation vectors. Space group
            operations are obtained by
            [(r,t) for r, t in zip(rotations, translations)]
        wyckoffs (n char): Wyckoff letters
        equivalent_atoms (n int): Symmetrically equivalent atoms
        mapping_to_primitive (n int):
            Original cell atom index mapping to primitive cell atom index
        Idealized standardized unit cell:
            std_lattice (3x3 float, row vectors),
            std_positions (Nx3 float), std_types (N int)
        std_rotation_matrix:
            Rigid rotation matrix to rotate from standardized basis
            vectors to idealized standardized basis vectors:
            L^idealized = R * L^standardized
        std_mapping_to_primitive (m int):
            std_positions index mapping to those of primitive cell atoms
        pointgroup (str): Pointgroup symbol

    If it fails, None is returned.
### Response:
def get_symmetry_dataset(cell, symprec=1e-5, angle_tolerance=-1.0, hall_number=0):
    """Search symmetry dataset from an input cell.

    Args:
        cell, symprec, angle_tolerance:
            See the docstring of get_symmetry.
        hall_number: If a serial number of Hall symbol (>0) is given,
            the database corresponding to the Hall symbol is made.

    Return:
        A dictionary is returned.

        Dictionary keys:
            number (int): International space group number
            international (str): International symbol
            hall (str): Hall symbol
            choice (str): Centring, origin, basis vector setting
            transformation_matrix (3x3 float):
                Transformation matrix from input lattice to standardized
                lattice: L^original = L^standardized * Tmat
            origin shift (3 float):
                Origin shift from standardized to input origin
            rotations (3x3 int), translations (float vector):
                Rotation matrices and translation vectors. Space group
                operations are obtained by
                [(r,t) for r, t in zip(rotations, translations)]
            wyckoffs (n char): Wyckoff letters
            equivalent_atoms (n int): Symmetrically equivalent atoms
            mapping_to_primitive (n int):
                Original cell atom index mapping to primitive cell atom index
            Idealized standardized unit cell:
                std_lattice (3x3 float, row vectors),
                std_positions (Nx3 float), std_types (N int)
            std_rotation_matrix:
                Rigid rotation matrix to rotate from standardized basis
                vectors to idealized standardized basis vectors:
                L^idealized = R * L^standardized
            std_mapping_to_primitive (m int):
                std_positions index mapping to those of primitive cell atoms
            pointgroup (str): Pointgroup symbol

        If it fails, None is returned.
    """
    _set_no_error()

    lattice, positions, numbers, _ = _expand_cell(cell)
    if lattice is None:
        return None

    spg_ds = spg.dataset(lattice, positions, numbers, hall_number,
                         symprec, angle_tolerance)
    if spg_ds is None:
        _set_error_message()
        return None

    keys = ('number',
            'hall_number',
            'international',
            'hall',
            'choice',
            'transformation_matrix',
            'origin_shift',
            'rotations',
            'translations',
            'wyckoffs',
            'site_symmetry_symbols',
            'equivalent_atoms',
            'mapping_to_primitive',
            'std_lattice',
            'std_types',
            'std_positions',
            'std_rotation_matrix',
            'std_mapping_to_primitive',
            # 'pointgroup_number',
            'pointgroup')
    dataset = {}
    for key, data in zip(keys, spg_ds):
        dataset[key] = data

    dataset['international'] = dataset['international'].strip()
    dataset['hall'] = dataset['hall'].strip()
    dataset['choice'] = dataset['choice'].strip()
    dataset['transformation_matrix'] = np.array(
        dataset['transformation_matrix'], dtype='double', order='C')
    dataset['origin_shift'] = np.array(dataset['origin_shift'], dtype='double')
    dataset['rotations'] = np.array(dataset['rotations'],
                                    dtype='intc', order='C')
    dataset['translations'] = np.array(dataset['translations'],
                                       dtype='double', order='C')
    letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    dataset['wyckoffs'] = [letters[x] for x in dataset['wyckoffs']]
    dataset['site_symmetry_symbols'] = [
        s.strip() for s in dataset['site_symmetry_symbols']]
    dataset['equivalent_atoms'] = np.array(dataset['equivalent_atoms'],
                                           dtype='intc')
    dataset['mapping_to_primitive'] = np.array(dataset['mapping_to_primitive'],
                                               dtype='intc')
    dataset['std_lattice'] = np.array(np.transpose(dataset['std_lattice']),
                                      dtype='double', order='C')
    dataset['std_types'] = np.array(dataset['std_types'], dtype='intc')
    dataset['std_positions'] = np.array(dataset['std_positions'],
                                        dtype='double', order='C')
    dataset['std_rotation_matrix'] = np.array(dataset['std_rotation_matrix'],
                                              dtype='double', order='C')
    dataset['std_mapping_to_primitive'] = np.array(
        dataset['std_mapping_to_primitive'], dtype='intc')
    dataset['pointgroup'] = dataset['pointgroup'].strip()
    _set_error_message()
    return dataset
def get_build_report(self, project, build_id, type=None): """GetBuildReport. [Preview API] Gets a build report. :param str project: Project ID or project name :param int build_id: The ID of the build. :param str type: :rtype: :class:`<BuildReportMetadata> <azure.devops.v5_0.build.models.BuildReportMetadata>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if build_id is not None: route_values['buildId'] = self._serialize.url('build_id', build_id, 'int') query_parameters = {} if type is not None: query_parameters['type'] = self._serialize.query('type', type, 'str') response = self._send(http_method='GET', location_id='45bcaa88-67e1-4042-a035-56d3b4a7d44c', version='5.0-preview.2', route_values=route_values, query_parameters=query_parameters) return self._deserialize('BuildReportMetadata', response)
GetBuildReport. [Preview API] Gets a build report. :param str project: Project ID or project name :param int build_id: The ID of the build. :param str type: :rtype: :class:`<BuildReportMetadata> <azure.devops.v5_0.build.models.BuildReportMetadata>`
Below is the the instruction that describes the task: ### Input: GetBuildReport. [Preview API] Gets a build report. :param str project: Project ID or project name :param int build_id: The ID of the build. :param str type: :rtype: :class:`<BuildReportMetadata> <azure.devops.v5_0.build.models.BuildReportMetadata>` ### Response: def get_build_report(self, project, build_id, type=None): """GetBuildReport. [Preview API] Gets a build report. :param str project: Project ID or project name :param int build_id: The ID of the build. :param str type: :rtype: :class:`<BuildReportMetadata> <azure.devops.v5_0.build.models.BuildReportMetadata>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if build_id is not None: route_values['buildId'] = self._serialize.url('build_id', build_id, 'int') query_parameters = {} if type is not None: query_parameters['type'] = self._serialize.query('type', type, 'str') response = self._send(http_method='GET', location_id='45bcaa88-67e1-4042-a035-56d3b4a7d44c', version='5.0-preview.2', route_values=route_values, query_parameters=query_parameters) return self._deserialize('BuildReportMetadata', response)
def align_blocks(source_sentences, target_sentences, params = LanguageIndependent):
    """Creates the sentence alignment of two blocks of text (usually paragraphs).

    @param source_sentences: The list of source sentence lengths.
    @param target_sentences: The list of target sentence lengths.
    @param params: the sentence alignment parameters.
    @return: The sentence alignments, a list of index pairs.
    """
    alignment_types = list(params.PRIORS.keys())

    # there are always three rows in the history (with the last of them being filled)
    # and the rows are always |target_text| + 2, so that we never have to do
    # boundary checks
    D = [(len(target_sentences) + 2) * [0] for x in range(2)]

    # for the first sentence, only substitution, insertion or deletion are
    # allowed, and they are all equally likely ( == 1)
    D.append([0, 1])
    try:
        D[-2][1] = 1
        D[-2][2] = 1
    except IndexError:
        # the target block is too short for these seed cells
        pass

    backlinks = {}

    for i in range(len(source_sentences)):
        for j in range(len(target_sentences)):
            m = []
            for a in alignment_types:
                k = D[-(1 + a[0])][j + 2 - a[1]]
                if k > 0:
                    p = k * \
                        align_probability(i, j, source_sentences, target_sentences, a, params)
                    m.append((p, a))

            if len(m) > 0:
                v = max(m)
                backlinks[(i, j)] = v[1]
                D[-1].append(v[0])
            else:
                backlinks[(i, j)] = (1, 1)
                D[-1].append(0)

        D.pop(0)
        D.append([0, 0])

    return trace(backlinks, source_sentences, target_sentences)
Creates the sentence alignment of two blocks of text (usually paragraphs).

@param source_sentences: The list of source sentence lengths.
@param target_sentences: The list of target sentence lengths.
@param params: the sentence alignment parameters.
@return: The sentence alignments, a list of index pairs.
Below is the the instruction that describes the task:
### Input:
Creates the sentence alignment of two blocks of text (usually paragraphs).

@param source_sentences: The list of source sentence lengths.
@param target_sentences: The list of target sentence lengths.
@param params: the sentence alignment parameters.
@return: The sentence alignments, a list of index pairs.
### Response:
def align_blocks(source_sentences, target_sentences, params = LanguageIndependent):
    """Creates the sentence alignment of two blocks of text (usually paragraphs).

    @param source_sentences: The list of source sentence lengths.
    @param target_sentences: The list of target sentence lengths.
    @param params: the sentence alignment parameters.
    @return: The sentence alignments, a list of index pairs.
    """
    alignment_types = list(params.PRIORS.keys())

    # there are always three rows in the history (with the last of them being filled)
    # and the rows are always |target_text| + 2, so that we never have to do
    # boundary checks
    D = [(len(target_sentences) + 2) * [0] for x in range(2)]

    # for the first sentence, only substitution, insertion or deletion are
    # allowed, and they are all equally likely ( == 1)
    D.append([0, 1])
    try:
        D[-2][1] = 1
        D[-2][2] = 1
    except IndexError:
        # the target block is too short for these seed cells
        pass

    backlinks = {}

    for i in range(len(source_sentences)):
        for j in range(len(target_sentences)):
            m = []
            for a in alignment_types:
                k = D[-(1 + a[0])][j + 2 - a[1]]
                if k > 0:
                    p = k * \
                        align_probability(i, j, source_sentences, target_sentences, a, params)
                    m.append((p, a))

            if len(m) > 0:
                v = max(m)
                backlinks[(i, j)] = v[1]
                D[-1].append(v[0])
            else:
                backlinks[(i, j)] = (1, 1)
                D[-1].append(0)

        D.pop(0)
        D.append([0, 0])

    return trace(backlinks, source_sentences, target_sentences)
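align_probability is not shown here; in Gale-Church style length-based alignment it is typically a Gaussian on the length discrepancy. A rough sketch of that cost under the commonly quoted c = 1, s^2 = 6.8 parameters (assumed values, not read from this code):

import math

def length_cost(len1, len2, c=1.0, s2=6.8):
    """Gale-Church delta: how surprising it is that len1 aligns with len2."""
    if len1 == 0 and len2 == 0:
        return 0.0
    mean = (len1 + len2 / c) / 2.0
    return abs((len2 - len1 * c) / math.sqrt(mean * s2))

print(length_cost(100, 105))  # small delta: a plausible 1-1 alignment
print(length_cost(100, 15))   # large delta: an unlikely pairing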
def add_distdiff_optgroup(parser):
    """
    Option group relating to the use of a DistChange or DistReport
    """

    # for the --processes default
    cpus = cpu_count()

    og = parser.add_argument_group("Distribution Checking Options")

    og.add_argument("--processes", type=int, default=cpus,
                    help="Number of child processes to spawn to handle"
                    " sub-reports. Set to 0 to disable multi-processing."
                    " Defaults to the number of CPUs (%r)" % cpus)

    og.add_argument("--shallow", action="store_true", default=False,
                    help="Check only that the files of this dist have"
                    " changed, do not infer the meaning")

    og.add_argument("--ignore-filenames", action="append", default=[],
                    help="file glob to ignore. Can be specified multiple"
                    " times")

    og.add_argument("--ignore-trailing-whitespace", action="store_true",
                    default=False,
                    help="ignore trailing whitespace when comparing text"
                    " files")
Option group relating to the use of a DistChange or DistReport
Below is the the instruction that describes the task:
### Input:
Option group relating to the use of a DistChange or DistReport
### Response:
def add_distdiff_optgroup(parser):
    """
    Option group relating to the use of a DistChange or DistReport
    """

    # for the --processes default
    cpus = cpu_count()

    og = parser.add_argument_group("Distribution Checking Options")

    og.add_argument("--processes", type=int, default=cpus,
                    help="Number of child processes to spawn to handle"
                    " sub-reports. Set to 0 to disable multi-processing."
                    " Defaults to the number of CPUs (%r)" % cpus)

    og.add_argument("--shallow", action="store_true", default=False,
                    help="Check only that the files of this dist have"
                    " changed, do not infer the meaning")

    og.add_argument("--ignore-filenames", action="append", default=[],
                    help="file glob to ignore. Can be specified multiple"
                    " times")

    og.add_argument("--ignore-trailing-whitespace", action="store_true",
                    default=False,
                    help="ignore trailing whitespace when comparing text"
                    " files")
def local( year, month, day, hour=0, minute=0, second=0, microsecond=0 ): # type: (int, int, int, int, int, int, int) -> DateTime """ Return a DateTime in the local timezone. """ return datetime( year, month, day, hour, minute, second, microsecond, tz=local_timezone() )
Return a DateTime in the local timezone.
Below is the the instruction that describes the task: ### Input: Return a DateTime in the local timezone. ### Response: def local( year, month, day, hour=0, minute=0, second=0, microsecond=0 ): # type: (int, int, int, int, int, int, int) -> DateTime """ Return a DateTime in the local timezone. """ return datetime( year, month, day, hour, minute, second, microsecond, tz=local_timezone() )
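A brief usage sketch, assuming the function above is in scope together with its datetime/local_timezone dependencies; the concrete UTC offset in the output depends on the host system:

dt = local(2024, 1, 15, hour=9, minute=30)
print(dt.isoformat())  # rendered with the system's local timezone offset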
def parts(self, *args, **kwargs): """Retrieve parts belonging to this activity. Without any arguments it retrieves the Instances related to this task only. This call only returns the configured properties in an activity. So properties that are not configured are not in the returned parts. See :class:`pykechain.Client.parts` for additional available parameters. Example ------- >>> task = project.activity('Specify Wheel Diameter') >>> parts = task.parts() To retrieve the models only. >>> parts = task.parts(category=Category.MODEL) """ return self._client.parts(*args, activity=self.id, **kwargs)
Retrieve parts belonging to this activity. Without any arguments it retrieves the Instances related to this task only. This call only returns the configured properties in an activity. So properties that are not configured are not in the returned parts. See :class:`pykechain.Client.parts` for additional available parameters. Example ------- >>> task = project.activity('Specify Wheel Diameter') >>> parts = task.parts() To retrieve the models only. >>> parts = task.parts(category=Category.MODEL)
Below is the the instruction that describes the task: ### Input: Retrieve parts belonging to this activity. Without any arguments it retrieves the Instances related to this task only. This call only returns the configured properties in an activity. So properties that are not configured are not in the returned parts. See :class:`pykechain.Client.parts` for additional available parameters. Example ------- >>> task = project.activity('Specify Wheel Diameter') >>> parts = task.parts() To retrieve the models only. >>> parts = task.parts(category=Category.MODEL) ### Response: def parts(self, *args, **kwargs): """Retrieve parts belonging to this activity. Without any arguments it retrieves the Instances related to this task only. This call only returns the configured properties in an activity. So properties that are not configured are not in the returned parts. See :class:`pykechain.Client.parts` for additional available parameters. Example ------- >>> task = project.activity('Specify Wheel Diameter') >>> parts = task.parts() To retrieve the models only. >>> parts = task.parts(category=Category.MODEL) """ return self._client.parts(*args, activity=self.id, **kwargs)
def benchmark_eight_schools_hmc( num_results=int(5e3), num_burnin_steps=int(3e3), num_leapfrog_steps=3, step_size=0.4): """Runs HMC on the eight-schools unnormalized posterior.""" num_schools = 8 treatment_effects = tf.constant( [28, 8, -3, 7, -1, 1, 18, 12], dtype=np.float32, name='treatment_effects') treatment_stddevs = tf.constant( [15, 10, 16, 11, 9, 11, 10, 18], dtype=np.float32, name='treatment_stddevs') def unnormalized_posterior_log_prob( avg_effect, avg_stddev, school_effects_standard): """Eight-schools unnormalized log posterior.""" return eight_schools_joint_log_prob( treatment_effects, treatment_stddevs, avg_effect, avg_stddev, school_effects_standard) if tf.executing_eagerly(): sample_chain = tf.function(tfp.mcmc.sample_chain) else: sample_chain = tfp.mcmc.sample_chain def computation(): """The benchmark computation.""" _, kernel_results = sample_chain( num_results=num_results, num_burnin_steps=num_burnin_steps, current_state=( tf.zeros([], name='init_avg_effect'), tf.zeros([], name='init_avg_stddev'), tf.ones([num_schools], name='init_school_effects_standard'), ), kernel=tfp.mcmc.HamiltonianMonteCarlo( target_log_prob_fn=unnormalized_posterior_log_prob, step_size=step_size, num_leapfrog_steps=num_leapfrog_steps)) return kernel_results.is_accepted # Let's force evaluation of graph to ensure build time is not part of our time # trial. is_accepted_tensor = computation() if not tf.executing_eagerly(): session = tf.compat.v1.Session() session.run(is_accepted_tensor) start_time = time.time() if tf.executing_eagerly(): is_accepted = computation() else: is_accepted = session.run(is_accepted_tensor) wall_time = time.time() - start_time num_accepted = np.sum(is_accepted) acceptance_rate = np.float32(num_accepted) / np.float32(num_results) return dict( iters=(num_results + num_burnin_steps) * num_leapfrog_steps, extras={'acceptance_rate': acceptance_rate}, wall_time=wall_time)
Runs HMC on the eight-schools unnormalized posterior.
Below is the the instruction that describes the task: ### Input: Runs HMC on the eight-schools unnormalized posterior. ### Response: def benchmark_eight_schools_hmc( num_results=int(5e3), num_burnin_steps=int(3e3), num_leapfrog_steps=3, step_size=0.4): """Runs HMC on the eight-schools unnormalized posterior.""" num_schools = 8 treatment_effects = tf.constant( [28, 8, -3, 7, -1, 1, 18, 12], dtype=np.float32, name='treatment_effects') treatment_stddevs = tf.constant( [15, 10, 16, 11, 9, 11, 10, 18], dtype=np.float32, name='treatment_stddevs') def unnormalized_posterior_log_prob( avg_effect, avg_stddev, school_effects_standard): """Eight-schools unnormalized log posterior.""" return eight_schools_joint_log_prob( treatment_effects, treatment_stddevs, avg_effect, avg_stddev, school_effects_standard) if tf.executing_eagerly(): sample_chain = tf.function(tfp.mcmc.sample_chain) else: sample_chain = tfp.mcmc.sample_chain def computation(): """The benchmark computation.""" _, kernel_results = sample_chain( num_results=num_results, num_burnin_steps=num_burnin_steps, current_state=( tf.zeros([], name='init_avg_effect'), tf.zeros([], name='init_avg_stddev'), tf.ones([num_schools], name='init_school_effects_standard'), ), kernel=tfp.mcmc.HamiltonianMonteCarlo( target_log_prob_fn=unnormalized_posterior_log_prob, step_size=step_size, num_leapfrog_steps=num_leapfrog_steps)) return kernel_results.is_accepted # Let's force evaluation of graph to ensure build time is not part of our time # trial. is_accepted_tensor = computation() if not tf.executing_eagerly(): session = tf.compat.v1.Session() session.run(is_accepted_tensor) start_time = time.time() if tf.executing_eagerly(): is_accepted = computation() else: is_accepted = session.run(is_accepted_tensor) wall_time = time.time() - start_time num_accepted = np.sum(is_accepted) acceptance_rate = np.float32(num_accepted) / np.float32(num_results) return dict( iters=(num_results + num_burnin_steps) * num_leapfrog_steps, extras={'acceptance_rate': acceptance_rate}, wall_time=wall_time)
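A hedged invocation sketch (tiny chain sizes chosen only so it finishes quickly; real benchmarking would use the defaults):

stats = benchmark_eight_schools_hmc(num_results=200, num_burnin_steps=100)
print(stats['iters'])                      # (200 + 100) * 3 leapfrog steps = 900
print(stats['extras']['acceptance_rate'])  # fraction of accepted proposals
print(stats['wall_time'])                  # seconds spent sampling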
def z2h(text, ignore='', kana=True, ascii=False, digit=False): """Convert Full-width (Zenkaku) Katakana to Half-width (Hankaku) Katakana Parameters ---------- text : str Full-width Katakana string. ignore : str Characters to be ignored in converting. kana : bool Either converting Kana or not. ascii : bool Either converting ascii or not. digit : bool Either converting digit or not. Return ------ str Half-width Katakana string. Examples -------- >>> print(jaconv.z2h('ティロフィナーレ')) ティロフィナーレ >>> print(jaconv.z2h('ティロフィナーレ', ignore='ィ')) ティロフィナーレ >>> print(jaconv.z2h('ABCD', ascii=True)) abcd >>> print(jaconv.z2h('1234', digit=True)) 1234 """ if ascii: if digit: if kana: z2h_map = Z2H_ALL else: z2h_map = Z2H_AD elif kana: z2h_map = Z2H_AK else: z2h_map = Z2H_A elif digit: if kana: z2h_map = Z2H_DK else: z2h_map = Z2H_D else: z2h_map = Z2H_K if ignore: z2h_map = _exclude_ignorechar(ignore, z2h_map.copy()) return _convert(text, z2h_map)
Convert Full-width (Zenkaku) Katakana to Half-width (Hankaku) Katakana Parameters ---------- text : str Full-width Katakana string. ignore : str Characters to be ignored in converting. kana : bool Either converting Kana or not. ascii : bool Either converting ascii or not. digit : bool Either converting digit or not. Return ------ str Half-width Katakana string. Examples -------- >>> print(jaconv.z2h('ティロフィナーレ')) ティロフィナーレ >>> print(jaconv.z2h('ティロフィナーレ', ignore='ィ')) ティロフィナーレ >>> print(jaconv.z2h('ABCD', ascii=True)) abcd >>> print(jaconv.z2h('1234', digit=True)) 1234
Below is the the instruction that describes the task: ### Input: Convert Full-width (Zenkaku) Katakana to Half-width (Hankaku) Katakana Parameters ---------- text : str Full-width Katakana string. ignore : str Characters to be ignored in converting. kana : bool Either converting Kana or not. ascii : bool Either converting ascii or not. digit : bool Either converting digit or not. Return ------ str Half-width Katakana string. Examples -------- >>> print(jaconv.z2h('ティロフィナーレ')) ティロフィナーレ >>> print(jaconv.z2h('ティロフィナーレ', ignore='ィ')) ティロフィナーレ >>> print(jaconv.z2h('ABCD', ascii=True)) abcd >>> print(jaconv.z2h('1234', digit=True)) 1234 ### Response: def z2h(text, ignore='', kana=True, ascii=False, digit=False): """Convert Full-width (Zenkaku) Katakana to Half-width (Hankaku) Katakana Parameters ---------- text : str Full-width Katakana string. ignore : str Characters to be ignored in converting. kana : bool Either converting Kana or not. ascii : bool Either converting ascii or not. digit : bool Either converting digit or not. Return ------ str Half-width Katakana string. Examples -------- >>> print(jaconv.z2h('ティロフィナーレ')) ティロフィナーレ >>> print(jaconv.z2h('ティロフィナーレ', ignore='ィ')) ティロフィナーレ >>> print(jaconv.z2h('ABCD', ascii=True)) abcd >>> print(jaconv.z2h('1234', digit=True)) 1234 """ if ascii: if digit: if kana: z2h_map = Z2H_ALL else: z2h_map = Z2H_AD elif kana: z2h_map = Z2H_AK else: z2h_map = Z2H_A elif digit: if kana: z2h_map = Z2H_DK else: z2h_map = Z2H_D else: z2h_map = Z2H_K if ignore: z2h_map = _exclude_ignorechar(ignore, z2h_map.copy()) return _convert(text, z2h_map)
def probability_density(self, X): """Compute density function for given copula family.""" self.check_fit() U, V = self.split_matrix(X) if self.theta == 1: return np.multiply(U, V) else: a = np.power(np.multiply(U, V), -1) tmp = np.power(-np.log(U), self.theta) + np.power(-np.log(V), self.theta) b = np.power(tmp, -2 + 2.0 / self.theta) c = np.power(np.multiply(np.log(U), np.log(V)), self.theta - 1) d = 1 + (self.theta - 1) * np.power(tmp, -1.0 / self.theta) return self.cumulative_distribution(X) * a * b * c * d
Compute density function for given copula family.
Below is the the instruction that describes the task: ### Input: Compute density function for given copula family. ### Response: def probability_density(self, X): """Compute density function for given copula family.""" self.check_fit() U, V = self.split_matrix(X) if self.theta == 1: return np.multiply(U, V) else: a = np.power(np.multiply(U, V), -1) tmp = np.power(-np.log(U), self.theta) + np.power(-np.log(V), self.theta) b = np.power(tmp, -2 + 2.0 / self.theta) c = np.power(np.multiply(np.log(U), np.log(V)), self.theta - 1) d = 1 + (self.theta - 1) * np.power(tmp, -1.0 / self.theta) return self.cumulative_distribution(X) * a * b * c * d
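For reference, the else branch above assembles, term by term (a, b, c, d), the closed-form Gumbel copula density suggested by the generator structure; a sketch of the expression, with u, v the marginal uniforms and theta >= 1 assumed:

\[
c_\theta(u, v) = C_\theta(u, v)\,(uv)^{-1}
\left[(-\ln u)^\theta + (-\ln v)^\theta\right]^{2/\theta - 2}
(\ln u \,\ln v)^{\theta - 1}
\left(1 + (\theta - 1)\left[(-\ln u)^\theta + (-\ln v)^\theta\right]^{-1/\theta}\right)
\]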
def run (self): """Remove share directory on clean.""" if self.all: # remove share directory directory = os.path.join("build", "share") if os.path.exists(directory): remove_tree(directory, dry_run=self.dry_run) else: log.warn("'%s' does not exist -- can't clean it", directory) clean.run(self)
Remove share directory on clean.
Below is the the instruction that describes the task: ### Input: Remove share directory on clean. ### Response: def run (self): """Remove share directory on clean.""" if self.all: # remove share directory directory = os.path.join("build", "share") if os.path.exists(directory): remove_tree(directory, dry_run=self.dry_run) else: log.warn("'%s' does not exist -- can't clean it", directory) clean.run(self)
def pon_to_bed(pon_file, out_dir, data): """Extract BED intervals from a GATK4 hdf5 panel of normal file. """ out_file = os.path.join(out_dir, "%s-intervals.bed" % (utils.splitext_plus(os.path.basename(pon_file))[0])) if not utils.file_uptodate(out_file, pon_file): import h5py with file_transaction(data, out_file) as tx_out_file: with h5py.File(pon_file, "r") as f: with open(tx_out_file, "w") as out_handle: intervals = f["original_data"]["intervals"] for i in range(len(intervals["transposed_index_start_end"][0])): chrom = intervals["indexed_contig_names"][intervals["transposed_index_start_end"][0][i]] start = int(intervals["transposed_index_start_end"][1][i]) - 1 end = int(intervals["transposed_index_start_end"][2][i]) out_handle.write("%s\t%s\t%s\n" % (chrom, start, end)) return out_file
Extract BED intervals from a GATK4 hdf5 panel of normal file.
Below is the the instruction that describes the task: ### Input: Extract BED intervals from a GATK4 hdf5 panel of normal file. ### Response: def pon_to_bed(pon_file, out_dir, data): """Extract BED intervals from a GATK4 hdf5 panel of normal file. """ out_file = os.path.join(out_dir, "%s-intervals.bed" % (utils.splitext_plus(os.path.basename(pon_file))[0])) if not utils.file_uptodate(out_file, pon_file): import h5py with file_transaction(data, out_file) as tx_out_file: with h5py.File(pon_file, "r") as f: with open(tx_out_file, "w") as out_handle: intervals = f["original_data"]["intervals"] for i in range(len(intervals["transposed_index_start_end"][0])): chrom = intervals["indexed_contig_names"][intervals["transposed_index_start_end"][0][i]] start = int(intervals["transposed_index_start_end"][1][i]) - 1 end = int(intervals["transposed_index_start_end"][2][i]) out_handle.write("%s\t%s\t%s\n" % (chrom, start, end)) return out_file
def create(self, name, group_id, avatar_url=None, callback_url=None, dm_notification=None, **kwargs): """Create a new bot in a particular group. :param str name: bot name :param str group_id: the group_id of a group :param str avatar_url: the URL of an image to use as an avatar :param str callback_url: a POST-back URL for each new message :param bool dm_notification: whether to POST-back for direct messages? :return: the new bot :rtype: :class:`~groupy.api.bots.Bot` """ payload = { 'bot': { 'name': name, 'group_id': group_id, 'avatar_url': avatar_url, 'callback_url': callback_url, 'dm_notification': dm_notification, }, } payload['bot'].update(kwargs) response = self.session.post(self.url, json=payload) bot = response.data['bot'] return Bot(self, **bot)
Create a new bot in a particular group. :param str name: bot name :param str group_id: the group_id of a group :param str avatar_url: the URL of an image to use as an avatar :param str callback_url: a POST-back URL for each new message :param bool dm_notification: whether to POST-back for direct messages? :return: the new bot :rtype: :class:`~groupy.api.bots.Bot`
Below is the the instruction that describes the task: ### Input: Create a new bot in a particular group. :param str name: bot name :param str group_id: the group_id of a group :param str avatar_url: the URL of an image to use as an avatar :param str callback_url: a POST-back URL for each new message :param bool dm_notification: whether to POST-back for direct messages? :return: the new bot :rtype: :class:`~groupy.api.bots.Bot` ### Response: def create(self, name, group_id, avatar_url=None, callback_url=None, dm_notification=None, **kwargs): """Create a new bot in a particular group. :param str name: bot name :param str group_id: the group_id of a group :param str avatar_url: the URL of an image to use as an avatar :param str callback_url: a POST-back URL for each new message :param bool dm_notification: whether to POST-back for direct messages? :return: the new bot :rtype: :class:`~groupy.api.bots.Bot` """ payload = { 'bot': { 'name': name, 'group_id': group_id, 'avatar_url': avatar_url, 'callback_url': callback_url, 'dm_notification': dm_notification, }, } payload['bot'].update(kwargs) response = self.session.post(self.url, json=payload) bot = response.data['bot'] return Bot(self, **bot)
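A hedged usage sketch (the client object, group id, and callback URL are placeholders; in groupy this manager is typically reached through an authenticated Client instance):

bot = client.bots.create(
    name='echo-bot',
    group_id='12345678',
    callback_url='https://example.com/groupme/callback',
)
print(bot)  # the freshly created Bot resource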
def ErrorMessage(text, **kwargs): """Show an error message dialog to the user. This will raise a Zenity Error Dialog with a description of the error. text - A description of the error. kwargs - Optional command line parameters for Zenity such as height, width, etc.""" args = ['--text=%s' % text] for generic_args in kwargs_helper(kwargs): args.append('--%s=%s' % generic_args) run_zenity('--error', *args).wait()
Show an error message dialog to the user. This will raise a Zenity Error Dialog with a description of the error. text - A description of the error. kwargs - Optional command line parameters for Zenity such as height, width, etc.
Below is the the instruction that describes the task: ### Input: Show an error message dialog to the user. This will raise a Zenity Error Dialog with a description of the error. text - A description of the error. kwargs - Optional command line parameters for Zenity such as height, width, etc. ### Response: def ErrorMessage(text, **kwargs): """Show an error message dialog to the user. This will raise a Zenity Error Dialog with a description of the error. text - A description of the error. kwargs - Optional command line parameters for Zenity such as height, width, etc.""" args = ['--text=%s' % text] for generic_args in kwargs_helper(kwargs): args.append('--%s=%s' % generic_args) run_zenity('--error', *args).wait()
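A short usage sketch (requires the zenity binary on PATH; per the kwargs_helper pass-through above, extra keyword arguments become --width/--title style flags):

ErrorMessage('Unable to open the configuration file.',
             width=400, title='My App')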
def _to_str(uri: URIRef) -> str: """ Convert a FHIR style URI into a tag name to be used to retrieve data from a JSON representation Example: http://hl7.org/fhir/Provenance.agent.whoReference --> whoReference :param uri: URI to convert :return: tag name """ local_name = str(uri).replace(str(FHIR), '') return local_name.rsplit('.', 1)[1] if '.' in local_name else local_name
Convert a FHIR style URI into a tag name to be used to retrieve data from a JSON representation Example: http://hl7.org/fhir/Provenance.agent.whoReference --> whoReference :param uri: URI to convert :return: tag name
Below is the the instruction that describes the task: ### Input: Convert a FHIR style URI into a tag name to be used to retrieve data from a JSON representation Example: http://hl7.org/fhir/Provenance.agent.whoReference --> whoReference :param uri: URI to convert :return: tag name ### Response: def _to_str(uri: URIRef) -> str: """ Convert a FHIR style URI into a tag name to be used to retrieve data from a JSON representation Example: http://hl7.org/fhir/Provenance.agent.whoReference --> whoReference :param uri: URI to convert :return: tag name """ local_name = str(uri).replace(str(FHIR), '') return local_name.rsplit('.', 1)[1] if '.' in local_name else local_name
def _createAbsMagEstimationDict(): """ loads magnitude_estimation.dat which is from http://xoomer.virgilio.it/hrtrace/Sk.htm on 24/01/2014 and based on Schmid-Kaler (1982) creates a dict in the form [Classletter][ClassNumber][List of values for each L Class] """ magnitude_estimation_filepath = resource_filename( __name__, 'data/magnitude_estimation.dat') raw_table = np.loadtxt(magnitude_estimation_filepath, '|S5') absMagDict = { 'O': {}, 'B': {}, 'A': {}, 'F': {}, 'G': {}, 'K': {}, 'M': {}} for row in raw_table: if sys.hexversion >= 0x03000000: # otherwise we get byte ints or b' caused by 2to3 starClass = row[0].decode("utf-8") absMagDict[starClass[0]][int(starClass[1])] = [ float(x) for x in row[1:]] else: # dict of spectral type = {abs mag for each luminosity class} absMagDict[row[0][0]][int(row[0][1])] = [float(x) for x in row[1:]] # manually typed from table headers - used to match columns with the L # class (header) LClassRef = { 'V': 0, 'IV': 1, 'III': 2, 'II': 3, 'Ib': 4, 'Iab': 5, 'Ia': 6, 'Ia0': 7} return absMagDict, LClassRef
loads magnitude_estimation.dat which is from http://xoomer.virgilio.it/hrtrace/Sk.htm on 24/01/2014 and based on Schmid-Kaler (1982) creates a dict in the form [Classletter][ClassNumber][List of values for each L Class]
Below is the the instruction that describes the task: ### Input: loads magnitude_estimation.dat which is from http://xoomer.virgilio.it/hrtrace/Sk.htm on 24/01/2014 and based on Schmid-Kaler (1982) creates a dict in the form [Classletter][ClassNumber][List of values for each L Class] ### Response: def _createAbsMagEstimationDict(): """ loads magnitude_estimation.dat which is from http://xoomer.virgilio.it/hrtrace/Sk.htm on 24/01/2014 and based on Schmid-Kaler (1982) creates a dict in the form [Classletter][ClassNumber][List of values for each L Class] """ magnitude_estimation_filepath = resource_filename( __name__, 'data/magnitude_estimation.dat') raw_table = np.loadtxt(magnitude_estimation_filepath, '|S5') absMagDict = { 'O': {}, 'B': {}, 'A': {}, 'F': {}, 'G': {}, 'K': {}, 'M': {}} for row in raw_table: if sys.hexversion >= 0x03000000: # otherwise we get byte ints or b' caused by 2to3 starClass = row[0].decode("utf-8") absMagDict[starClass[0]][int(starClass[1])] = [ float(x) for x in row[1:]] else: # dict of spectral type = {abs mag for each luminosity class} absMagDict[row[0][0]][int(row[0][1])] = [float(x) for x in row[1:]] # manually typed from table headers - used to match columns with the L # class (header) LClassRef = { 'V': 0, 'IV': 1, 'III': 2, 'II': 3, 'Ib': 4, 'Iab': 5, 'Ia': 6, 'Ia0': 7} return absMagDict, LClassRef
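A brief usage sketch, assuming the packaged magnitude_estimation.dat data file is present; the lookup pattern follows the dict shape and LClassRef header mapping built above:

absMagDict, LClassRef = _createAbsMagEstimationDict()
# estimated absolute magnitude of a G2 main-sequence (luminosity class V) star
print(absMagDict['G'][2][LClassRef['V']])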
def rollforward(self, date): """Roll date forward to nearest start of quarter""" if self.onOffset(date): return date else: return date + QuarterBegin(month=self.month)
Roll date forward to nearest start of quarter
Below is the the instruction that describes the task: ### Input: Roll date forward to nearest start of quarter ### Response: def rollforward(self, date): """Roll date forward to nearest start of quarter""" if self.onOffset(date): return date else: return date + QuarterBegin(month=self.month)
def timestring_to_datetime(timestring):
    """
    Convert an ISO formatted date and time string to a datetime object.

    :param str timestring: String with date and time in ISO format.

    :rtype: datetime
    :return: datetime object
    """
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=UnicodeWarning)
        result = dateutil_parser(timestring)

    return result
Convert an ISO formatted date and time string to a datetime object.

:param str timestring: String with date and time in ISO format.

:rtype: datetime
:return: datetime object
Below is the the instruction that describes the task:
### Input:
Convert an ISO formatted date and time string to a datetime object.

:param str timestring: String with date and time in ISO format.

:rtype: datetime
:return: datetime object
### Response:
def timestring_to_datetime(timestring):
    """
    Convert an ISO formatted date and time string to a datetime object.

    :param str timestring: String with date and time in ISO format.

    :rtype: datetime
    :return: datetime object
    """
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=UnicodeWarning)
        result = dateutil_parser(timestring)

    return result
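A quick usage sketch (assuming dateutil_parser is python-dateutil's parse, as the name suggests):

dt = timestring_to_datetime('2014-03-05T12:30:45+00:00')
print(dt.isoformat())  # 2014-03-05T12:30:45+00:00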
def _process_raw_report(self, raw_report): "Default raw input report data handler" if not self.is_opened(): return if not self.__evt_handlers and not self.__raw_handler: return if not raw_report[0] and \ (raw_report[0] not in self.__input_report_templates): # windows sends an empty array when disconnecting # but, this might have a collision with report_id = 0 if not hid_device_path_exists(self.device_path): #windows XP sends empty report when disconnecting self.__reading_thread.abort() #device disconnected return if self.__raw_handler: #this might slow down data throughput, but at the expense of safety self.__raw_handler(helpers.ReadOnlyList(raw_report)) return # using pre-parsed report templates, by report id report_template = self.__input_report_templates[raw_report[0]] # old condition snapshot old_values = report_template.get_usages() # parse incoming data report_template.set_raw_data(raw_report) # and compare it event_applies = self.evt_decision evt_handlers = self.__evt_handlers for key in report_template.keys(): if key not in evt_handlers: continue #check if event handler exist! for event_kind, handlers in evt_handlers[key].items(): #key=event_kind, values=handler set new_value = report_template[key].value if not event_applies[event_kind](old_values[key], new_value): continue #decision applies, call handlers for function_handler in handlers: #check if the application wants some particular parameter if handlers[function_handler]: function_handler(new_value, event_kind, handlers[function_handler]) else: function_handler(new_value, event_kind)
Default raw input report data handler
Below is the the instruction that describes the task: ### Input: Default raw input report data handler ### Response: def _process_raw_report(self, raw_report): "Default raw input report data handler" if not self.is_opened(): return if not self.__evt_handlers and not self.__raw_handler: return if not raw_report[0] and \ (raw_report[0] not in self.__input_report_templates): # windows sends an empty array when disconnecting # but, this might have a collision with report_id = 0 if not hid_device_path_exists(self.device_path): #windows XP sends empty report when disconnecting self.__reading_thread.abort() #device disconnected return if self.__raw_handler: #this might slow down data throughput, but at the expense of safety self.__raw_handler(helpers.ReadOnlyList(raw_report)) return # using pre-parsed report templates, by report id report_template = self.__input_report_templates[raw_report[0]] # old condition snapshot old_values = report_template.get_usages() # parse incoming data report_template.set_raw_data(raw_report) # and compare it event_applies = self.evt_decision evt_handlers = self.__evt_handlers for key in report_template.keys(): if key not in evt_handlers: continue #check if event handler exist! for event_kind, handlers in evt_handlers[key].items(): #key=event_kind, values=handler set new_value = report_template[key].value if not event_applies[event_kind](old_values[key], new_value): continue #decision applies, call handlers for function_handler in handlers: #check if the application wants some particular parameter if handlers[function_handler]: function_handler(new_value, event_kind, handlers[function_handler]) else: function_handler(new_value, event_kind)
def _extract_one_pair(body): """ Extract one language-text pair from a :class:`~.LanguageMap`. This is used for tracking. """ if not body: return None, None try: return None, body[None] except KeyError: return min(body.items(), key=lambda x: x[0])
Extract one language-text pair from a :class:`~.LanguageMap`. This is used for tracking.
Below is the the instruction that describes the task: ### Input: Extract one language-text pair from a :class:`~.LanguageMap`. This is used for tracking. ### Response: def _extract_one_pair(body): """ Extract one language-text pair from a :class:`~.LanguageMap`. This is used for tracking. """ if not body: return None, None try: return None, body[None] except KeyError: return min(body.items(), key=lambda x: x[0])
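A small usage sketch illustrating the selection rule (an untagged entry wins outright, otherwise the lexicographically smallest language tag is chosen; plain dicts stand in here for the LanguageMap named in the docstring):

print(_extract_one_pair({}))                           # (None, None)
print(_extract_one_pair({None: 'untagged'}))           # (None, 'untagged')
print(_extract_one_pair({'en': 'hi', 'de': 'hallo'}))  # ('de', 'hallo')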
def extend_src_text(self, content, context, text_list, category): """Extend the source text list with the gathered text data.""" prefix = self.prefix + '-' if self.prefix else '' for comment, line, encoding in text_list: content.append( filters.SourceText( textwrap.dedent(comment), "%s (%d)" % (context, line), encoding, prefix + category ) )
Extend the source text list with the gathered text data.
Below is the the instruction that describes the task: ### Input: Extend the source text list with the gathered text data. ### Response: def extend_src_text(self, content, context, text_list, category): """Extend the source text list with the gathered text data.""" prefix = self.prefix + '-' if self.prefix else '' for comment, line, encoding in text_list: content.append( filters.SourceText( textwrap.dedent(comment), "%s (%d)" % (context, line), encoding, prefix + category ) )
def create_generic(self, content_object=None, **kwargs): """Create a generic object. :param content_object: the content object to create a new object for. """ if content_object: kwargs['content_type'] = ContentType.objects.get_for_model( content_object ) kwargs['object_id'] = content_object.id return self.create(**kwargs)
Create a generic object. :param content_object: the content object to create a new object for.
Below is the the instruction that describes the task: ### Input: Create a generic object. :param content_object: the content object to create a new object for. ### Response: def create_generic(self, content_object=None, **kwargs): """Create a generic object. :param content_object: the content object to create a new object for. """ if content_object: kwargs['content_type'] = ContentType.objects.get_for_model( content_object ) kwargs['object_id'] = content_object.id return self.create(**kwargs)
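A hedged usage sketch (Article, Comment, and the body field are hypothetical Django models/fields, shown only to illustrate the generic-FK wiring):

article = Article.objects.get(pk=1)
comment = Comment.objects.create_generic(content_object=article,
                                         body='Nice post!')
# content_type and object_id are filled in from `article` automatically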
def _basis2name(basis): """ converts the 'basis' into the proper name. """ component_name = ( 'DC' if basis == 'diffmap' else 'tSNE' if basis == 'tsne' else 'UMAP' if basis == 'umap' else 'PC' if basis == 'pca' else basis.replace('draw_graph_', '').upper() if 'draw_graph' in basis else basis) return component_name
converts the 'basis' into the proper name.
Below is the the instruction that describes the task: ### Input: converts the 'basis' into the proper name. ### Response: def _basis2name(basis): """ converts the 'basis' into the proper name. """ component_name = ( 'DC' if basis == 'diffmap' else 'tSNE' if basis == 'tsne' else 'UMAP' if basis == 'umap' else 'PC' if basis == 'pca' else basis.replace('draw_graph_', '').upper() if 'draw_graph' in basis else basis) return component_name
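A quick sketch of the mapping (the draw_graph case assumes a layout suffix such as 'fa'):

for basis in ('pca', 'tsne', 'umap', 'diffmap', 'draw_graph_fa'):
    print(basis, '->', _basis2name(basis))
# pca -> PC, tsne -> tSNE, umap -> UMAP, diffmap -> DC, draw_graph_fa -> FA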
def DBundle_for_Ntubes_HEDH(N, Do, pitch, angle=30): r'''A rough equation presented in the HEDH for estimating the tube bundle diameter necessary to fit a given number of tubes. No accuracy estimation given. Only 1 pass is supported. .. math:: D_{bundle} = (D_o + (\text{pitch})\sqrt{\frac{1}{0.78}}\cdot \sqrt{C_1\cdot N}) C1 = 0.866 for 30° and 60° layouts, and 1 for 45 and 90° layouts. Parameters ---------- N : float Number of tubes, [-] Do : float Tube outer diameter, [m] pitch : float Pitch; distance between two orthogonal tube centers, [m] angle : float The angle the tubes are positioned; 30, 45, 60 or 90, [degrees] Returns ------- DBundle : float Outer diameter of tube bundle, [m] Notes ----- Easily reversed from the main formulation. Examples -------- >>> DBundle_for_Ntubes_HEDH(N=928, Do=.028, pitch=.036, angle=30) 1.1839930795640605 References ---------- .. [1] Schlunder, Ernst U, and International Center for Heat and Mass Transfer. Heat Exchanger Design Handbook. Washington: Hemisphere Pub. Corp., 1983. ''' if angle == 30 or angle == 60: C1 = 13/15. elif angle == 45 or angle == 90: C1 = 1. else: raise Exception('Only 30, 60, 45 and 90 degree layouts are supported') return (Do + (1./.78)**0.5*pitch*(C1*N)**0.5)
r'''A rough equation presented in the HEDH for estimating the tube bundle diameter necessary to fit a given number of tubes. No accuracy estimation given. Only 1 pass is supported. .. math:: D_{bundle} = (D_o + (\text{pitch})\sqrt{\frac{1}{0.78}}\cdot \sqrt{C_1\cdot N}) C1 = 0.866 for 30° and 60° layouts, and 1 for 45 and 90° layouts. Parameters ---------- N : float Number of tubes, [-] Do : float Tube outer diameter, [m] pitch : float Pitch; distance between two orthogonal tube centers, [m] angle : float The angle the tubes are positioned; 30, 45, 60 or 90, [degrees] Returns ------- DBundle : float Outer diameter of tube bundle, [m] Notes ----- Easily reversed from the main formulation. Examples -------- >>> DBundle_for_Ntubes_HEDH(N=928, Do=.028, pitch=.036, angle=30) 1.1839930795640605 References ---------- .. [1] Schlunder, Ernst U, and International Center for Heat and Mass Transfer. Heat Exchanger Design Handbook. Washington: Hemisphere Pub. Corp., 1983.
Below is the the instruction that describes the task: ### Input: r'''A rough equation presented in the HEDH for estimating the tube bundle diameter necessary to fit a given number of tubes. No accuracy estimation given. Only 1 pass is supported. .. math:: D_{bundle} = (D_o + (\text{pitch})\sqrt{\frac{1}{0.78}}\cdot \sqrt{C_1\cdot N}) C1 = 0.866 for 30° and 60° layouts, and 1 for 45 and 90° layouts. Parameters ---------- N : float Number of tubes, [-] Do : float Tube outer diameter, [m] pitch : float Pitch; distance between two orthogonal tube centers, [m] angle : float The angle the tubes are positioned; 30, 45, 60 or 90, [degrees] Returns ------- DBundle : float Outer diameter of tube bundle, [m] Notes ----- Easily reversed from the main formulation. Examples -------- >>> DBundle_for_Ntubes_HEDH(N=928, Do=.028, pitch=.036, angle=30) 1.1839930795640605 References ---------- .. [1] Schlunder, Ernst U, and International Center for Heat and Mass Transfer. Heat Exchanger Design Handbook. Washington: Hemisphere Pub. Corp., 1983. ### Response: def DBundle_for_Ntubes_HEDH(N, Do, pitch, angle=30): r'''A rough equation presented in the HEDH for estimating the tube bundle diameter necessary to fit a given number of tubes. No accuracy estimation given. Only 1 pass is supported. .. math:: D_{bundle} = (D_o + (\text{pitch})\sqrt{\frac{1}{0.78}}\cdot \sqrt{C_1\cdot N}) C1 = 0.866 for 30° and 60° layouts, and 1 for 45 and 90° layouts. Parameters ---------- N : float Number of tubes, [-] Do : float Tube outer diameter, [m] pitch : float Pitch; distance between two orthogonal tube centers, [m] angle : float The angle the tubes are positioned; 30, 45, 60 or 90, [degrees] Returns ------- DBundle : float Outer diameter of tube bundle, [m] Notes ----- Easily reversed from the main formulation. Examples -------- >>> DBundle_for_Ntubes_HEDH(N=928, Do=.028, pitch=.036, angle=30) 1.1839930795640605 References ---------- .. [1] Schlunder, Ernst U, and International Center for Heat and Mass Transfer. Heat Exchanger Design Handbook. Washington: Hemisphere Pub. Corp., 1983. ''' if angle == 30 or angle == 60: C1 = 13/15. elif angle == 45 or angle == 90: C1 = 1. else: raise Exception('Only 30, 60, 45 and 90 degree layouts are supported') return (Do + (1./.78)**0.5*pitch*(C1*N)**0.5)
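As the Notes say, the relation is easily reversed; a sketch of the inverted form (consistent with the code above) for estimating the tube count that fits a given bundle diameter:

\[ N = \frac{0.78\,\left(D_{bundle} - D_o\right)^2}{C_1\,\text{pitch}^2} \]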
def solar_azimuth(self, dateandtime=None): """Calculates the solar azimuth angle for a specific date/time. :param dateandtime: The date and time for which to calculate the angle. :type dateandtime: :class:`~datetime.datetime` :returns: The azimuth angle in degrees clockwise from North. :rtype: float """ if self.astral is None: self.astral = Astral() if dateandtime is None: dateandtime = datetime.datetime.now(self.tz) elif not dateandtime.tzinfo: dateandtime = self.tz.localize(dateandtime) dateandtime = dateandtime.astimezone(pytz.UTC) return self.astral.solar_azimuth(dateandtime, self.latitude, self.longitude)
Calculates the solar azimuth angle for a specific date/time. :param dateandtime: The date and time for which to calculate the angle. :type dateandtime: :class:`~datetime.datetime` :returns: The azimuth angle in degrees clockwise from North. :rtype: float
Below is the the instruction that describes the task: ### Input: Calculates the solar azimuth angle for a specific date/time. :param dateandtime: The date and time for which to calculate the angle. :type dateandtime: :class:`~datetime.datetime` :returns: The azimuth angle in degrees clockwise from North. :rtype: float ### Response: def solar_azimuth(self, dateandtime=None): """Calculates the solar azimuth angle for a specific date/time. :param dateandtime: The date and time for which to calculate the angle. :type dateandtime: :class:`~datetime.datetime` :returns: The azimuth angle in degrees clockwise from North. :rtype: float """ if self.astral is None: self.astral = Astral() if dateandtime is None: dateandtime = datetime.datetime.now(self.tz) elif not dateandtime.tzinfo: dateandtime = self.tz.localize(dateandtime) dateandtime = dateandtime.astimezone(pytz.UTC) return self.astral.solar_azimuth(dateandtime, self.latitude, self.longitude)
def get_git_status(self):
    """
    Gets git and init versions and commits since the init version
    """
    ## get git branch
    self._get_git_branch()

    ## get tag in the init file
    self._get_init_release_tag()

    ## get log commits since <tag>
    try:
        self._get_log_commits()
    except Exception as inst:
        raise Exception(
    """
Error: the version in __init__.py is {}, so 'git log' is looking
for commits that have happened since that version, but it appears
there is no existing tag for that version. You may need to roll
back the version in __init__.py to what is actually committed.
Check with `git tag`.
--------
{}
""".format(self.init_version, inst))

    ## where are we at?
    print("__init__.__version__ == '{}':".format(self.init_version))
    print("'{}' is {} commits ahead of origin/{}"
          .format(self.tag, len(self.commits), self.init_version))
Gets git and init versions and commits since the init version
Below is the the instruction that describes the task:
### Input:
Gets git and init versions and commits since the init version
### Response:
def get_git_status(self):
    """
    Gets git and init versions and commits since the init version
    """
    ## get git branch
    self._get_git_branch()

    ## get tag in the init file
    self._get_init_release_tag()

    ## get log commits since <tag>
    try:
        self._get_log_commits()
    except Exception as inst:
        raise Exception(
    """
Error: the version in __init__.py is {}, so 'git log' is looking
for commits that have happened since that version, but it appears
there is no existing tag for that version. You may need to roll
back the version in __init__.py to what is actually committed.
Check with `git tag`.
--------
{}
""".format(self.init_version, inst))

    ## where are we at?
    print("__init__.__version__ == '{}':".format(self.init_version))
    print("'{}' is {} commits ahead of origin/{}"
          .format(self.tag, len(self.commits), self.init_version))
def aschannel(self) -> 'Channel': """Converts a Gate into a Channel""" N = self.qubit_nb R = 4 tensor = bk.outer(self.tensor, self.H.tensor) tensor = bk.reshape(tensor, [2**N]*R) tensor = bk.transpose(tensor, [0, 3, 1, 2]) return Channel(tensor, self.qubits)
Converts a Gate into a Channel
Below is the the instruction that describes the task: ### Input: Converts a Gate into a Channel ### Response: def aschannel(self) -> 'Channel': """Converts a Gate into a Channel""" N = self.qubit_nb R = 4 tensor = bk.outer(self.tensor, self.H.tensor) tensor = bk.reshape(tensor, [2**N]*R) tensor = bk.transpose(tensor, [0, 3, 1, 2]) return Channel(tensor, self.qubits)
def to_generics(instruments, weights):
    """
    Map tradeable instruments to generics given weights and tradeable
    instrument holdings.

    This is solving the equation Ax = b where A is the weights, and b is the
    instrument holdings. When Ax = b has no solution we solve for x' such that
    Ax' is closest to b in the least squares sense with the additional
    constraint that sum(x') = sum(instruments).

    Scenarios with exact solutions and non exact solutions are depicted below

    +------------+-----+-----+              Instruments
    | contract   | CL1 | CL2 |  ------------------------------------
    |------------+-----+-----|  Scenario 1 | Scenario 2 | Scenario 3
    | CLX16      | 0.5 | 0   |  10         | 10         | 10
    | CLZ16      | 0.5 | 0.5 |  20         | 20         | 25
    | CLF17      | 0   | 0.5 |  10         | 11         | 11
    +------------+-----+-----+

    In scenario 1 the solution is given by x = [20, 20], in scenario 2 the
    solution is given by x = [19.5, 21.5], and in scenario 3 the solution is
    given by x = [22, 24].

    NOTE: Integer solutions are not guaranteed, as demonstrated above. This is
    intended for use with contract numbers but can also be used with notional
    amounts of contracts.

    Parameters
    ----------
    instruments: pandas.Series
        Series of tradeable instrument holdings where the index is the name of
        the tradeable instrument and the value is the number of that
        instrument held.
    weights: pandas.DataFrame or dict
        A pandas.DataFrame of loadings of generic contracts on tradeable
        instruments for a given date. The columns are generic instruments and
        the index is strings representing instrument names. If dict is given
        keys should be root generic, e.g. 'CL', and values should be
        pandas.DataFrames of loadings. The union of all indexes should be a
        superset of the instruments.index

    Returns
    -------
    A pandas.Series where the index is the generic and the value is the number
    of contracts, sorted by index.

    Examples
    --------
    >>> import pandas as pd
    >>> import mapping.mappings as mappings
    >>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
    ...                    index=["CLX16", "CLZ16", "CLF17"],
    ...                    columns=["CL1", "CL2"])
    >>> instrs = pd.Series([10, 20, 10], index=["CLX16", "CLZ16", "CLF17"])
    >>> generics = mappings.to_generics(instrs, wts)
    """
    if not isinstance(weights, dict):
        weights = {"": weights}

    allocations = []
    unmapped_instr = instruments.index
    for key in weights:
        w = weights[key]
        # may not always have instrument holdings for a set of weights so allow
        # weights to be a superset of instruments, drop values where no
        # holdings
        winstrs = instruments.reindex(w.index).dropna()
        w = w.loc[winstrs.index]
        # drop generics where all weights for instruments on the generic are 0.
        # This avoids numerical rounding issues where solution has epsilon
        # weight on a generic
        w = w.loc[:, ~(w == 0).all(axis=0)]

        unmapped_instr = unmapped_instr.difference(winstrs.index)

        A = w.values
        b = winstrs.values
        x = cvxpy.Variable(A.shape[1])
        constrs = [CVX_SUM(x) == np.sum(b)]
        obj = cvxpy.Minimize(cvxpy.sum_squares(A * x - b))
        prob = cvxpy.Problem(obj, constrs)
        prob.solve()
        vals = np.array(x.value).squeeze()
        idx = w.columns.tolist()
        allocations.append(pd.Series(vals, index=idx))

    if len(unmapped_instr) > 0:
        raise KeyError("Unmapped instruments %s. weights must be a superset of"
                       " instruments" % unmapped_instr.tolist())

    allocations = pd.concat(allocations, axis=0)
    allocations = allocations.sort_index()
    return allocations
Map tradeable instruments to generics given weights and tradeable
instrument holdings.

This is solving the equation Ax = b where A is the weights, and b is the
instrument holdings. When Ax = b has no solution we solve for x' such that
Ax' is closest to b in the least squares sense with the additional
constraint that sum(x') = sum(instruments).

Scenarios with exact solutions and non exact solutions are depicted below

+------------+-----+-----+              Instruments
| contract   | CL1 | CL2 |  ------------------------------------
|------------+-----+-----|  Scenario 1 | Scenario 2 | Scenario 3
| CLX16      | 0.5 | 0   |  10         | 10         | 10
| CLZ16      | 0.5 | 0.5 |  20         | 20         | 25
| CLF17      | 0   | 0.5 |  10         | 11         | 11
+------------+-----+-----+

In scenario 1 the solution is given by x = [20, 20], in scenario 2 the
solution is given by x = [19.5, 21.5], and in scenario 3 the solution is
given by x = [22, 24].

NOTE: Integer solutions are not guaranteed, as demonstrated above. This is
intended for use with contract numbers but can also be used with notional
amounts of contracts.

Parameters
----------
instruments: pandas.Series
    Series of tradeable instrument holdings where the index is the name of
    the tradeable instrument and the value is the number of that
    instrument held.
weights: pandas.DataFrame or dict
    A pandas.DataFrame of loadings of generic contracts on tradeable
    instruments for a given date. The columns are generic instruments and
    the index is strings representing instrument names. If dict is given
    keys should be root generic, e.g. 'CL', and values should be
    pandas.DataFrames of loadings. The union of all indexes should be a
    superset of the instruments.index

Returns
-------
A pandas.Series where the index is the generic and the value is the number
of contracts, sorted by index.

Examples
--------
>>> import pandas as pd
>>> import mapping.mappings as mappings
>>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
...                    index=["CLX16", "CLZ16", "CLF17"],
...                    columns=["CL1", "CL2"])
>>> instrs = pd.Series([10, 20, 10], index=["CLX16", "CLZ16", "CLF17"])
>>> generics = mappings.to_generics(instrs, wts)
Below is the the instruction that describes the task:
### Input:
Map tradeable instruments to generics given weights and tradeable
instrument holdings.

This is solving the equation Ax = b where A is the weights, and b is the
instrument holdings. When Ax = b has no solution we solve for x' such that
Ax' is closest to b in the least squares sense with the additional
constraint that sum(x') = sum(instruments).

Scenarios with exact solutions and non exact solutions are depicted below

+------------+-----+-----+              Instruments
| contract   | CL1 | CL2 |  ------------------------------------
|------------+-----+-----|  Scenario 1 | Scenario 2 | Scenario 3
| CLX16      | 0.5 | 0   |  10         | 10         | 10
| CLZ16      | 0.5 | 0.5 |  20         | 20         | 25
| CLF17      | 0   | 0.5 |  10         | 11         | 11
+------------+-----+-----+

In scenario 1 the solution is given by x = [20, 20], in scenario 2 the
solution is given by x = [19.5, 21.5], and in scenario 3 the solution is
given by x = [22, 24].

NOTE: Integer solutions are not guaranteed, as demonstrated above. This is
intended for use with contract numbers but can also be used with notional
amounts of contracts.

Parameters
----------
instruments: pandas.Series
    Series of tradeable instrument holdings where the index is the name of
    the tradeable instrument and the value is the number of that
    instrument held.
weights: pandas.DataFrame or dict
    A pandas.DataFrame of loadings of generic contracts on tradeable
    instruments for a given date. The columns are generic instruments and
    the index is strings representing instrument names. If dict is given
    keys should be root generic, e.g. 'CL', and values should be
    pandas.DataFrames of loadings. The union of all indexes should be a
    superset of the instruments.index

Returns
-------
A pandas.Series where the index is the generic and the value is the number
of contracts, sorted by index.

Examples
--------
>>> import pandas as pd
>>> import mapping.mappings as mappings
>>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
...                    index=["CLX16", "CLZ16", "CLF17"],
...                    columns=["CL1", "CL2"])
>>> instrs = pd.Series([10, 20, 10], index=["CLX16", "CLZ16", "CLF17"])
>>> generics = mappings.to_generics(instrs, wts)
### Response:
def to_generics(instruments, weights):
    """
    Map tradeable instruments to generics given weights and tradeable
    instrument holdings.

    This is solving the equation Ax = b where A is the weights, and b is the
    instrument holdings. When Ax = b has no solution we solve for x' such that
    Ax' is closest to b in the least squares sense with the additional
    constraint that sum(x') = sum(instruments).

    Scenarios with exact solutions and non exact solutions are depicted below

    +------------+-----+-----+              Instruments
    | contract   | CL1 | CL2 |  ------------------------------------
    |------------+-----+-----|  Scenario 1 | Scenario 2 | Scenario 3
    | CLX16      | 0.5 | 0   |  10         | 10         | 10
    | CLZ16      | 0.5 | 0.5 |  20         | 20         | 25
    | CLF17      | 0   | 0.5 |  10         | 11         | 11
    +------------+-----+-----+

    In scenario 1 the solution is given by x = [20, 20], in scenario 2 the
    solution is given by x = [19.5, 21.5], and in scenario 3 the solution is
    given by x = [22, 24].

    NOTE: Integer solutions are not guaranteed, as demonstrated above. This is
    intended for use with contract numbers but can also be used with notional
    amounts of contracts.

    Parameters
    ----------
    instruments: pandas.Series
        Series of tradeable instrument holdings where the index is the name of
        the tradeable instrument and the value is the number of that
        instrument held.
    weights: pandas.DataFrame or dict
        A pandas.DataFrame of loadings of generic contracts on tradeable
        instruments for a given date. The columns are generic instruments and
        the index is strings representing instrument names. If dict is given
        keys should be root generic, e.g. 'CL', and values should be
        pandas.DataFrames of loadings. The union of all indexes should be a
        superset of the instruments.index

    Returns
    -------
    A pandas.Series where the index is the generic and the value is the number
    of contracts, sorted by index.

    Examples
    --------
    >>> import pandas as pd
    >>> import mapping.mappings as mappings
    >>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
    ...                    index=["CLX16", "CLZ16", "CLF17"],
    ...                    columns=["CL1", "CL2"])
    >>> instrs = pd.Series([10, 20, 10], index=["CLX16", "CLZ16", "CLF17"])
    >>> generics = mappings.to_generics(instrs, wts)
    """
    if not isinstance(weights, dict):
        weights = {"": weights}

    allocations = []
    unmapped_instr = instruments.index
    for key in weights:
        w = weights[key]
        # may not always have instrument holdings for a set of weights so allow
        # weights to be a superset of instruments, drop values where no
        # holdings
        winstrs = instruments.reindex(w.index).dropna()
        w = w.loc[winstrs.index]
        # drop generics where all weights for instruments on the generic are 0.
        # This avoids numerical rounding issues where solution has epsilon
        # weight on a generic
        w = w.loc[:, ~(w == 0).all(axis=0)]

        unmapped_instr = unmapped_instr.difference(winstrs.index)

        A = w.values
        b = winstrs.values
        x = cvxpy.Variable(A.shape[1])
        constrs = [CVX_SUM(x) == np.sum(b)]
        obj = cvxpy.Minimize(cvxpy.sum_squares(A * x - b))
        prob = cvxpy.Problem(obj, constrs)
        prob.solve()
        vals = np.array(x.value).squeeze()
        idx = w.columns.tolist()
        allocations.append(pd.Series(vals, index=idx))

    if len(unmapped_instr) > 0:
        raise KeyError("Unmapped instruments %s. weights must be a superset of"
                       " instruments" % unmapped_instr.tolist())

    allocations = pd.concat(allocations, axis=0)
    allocations = allocations.sort_index()
    return allocations
def write_file(self): '''save config to local file''' if self.config: try: with open(self.config_file, 'w') as file: json.dump(self.config, file) except IOError as error: print('Error:', error) return
save config to local file
Below is the the instruction that describes the task: ### Input: save config to local file ### Response: def write_file(self): '''save config to local file''' if self.config: try: with open(self.config_file, 'w') as file: json.dump(self.config, file) except IOError as error: print('Error:', error) return
def parse_args(): """Parse the command line arguments.""" parser = argparse.ArgumentParser( description='Check kafka current status', ) parser.add_argument( "--cluster-type", "-t", dest='cluster_type', required=True, help='Type of cluster', default=None, ) parser.add_argument( "--cluster-name", "-c", dest='cluster_name', help='Name of the cluster', ) parser.add_argument( '--discovery-base-path', dest='discovery_base_path', type=str, help='Path of the directory containing the <cluster_type>.yaml config', ) parser.add_argument( "--broker-id", help='The broker id where the check is running. Set to -1 if you use automatic ' 'broker ids, and it will read the id from data-path instead. This parameter is ' 'required only in case controller-only or first-broker-only are used.', type=convert_to_broker_id, ) parser.add_argument( "--data-path", help='Path to the Kafka data folder.', ) parser.add_argument( '--controller-only', action="store_true", help='If this parameter is specified, it will do nothing and succeed on ' 'non-controller brokers. Default: %(default)s', ) parser.add_argument( '--first-broker-only', action='store_true', help='If specified, the command will only perform the check if ' 'broker_id is the lowest broker id in the cluster. If it is not the lowest, ' 'it will not perform any check and succeed immediately. ' 'Default: %(default)s', ) parser.add_argument( '-v', '--verbose', help='print verbose execution information. Default: %(default)s', action="store_true", default=False, ) parser.add_argument( '-j', '--json', help='Print output in json format. Default: %(default)s', action="store_true", default=False, ) subparsers = parser.add_subparsers() MinIsrCmd().add_subparser(subparsers) ReplicaUnavailabilityCmd().add_subparser(subparsers) ReplicationFactorCmd().add_subparser(subparsers) OfflineCmd().add_subparser(subparsers) return parser.parse_args()
Parse the command line arguments.
Below is the the instruction that describes the task: ### Input: Parse the command line arguments. ### Response: def parse_args(): """Parse the command line arguments.""" parser = argparse.ArgumentParser( description='Check kafka current status', ) parser.add_argument( "--cluster-type", "-t", dest='cluster_type', required=True, help='Type of cluster', default=None, ) parser.add_argument( "--cluster-name", "-c", dest='cluster_name', help='Name of the cluster', ) parser.add_argument( '--discovery-base-path', dest='discovery_base_path', type=str, help='Path of the directory containing the <cluster_type>.yaml config', ) parser.add_argument( "--broker-id", help='The broker id where the check is running. Set to -1 if you use automatic ' 'broker ids, and it will read the id from data-path instead. This parameter is ' 'required only in case controller-only or first-broker-only are used.', type=convert_to_broker_id, ) parser.add_argument( "--data-path", help='Path to the Kafka data folder.', ) parser.add_argument( '--controller-only', action="store_true", help='If this parameter is specified, it will do nothing and succeed on ' 'non-controller brokers. Default: %(default)s', ) parser.add_argument( '--first-broker-only', action='store_true', help='If specified, the command will only perform the check if ' 'broker_id is the lowest broker id in the cluster. If it is not the lowest, ' 'it will not perform any check and succeed immediately. ' 'Default: %(default)s', ) parser.add_argument( '-v', '--verbose', help='print verbose execution information. Default: %(default)s', action="store_true", default=False, ) parser.add_argument( '-j', '--json', help='Print output in json format. Default: %(default)s', action="store_true", default=False, ) subparsers = parser.add_subparsers() MinIsrCmd().add_subparser(subparsers) ReplicaUnavailabilityCmd().add_subparser(subparsers) ReplicationFactorCmd().add_subparser(subparsers) OfflineCmd().add_subparser(subparsers) return parser.parse_args()
def send_discuss_msg_async(self, *, discuss_id, message, auto_escape=False):
    """
    Send a discussion group message (asynchronous version)
    ------------

    :param int discuss_id: discussion group ID (normally not visible; it must be obtained from the data of a reported discussion group message)
    :param str | list[ dict[ str, unknown ] ] message: the content to send
    :param bool auto_escape: whether to send the message content as plain text (i.e. without parsing CQ codes); ignored when `message` is of type `list`
    :return: None
    :rtype: None
    """
    return super().__getattr__('send_discuss_msg_async') \
        (discuss_id=discuss_id, message=message, auto_escape=auto_escape)
Send a discussion group message (asynchronous version)
------------

:param int discuss_id: discussion group ID (normally not visible; it must be obtained from the data of a reported discussion group message)
:param str | list[ dict[ str, unknown ] ] message: the content to send
:param bool auto_escape: whether to send the message content as plain text (i.e. without parsing CQ codes); ignored when `message` is of type `list`
:return: None
:rtype: None
Below is the the instruction that describes the task:
### Input:
Send a discussion group message (asynchronous version)
------------

:param int discuss_id: discussion group ID (normally not visible; it must be obtained from the data of a reported discussion group message)
:param str | list[ dict[ str, unknown ] ] message: the content to send
:param bool auto_escape: whether to send the message content as plain text (i.e. without parsing CQ codes); ignored when `message` is of type `list`
:return: None
:rtype: None
### Response:
def send_discuss_msg_async(self, *, discuss_id, message, auto_escape=False):
    """
    Send a discussion group message (asynchronous version)
    ------------

    :param int discuss_id: discussion group ID (normally not visible; it must be obtained from the data of a reported discussion group message)
    :param str | list[ dict[ str, unknown ] ] message: the content to send
    :param bool auto_escape: whether to send the message content as plain text (i.e. without parsing CQ codes); ignored when `message` is of type `list`
    :return: None
    :rtype: None
    """
    return super().__getattr__('send_discuss_msg_async') \
        (discuss_id=discuss_id, message=message, auto_escape=auto_escape)
def piece_at(self, square): '''Gets the piece at the given square.''' mask = BB_SQUARES[square] color = int(bool(self.occupied[WHITE] & mask)) piece_type = self.piece_type_at(square) if piece_type: return Piece(piece_type, color)
Gets the piece at the given square.
Below is the the instruction that describes the task: ### Input: Gets the piece at the given square. ### Response: def piece_at(self, square): '''Gets the piece at the given square.''' mask = BB_SQUARES[square] color = int(bool(self.occupied[WHITE] & mask)) piece_type = self.piece_type_at(square) if piece_type: return Piece(piece_type, color)
def fetch_file(self, in_path, out_path): ''' save a remote file to the specified path ''' vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host) try: self.sftp = self._connect_sftp() except: raise errors.AnsibleError("failed to open a SFTP connection") try: self.sftp.get(in_path, out_path) except IOError: raise errors.AnsibleError("failed to transfer file from %s" % in_path)
save a remote file to the specified path
Below is the the instruction that describes the task: ### Input: save a remote file to the specified path ### Response: def fetch_file(self, in_path, out_path): ''' save a remote file to the specified path ''' vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host) try: self.sftp = self._connect_sftp() except: raise errors.AnsibleError("failed to open a SFTP connection") try: self.sftp.get(in_path, out_path) except IOError: raise errors.AnsibleError("failed to transfer file from %s" % in_path)
def _unfloat(flt, precision=5): """Function to convert float to 'decimal point assumed' format >>> _unfloat(0) '00000-0' >>> _unfloat(3.4473e-4) '34473-3' >>> _unfloat(-6.0129e-05) '-60129-4' >>> _unfloat(4.5871e-05) '45871-4' """ if flt == 0.: return "{}-0".format("0" * precision) num, _, exp = "{:.{}e}".format(flt, precision - 1).partition('e') exp = int(exp) num = num.replace('.', '') return "%s%d" % (num, exp + 1)
Function to convert float to 'decimal point assumed' format >>> _unfloat(0) '00000-0' >>> _unfloat(3.4473e-4) '34473-3' >>> _unfloat(-6.0129e-05) '-60129-4' >>> _unfloat(4.5871e-05) '45871-4'
Below is the instruction that describes the task:
### Input:
Function to convert float to 'decimal point assumed' format

>>> _unfloat(0)
'00000-0'
>>> _unfloat(3.4473e-4)
'34473-3'
>>> _unfloat(-6.0129e-05)
'-60129-4'
>>> _unfloat(4.5871e-05)
'45871-4'
### Response:
def _unfloat(flt, precision=5):
    """Function to convert float to 'decimal point assumed' format

    >>> _unfloat(0)
    '00000-0'
    >>> _unfloat(3.4473e-4)
    '34473-3'
    >>> _unfloat(-6.0129e-05)
    '-60129-4'
    >>> _unfloat(4.5871e-05)
    '45871-4'
    """
    if flt == 0.:
        return "{}-0".format("0" * precision)

    num, _, exp = "{:.{}e}".format(flt, precision - 1).partition('e')
    exp = int(exp)
    num = num.replace('.', '')

    return "%s%d" % (num, exp + 1)
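For illustration, a hypothetical inverse (not part of the source) that decodes the field back to a float. It assumes the layout produced above: an optional sign, mantissa digits with an implied leading decimal point, and a signed single-digit exponent:

def _refloat(field):
    """Decode 'decimal point assumed' notation, e.g. '34473-3' -> 3.4473e-04."""
    mantissa, exponent = field[:-2], int(field[-2:])
    sign = -1.0 if mantissa.startswith('-') else 1.0
    return sign * float('0.' + mantissa.lstrip('-')) * 10.0 ** exponent

assert abs(_refloat('34473-3') - 3.4473e-4) < 1e-15
assert abs(_refloat('-60129-4') - -6.0129e-05) < 1e-15
assert _refloat('00000-0') == 0.0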
def info(self): '''Return the header fields as a Message: Returns: Message: An instance of :class:`email.message.Message`. If Python 2, returns an instance of :class:`mimetools.Message`. ''' if sys.version_info[0] == 2: return mimetools.Message(io.StringIO(str(self._response.fields))) else: return email.message_from_string(str(self._response.fields))
Return the header fields as a Message: Returns: Message: An instance of :class:`email.message.Message`. If Python 2, returns an instance of :class:`mimetools.Message`.
Below is the instruction that describes the task:
### Input:
Return the header fields as a Message:

Returns:
    Message: An instance of :class:`email.message.Message`. If
    Python 2, returns an instance of :class:`mimetools.Message`.
### Response:
def info(self):
    '''Return the header fields as a Message:

    Returns:
        Message: An instance of :class:`email.message.Message`. If
        Python 2, returns an instance of :class:`mimetools.Message`.
    '''
    if sys.version_info[0] == 2:
        return mimetools.Message(io.StringIO(str(self._response.fields)))
    else:
        return email.message_from_string(str(self._response.fields))
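A quick standalone demonstration of the Python 3 branch — parsing raw header text into an email.message.Message with case-insensitive field access:

import email

raw = 'Content-Type: text/html; charset=utf-8\r\nContent-Length: 12\r\n\r\n'
headers = email.message_from_string(raw)
print(headers['Content-Type'])        # text/html; charset=utf-8
print(headers.get('content-length'))  # 12 (lookups are case-insensitive)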
def write(self, data, end="\n", flush=True): """ Output data to stdout and/or file """ if not self.nostdout: self.stdout.write(data+end) if self.file is not None: self.file.write(data+end) if flush: self.flush()
Output data to stdout and/or file
Below is the instruction that describes the task:
### Input:
Output data to stdout and/or file
### Response:
def write(self, data, end="\n", flush=True):
    """ Output data to stdout and/or file """
    if not self.nostdout:
        self.stdout.write(data+end)
    if self.file is not None:
        self.file.write(data+end)
    if flush:
        self.flush()
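A self-contained sketch of the tee pattern this method implements; the surrounding class is not shown, so the constructor here is an assumption:

import sys

class TeeWriter:
    def __init__(self, path=None, nostdout=False):
        self.stdout = sys.stdout
        self.nostdout = nostdout
        self.file = open(path, 'w') if path else None

    def write(self, data, end="\n", flush=True):
        if not self.nostdout:
            self.stdout.write(data + end)
        if self.file is not None:
            self.file.write(data + end)
        if flush:
            self.flush()

    def flush(self):
        if not self.nostdout:
            self.stdout.flush()
        if self.file is not None:
            self.file.flush()

w = TeeWriter('run.log')
w.write('hello')  # appears on stdout and in run.log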
def external_aborted(self, params):
    """
    Immediately abort the job by server. This runs in the Client:read() thread.
    """
    self.ended = True
    self.running = False

    # When the server sends an abort signal, we really have to close immediately,
    # since for example the job has already been deleted,
    # without touching the git repository or the client any further.
    os._exit(1)
Immediately abort the job by server. This runs in the Client:read() thread.
Below is the instruction that describes the task:
### Input:
Immediately abort the job by server. This runs in the Client:read() thread.
### Response:
def external_aborted(self, params):
    """
    Immediately abort the job by server. This runs in the Client:read() thread.
    """
    self.ended = True
    self.running = False

    # When the server sends an abort signal, we really have to close immediately,
    # since for example the job has already been deleted,
    # without touching the git repository or the client any further.
    os._exit(1)
def rs_find_errors(err_loc, nmess, generator=2): '''Find the roots (ie, where evaluation = zero) of error polynomial by bruteforce trial, this is a sort of Chien's search (but less efficient, Chien's search is a way to evaluate the polynomial such that each evaluation only takes constant time).''' # nmess = length of whole codeword (message + ecc symbols) errs = len(err_loc) - 1 err_pos = [] for i in xrange(nmess): # normally we should try all 2^8 possible values, but here we optimize to just check the interesting symbols if gf_poly_eval(err_loc, gf_pow(generator, i)) == 0: # It's a 0? Bingo, it's a root of the error locator polynomial, in other terms this is the location of an error err_pos.append(nmess - 1 - i) # Sanity check: the number of errors/errata positions found should be exactly the same as the length of the errata locator polynomial if len(err_pos) != errs: # TODO: to decode messages+ecc with length n > 255, we may try to use a bruteforce approach: the correct positions ARE in the final array j, but the problem is because we are above the Galois Field's range, there is a wraparound so that for example if j should be [0, 1, 2, 3], we will also get [255, 256, 257, 258] (because 258 % 255 == 3, same for the other values), so we can't discriminate. The issue is that fixing any errs_nb errors among those will always give a correct output message (in the sense that the syndrome will be all 0), so we may not even be able to check if that's correct or not, so I'm not sure the bruteforce approach may even be possible. raise ReedSolomonError("Too many (or few) errors found by Chien Search for the errata locator polynomial!") return err_pos
Find the roots (ie, where evaluation = zero) of error polynomial by bruteforce trial, this is a sort of Chien's search (but less efficient, Chien's search is a way to evaluate the polynomial such that each evaluation only takes constant time).
Below is the instruction that describes the task:
### Input:
Find the roots (ie, where evaluation = zero) of error polynomial by bruteforce trial, this is a sort of Chien's search (but less efficient, Chien's search is a way to evaluate the polynomial such that each evaluation only takes constant time).
### Response:
def rs_find_errors(err_loc, nmess, generator=2):
    '''Find the roots (ie, where evaluation = zero) of error polynomial by bruteforce trial, this is a sort of Chien's search (but less efficient, Chien's search is a way to evaluate the polynomial such that each evaluation only takes constant time).'''
    # nmess = length of whole codeword (message + ecc symbols)
    errs = len(err_loc) - 1
    err_pos = []
    for i in xrange(nmess): # normally we should try all 2^8 possible values, but here we optimize to just check the interesting symbols
        if gf_poly_eval(err_loc, gf_pow(generator, i)) == 0: # It's a 0? Bingo, it's a root of the error locator polynomial, in other terms this is the location of an error
            err_pos.append(nmess - 1 - i)
    # Sanity check: the number of errors/errata positions found should be exactly the same as the length of the errata locator polynomial
    if len(err_pos) != errs:
        # TODO: to decode messages+ecc with length n > 255, we may try to use a bruteforce approach: the correct positions ARE in the final array j, but the problem is because we are above the Galois Field's range, there is a wraparound so that for example if j should be [0, 1, 2, 3], we will also get [255, 256, 257, 258] (because 258 % 255 == 3, same for the other values), so we can't discriminate. The issue is that fixing any errs_nb errors among those will always give a correct output message (in the sense that the syndrome will be all 0), so we may not even be able to check if that's correct or not, so I'm not sure the bruteforce approach may even be possible.
        raise ReedSolomonError("Too many (or few) errors found by Chien Search for the errata locator polynomial!")
    return err_pos
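The Galois-field helpers this function leans on are not shown; below is a minimal sketch of them for GF(2^8), assuming the common 0x11d primitive polynomial that this style of Reed-Solomon codec typically uses:

# Build exp/log tables for GF(2^8) over the 0x11d primitive polynomial (assumed).
GF_EXP, GF_LOG = [0] * 512, [0] * 256
_x = 1
for _i in range(255):
    GF_EXP[_i] = _x
    GF_LOG[_x] = _i
    _x <<= 1
    if _x & 0x100:
        _x ^= 0x11d
for _i in range(255, 512):
    GF_EXP[_i] = GF_EXP[_i - 255]  # duplicated so gf_mul can skip a modulo

def gf_mul(a, b):
    if a == 0 or b == 0:
        return 0
    return GF_EXP[GF_LOG[a] + GF_LOG[b]]

def gf_pow(a, n):
    # a must be nonzero; n is a non-negative integer
    return GF_EXP[(GF_LOG[a] * n) % 255]

def gf_poly_eval(poly, x):
    # Horner's scheme; poly[0] holds the highest-degree coefficient
    y = poly[0]
    for coef in poly[1:]:
        y = gf_mul(y, x) ^ coef
    return y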
def find_by_name(collection, name, exact=True): """ Searches collection by resource name. :param rightscale.ResourceCollection collection: The collection in which to look for :attr:`name`. :param str name: The name to look for in collection. :param bool exact: A RightScale ``index`` search with a :attr:`name` filter can return multiple results because it does a substring match on resource names. So any resource that contains the specified name will be returned. The :attr:`exact` flag controls whether to attempt to find an exact match for the given name. If :attr:`exact` is ``False``, this will return a list of all the matches. The default behaviour is to perform an exact match and return a single result. Returns ``None`` if no resources are found with a matching name. """ params = {'filter[]': ['name==%s' % name]} found = collection.index(params=params) if not exact and len(found) > 0: return found for f in found: if f.soul['name'] == name: return f
Searches collection by resource name. :param rightscale.ResourceCollection collection: The collection in which to look for :attr:`name`. :param str name: The name to look for in collection. :param bool exact: A RightScale ``index`` search with a :attr:`name` filter can return multiple results because it does a substring match on resource names. So any resource that contains the specified name will be returned. The :attr:`exact` flag controls whether to attempt to find an exact match for the given name. If :attr:`exact` is ``False``, this will return a list of all the matches. The default behaviour is to perform an exact match and return a single result. Returns ``None`` if no resources are found with a matching name.
Below is the instruction that describes the task:
### Input:
Searches collection by resource name.

:param rightscale.ResourceCollection collection: The collection in
    which to look for :attr:`name`.

:param str name: The name to look for in collection.

:param bool exact: A RightScale ``index`` search with a :attr:`name`
    filter can return multiple results because it does a substring
    match on resource names.  So any resource that contains the
    specified name will be returned.  The :attr:`exact` flag controls
    whether to attempt to find an exact match for the given name.  If
    :attr:`exact` is ``False``, this will return a list of all the
    matches.  The default behaviour is to perform an exact match and
    return a single result.

Returns ``None`` if no resources are found with a matching name.
### Response:
def find_by_name(collection, name, exact=True):
    """
    Searches collection by resource name.

    :param rightscale.ResourceCollection collection: The collection in
        which to look for :attr:`name`.

    :param str name: The name to look for in collection.

    :param bool exact: A RightScale ``index`` search with a :attr:`name`
        filter can return multiple results because it does a substring
        match on resource names.  So any resource that contains the
        specified name will be returned.  The :attr:`exact` flag controls
        whether to attempt to find an exact match for the given name.  If
        :attr:`exact` is ``False``, this will return a list of all the
        matches.  The default behaviour is to perform an exact match and
        return a single result.

    Returns ``None`` if no resources are found with a matching name.
    """
    params = {'filter[]': ['name==%s' % name]}
    found = collection.index(params=params)
    if not exact and len(found) > 0:
        return found
    for f in found:
        if f.soul['name'] == name:
            return f
def next_basis_label_or_index(self, label_or_index, n=1): """Given the label or index of a basis state, return the label/index of the next basis state. More generally, if `n` is given, return the `n`'th next basis state label/index; `n` may also be negative to obtain previous basis state labels/indices. The return type is the same as the type of `label_or_index`. Args: label_or_index (int or str or SymbolicLabelBase): If `int`, the index of a basis state; if `str`, the label of a basis state n (int): The increment Raises: IndexError: If going beyond the last or first basis state ValueError: If `label` is not a label for any basis state in the Hilbert space .BasisNotSetError: If the Hilbert space has no defined basis TypeError: if `label_or_index` is neither a :class:`str` nor an :class:`int`, nor a :class:`SymbolicLabelBase` """ if isinstance(label_or_index, int): new_index = label_or_index + n if new_index < 0: raise IndexError("index %d < 0" % new_index) if self.has_basis: if new_index >= self.dimension: raise IndexError("index %d out of range for basis %s" % (new_index, self._basis)) return new_index elif isinstance(label_or_index, str): label_index = self.basis_labels.index(label_or_index) new_index = label_index + n if (new_index < 0) or (new_index >= len(self._basis)): raise IndexError("index %d out of range for basis %s" % (new_index, self._basis)) return self._basis[new_index] elif isinstance(label_or_index, SymbolicLabelBase): return label_or_index.__class__(expr=label_or_index.expr + n) else: raise TypeError( "Invalid type for label_or_index: %s" % label_or_index.__class__.__name__)
Given the label or index of a basis state, return the label/index of the next basis state. More generally, if `n` is given, return the `n`'th next basis state label/index; `n` may also be negative to obtain previous basis state labels/indices. The return type is the same as the type of `label_or_index`. Args: label_or_index (int or str or SymbolicLabelBase): If `int`, the index of a basis state; if `str`, the label of a basis state n (int): The increment Raises: IndexError: If going beyond the last or first basis state ValueError: If `label` is not a label for any basis state in the Hilbert space .BasisNotSetError: If the Hilbert space has no defined basis TypeError: if `label_or_index` is neither a :class:`str` nor an :class:`int`, nor a :class:`SymbolicLabelBase`
Below is the instruction that describes the task:
### Input:
Given the label or index of a basis state, return the label/index of the
next basis state.

More generally, if `n` is given, return the `n`'th next basis state
label/index; `n` may also be negative to obtain previous basis state
labels/indices.

The return type is the same as the type of `label_or_index`.

Args:
    label_or_index (int or str or SymbolicLabelBase):
        If `int`, the index of a basis state; if `str`, the label of a
        basis state
    n (int): The increment

Raises:
    IndexError: If going beyond the last or first basis state
    ValueError: If `label` is not a label for any basis state in the
        Hilbert space
    .BasisNotSetError: If the Hilbert space has no defined basis
    TypeError: if `label_or_index` is neither a :class:`str` nor an
        :class:`int`, nor a :class:`SymbolicLabelBase`
### Response:
def next_basis_label_or_index(self, label_or_index, n=1):
    """Given the label or index of a basis state, return the label/index
    of the next basis state.

    More generally, if `n` is given, return the `n`'th next basis state
    label/index; `n` may also be negative to obtain previous basis state
    labels/indices.

    The return type is the same as the type of `label_or_index`.

    Args:
        label_or_index (int or str or SymbolicLabelBase):
            If `int`, the index of a basis state; if `str`, the label of
            a basis state
        n (int): The increment

    Raises:
        IndexError: If going beyond the last or first basis state
        ValueError: If `label` is not a label for any basis state in the
            Hilbert space
        .BasisNotSetError: If the Hilbert space has no defined basis
        TypeError: if `label_or_index` is neither a :class:`str` nor an
            :class:`int`, nor a :class:`SymbolicLabelBase`
    """
    if isinstance(label_or_index, int):
        new_index = label_or_index + n
        if new_index < 0:
            raise IndexError("index %d < 0" % new_index)
        if self.has_basis:
            if new_index >= self.dimension:
                raise IndexError("index %d out of range for basis %s" % (new_index, self._basis))
        return new_index
    elif isinstance(label_or_index, str):
        label_index = self.basis_labels.index(label_or_index)
        new_index = label_index + n
        if (new_index < 0) or (new_index >= len(self._basis)):
            raise IndexError("index %d out of range for basis %s" % (new_index, self._basis))
        return self._basis[new_index]
    elif isinstance(label_or_index, SymbolicLabelBase):
        return label_or_index.__class__(expr=label_or_index.expr + n)
    else:
        raise TypeError(
            "Invalid type for label_or_index: %s" % label_or_index.__class__.__name__)
def _set_cell(self, column_family_id, column, value, timestamp=None, state=None): """Helper for :meth:`set_cell` Adds a mutation to set the value in a specific cell. ``state`` is unused by :class:`DirectRow` but is used by subclasses. :type column_family_id: str :param column_family_id: The column family that contains the column. Must be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. :type column: bytes :param column: The column within the column family where the cell is located. :type value: bytes or :class:`int` :param value: The value to set in the cell. If an integer is used, will be interpreted as a 64-bit big-endian signed integer (8 bytes). :type timestamp: :class:`datetime.datetime` :param timestamp: (Optional) The timestamp of the operation. :type state: bool :param state: (Optional) The state that is passed along to :meth:`_get_mutations`. """ column = _to_bytes(column) if isinstance(value, six.integer_types): value = _PACK_I64(value) value = _to_bytes(value) if timestamp is None: # Use -1 for current Bigtable server time. timestamp_micros = -1 else: timestamp_micros = _microseconds_from_datetime(timestamp) # Truncate to millisecond granularity. timestamp_micros -= timestamp_micros % 1000 mutation_val = data_v2_pb2.Mutation.SetCell( family_name=column_family_id, column_qualifier=column, timestamp_micros=timestamp_micros, value=value, ) mutation_pb = data_v2_pb2.Mutation(set_cell=mutation_val) self._get_mutations(state).append(mutation_pb)
Helper for :meth:`set_cell` Adds a mutation to set the value in a specific cell. ``state`` is unused by :class:`DirectRow` but is used by subclasses. :type column_family_id: str :param column_family_id: The column family that contains the column. Must be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. :type column: bytes :param column: The column within the column family where the cell is located. :type value: bytes or :class:`int` :param value: The value to set in the cell. If an integer is used, will be interpreted as a 64-bit big-endian signed integer (8 bytes). :type timestamp: :class:`datetime.datetime` :param timestamp: (Optional) The timestamp of the operation. :type state: bool :param state: (Optional) The state that is passed along to :meth:`_get_mutations`.
Below is the instruction that describes the task:
### Input:
Helper for :meth:`set_cell`

Adds a mutation to set the value in a specific cell.

``state`` is unused by :class:`DirectRow` but is used by subclasses.

:type column_family_id: str
:param column_family_id: The column family that contains the column.
                         Must be of the form
                         ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.

:type column: bytes
:param column: The column within the column family where the cell
               is located.

:type value: bytes or :class:`int`
:param value: The value to set in the cell. If an integer is used,
              will be interpreted as a 64-bit big-endian signed
              integer (8 bytes).

:type timestamp: :class:`datetime.datetime`
:param timestamp: (Optional) The timestamp of the operation.

:type state: bool
:param state: (Optional) The state that is passed along to
              :meth:`_get_mutations`.
### Response:
def _set_cell(self, column_family_id, column, value, timestamp=None, state=None):
    """Helper for :meth:`set_cell`

    Adds a mutation to set the value in a specific cell.

    ``state`` is unused by :class:`DirectRow` but is used by subclasses.

    :type column_family_id: str
    :param column_family_id: The column family that contains the column.
                             Must be of the form
                             ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.

    :type column: bytes
    :param column: The column within the column family where the cell
                   is located.

    :type value: bytes or :class:`int`
    :param value: The value to set in the cell. If an integer is used,
                  will be interpreted as a 64-bit big-endian signed
                  integer (8 bytes).

    :type timestamp: :class:`datetime.datetime`
    :param timestamp: (Optional) The timestamp of the operation.

    :type state: bool
    :param state: (Optional) The state that is passed along to
                  :meth:`_get_mutations`.
    """
    column = _to_bytes(column)
    if isinstance(value, six.integer_types):
        value = _PACK_I64(value)
    value = _to_bytes(value)
    if timestamp is None:
        # Use -1 for current Bigtable server time.
        timestamp_micros = -1
    else:
        timestamp_micros = _microseconds_from_datetime(timestamp)
        # Truncate to millisecond granularity.
        timestamp_micros -= timestamp_micros % 1000

    mutation_val = data_v2_pb2.Mutation.SetCell(
        family_name=column_family_id,
        column_qualifier=column,
        timestamp_micros=timestamp_micros,
        value=value,
    )
    mutation_pb = data_v2_pb2.Mutation(set_cell=mutation_val)
    self._get_mutations(state).append(mutation_pb)
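A hedged usage sketch of the public set_cell wrapper around this helper, assuming a recent google-cloud-bigtable client and a provisioned instance and table; project, instance, and table ids are placeholders:

from google.cloud import bigtable

client = bigtable.Client(project='my-project', admin=True)
table = client.instance('my-instance').table('my-table')

row = table.direct_row(b'row-key-1')
row.set_cell('cf1', b'qualifier', b'value')  # delegates to _set_cell internally
row.commit()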
def create_volume(self, datacenter_id, volume): """ Creates a volume within the specified data center. :param datacenter_id: The unique ID of the data center. :type datacenter_id: ``str`` :param volume: A volume dict. :type volume: ``dict`` """ data = (json.dumps(self._create_volume_dict(volume))) response = self._perform_request( url='/datacenters/%s/volumes' % datacenter_id, method='POST', data=data) return response
Creates a volume within the specified data center. :param datacenter_id: The unique ID of the data center. :type datacenter_id: ``str`` :param volume: A volume dict. :type volume: ``dict``
Below is the instruction that describes the task:
### Input:
Creates a volume within the specified data center.

:param      datacenter_id: The unique ID of the data center.
:type       datacenter_id: ``str``

:param      volume: A volume dict.
:type       volume: ``dict``
### Response:
def create_volume(self, datacenter_id, volume):
    """
    Creates a volume within the specified data center.

    :param      datacenter_id: The unique ID of the data center.
    :type       datacenter_id: ``str``

    :param      volume: A volume dict.
    :type       volume: ``dict``

    """
    data = (json.dumps(self._create_volume_dict(volume)))

    response = self._perform_request(
        url='/datacenters/%s/volumes' % datacenter_id,
        method='POST',
        data=data)

    return response
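A usage sketch based on the SDK's published examples; the ProfitBricksService and Volume imports, the field names, and the ids are assumptions taken from that documentation rather than from this excerpt:

from profitbricks.client import ProfitBricksService, Volume  # assumed imports

client = ProfitBricksService(username='user@example.com', password='secret')

volume = Volume(
    name='example-volume',
    size=56,             # GB
    bus='VIRTIO',
    image='<IMAGE-ID>',  # placeholder
)
response = client.create_volume(
    datacenter_id='<DATACENTER-ID>',  # placeholder
    volume=volume,
)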
def reset(self): """Clean any processing data, and prepare object for reuse """ self.current_table = None self.tables = [] self.data = [{}] self.additional_data = {} self.lines = [] self.set_state('document') self.current_file = None self.set_of_energies = set()
Clean any processing data, and prepare object for reuse
Below is the instruction that describes the task:
### Input:
Clean any processing data, and prepare object for reuse
### Response:
def reset(self):
    """Clean any processing data, and prepare object for reuse
    """
    self.current_table = None
    self.tables = []
    self.data = [{}]
    self.additional_data = {}
    self.lines = []
    self.set_state('document')
    self.current_file = None
    self.set_of_energies = set()
def from_rdata_list(ttl, rdatas):
    """Create an rdataset with the specified TTL, and with
    the specified list of rdata objects.

    @rtype: dns.rdataset.Rdataset object
    """

    if len(rdatas) == 0:
        raise ValueError("rdata list must not be empty")
    r = None
    for rd in rdatas:
        if r is None:
            r = Rdataset(rd.rdclass, rd.rdtype)
            r.update_ttl(ttl)
        r.add(rd)
    return r
Create an rdataset with the specified TTL, and with the specified list of rdata objects. @rtype: dns.rdataset.Rdataset object
Below is the instruction that describes the task:
### Input:
Create an rdataset with the specified TTL, and with
the specified list of rdata objects.

@rtype: dns.rdataset.Rdataset object
### Response:
def from_rdata_list(ttl, rdatas):
    """Create an rdataset with the specified TTL, and with
    the specified list of rdata objects.

    @rtype: dns.rdataset.Rdataset object
    """

    if len(rdatas) == 0:
        raise ValueError("rdata list must not be empty")
    r = None
    for rd in rdatas:
        if r is None:
            r = Rdataset(rd.rdclass, rd.rdtype)
            r.update_ttl(ttl)
        r.add(rd)
    return r
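A usage sketch with dnspython, building two A records and collecting them into one rdataset through this function:

import dns.rdata
import dns.rdataclass
import dns.rdataset
import dns.rdatatype

a1 = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '192.0.2.1')
a2 = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '192.0.2.2')
rdset = dns.rdataset.from_rdata_list(300, [a1, a2])
print(rdset.ttl, len(rdset))  # 300 2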
def from_avro(data, jsonFormatSchema, options={}): """ Converts a binary column of avro format into its corresponding catalyst value. The specified schema must match the read data, otherwise the behavior is undefined: it may fail or return arbitrary result. Note: Avro is built-in but external data source module since Spark 2.4. Please deploy the application as per the deployment section of "Apache Avro Data Source Guide". :param data: the binary column. :param jsonFormatSchema: the avro schema in JSON string format. :param options: options to control how the Avro record is parsed. >>> from pyspark.sql import Row >>> from pyspark.sql.avro.functions import from_avro, to_avro >>> data = [(1, Row(name='Alice', age=2))] >>> df = spark.createDataFrame(data, ("key", "value")) >>> avroDf = df.select(to_avro(df.value).alias("avro")) >>> avroDf.collect() [Row(avro=bytearray(b'\\x00\\x00\\x04\\x00\\nAlice'))] >>> jsonFormatSchema = '''{"type":"record","name":"topLevelRecord","fields": ... [{"name":"avro","type":[{"type":"record","name":"value","namespace":"topLevelRecord", ... "fields":[{"name":"age","type":["long","null"]}, ... {"name":"name","type":["string","null"]}]},"null"]}]}''' >>> avroDf.select(from_avro(avroDf.avro, jsonFormatSchema).alias("value")).collect() [Row(value=Row(avro=Row(age=2, name=u'Alice')))] """ sc = SparkContext._active_spark_context try: jc = sc._jvm.org.apache.spark.sql.avro.functions.from_avro( _to_java_column(data), jsonFormatSchema, options) except TypeError as e: if str(e) == "'JavaPackage' object is not callable": _print_missing_jar("Avro", "avro", "avro", sc.version) raise return Column(jc)
Converts a binary column of avro format into its corresponding catalyst value. The specified schema must match the read data, otherwise the behavior is undefined: it may fail or return arbitrary result. Note: Avro is built-in but external data source module since Spark 2.4. Please deploy the application as per the deployment section of "Apache Avro Data Source Guide". :param data: the binary column. :param jsonFormatSchema: the avro schema in JSON string format. :param options: options to control how the Avro record is parsed. >>> from pyspark.sql import Row >>> from pyspark.sql.avro.functions import from_avro, to_avro >>> data = [(1, Row(name='Alice', age=2))] >>> df = spark.createDataFrame(data, ("key", "value")) >>> avroDf = df.select(to_avro(df.value).alias("avro")) >>> avroDf.collect() [Row(avro=bytearray(b'\\x00\\x00\\x04\\x00\\nAlice'))] >>> jsonFormatSchema = '''{"type":"record","name":"topLevelRecord","fields": ... [{"name":"avro","type":[{"type":"record","name":"value","namespace":"topLevelRecord", ... "fields":[{"name":"age","type":["long","null"]}, ... {"name":"name","type":["string","null"]}]},"null"]}]}''' >>> avroDf.select(from_avro(avroDf.avro, jsonFormatSchema).alias("value")).collect() [Row(value=Row(avro=Row(age=2, name=u'Alice')))]
Below is the instruction that describes the task:
### Input:
Converts a binary column of avro format into its corresponding catalyst value. The specified
schema must match the read data, otherwise the behavior is undefined: it may fail or return
arbitrary result.

Note: Avro is built-in but external data source module since Spark 2.4. Please deploy the
application as per the deployment section of "Apache Avro Data Source Guide".

:param data: the binary column.
:param jsonFormatSchema: the avro schema in JSON string format.
:param options: options to control how the Avro record is parsed.

>>> from pyspark.sql import Row
>>> from pyspark.sql.avro.functions import from_avro, to_avro
>>> data = [(1, Row(name='Alice', age=2))]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> avroDf = df.select(to_avro(df.value).alias("avro"))
>>> avroDf.collect()
[Row(avro=bytearray(b'\\x00\\x00\\x04\\x00\\nAlice'))]
>>> jsonFormatSchema = '''{"type":"record","name":"topLevelRecord","fields":
...     [{"name":"avro","type":[{"type":"record","name":"value","namespace":"topLevelRecord",
...     "fields":[{"name":"age","type":["long","null"]},
...     {"name":"name","type":["string","null"]}]},"null"]}]}'''
>>> avroDf.select(from_avro(avroDf.avro, jsonFormatSchema).alias("value")).collect()
[Row(value=Row(avro=Row(age=2, name=u'Alice')))]
### Response:
def from_avro(data, jsonFormatSchema, options={}):
    """
    Converts a binary column of avro format into its corresponding catalyst value. The specified
    schema must match the read data, otherwise the behavior is undefined: it may fail or return
    arbitrary result.

    Note: Avro is built-in but external data source module since Spark 2.4. Please deploy the
    application as per the deployment section of "Apache Avro Data Source Guide".

    :param data: the binary column.
    :param jsonFormatSchema: the avro schema in JSON string format.
    :param options: options to control how the Avro record is parsed.

    >>> from pyspark.sql import Row
    >>> from pyspark.sql.avro.functions import from_avro, to_avro
    >>> data = [(1, Row(name='Alice', age=2))]
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> avroDf = df.select(to_avro(df.value).alias("avro"))
    >>> avroDf.collect()
    [Row(avro=bytearray(b'\\x00\\x00\\x04\\x00\\nAlice'))]
    >>> jsonFormatSchema = '''{"type":"record","name":"topLevelRecord","fields":
    ...     [{"name":"avro","type":[{"type":"record","name":"value","namespace":"topLevelRecord",
    ...     "fields":[{"name":"age","type":["long","null"]},
    ...     {"name":"name","type":["string","null"]}]},"null"]}]}'''
    >>> avroDf.select(from_avro(avroDf.avro, jsonFormatSchema).alias("value")).collect()
    [Row(value=Row(avro=Row(age=2, name=u'Alice')))]
    """
    sc = SparkContext._active_spark_context
    try:
        jc = sc._jvm.org.apache.spark.sql.avro.functions.from_avro(
            _to_java_column(data), jsonFormatSchema, options)
    except TypeError as e:
        if str(e) == "'JavaPackage' object is not callable":
            _print_missing_jar("Avro", "avro", "avro", sc.version)
        raise
    return Column(jc)