text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
def sites_at_edges( self ):
    """
    Finds the six sites with the maximum and minimum coordinates along x, y, and z.

    Args:
        None

    Returns:
        (List(List)): In the order [ +x, -x, +y, -y, +z, -z ]
    """
    min_x = min( [ s.r[0] for s in self.sites ] )
    max_x = max( [ s.r[0] for s in self.sites ] )
    min_y = min( [ s.r[1] for s in self.sites ] )
    max_y = max( [ s.r[1] for s in self.sites ] )
    min_z = min( [ s.r[2] for s in self.sites ] )
    max_z = max( [ s.r[2] for s in self.sites ] )
    # BUG FIX: previously each *_max list collected sites at the *minimum*
    # coordinate (and vice versa), contradicting the documented
    # [ +x, -x, +y, -y, +z, -z ] return order.
    x_max = [ s for s in self.sites if s.r[0] == max_x ]
    x_min = [ s for s in self.sites if s.r[0] == min_x ]
    y_max = [ s for s in self.sites if s.r[1] == max_y ]
    y_min = [ s for s in self.sites if s.r[1] == min_y ]
    z_max = [ s for s in self.sites if s.r[2] == max_z ]
    z_min = [ s for s in self.sites if s.r[2] == min_z ]
    return ( x_max, x_min, y_max, y_min, z_max, z_min )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_periodically_contiguous( self ): """ logical check whether a cluster connects with itself across the simulation periodic boundary conditions. Args: none Returns ( Bool, Bool, Bool ): Contiguity along the x, y, and z coordinate axes """
edges = self.sites_at_edges() is_contiguous = [ False, False, False ] along_x = any( [ s2 in s1.p_neighbours for s1 in edges[0] for s2 in edges[1] ] ) along_y = any( [ s2 in s1.p_neighbours for s1 in edges[2] for s2 in edges[3] ] ) along_z = any( [ s2 in s1.p_neighbours for s1 in edges[4] for s2 in edges[5] ] ) return ( along_x, along_y, along_z )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_sites_from_neighbours( self, remove_labels ): """ Removes sites from the set of neighbouring sites if these have labels in remove_labels. Args: Remove_labels (List) or (Str): List of Site labels to be removed from the cluster neighbour set. Returns: None """
if type( remove_labels ) is str: remove_labels = [ remove_labels ] self.neighbours = set( n for n in self.neighbours if n.label not in remove_labels )
def cumulative_probabilities( self ):
    """
    Cumulative sum of the relative probabilities for all possible jumps.

    Args:
        None

    Returns:
        (np.array): Cumulative sum of relative jump probabilities.
    """
    # Normalise by the partition function so the final entry equals 1.0.
    z = np.sum( self.p )
    return np.cumsum( self.p ) / z
def random( self ):
    """
    Select a jump at random with appropriate relative probabilities.

    Args:
        None

    Returns:
        (Jump): The randomly selected Jump.
    """
    # Inverse-transform sampling: draw u ~ U(0,1) and locate it within the
    # cumulative probability distribution.
    u = random.random()
    selected = np.searchsorted( self.cumulative_probabilities(), u )
    return self.jumps[ selected ]
def time_to_jump( self ):
    """
    The timestep until the next jump.

    Args:
        None

    Returns:
        (Float): The timestep until the next jump.
    """
    # Standard KMC residence-time algorithm: dt = -ln(u) / k_tot, where
    # k_tot is the total escape rate (rate_prefactor is a module constant).
    k_tot = rate_prefactor * np.sum( self.p )
    return -math.log( random.random() ) / k_tot
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_real_ip(self): """ Get IP from request. :param request: A usual request object :type request: HttpRequest :return: ipv4 string or None """
try: # Trying to work with most common proxy headers real_ip = self.request.META['HTTP_X_FORWARDED_FOR'] return real_ip.split(',')[0] except KeyError: return self.request.META['REMOTE_ADDR'] except Exception: # Unknown IP return None
def _get_ip_range(self):
    """
    Fetches IpRange instance if request IP is found in database.

    :param request: A usual request object
    :type request: HttpRequest
    :return: IpRange object or None
    """
    request_ip = self._get_real_ip()
    try:
        return IpRange.objects.by_ip(request_ip)
    except IpRange.DoesNotExist:
        return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_stored_location(self): """ Get location from cookie. :param request: A ususal request object :type request: HttpRequest :return: Custom location model """
location_storage = storage_class(request=self.request, response=None) return location_storage.get()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lazy_translations(cls): """Lazy translations."""
return { cloudfiles.errors.NoSuchContainer: errors.NoContainerException, cloudfiles.errors.NoSuchObject: errors.NoObjectException, }
def from_info(cls, container, info_obj):
    """Create from subdirectory or file info object."""
    # Cloud Files marks pseudo-directories with a 'subdir' key.
    if 'subdir' in info_obj:
        return cls.from_subdir(container, info_obj)
    return cls.from_file_info(container, info_obj)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_subdir(cls, container, info_obj): """Create from subdirectory info object."""
return cls(container, info_obj['subdir'], obj_type=cls.type_cls.SUBDIR)
def choose_type(cls, content_type):
    """Choose object type from content type."""
    if content_type in cls.subdir_types:
        return cls.type_cls.SUBDIR
    return cls.type_cls.FILE
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_connection(self): """Return native connection object."""
kwargs = { 'username': self.account, 'api_key': self.secret_key, } # Only add kwarg for servicenet if True because user could set # environment variable 'RACKSPACE_SERVICENET' separately. if self.servicenet: kwargs['servicenet'] = True if self.authurl: kwargs['authurl'] = self.authurl return cloudfiles.get_connection(**kwargs)
def delta_E( self ):
    """
    The change in system energy if this jump were accepted.

    Args:
        None

    Returns:
        (Float): delta E
    """
    # Base contribution: on-site energy difference between destination
    # and origin; optional interaction terms are added when enabled.
    dE = self.final_site.energy - self.initial_site.energy
    if self.nearest_neighbour_energy:
        dE += self.nearest_neighbour_delta_E()
    if self.coordination_number_energy:
        dE += self.coordination_number_delta_E()
    return dE
def nearest_neighbour_delta_E( self ):
    """
    Nearest-neighbour interaction contribution to the change in system
    energy if this jump were accepted.

    Args:
        None

    Returns:
        (Float): delta E (nearest-neighbour)
    """
    # -1 because the hopping ion itself is not counted in the
    # destination site's occupation number.
    delta_nn = ( self.final_site.nn_occupation()
                 - self.initial_site.nn_occupation()
                 - 1 )
    return delta_nn * self.nearest_neighbour_energy
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def coordination_number_delta_E( self ): """ Coordination-number dependent energy conrtibution to the change in system energy if this jump were accepted. Args: None Returns: (Float): delta E (coordination-number) """
initial_site_neighbours = [ s for s in self.initial_site.p_neighbours if s.is_occupied ] # excludes final site, since this is always unoccupied final_site_neighbours = [ s for s in self.final_site.p_neighbours if s.is_occupied and s is not self.initial_site ] # excludes initial site initial_cn_occupation_energy = ( self.initial_site.cn_occupation_energy() + sum( [ site.cn_occupation_energy() for site in initial_site_neighbours ] ) + sum( [ site.cn_occupation_energy() for site in final_site_neighbours ] ) ) final_cn_occupation_energy = ( self.final_site.cn_occupation_energy( delta_occupation = { self.initial_site.label : -1 } ) + sum( [ site.cn_occupation_energy( delta_occupation = { self.initial_site.label : -1 } ) for site in initial_site_neighbours ] ) + sum( [ site.cn_occupation_energy( delta_occupation = { self.final_site.label : +1 } ) for site in final_site_neighbours ] ) ) return ( final_cn_occupation_energy - initial_cn_occupation_energy )
def dr( self, cell_lengths ):
    """
    Particle displacement vector for this jump.

    Args:
        cell_lengths (np.array(x,y,z)): Cell lengths for the orthogonal
            simulation cell.

    Returns:
        (np.array(x,y,z)): dr, minimum-image displacement.
    """
    half = cell_lengths / 2.0
    displacement = self.final_site.r - self.initial_site.r
    # Apply the minimum-image convention component-wise.
    for axis in range( 3 ):
        if displacement[ axis ] > half[ axis ]:
            displacement[ axis ] -= cell_lengths[ axis ]
        if displacement[ axis ] < -half[ axis ]:
            displacement[ axis ] += cell_lengths[ axis ]
    return displacement
def relative_probability_from_lookup_table( self, jump_lookup_table ):
    """
    Relative probability of accepting this jump from a lookup-table.

    Args:
        jump_lookup_table (LookupTable): the lookup table to be used for
            this jump.

    Returns:
        (Float): relative probability of accepting this jump.
    """
    # Indexed by initial label, final label, then the two
    # nearest-neighbour occupation numbers.
    table = jump_lookup_table.jump_probability
    initial, final = self.initial_site, self.final_site
    return table[ initial.label ][ final.label ][ initial.nn_occupation() ][ final.nn_occupation() ]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def module_cache_get(cache, module): """ Import a module with an optional yaml config file, but only if we haven't imported it already. :param cache: object which holds information on which modules and config files have been loaded and whether config files should be loaded. :param module: the path of the module to load. :returns: the loaded module. """
if getattr(cache, "config", False): config_file = module[:-2] + "yaml" if config_file not in cache.config_files and os.path.exists(config_file): try: config = yaml_safe_load(config_file, type=dict) except TypeError as e: tangelo.log_warning("TANGELO", "Bad configuration in file %s: %s" % (config_file, e)) raise except IOError: tangelo.log_warning("TANGELO", "Could not open config file %s" % (config_file)) raise except ValueError as e: tangelo.log_warning("TANGELO", "Error reading config file %s: %s" % (config_file, e)) raise cache.config_files[config_file] = True else: config = {} cherrypy.config["module-config"][module] = config cherrypy.config["module-store"].setdefault(module, {}) # If two threads are importing the same module nearly concurrently, we # could load it twice unless we use the import lock. imp.acquire_lock() try: if module not in cache.modules: name = module[:-3] # load the module. service = imp.load_source(name, module) cache.modules[module] = service else: service = cache.modules[module] finally: imp.release_lock() return service
def close(self):
    """Flush data, write 28 bytes BGZF EOF marker, and close BGZF file.

    samtools will look for a magic EOF marker, just a 28 byte empty BGZF
    block, and if it is missing warns the BAM file may be truncated.  In
    addition to samtools writing this block, so too does bgzip - so this
    implementation does too.
    """
    if self._buffer:
        self.flush()
    # Append the canonical empty BGZF block, then release the handle.
    handle = self._handle
    handle.write(_bgzf_eof)
    handle.flush()
    handle.close()
def ensure_secret():
    """Check if a secret key to encrypt sessions exists, generate it otherwise.

    The key is persisted in ``$HOME/.ipcamweb`` so sessions survive restarts.

    :return: the 24-byte secret (bytes)
    """
    file_name = os.path.join(os.environ['HOME'], ".ipcamweb")
    if os.path.exists(file_name):
        with open(file_name, "rb") as s_file:
            # Strip the trailing newline that older versions appended.
            secret = s_file.read().rstrip(b"\n")
    else:
        secret = os.urandom(24)
        # BUG FIX: previously ``secret = s_file.write(secret + "\n")``
        # rebound ``secret`` to write()'s return value, so the freshly
        # generated key was lost and a byte count (or None) was returned.
        # Also use binary mode: os.urandom() returns bytes, not str.
        with open(file_name, "wb") as s_file:
            s_file.write(secret)
    return secret
def list_snapshots_for_a_minute(path, cam_id, day, hourm):
    """Returns a sorted list of screenshot file names for the given
    camera/day/minute, or ``[]`` if the directory does not exist.

    :param path: base snapshot directory
    :param cam_id: camera identifier (converted to str)
    :param day: day directory name
    :param hourm: hour+minute directory name
    """
    # os.path.join instead of manual "/" concatenation; sorted() already
    # returns a fresh list, so the old wrapping comprehension was redundant.
    screenshots_path = os.path.join(path, str(cam_id), day, hourm)
    if os.path.exists(screenshots_path):
        return sorted(os.listdir(screenshots_path))
    return []
def is_snv(self):
    """Return ``True`` if it is a SNV"""
    # A SNV has a single-base REF and only SNV-typed alternatives.
    if len(self.REF) != 1:
        return False
    return all(alt.type == "SNV" for alt in self.ALT)
def affected_start(self):
    """Return affected start position in 0-based coordinates

    For SNVs, MNVs, and deletions, the behaviour is the start position.
    In the case of insertions, the position behind the insert position is
    returned, yielding a 0-length interval together with
    :py:meth:`~Record.affected_end`
    """
    alt_types = {alt.type for alt in self.ALT}
    # A pure-insertion record keeps the 1-based POS as its 0-based start,
    # i.e. the position right of the first REF base.  (Equivalent to the
    # original BAD_MIX check, since it only ever matched ``{INS}``.)
    if alt_types == {INS}:
        return self.POS
    # Otherwise: 0-based start position of the first REF base.
    return self.POS - 1
def add_filter(self, label):
    """Add label to FILTER if not set yet, removing ``PASS`` entry if
    present
    """
    if label in self.FILTER:
        return
    # A concrete filter supersedes PASS.
    if "PASS" in self.FILTER:
        self.FILTER = [entry for entry in self.FILTER if entry != "PASS"]
    self.FILTER.append(label)
def add_format(self, key, value=None):
    """Add an entry to format

    The record's calls ``data[key]`` will be set to ``value`` if not yet
    set and value is not ``None``.  If key is already in FORMAT then
    nothing is done.
    """
    if key in self.FORMAT:
        return
    self.FORMAT.append(key)
    if value is None:
        return
    # Initialise each call's entry only where it is still missing.
    for call in self:
        call.data.setdefault(key, value)
def gt_type(self):
    """The type of genotype, returns one of ``HOM_REF``, ``HOM_ALT``, and
    ``HET``.
    """
    if not self.called:
        return None  # not called
    # All-reference alleles -> HOM_REF; a single distinct non-ref allele
    # -> HOM_ALT; anything else is heterozygous.
    if all(allele == 0 for allele in self.gt_alleles):
        return HOM_REF
    if len(set(self.gt_alleles)) == 1:
        return HOM_ALT
    return HET
def is_filtered(self, require=None, ignore=None):
    """Return ``True`` for filtered calls

    :param iterable ignore: if set, the filters to ignore, make sure to
        include 'PASS', when setting, default is ``['PASS']``
    :param iterable require: if set, the filters to require for returning
        ``True``
    """
    ignored = ignore or ["PASS"]
    filters = self.data.get("FT") or []
    for entry in filters:
        if entry in ignored:
            continue
        # Without a ``require`` list any remaining filter counts;
        # otherwise the filter must be one of the required ones.
        if not require or entry in require:
            return True
    return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def serialize(self): """Return string representation for VCF"""
if self.mate_chrom is None: remote_tag = "." else: if self.within_main_assembly: mate_chrom = self.mate_chrom else: mate_chrom = "<{}>".format(self.mate_chrom) tpl = {FORWARD: "[{}:{}[", REVERSE: "]{}:{}]"}[self.mate_orientation] remote_tag = tpl.format(mate_chrom, self.mate_pos) if self.orientation == FORWARD: return remote_tag + self.sequence else: return self.sequence + remote_tag
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def trend_coefficients(self, order=LINEAR): '''Calculate trend coefficients for the specified order.''' if not len(self.points): raise ArithmeticError('Cannot calculate the trend of an empty series') return LazyImport.numpy().polyfit(self.timestamps, self.values, order)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def moving_average(self, window, method=SIMPLE): '''Calculate a moving average using the specified method and window''' if len(self.points) < window: raise ArithmeticError('Not enough points for moving average') numpy = LazyImport.numpy() if method == TimeSeries.SIMPLE: weights = numpy.ones(window) / float(window) ma_x = self.timestamps[window-1:] ma_y = numpy.convolve(self.values, weights)[window-1:-(window-1)].tolist() return TimeSeries(zip(ma_x, ma_y))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def forecast(self, horizon, method=ARIMA, frequency=None): '''Forecast points beyond the time series range using the specified forecasting method. `horizon` is the number of points to forecast.''' if len(self.points) <= 1: raise ArithmeticError('Cannot run forecast when len(series) <= 1') R = LazyImport.rpy2() series = LazyImport.numpy().array(self.values) if frequency is not None: series = R.ts(series, frequency=frequency) if method == TimeSeries.ARIMA: fit = R.forecast.auto_arima(series) elif method == TimeSeries.ETS: fit = R.forecast.ets(series) else: raise ValueError('Unknown forecast() method') forecasted = R.forecast.forecast(fit, h=horizon) forecast_y = list(forecasted.rx2('mean')) interval = self.interval last_x = self.points[-1][0] forecast_x = [ last_x + x * interval for x in xrange(1, horizon+1) ] return TimeSeries(zip(forecast_x, forecast_y))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def decompose(self, frequency, window=None, periodic=False): '''Use STL to decompose the time series into seasonal, trend, and residual components.''' R = LazyImport.rpy2() if periodic: window = 'periodic' elif window is None: window = frequency timestamps = self.timestamps series = LazyImport.numpy().array(self.values) length = len(series) series = R.ts(series, frequency=frequency) kwargs = { 's.window': window } decomposed = R.robjects.r['stl'](series, **kwargs).rx2('time.series') decomposed = [ row for row in decomposed ] seasonal = decomposed[0:length] trend = decomposed[length:2*length] residual = decomposed[2*length:3*length] seasonal = TimeSeries(zip(timestamps, seasonal)) trend = TimeSeries(zip(timestamps, trend)) residual = TimeSeries(zip(timestamps, residual)) return DataFrame(seasonal=seasonal, trend=trend, residual=residual)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def plot(self, label=None, colour='g', style='-'): # pragma: no cover '''Plot the time series.''' pylab = LazyImport.pylab() pylab.plot(self.dates, self.values, '%s%s' % (colour, style), label=label) if label is not None: pylab.legend() pylab.show()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def table_output(data): '''Get a table representation of a dictionary.''' if type(data) == DictType: data = data.items() headings = [ item[0] for item in data ] rows = [ item[1] for item in data ] columns = zip(*rows) if len(columns): widths = [ max([ len(str(y)) for y in row ]) for row in rows ] else: widths = [ 0 for c in headings ] for c, heading in enumerate(headings): widths[c] = max(widths[c], len(heading)) column_count = range(len(rows)) table = [ ' '.join([ headings[c].ljust(widths[c]) for c in column_count ]) ] table.append(' '.join([ '=' * widths[c] for c in column_count ])) for column in columns: table.append(' '.join([ str(column[c]).ljust(widths[c]) for c in column_count ])) return '\n'.join(table)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def to_datetime(time): '''Convert `time` to a datetime.''' if type(time) == IntType or type(time) == LongType: time = datetime.fromtimestamp(time // 1000) return time
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def spellCheckTextgrid(tg, targetTierName, newTierName, isleDict, printEntries=False): ''' Spell check words by using the praatio spellcheck function Incorrect items are noted in a new tier and optionally printed to the screen ''' def checkFunc(word): try: isleDict.lookup(word) except isletool.WordNotInISLE: returnVal = False else: returnVal = True return returnVal tg = praatio_scripts.spellCheckEntries(tg, targetTierName, newTierName, checkFunc, printEntries) return tg
def split_mapping(pair_str):
    """Split the ``str`` in ``pair_str`` at ``'='``

    Warn if key needs to be stripped
    """
    raw_key, value = pair_str.split("=", 1)
    key = raw_key.strip()
    # Only warn when stripping actually changed the key.
    if key != raw_key:
        warnings.warn(
            "Mapping key {} has leading or trailing space".format(repr(raw_key)),
            LeadingTrailingSpaceInKey,
        )
    return key, value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_mapping(value): """Parse the given VCF header line mapping Such a mapping consists of "key=value" pairs, separated by commas and for certain known keys, exceptions are made, depending on the tag key. this, however, only gets important when serializing. :raises: :py:class:`vcfpy.exceptions.InvalidHeaderException` if there was a problem parsing the file """
if not value.startswith("<") or not value.endswith(">"): raise exceptions.InvalidHeaderException( "Header mapping value was not wrapped in angular brackets" ) # split the comma-separated list into pairs, ignoring commas in quotes pairs = split_quoted_string(value[1:-1], delim=",", quote='"') # split these pairs into key/value pairs, converting flags to mappings # to True key_values = [] for pair in pairs: if "=" in pair: key, value = split_mapping(pair) if value.startswith('"') and value.endswith('"'): value = ast.literal_eval(value) elif value.startswith("[") and value.endswith("]"): value = [v.strip() for v in value[1:-1].split(",")] else: key, value = pair, True key_values.append((key, value)) # return completely parsed mapping as OrderedDict return OrderedDict(key_values)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build_header_parsers(): """Return mapping for parsers to use for each VCF header type Inject the WarningHelper into the parsers. """
result = { "ALT": MappingHeaderLineParser(header.AltAlleleHeaderLine), "contig": MappingHeaderLineParser(header.ContigHeaderLine), "FILTER": MappingHeaderLineParser(header.FilterHeaderLine), "FORMAT": MappingHeaderLineParser(header.FormatHeaderLine), "INFO": MappingHeaderLineParser(header.InfoHeaderLine), "META": MappingHeaderLineParser(header.MetaHeaderLine), "PEDIGREE": MappingHeaderLineParser(header.PedigreeHeaderLine), "SAMPLE": MappingHeaderLineParser(header.SampleHeaderLine), "__default__": StupidHeaderLineParser(), # fallback } return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def convert_field_value(type_, value): """Convert atomic field value according to the type"""
if value == ".": return None elif type_ in ("Character", "String"): if "%" in value: for k, v in record.UNESCAPE_MAPPING: value = value.replace(k, v) return value else: try: return _CONVERTERS[type_](value) except ValueError: warnings.warn( ("{} cannot be converted to {}, keeping as " "string.").format(value, type_), CannotConvertValue, ) return value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_field_value(field_info, value): """Parse ``value`` according to ``field_info`` """
if field_info.id == "FT": return [x for x in value.split(";") if x != "."] elif field_info.type == "Flag": return True elif field_info.number == 1: return convert_field_value(field_info.type, value) else: if value == ".": return [] else: return [convert_field_value(field_info.type, x) for x in value.split(",")]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_breakend(alt_str): """Parse breakend and return tuple with results, parameters for BreakEnd constructor """
arr = BREAKEND_PATTERN.split(alt_str) mate_chrom, mate_pos = arr[1].split(":", 1) mate_pos = int(mate_pos) if mate_chrom[0] == "<": mate_chrom = mate_chrom[1:-1] within_main_assembly = False else: within_main_assembly = True FWD_REV = {True: record.FORWARD, False: record.REVERSE} orientation = FWD_REV[alt_str[0] == "[" or alt_str[0] == "]"] mate_orientation = FWD_REV["[" in alt_str] if orientation == record.FORWARD: sequence = arr[2] else: sequence = arr[0] return (mate_chrom, mate_pos, orientation, mate_orientation, sequence, within_main_assembly)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process_sub_grow(ref, alt_str): """Process substution where the string grows"""
if len(alt_str) == 0: raise exceptions.InvalidRecordException("Invalid VCF, empty ALT") elif len(alt_str) == 1: if ref[0] == alt_str[0]: return record.Substitution(record.DEL, alt_str) else: return record.Substitution(record.INDEL, alt_str) else: return record.Substitution(record.INDEL, alt_str)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process_sub_shrink(ref, alt_str): """Process substution where the string shrink"""
if len(ref) == 0: raise exceptions.InvalidRecordException("Invalid VCF, empty REF") elif len(ref) == 1: if ref[0] == alt_str[0]: return record.Substitution(record.INS, alt_str) else: return record.Substitution(record.INDEL, alt_str) else: return record.Substitution(record.INDEL, alt_str)
def process_alt(header, ref, alt_str):  # pylint: disable=W0613
    """Process alternative value using Header in ``header``

    :raises exceptions.InvalidRecordException: on empty ALT value
    """
    # By its nature, this function contains a large number of case distinctions
    # BUG FIX: guard against an empty string up front.  Previously
    # ``alt_str[0]`` was evaluated *before* the (ineffective)
    # ``len(alt_str) > 0`` check, raising IndexError instead of a
    # meaningful parse error.
    if not alt_str:
        raise exceptions.InvalidRecordException("Invalid VCF, empty ALT")
    if "]" in alt_str or "[" in alt_str:
        return record.BreakEnd(*parse_breakend(alt_str))
    elif alt_str[0] == ".":
        return record.SingleBreakEnd(record.FORWARD, alt_str[1:])
    elif alt_str[-1] == ".":
        return record.SingleBreakEnd(record.REVERSE, alt_str[:-1])
    elif alt_str[0] == "<" and alt_str[-1] == ">":
        inner = alt_str[1:-1]
        return record.SymbolicAllele(inner)
    else:  # substitution
        return process_sub(ref, alt_str)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self, s): """Split string ``s`` at delimiter, correctly interpreting quotes Further, interprets arrays wrapped in one level of ``[]``. No recursive brackets are interpreted (as this would make the grammar non-regular and currently this complexity is not needed). Currently, quoting inside of braces is not supported either. This is just to support the example from VCF v4.3. """
begins, ends = [0], [] # transition table DISPATCH = { self.NORMAL: self._handle_normal, self.QUOTED: self._handle_quoted, self.ARRAY: self._handle_array, self.DELIM: self._handle_delim, self.ESCAPED: self._handle_escaped, } # run state automaton state = self.NORMAL for pos, c in enumerate(s): state = DISPATCH[state](c, pos, begins, ends) ends.append(len(s)) assert len(begins) == len(ends) # Build resulting list return [s[start:end] for start, end in zip(begins, ends)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _handle_calls(self, alts, format_, format_str, arr): """Handle FORMAT and calls columns, factored out of parse_line"""
if format_str not in self._format_cache: self._format_cache[format_str] = list(map(self.header.get_format_field_info, format_)) # per-sample calls calls = [] for sample, raw_data in zip(self.samples.names, arr[9:]): if self.samples.is_parsed(sample): data = self._parse_calls_data(format_, self._format_cache[format_str], raw_data) call = record.Call(sample, data) self._format_checker.run(call, len(alts)) self._check_filters(call.data.get("FT"), "FORMAT/FT", call.sample) calls.append(call) else: calls.append(record.UnparsedCall(sample, raw_data)) return calls
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _split_line(self, line_str): """Split line and check number of columns"""
arr = line_str.rstrip().split("\t") if len(arr) != self.expected_fields: raise exceptions.InvalidRecordException( ( "The line contains an invalid number of fields. Was " "{} but expected {}\n{}".format(len(arr), 9 + len(self.samples.names), line_str) ) ) return arr
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _parse_info(self, info_str, num_alts): """Parse INFO column from string"""
result = OrderedDict() if info_str == ".": return result # The standard is very nice to parsers, we can simply split at # semicolon characters, although I (Manuel) don't know how strict # programs follow this for entry in info_str.split(";"): if "=" not in entry: # flag key = entry result[key] = parse_field_value(self.header.get_info_field_info(key), True) else: key, value = split_mapping(entry) result[key] = parse_field_value(self.header.get_info_field_info(key), value) self._info_checker.run(key, result[key], num_alts) return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _parse_calls_data(klass, format_, infos, gt_str): """Parse genotype call information from arrays using format array :param list format: List of strings with format names :param gt_str arr: string with genotype information values """
data = OrderedDict() # The standard is very nice to parsers, we can simply split at # colon characters, although I (Manuel) don't know how strict # programs follow this for key, info, value in zip(format_, infos, gt_str.split(":")): data[key] = parse_field_value(info, value) return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self, call, num_alts): """Check ``FORMAT`` of a record.Call Currently, only checks for consistent counts are implemented """
for key, value in call.data.items(): self._check_count(call, key, value, num_alts)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _read_next_line(self): """Read next line store in self._line and return old one"""
prev_line = self._line self._line = self.stream.readline() return prev_line
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _check_samples_line(klass, arr): """Peform additional check on samples line"""
if len(arr) <= len(REQUIRE_NO_SAMPLE_HEADER): if tuple(arr) != REQUIRE_NO_SAMPLE_HEADER: raise exceptions.IncorrectVCFFormat( "Sample header line indicates no sample but does not " "equal required prefix {}".format("\t".join(REQUIRE_NO_SAMPLE_HEADER)) ) elif tuple(arr[: len(REQUIRE_SAMPLE_HEADER)]) != REQUIRE_SAMPLE_HEADER: raise exceptions.IncorrectVCFFormat( 'Sample header line (starting with "#CHROM") does not ' "start with required prefix {}".format("\t".join(REQUIRE_SAMPLE_HEADER)) )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def numpy():
    '''Lazily import the numpy module.

    The imported module is cached on ``LazyImport.numpy_module`` so the
    import cost is paid at most once.

    Raises:
        ImportError: if neither ``numpypy`` nor ``numpy`` is available.
    '''
    if LazyImport.numpy_module is None:
        # Try the PyPy variant first, then fall back to CPython's numpy.
        try:
            LazyImport.numpy_module = __import__('numpypy')
        except ImportError:
            try:
                LazyImport.numpy_module = __import__('numpy')
            except ImportError:
                raise ImportError('The numpy module is required')
    return LazyImport.numpy_module
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def rpy2():
    '''Lazily import the rpy2 module and R's "forecast" package.

    The module object is cached on ``LazyImport.rpy2_module``. On first
    use this also imports the R "forecast" package, binds the R ``ts``
    constructor as ``rpy2.ts``, and activates numpy<->R conversion.

    Raises:
        ImportError: if rpy2, R, or the "forecast" package is unavailable.
    '''
    if LazyImport.rpy2_module is None:
        try:
            rpy2 = __import__('rpy2.robjects')
        except ImportError:
            raise ImportError('The rpy2 module is required')
        LazyImport.rpy2_module = rpy2
        try:
            rpy2.forecast = rpy2.robjects.packages.importr('forecast')
        except Exception:
            # Was a bare "except:", which also swallowed SystemExit and
            # KeyboardInterrupt; only importr() failures should be
            # translated into an ImportError here.
            raise ImportError('R and the "forecast" package are required')
        rpy2.ts = rpy2.robjects.r['ts']
        __import__('rpy2.robjects.numpy2ri')
        rpy2.robjects.numpy2ri.activate()
    return LazyImport.rpy2_module
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def map_position(pos): """Map natural position to machine code position"""
posiction_dict = dict(zip(range(1, 17), [i for i in range(30, 62) if i % 2])) return posiction_dict[pos]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def snap(self, path=None): """Get a snapshot and save it to disk."""
        # Default snapshot root; strip a trailing slash so path joining below
        # does not produce double slashes.
        if path is None:
            path = "/tmp"
        else:
            path = path.rstrip("/")
        # Layout: <path>/<cam_id>/<DDMMYYYY>/<HHMM>/<SS>.jpg
        day_dir = datetime.datetime.now().strftime("%d%m%Y")
        hour_dir = datetime.datetime.now().strftime("%H%M")
        ensure_snapshot_dir(path+"/"+self.cam_id+"/"+day_dir+"/"+hour_dir)
        f_path = "{0}/{1}/{2}/{3}/{4}.jpg".format(
            path,
            self.cam_id,
            day_dir,
            hour_dir,
            datetime.datetime.now().strftime("%S"),
        )
        # NOTE(review): urllib.urlretrieve is Python 2 only; under Python 3
        # this would need urllib.request.urlretrieve — confirm target runtime.
        # Credentials are passed as plain query parameters (Foscam CGI API).
        urllib.urlretrieve(
            'http://{0}/snapshot.cgi?user={1}&pwd={2}'.format(
                self.address,
                self.user,
                self.pswd,
            ),
            f_path,
        )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def getNumPhones(isleDict, word, maxFlag):
    '''
    Get the number of syllables and phones in this word

    If maxFlag=True, use the longest pronunciation.  Otherwise, take the
    average length over all pronunciations.

    Returns a (syllableCount, phoneCount) tuple.
    '''
    phoneCount = 0
    syllableCount = 0

    syllableCountList = []
    phoneCountList = []

    wordList = isleDict.lookup(word)
    entryList = zip(*wordList)

    for lookupResultList in entryList:
        syllableList = []
        for wordSyllableList in lookupResultList:
            syllableList.extend(wordSyllableList)

        # Count syllables, then flatten to count individual phones.
        syllableCountList.append(len(syllableList))
        phoneCountList.append(len([phon for phoneList in syllableList
                                   for phon in phoneList]))

    # The average number of phones for all possible pronunciations
    # of this word
    if maxFlag is True:
        syllableCount += max(syllableCountList)
        phoneCount += max(phoneCountList)
    else:
        syllableCount += (sum(syllableCountList) /
                          float(len(syllableCountList)))
        phoneCount += sum(phoneCountList) / float(len(phoneCountList))

    return syllableCount, phoneCount
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def findOODWords(isleDict, wordList):
    '''
    Returns all of the out-of-dictionary words found in a list of utterances

    The result is sorted and contains no duplicates.
    '''
    # Collect into a set so duplicates are dropped as we go.
    oodWords = set()
    for word in wordList:
        try:
            isleDict.lookup(word)
        except WordNotInISLE:
            oodWords.add(word)

    return sorted(oodWords)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _buildDict(self):
    '''
    Builds the isle textfile into a dictionary for fast searching

    Each line of the ISLE file looks like ``word(tag1,tag2,...) pron``;
    the result maps word -> list of (pronunciation, extraInfoList) tuples.
    '''
    lexDict = {}
    with io.open(self.islePath, "r", encoding='utf-8') as fd:
        wordList = [line.rstrip('\n') for line in fd]

    for row in wordList:
        word, pronunciation = row.split(" ", 1)
        # The word carries parenthesized metadata, e.g. "cat(tag1,tag2)".
        word, extraInfo = word.split("(", 1)

        extraInfo = extraInfo.replace(")", "")
        # Drop segments that look like internal markers ("_", "+", ":")
        # or are empty; keep the remaining plain tags.
        extraInfoList = [segment for segment in extraInfo.split(",")
                         if ("_" not in segment and "+" not in segment and
                             ':' not in segment and segment != '')]

        lexDict.setdefault(word, [])
        lexDict[word].append((pronunciation, extraInfoList))

    return lexDict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def timestamps(self):
    '''Get all timestamps from all series in the group.

    Returns a sorted list with duplicates removed.
    '''
    all_stamps = set()
    for series in self.groups.itervalues():
        all_stamps.update(series.timestamps)
    return sorted(all_stamps)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def plot(self, overlay=True, **labels): # pragma: no cover
    '''Plot all time series in the group.

    :param overlay: if True draw every series on one axes, otherwise one
        stacked subplot per series.
    :param labels: optional mapping from series name to legend label; map
        a name to None to suppress its legend entry.
    '''
    pylab = LazyImport.pylab()
    colours = list('rgbymc')
    colours_len = len(colours)
    colours_pos = 0
    plots = len(self.groups)
    for name, series in self.groups.iteritems():
        # Cycle through the fixed colour palette.
        colour = colours[colours_pos % colours_len]
        colours_pos += 1
        if not overlay:
            # colours_pos doubles as the 1-based subplot index here.
            pylab.subplot(plots, 1, colours_pos)
        kwargs = {}
        if name in labels:
            name = labels[name]
        if name is not None:
            kwargs['label'] = name
        pylab.plot(series.dates, series.values, '%s-' % colour, **kwargs)
        if name is not None:
            pylab.legend()
    pylab.show()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def rename(self, **kwargs):
    '''Rename series in the group.

    Keyword arguments map old series names to new names; names not
    present in the group are ignored.
    '''
    for old, new in kwargs.iteritems():
        # Guard old == new: the original assign-then-delete sequence
        # deleted the series entirely when both names were identical.
        if old in self.groups and old != new:
            self.groups[new] = self.groups.pop(old)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fetch(self, chrom_or_region, begin=None, end=None): """Jump to the start position of the given chromosomal position and limit iteration to the end position :param str chrom_or_region: name of the chromosome to jump to if begin and end are given and a samtools region string otherwise (e.g. "chr1:123,456-123,900"). :param int begin: 0-based begin position (inclusive) :param int end: 0-based end position (exclusive) """
if begin is not None and end is None: raise ValueError("begin and end must both be None or neither") # close tabix file if any and is open if self.tabix_file and not self.tabix_file.closed: self.tabix_file.close() # open tabix file if not yet open if not self.tabix_file or self.tabix_file.closed: self.tabix_file = pysam.TabixFile(filename=self.path, index=self.tabix_path) # jump to the next position if begin is None: self.tabix_iter = self.tabix_file.fetch(region=chrom_or_region) else: self.tabix_iter = self.tabix_file.fetch(reference=chrom_or_region, start=begin, end=end) return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def close(self): """Close underlying stream"""
        # Close the tabix index handle first (if it was ever opened), then
        # the underlying text stream.
        if self.tabix_file and not self.tabix_file.closed:
            self.tabix_file.close()
        if self.stream:
            self.stream.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def serialize_for_header(key, value): """Serialize value for the given mapping key for a VCF header line"""
if key in QUOTE_FIELDS: return json.dumps(value) elif isinstance(value, str): if " " in value or "\t" in value: return json.dumps(value) else: return value elif isinstance(value, list): return "[{}]".format(", ".join(value)) else: return str(value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def mapping_to_str(mapping): """Convert mapping to string"""
result = ["<"] for i, (key, value) in enumerate(mapping.items()): if i > 0: result.append(",") result += [key, "=", serialize_for_header(key, value)] result += [">"] return "".join(result)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _build_indices(self): """Build indices for the different field types"""
result = {key: OrderedDict() for key in LINES_WITH_ID} for line in self.lines: if line.key in LINES_WITH_ID: result.setdefault(line.key, OrderedDict()) if line.mapping["ID"] in result[line.key]: warnings.warn( ("Seen {} header more than once: {}, using first" "occurence").format( line.key, line.mapping["ID"] ), DuplicateHeaderLineWarning, ) else: result[line.key][line.mapping["ID"]] = line else: result.setdefault(line.key, []) result[line.key].append(line) return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def copy(self): """Return a copy of this header"""
return Header([line.copy() for line in self.lines], self.samples.copy())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_lines(self, key): """Return header lines having the given ``key`` as their type"""
if key in self._indices: return self._indices[key].values() else: return []
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def has_header_line(self, key, id_): """Return whether there is a header line with the given ID of the type given by ``key`` :param key: The VCF header key/line type. :param id_: The ID value to compare fore :return: ``True`` if there is a header line starting with ``##${key}=`` in the VCF file having the mapping entry ``ID`` set to ``id_``. """
if key not in self._indices: return False else: return id_ in self._indices[key]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_line(self, header_line): """Add header line, updating any necessary support indices :return: ``False`` on conflicting line and ``True`` otherwise """
        # The line is always appended, even when ID registration below is
        # skipped or refused.
        self.lines.append(header_line)
        self._indices.setdefault(header_line.key, OrderedDict())
        if not hasattr(header_line, "mapping"):
            return False  # no registration required
        if self.has_header_line(header_line.key, header_line.mapping["ID"]):
            warnings.warn(
                (
                    "Detected duplicate header line with type {} and ID {}. "
                    "Ignoring this and subsequent one"
                ).format(header_line.key, header_line.mapping["ID"]),
                DuplicateHeaderLineWarning,
            )
            return False
        else:
            # Register the line in the per-type ID index.
            self._indices[header_line.key][header_line.mapping["ID"]] = header_line
            return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def copy(self): """Return a copy"""
mapping = OrderedDict(self.mapping.items()) return self.__class__(self.key, self.value, mapping)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _adjustSyllabification(adjustedPhoneList, syllableList):
    '''
    Inserts spaces into a syllable if needed

    Originally the phone list and syllable list contained the same number
    of phones.  But the adjustedPhoneList may have some insertions which
    are not accounted for in the syllableList.

    Returns a new syllable list whose total phone count (including the
    u"''" blank markers) matches len(adjustedPhoneList).
    '''
    # i walks through adjustedPhoneList; j is the size of the current
    # syllable's slice (grown below to absorb inserted blanks).
    i = 0
    retSyllableList = []
    for syllableNum, syllable in enumerate(syllableList):
        j = len(syllable)
        # Last syllable absorbs everything that remains.
        if syllableNum == len(syllableList) - 1:
            j = len(adjustedPhoneList) - i
        tmpPhoneList = adjustedPhoneList[i:i + j]
        numBlanks = -1
        phoneList = tmpPhoneList[:]
        # Keep extending the slice while it still contains blank markers;
        # each blank means one extra phone must be pulled in from the tail.
        while numBlanks != 0:

            numBlanks = tmpPhoneList.count(u"''")
            if numBlanks > 0:
                tmpPhoneList = adjustedPhoneList[i + j:i + j + numBlanks]
                phoneList.extend(tmpPhoneList)
                j += numBlanks

        # Insert the blank markers into the syllable at the positions where
        # they appear in the aligned phone list.
        for k, phone in enumerate(phoneList):
            if phone == u"''":
                syllable.insert(k, u"''")

        i += j

        retSyllableList.append(syllable)

    return retSyllableList
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _findBestPronunciation(isleWordList, aPron):
    '''
    Words may have multiple candidates in ISLE; returns the 'optimal' one.

    Returns a tuple of (isleWordList, alignedActualPronunciationList,
    alignedSyllabificationList, bestIndex).  "Optimal" means the candidate
    with the fewest alignment differences to aPron, preferring candidates
    that carry stress information when tied.
    '''

    aP = _prepPronunciation(aPron)  # Mapping to simplified phone inventory

    numDiffList = []
    withStress = []
    i = 0
    alignedSyllabificationList = []
    alignedActualPronunciationList = []
    for wordTuple in isleWordList:
        aPronMap = copy.deepcopy(aPron)
        syllableList = wordTuple[0]  # syllableList, stressList

        # Flatten the candidate's syllables and simplify its phones so it
        # can be aligned against the simplified actual pronunciation.
        iP = [phone for phoneList in syllableList for phone in phoneList]
        iP = _prepPronunciation(iP)

        alignedIP, alignedAP = alignPronunciations(iP, aP)

        # Remapping to actual phones
        #         alignedAP = [origPronDict.get(phon, u"''") for phon in alignedAP]
        alignedAP = [aPronMap.pop(0) if phon != u"''" else u"''"
                     for phon in alignedAP]
        alignedActualPronunciationList.append(alignedAP)

        # Adjusting the syllabification for differences between the dictionary
        # pronunciation and the actual pronunciation
        alignedSyllabification = _adjustSyllabification(alignedIP,
                                                        syllableList)
        alignedSyllabificationList.append(alignedSyllabification)

        # Count the number of misalignments between the two
        numDiff = alignedIP.count(u"''") + alignedAP.count(u"''")
        numDiffList.append(numDiff)

        # Is there stress in this word
        hasStress = False
        for syllable in syllableList:
            for phone in syllable:
                hasStress = u"ˈ" in phone or hasStress

        if hasStress:
            withStress.append(i)
        i += 1

    # Return the pronunciation that had the fewest differences
    # to the actual pronunciation
    minDiff = min(numDiffList)

    # When there are multiple candidates that have the minimum number
    # of differences, prefer one that has stress in it
    bestIndex = None
    bestIsStressed = None
    for i, numDiff in enumerate(numDiffList):
        if numDiff != minDiff:
            continue
        if bestIndex is None:
            bestIndex = i
            bestIsStressed = i in withStress
        else:
            if not bestIsStressed and i in withStress:
                bestIndex = i
                bestIsStressed = True

    return (isleWordList, alignedActualPronunciationList,
            alignedSyllabificationList, bestIndex)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _syllabifyPhones(phoneList, syllableList):
    '''
    Given a phone list and a syllable list, syllabify the phones

    Typically used by findBestSyllabification which first aligns the
    phoneList with a dictionary phoneList and then uses the dictionary
    syllabification to syllabify the input phoneList.
    '''
    # Carve phoneList into consecutive slices whose lengths mirror the
    # syllable sizes of syllableList.
    syllabifiedList = []
    start = 0
    for syllable in syllableList:
        end = start + len(syllable)
        syllabifiedList.append(phoneList[start:end])
        start = end

    return syllabifiedList
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def alignPronunciations(pronI, pronA): ''' Align the phones in two pronunciations ''' # First prep the two pronunctions pronI = [char for char in pronI] pronA = [char for char in pronA] # Remove any elements not in the other list (but maintain order) pronITmp = pronI pronATmp = pronA # Find the longest sequence sequence = _lcs(pronITmp, pronATmp) # Find the index of the sequence # TODO: investigate ambiguous cases startA = 0 startI = 0 sequenceIndexListA = [] sequenceIndexListI = [] for phone in sequence: startA = pronA.index(phone, startA) startI = pronI.index(phone, startI) sequenceIndexListA.append(startA) sequenceIndexListI.append(startI) # An index on the tail of both will be used to create output strings # of the same length sequenceIndexListA.append(len(pronA)) sequenceIndexListI.append(len(pronI)) # Fill in any blanks such that the sequential items have the same # index and the two strings are the same length for x in range(len(sequenceIndexListA)): indexA = sequenceIndexListA[x] indexI = sequenceIndexListI[x] if indexA < indexI: for x in range(indexI - indexA): pronA.insert(indexA, "''") sequenceIndexListA = [val + indexI - indexA for val in sequenceIndexListA] elif indexA > indexI: for x in range(indexA - indexI): pronI.insert(indexI, "''") sequenceIndexListI = [val + indexA - indexI for val in sequenceIndexListI] return pronI, pronA
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _findBestSyllabification(inputIsleWordList, actualPronunciationList):
    '''
    Find the best syllabification for a word

    First find the closest pronunciation to a given pronunciation. Then
    take the syllabification for that pronunciation and map it onto the
    input pronunciation.

    Returns a tuple: (stressedSyllableI, stressedVowelI, syllableList,
    syllabification, stressedSyllableIndexList, stressedPhoneIndexList,
    flattenedStressIndexList).
    '''
    retList = _findBestPronunciation(inputIsleWordList,
                                     actualPronunciationList)
    isleWordList, alignedAPronList, alignedSyllableList, bestIndex = retList

    alignedPhoneList = alignedAPronList[bestIndex]
    alignedSyllables = alignedSyllableList[bestIndex]
    syllabification = isleWordList[bestIndex][0]
    stressedSyllableIndexList = isleWordList[bestIndex][1]
    stressedPhoneIndexList = isleWordList[bestIndex][2]

    syllableList = _syllabifyPhones(alignedPhoneList, alignedSyllables)

    # Get the location of stress in the generated file
    try:
        stressedSyllableI = stressedSyllableIndexList[0]
    except IndexError:
        # No stress information available for this candidate.
        stressedSyllableI = None
        stressedVowelI = None
    else:
        stressedVowelI = _getSyllableNucleus(syllableList[stressedSyllableI])

    # Count the index of the stressed phones, if the stress list has
    # become flattened (no syllable information)
    flattenedStressIndexList = []
    for i, j in zip(stressedSyllableIndexList, stressedPhoneIndexList):
        # j is the phone offset within syllable i; add the lengths of all
        # preceding syllables to flatten it into a word-level index.
        k = j
        for l in range(i):
            k += len(syllableList[l])
        flattenedStressIndexList.append(k)

    return (stressedSyllableI, stressedVowelI, syllableList,
            syllabification, stressedSyllableIndexList,
            stressedPhoneIndexList, flattenedStressIndexList)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _getSyllableNucleus(phoneList):
    '''
    Given the phones in a syllable, retrieves the vowel index

    Returns None when the syllable has no vowel; raises
    TooManyVowelsInSyllable when it has more than one.
    '''
    vowelFlags = [isletool.isVowel(phone) for phone in phoneList]
    numVowels = sum(vowelFlags)

    if numVowels > 1:
        cvList = ['V' if flag else 'C' for flag in vowelFlags]
        raise TooManyVowelsInSyllable(phoneList, cvList)

    if numVowels == 1:
        return vowelFlags.index(True)
    return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def findClosestPronunciation(inputIsleWordList, aPron):
    '''
    Find the closest dictionary pronunciation to a provided pronunciation
    '''
    # _findBestPronunciation returns (isleWordList, alignedAPronList,
    # alignedSyllableList, bestIndex); only the first and last are needed.
    isleWordList, _, _, bestIndex = _findBestPronunciation(inputIsleWordList,
                                                           aPron)
    return isleWordList[bestIndex]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_switch(type, settings, pin): """Create a switch. Args: type: (str): type of the switch [A,B,C,D] settings (str): a comma-separated list pin (int): wiringPi pin Returns: switch """
    switch = None
    # Each switch type encodes its settings string differently; see the
    # pi_switch documentation for the per-type parameter meanings.
    if type == "A":
        group, device = settings.split(",")
        switch = pi_switch.RCSwitchA(group, device)

    elif type == "B":
        addr, channel = settings.split(",")
        addr = int(addr)
        channel = int(channel)
        switch = pi_switch.RCSwitchB(addr, channel)

    elif type == "C":
        family, group, device = settings.split(",")
        group = int(group)
        device = int(device)
        switch = pi_switch.RCSwitchC(family, group, device)

    elif type == "D":
        group, device = settings.split(",")
        device = int(device)
        switch = pi_switch.RCSwitchD(group, device)

    else:
        # NOTE(review): Python 2 print statement — this module is py2-only
        # as written; confirm before porting.
        print "Type %s is not supported!" % type
        sys.exit()

    # Bind the transmitter to the given wiringPi pin before returning.
    switch.enableTransmit(pin)
    return switch
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def format_atomic(value): """Format atomic value This function also takes care of escaping the value in case one of the reserved characters occurs in the value. """
# Perform escaping if isinstance(value, str): if any(r in value for r in record.RESERVED_CHARS): for k, v in record.ESCAPE_MAPPING: value = value.replace(k, v) # String-format the given value if value is None: return "." else: return str(value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def format_value(field_info, value, section): """Format possibly compound value given the FieldInfo"""
if section == "FORMAT" and field_info.id == "FT": if not value: return "." elif isinstance(value, list): return ";".join(map(format_atomic, value)) elif field_info.number == 1: if value is None: return "." else: return format_atomic(value) else: if not value: return "." else: return ",".join(map(format_atomic, value))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _write_header(self): """Write out the header"""
        # Emit all "##"-style meta lines first.
        for line in self.header.lines:
            print(line.serialize(), file=self.stream)
        # Then the "#CHROM..." column header, with or without the FORMAT
        # and per-sample columns depending on whether samples exist.
        if self.header.samples.names:
            print(
                "\t".join(list(parser.REQUIRE_SAMPLE_HEADER) + self.header.samples.names),
                file=self.stream,
            )
        else:
            print("\t".join(parser.REQUIRE_NO_SAMPLE_HEADER), file=self.stream)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _serialize_record(self, record): """Serialize whole Record"""
        # f maps empty values to the VCF missing marker ".".
        f = self._empty_to_dot
        row = [record.CHROM, record.POS]
        row.append(f(";".join(record.ID)))
        row.append(f(record.REF))
        if not record.ALT:
            row.append(".")
        else:
            row.append(",".join([f(a.serialize()) for a in record.ALT]))
        row.append(f(record.QUAL))
        row.append(f(";".join(record.FILTER)))
        row.append(f(self._serialize_info(record)))
        # FORMAT and the per-sample call columns are only written when the
        # record actually has a FORMAT; sample order follows the header.
        if record.FORMAT:
            row.append(":".join(record.FORMAT))
        row += [
            self._serialize_call(record.FORMAT, record.call_for_sample[s])
            for s in self.header.samples.names
        ]
        print(*row, sep="\t", file=self.stream)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _serialize_info(self, record): """Return serialized version of record.INFO"""
result = [] for key, value in record.INFO.items(): info = self.header.get_info_field_info(key) if info.type == "Flag": result.append(key) else: result.append("{}={}".format(key, format_value(info, value, "INFO"))) return ";".join(result)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _serialize_call(self, format_, call): """Return serialized version of the Call using the record's FORMAT'"""
if isinstance(call, record.UnparsedCall): return call.unparsed_data else: result = [ format_value(self.header.get_format_field_info(key), call.data.get(key), "FORMAT") for key in format_ ] return ":".join(result)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _create_extended_jinja_tags(self, nodes): """Loops through the nodes and looks for special jinja tags that contains more than one tag but only one ending tag."""
        # jinja_a tracks the opening tag of a potential extended group;
        # ext_node accumulates consecutive tags belonging to one group.
        jinja_a = None
        jinja_b = None
        ext_node = None
        ext_nodes = []
        for node in nodes:
            if isinstance(node, EmptyLine):
                continue

            # Recurse first so children are grouped before their parents.
            if node.has_children():
                node.children = self._create_extended_jinja_tags(node.children)

            # Any non-jinja node breaks the current chain.
            if not isinstance(node, JinjaTag):
                jinja_a = None
                continue

            if jinja_a is None or (
                node.tag_name in self._extended_tags and
                jinja_a.tag_name not in self._extended_tags[node.tag_name]):
                jinja_a = node
                continue

            # node continues jinja_a's group (e.g. "else" after "if"):
            # start or extend an ExtendedJinjaTag wrapper.
            if node.tag_name in self._extended_tags and \
                jinja_a.tag_name in self._extended_tags[node.tag_name]:
                if ext_node is None:
                    ext_node = ExtendedJinjaTag()
                    ext_node.add(jinja_a)
                    ext_nodes.append(ext_node)
                ext_node.add(node)
            else:
                ext_node = None
                jinja_a = node

        #replace the nodes with the new extended node
        for node in ext_nodes:
            # Insert the wrapper before its first member, then remove the
            # members it absorbed.
            nodes.insert(nodes.index(node.children[0]), node)

            index = nodes.index(node.children[0])
            del nodes[index:index+len(node.children)]

        return nodes
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def has_children(self): "returns False if children is empty or contains only empty lines else True." return bool([x for x in self.children if not isinstance(x, EmptyLine)])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_requirements(path): """Parse ``requirements.txt`` at ``path``."""
requirements = [] with open(path, "rt") as reqs_f: for line in reqs_f: line = line.strip() if line.startswith("-r"): fname = line.split()[1] inner_path = os.path.join(os.path.dirname(path), fname) requirements += parse_requirements(inner_path) elif line != "" and not line.startswith("#"): requirements.append(line) return requirements
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def watch(cams, path=None, delay=10): """Get screenshots from all cams at defined intervall."""
    # Loop forever: one snapshot per camera, then sleep `delay` seconds.
    # Runs until interrupted by the caller (e.g. KeyboardInterrupt).
    while True:
        for c in cams:
            c.snap(path)
        time.sleep(delay)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def score(self, phone_number, account_lifecycle_event, **params): """ Score is an API that delivers reputation scoring based on phone number intelligence, traffic patterns, machine learning, and a global data consortium. See https://developer.telesign.com/docs/score-api for detailed API documentation. """
        # Delegate to the generic POST handler with the phone number
        # interpolated into the Score resource URI.
        return self.post(SCORE_RESOURCE.format(phone_number=phone_number),
                         account_lifecycle_event=account_lifecycle_event,
                         **params)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate_telesign_headers(customer_id, api_key, method_name, resource, url_encoded_fields, date_rfc2616=None, nonce=None, user_agent=None, content_type=None): """ Generates the TeleSign REST API headers used to authenticate requests. Creates the canonicalized string_to_sign and generates the HMAC signature. This is used to authenticate requests against the TeleSign REST API. See https://developer.telesign.com/docs/authentication for detailed API documentation. :param customer_id: Your account customer_id. :param api_key: Your account api_key. :param method_name: The HTTP method name of the request as a upper case string, should be one of 'POST', 'GET', 'PUT' or 'DELETE'. :param resource: The partial resource URI to perform the request against, as a string. :param url_encoded_fields: HTTP body parameters to perform the HTTP request with, must be a urlencoded string. :param date_rfc2616: The date and time of the request formatted in rfc 2616, as a string. :param nonce: A unique cryptographic nonce for the request, as a string. :param user_agent: (optional) User Agent associated with the request, as a string. :param content_type: (optional) ContentType of the request, as a string. :return: The TeleSign authentication headers. """
    if date_rfc2616 is None:
        date_rfc2616 = formatdate(usegmt=True)

    if nonce is None:
        nonce = str(uuid.uuid4())

    # Only body-carrying methods default to a form-encoded content type;
    # the content type participates in the signature below.
    if not content_type:
        content_type = "application/x-www-form-urlencoded" if method_name in ("POST", "PUT") else ""

    auth_method = "HMAC-SHA256"

    # Build the canonical string_to_sign; the exact order and the "\n"
    # separators are part of the TeleSign signing contract — do not reorder.
    string_to_sign_builder = ["{method}".format(method=method_name)]

    string_to_sign_builder.append("\n{content_type}".format(content_type=content_type))

    string_to_sign_builder.append("\n{date}".format(date=date_rfc2616))

    string_to_sign_builder.append("\nx-ts-auth-method:{auth_method}".format(auth_method=auth_method))

    string_to_sign_builder.append("\nx-ts-nonce:{nonce}".format(nonce=nonce))

    # The body is only signed for body-carrying requests.
    if content_type and url_encoded_fields:
        string_to_sign_builder.append("\n{fields}".format(fields=url_encoded_fields))

    string_to_sign_builder.append("\n{resource}".format(resource=resource))

    string_to_sign = "".join(string_to_sign_builder)

    # HMAC-SHA256 over the canonical string, keyed with the base64-decoded
    # api_key; the signature itself is base64-encoded into the header.
    signer = hmac.new(b64decode(api_key), string_to_sign.encode("utf-8"), sha256)
    signature = b64encode(signer.digest()).decode("utf-8")

    authorization = "TSA {customer_id}:{signature}".format(
        customer_id=customer_id,
        signature=signature)

    headers = {
        "Authorization": authorization,
        "Date": date_rfc2616,
        "Content-Type": content_type,
        "x-ts-auth-method": auth_method,
        "x-ts-nonce": nonce
    }

    if user_agent:
        headers['User-Agent'] = user_agent

    return headers
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def post(self, resource, **params): """ Generic TeleSign REST API POST handler. :param resource: The partial resource URI to perform the request against, as a string. :param params: Body params to perform the POST request with, as a dictionary. :return: The RestClient Response object. """
        # Delegate to the shared request handler with the session's POST.
        return self._execute(self.session.post, 'POST', resource, **params)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self, resource, **params): """ Generic TeleSign REST API GET handler. :param resource: The partial resource URI to perform the request against, as a string. :param params: Body params to perform the GET request with, as a dictionary. :return: The RestClient Response object. """
        # Delegate to the shared request handler with the session's GET.
        return self._execute(self.session.get, 'GET', resource, **params)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def put(self, resource, **params): """ Generic TeleSign REST API PUT handler. :param resource: The partial resource URI to perform the request against, as a string. :param params: Body params to perform the PUT request with, as a dictionary. :return: The RestClient Response object. """
        # Delegate to the shared request handler with the session's PUT.
        return self._execute(self.session.put, 'PUT', resource, **params)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete(self, resource, **params): """ Generic TeleSign REST API DELETE handler. :param resource: The partial resource URI to perform the request against, as a string. :param params: Body params to perform the DELETE request with, as a dictionary. :return: The RestClient Response object. """
        # Delegate to the shared request handler with the session's DELETE.
        return self._execute(self.session.delete, 'DELETE', resource, **params)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _execute(self, method_function, method_name, resource, **params): """ Generic TeleSign REST API request handler. :param method_function: The Requests HTTP request function to perform the request. :param method_name: The HTTP method name, as an upper case string. :param resource: The partial resource URI to perform the request against, as a string. :param params: Body params to perform the HTTP request with, as a dictionary. :return: The RestClient Response object. """
resource_uri = "{api_host}{resource}".format(api_host=self.api_host, resource=resource) url_encoded_fields = self._encode_params(params) headers = RestClient.generate_telesign_headers(self.customer_id, self.api_key, method_name, resource, url_encoded_fields, user_agent=self.user_agent) if method_name in ['POST', 'PUT']: payload = {'data': url_encoded_fields} else: payload = {'params': url_encoded_fields} response = self.Response(method_function(resource_uri, headers=headers, timeout=self.timeout, **payload)) return response