repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
Metatab/metapack
metapack/appurl.py
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/appurl.py#L250-L301
def resolve_url(self, resource_name):
    """Return a URL to a local copy of a resource, suitable for get_generator()

    For Package URLS, resolution involves generating a URL to a data file from the package URL and
    the value of a resource. The resource value, the url, can be one of:

    - An absolute URL, with a web scheme
    - A relative URL, relative to the package, with a file scheme.

    URLs with non-file schemes are returned. File schemes are assumed to be relative to the package,
    and are resolved according to the type of resource.

    :param resource_name: name or URL string of the resource to resolve
    :raises ResourceError: when the resource cannot be found in a CSV package,
        or when downloading the joined target fails
    """

    u = parse_app_url(resource_name)

    if u.scheme != 'file':
        # Non-file (web) schemes are already absolute: return as-is.
        t = u

    elif self.target_format == 'csv' and self.target_file != DEFAULT_METATAB_FILE:
        # There are two forms for CSV package URLs:
        # - A CSV package, which can only have absolute URLs
        # - A Filesystem package, which can have relative URLs.
        #
        # The complication is that the filesystem package usually has a metadata file named
        # DEFAULT_METATAB_FILE, which can distinguish it from a CSV package, but it's also possible
        # to have a filesystem package with a non-standard package name.
        #
        # So, this clause can happen for two cases: a CSV package, or a Filesystem package with a
        # non-standard metadata file name.
        #
        # For CSV packages, need to get the package and open it to get the resource URL, because
        # they are always absolute web URLs and may not be related to the location of the metadata.
        s = self.get_resource()
        rs = s.metadata_url.doc.resource(resource_name)
        if rs is not None:
            t = parse_app_url(rs.url)
        else:
            raise ResourceError("No resource for '{}' in '{}' ".format(resource_name, self))
    else:
        # File scheme: resolve relative to the package by joining, then
        # download/locate the resource and take its target.
        jt = self.join_target(resource_name)
        try:
            rs = jt.get_resource()
        except DownloadError:
            raise ResourceError(
                "Failed to download resource for '{}' for '{}' in '{}'".format(jt, resource_name, self))
        t = rs.get_target()

    return t
[ "def", "resolve_url", "(", "self", ",", "resource_name", ")", ":", "u", "=", "parse_app_url", "(", "resource_name", ")", "if", "u", ".", "scheme", "!=", "'file'", ":", "t", "=", "u", "elif", "self", ".", "target_format", "==", "'csv'", "and", "self", "...
Return a URL to a local copy of a resource, suitable for get_generator() For Package URLS, resolution involves generating a URL to a data file from the package URL and the value of a resource. The resource value, the url, can be one of: - An absolute URL, with a web scheme - A relative URL, relative to the package, with a file scheme. URLs with non-file schemes are returned. File scheme are assumed to be relative to the package, and are resolved according to the type of resource.
[ "Return", "a", "URL", "to", "a", "local", "copy", "of", "a", "resource", "suitable", "for", "get_generator", "()" ]
python
train
numenta/nupic
src/nupic/swarming/utils.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/utils.py#L535-L556
def tick(self):
    """Activity tick handler; services all activities.

    Advances each registered activity's countdown iterator by one step.
    When an iterator is exhausted, the activity's callback fires; the
    iterator is then restarted (for repeating activities) or cleared to
    None (for one-shot activities).

    Returns: True if controlling iterator says it's okay to keep
              going; False to stop
    """
    # Run activities whose time has come
    for act in self.__activities:
        if not act.iteratorHolder[0]:
            # One-shot activity that already fired (holder is None).
            continue

        try:
            next(act.iteratorHolder[0])
        except StopIteration:
            act.cb()
            if act.repeating:
                # `range` replaces the Python 2-only `xrange`; iterating
                # over the two is equivalent, and `range` also works on
                # Python 3 (where `xrange` raises NameError).
                act.iteratorHolder[0] = iter(range(act.period))
            else:
                act.iteratorHolder[0] = None

    return True
[ "def", "tick", "(", "self", ")", ":", "# Run activities whose time has come", "for", "act", "in", "self", ".", "__activities", ":", "if", "not", "act", ".", "iteratorHolder", "[", "0", "]", ":", "continue", "try", ":", "next", "(", "act", ".", "iteratorHol...
Activity tick handler; services all activities Returns: True if controlling iterator says it's okay to keep going; False to stop
[ "Activity", "tick", "handler", ";", "services", "all", "activities" ]
python
valid
addisonlynch/iexfinance
iexfinance/iexdata/base.py
https://github.com/addisonlynch/iexfinance/blob/40f0bdcc51b329031d06178020fd774494250456/iexfinance/iexdata/base.py#L368-L403
def fetch(self):
    """Unfortunately, IEX's API can only retrieve data one day or one month
    at a time. Rather than specifying a date range, we will have to run
    the read function for each date provided.

    :return: DataFrame
    """
    tlen = self.end - self.start
    dfs = []
    # Build list of all dates within the given range
    lrange = [x for x in (self.start + timedelta(n) for n in range(tlen.days))]
    # Collapse the daily range to the first day of each distinct month,
    # since this endpoint is queried per month.
    mrange = []
    for dt in lrange:
        if datetime(dt.year, dt.month, 1) not in mrange:
            mrange.append(datetime(dt.year, dt.month, 1))
    lrange = mrange
    for date in lrange:
        # curr_date is read by the parent class when building the request.
        self.curr_date = date
        tdf = super(MonthlySummaryReader, self).fetch()

        # We may not return data if this was a weekend/holiday:
        if self.output_format == 'pandas':
            if not tdf.empty:
                tdf['date'] = date.strftime(self.date_format)

        dfs.append(tdf)

    # We may not return any data if we failed to specify useful parameters:
    if self.output_format == 'pandas':
        result = pd.concat(dfs) if len(dfs) > 0 else pd.DataFrame()
        # NOTE(review): if no months matched (start == end), result is an
        # empty frame with no 'date' column, so set_index('date') would
        # raise KeyError — TODO confirm callers never pass an empty range.
        return result.set_index('date')
    else:
        return dfs
[ "def", "fetch", "(", "self", ")", ":", "tlen", "=", "self", ".", "end", "-", "self", ".", "start", "dfs", "=", "[", "]", "# Build list of all dates within the given range", "lrange", "=", "[", "x", "for", "x", "in", "(", "self", ".", "start", "+", "tim...
Unfortunately, IEX's API can only retrieve data one day or one month at a time. Rather than specifying a date range, we will have to run the read function for each date provided. :return: DataFrame
[ "Unfortunately", "IEX", "s", "API", "can", "only", "retrieve", "data", "one", "day", "or", "one", "month", "at", "a", "time", ".", "Rather", "than", "specifying", "a", "date", "range", "we", "will", "have", "to", "run", "the", "read", "function", "for", ...
python
train
Chilipp/model-organization
model_organization/config.py
https://github.com/Chilipp/model-organization/blob/694d1219c7ed7e1b2b17153afa11bdc21169bca2/model_organization/config.py#L519-L529
def remove(self, experiment):
    """Remove the configuration of an experiment.

    Deletes the experiment's YAML configuration file (together with any
    backup ``~`` and lock ``.lck`` companions) from the owning project's
    ``.project`` directory, then drops the experiment from this mapping.
    Silently does nothing when the experiment or its project is unknown.
    """
    try:
        root = self.projects[self[experiment]['project']]['root']
    except KeyError:
        # Unknown experiment or project: nothing to clean up.
        return
    base = osp.join(root, '.project', experiment + '.yml')
    for suffix in ('', '~', '.lck'):
        candidate = base + suffix
        if os.path.exists(candidate):
            os.remove(candidate)
    del self[experiment]
[ "def", "remove", "(", "self", ",", "experiment", ")", ":", "try", ":", "project_path", "=", "self", ".", "projects", "[", "self", "[", "experiment", "]", "[", "'project'", "]", "]", "[", "'root'", "]", "except", "KeyError", ":", "return", "config_path", ...
Remove the configuration of an experiment
[ "Remove", "the", "configuration", "of", "an", "experiment" ]
python
train
wummel/linkchecker
third_party/dnspython/dns/message.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/dnspython/dns/message.py#L431-L472
def use_tsig(self, keyring, keyname=None, fudge=300, original_id=None,
             tsig_error=0, other_data='',
             algorithm=dns.tsig.default_algorithm):
    """When sending, a TSIG signature using the specified keyring
    and keyname should be added.

    @param keyring: The TSIG keyring to use; defaults to None.
    @type keyring: dict
    @param keyname: The name of the TSIG key to use; defaults to None.
    The key must be defined in the keyring.  If a keyring is specified
    but a keyname is not, then the key used will be the first key in the
    keyring.  Note that the order of keys in a dictionary is not defined,
    so applications should supply a keyname when a keyring is used, unless
    they know the keyring contains only one key.
    @type keyname: dns.name.Name or string
    @param fudge: TSIG time fudge; default is 300 seconds.
    @type fudge: int
    @param original_id: TSIG original id; defaults to the message's id
    @type original_id: int
    @param tsig_error: TSIG error code; default is 0.
    @type tsig_error: int
    @param other_data: TSIG other data.
    @type other_data: string
    @param algorithm: The TSIG algorithm to use; defaults to
    dns.tsig.default_algorithm
    """

    self.keyring = keyring
    if keyname is None:
        # NOTE(review): indexing dict.keys() and the `unicode` check below
        # are Python 2-only idioms; under Python 3 these raise
        # TypeError/NameError. This module appears to target Python 2.
        self.keyname = self.keyring.keys()[0]
    else:
        # Accept a textual key name and convert it to a dns.name.Name.
        if isinstance(keyname, (str, unicode)):
            keyname = dns.name.from_text(keyname)
        self.keyname = keyname
    self.keyalgorithm = algorithm
    self.fudge = fudge
    if original_id is None:
        # Default the TSIG original id to this message's own id.
        self.original_id = self.id
    else:
        self.original_id = original_id
    self.tsig_error = tsig_error
    self.other_data = other_data
[ "def", "use_tsig", "(", "self", ",", "keyring", ",", "keyname", "=", "None", ",", "fudge", "=", "300", ",", "original_id", "=", "None", ",", "tsig_error", "=", "0", ",", "other_data", "=", "''", ",", "algorithm", "=", "dns", ".", "tsig", ".", "defaul...
When sending, a TSIG signature using the specified keyring and keyname should be added. @param keyring: The TSIG keyring to use; defaults to None. @type keyring: dict @param keyname: The name of the TSIG key to use; defaults to None. The key must be defined in the keyring. If a keyring is specified but a keyname is not, then the key used will be the first key in the keyring. Note that the order of keys in a dictionary is not defined, so applications should supply a keyname when a keyring is used, unless they know the keyring contains only one key. @type keyname: dns.name.Name or string @param fudge: TSIG time fudge; default is 300 seconds. @type fudge: int @param original_id: TSIG original id; defaults to the message's id @type original_id: int @param tsig_error: TSIG error code; default is 0. @type tsig_error: int @param other_data: TSIG other data. @type other_data: string @param algorithm: The TSIG algorithm to use; defaults to dns.tsig.default_algorithm
[ "When", "sending", "a", "TSIG", "signature", "using", "the", "specified", "keyring", "and", "keyname", "should", "be", "added", "." ]
python
train
toidi/hadoop-yarn-api-python-client
yarn_api_client/application_master.py
https://github.com/toidi/hadoop-yarn-api-python-client/blob/d245bd41808879be6637acfd7460633c0c7dfdd6/yarn_api_client/application_master.py#L44-L56
def jobs(self, application_id):
    """Return the list of jobs running on this application master.

    :param str application_id: The application id
    :returns: API response object with JSON data
    :rtype: :py:class:`yarn_api_client.base.Response`
    """
    endpoint = '/proxy/{appid}/ws/v1/mapreduce/jobs'.format(
        appid=application_id)
    return self.request(endpoint)
[ "def", "jobs", "(", "self", ",", "application_id", ")", ":", "path", "=", "'/proxy/{appid}/ws/v1/mapreduce/jobs'", ".", "format", "(", "appid", "=", "application_id", ")", "return", "self", ".", "request", "(", "path", ")" ]
The jobs resource provides a list of the jobs running on this application master. :param str application_id: The application id :returns: API response object with JSON data :rtype: :py:class:`yarn_api_client.base.Response`
[ "The", "jobs", "resource", "provides", "a", "list", "of", "the", "jobs", "running", "on", "this", "application", "master", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/distributed/runfn.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L223-L249
def _read_from_cwlinput(in_file, work_dir, runtime, parallel, input_order, output_cwl_keys):
    """Read data records from a JSON dump of inputs. Avoids command line flattening of records.

    :param in_file: path to a JSON file of CWL inputs
    :param work_dir: working directory passed through to finalization
    :param runtime: runtime settings passed through to finalization
    :param parallel: parallelization mode used when merging inputs
    :param input_order: dict mapping input keys to their kind ('record' or plain)
    :param output_cwl_keys: expected output keys, passed to finalization
    :returns: tuple of (finalized inputs, list of discovered input files)
    """
    with open(in_file) as in_handle:
        inputs = json.load(in_handle)
    items_by_key = {}
    input_files = []
    passed_keys = set([])
    # Skip bookkeeping inputs ("sentinel*", "ignore*"); everything else is data.
    for key, input_val in ((k, v) for (k, v) in inputs.items()
                           if not k.startswith(("sentinel", "ignore"))):
        # "_toolinput"-suffixed keys are aliases for their base key.
        if key.endswith("_toolinput"):
            key = key.replace("_toolinput", "")
        if input_order[key] == "record":
            cur_keys, items = _read_cwl_record(input_val)
            passed_keys |= cur_keys
            items_by_key[key] = items
        else:
            # Non-record inputs are keyed by their '__'-split tuple form.
            items_by_key[tuple(key.split("__"))] = _cwlvar_to_wdl(input_val)
            input_files = _find_input_files(input_val, input_files)
    prepped = _merge_cwlinputs(items_by_key, input_order, parallel)
    out = []
    for data in prepped:
        # Merged inputs may be a single item or a list/tuple of items
        # (multi-sample batches); finalize each item individually.
        if isinstance(data, (list, tuple)):
            out.append([_finalize_cwl_in(utils.to_single_data(x), work_dir, list(passed_keys),
                                         output_cwl_keys, runtime) for x in data])
        else:
            out.append(_finalize_cwl_in(data, work_dir, list(passed_keys), output_cwl_keys, runtime))
    return out, input_files
[ "def", "_read_from_cwlinput", "(", "in_file", ",", "work_dir", ",", "runtime", ",", "parallel", ",", "input_order", ",", "output_cwl_keys", ")", ":", "with", "open", "(", "in_file", ")", "as", "in_handle", ":", "inputs", "=", "json", ".", "load", "(", "in_...
Read data records from a JSON dump of inputs. Avoids command line flattening of records.
[ "Read", "data", "records", "from", "a", "JSON", "dump", "of", "inputs", ".", "Avoids", "command", "line", "flattening", "of", "records", "." ]
python
train
ronhanson/python-tbx
fabfile/git.py
https://github.com/ronhanson/python-tbx/blob/87f72ae0cadecafbcd144f1e930181fba77f6b83/fabfile/git.py#L14-L17
def pull(remote='origin', branch='master'):
    """Run ``git pull`` for the given remote and branch (git pull commit)."""
    message = "Pulling changes from repo ( %s / %s)..." % (remote, branch)
    print(cyan(message))
    command = "git pull %s %s" % (remote, branch)
    local(command)
[ "def", "pull", "(", "remote", "=", "'origin'", ",", "branch", "=", "'master'", ")", ":", "print", "(", "cyan", "(", "\"Pulling changes from repo ( %s / %s)...\"", "%", "(", "remote", ",", "branch", ")", ")", ")", "local", "(", "\"git pull %s %s\"", "%", "(",...
git pull commit
[ "git", "pull", "commit" ]
python
train
openstack/networking-cisco
networking_cisco/apps/saf/server/cisco_dfa_rest.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/cisco_dfa_rest.py#L208-L221
def _create_org(self, orch_id, name, desc): """Create organization on the DCNM. :param orch_id: orchestrator ID :param name: Name of organization :param desc: Description of organization """ url = self._org_url payload = { "organizationName": name, "description": name if len(desc) == 0 else desc, "orchestrationSource": orch_id} return self._send_request('POST', url, payload, 'organization')
[ "def", "_create_org", "(", "self", ",", "orch_id", ",", "name", ",", "desc", ")", ":", "url", "=", "self", ".", "_org_url", "payload", "=", "{", "\"organizationName\"", ":", "name", ",", "\"description\"", ":", "name", "if", "len", "(", "desc", ")", "=...
Create organization on the DCNM. :param orch_id: orchestrator ID :param name: Name of organization :param desc: Description of organization
[ "Create", "organization", "on", "the", "DCNM", "." ]
python
train
StanfordVL/robosuite
robosuite/models/tasks/pick_place_task.py
https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/models/tasks/pick_place_task.py#L130-L154
def place_visual(self):
    """Place each visual object at a fixed position within the second bin.

    NOTE(review): despite the original docstring ("randomly until no
    collisions or max iterations hit"), this implementation is
    deterministic: object index 0-3 is assigned to one quadrant of bin 2
    (indices 0 and 2 take the low-x half, indices 0 and 1 the low-y half)
    and the object is centered in that quadrant, lifted by its bottom
    offset.
    """
    index = 0
    bin_pos = string_to_array(self.bin2_body.get("pos"))
    bin_size = self.bin_size
    for _, obj_mjcf in self.visual_objects:
        bin_x_low = bin_pos[0]
        bin_y_low = bin_pos[1]
        # Shift to the low-x half for objects 0 and 2, low-y half for 0 and 1,
        # so the four objects land in the four quadrants of the bin.
        if index == 0 or index == 2:
            bin_x_low -= bin_size[0] / 2
        if index < 2:
            bin_y_low -= bin_size[1] / 2

        bin_x_high = bin_x_low + bin_size[0] / 2
        bin_y_high = bin_y_low + bin_size[1] / 2
        bottom_offset = obj_mjcf.get_bottom_offset()

        # Midpoint of the quadrant in x/y; z is the bin height (2*z/2 = z).
        bin_range = [bin_x_low + bin_x_high, bin_y_low + bin_y_high, 2 * bin_pos[2]]
        bin_center = np.array(bin_range) / 2.0

        # Raise the object so its bottom sits at the bin center height.
        pos = bin_center - bottom_offset
        self.visual_obj_mjcf[index].set("pos", array_to_string(pos))
        index += 1
[ "def", "place_visual", "(", "self", ")", ":", "index", "=", "0", "bin_pos", "=", "string_to_array", "(", "self", ".", "bin2_body", ".", "get", "(", "\"pos\"", ")", ")", "bin_size", "=", "self", ".", "bin_size", "for", "_", ",", "obj_mjcf", "in", "self"...
Places visual objects randomly until no collisions or max iterations hit.
[ "Places", "visual", "objects", "randomly", "until", "no", "collisions", "or", "max", "iterations", "hit", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/ngsalign/alignprep.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/alignprep.py#L532-L586
def _bgzip_from_bam(bam_file, dirs, data, is_retry=False, output_infix=''):
    """Create bgzipped fastq files from an input BAM file.

    Converts a (possibly paired) BAM into one or two bgzip-compressed
    fastq files via biobambam's bamtofastq, writing into the
    ``align_prep`` work directory. On a "deflate failed" I/O error the
    conversion is retried once in single-core mode.

    :param bam_file: input BAM path (may be an object-store reference)
    :param dirs: dict of work directories; uses dirs["work"]
    :param data: sample data dict with config under data["config"]
    :param is_retry: internal flag for the single-core retry pass
    :param output_infix: extra text inserted into the output file names
    :returns: list of existing output fastq paths (1 or 2 entries)
    """
    # tools
    config = data["config"]
    bamtofastq = config_utils.get_program("bamtofastq", config)
    resources = config_utils.get_resources("bamtofastq", config)
    cores = config["algorithm"].get("num_cores", 1)
    max_mem = config_utils.convert_to_bytes(resources.get("memory", "1G")) * cores
    bgzip = tools.get_bgzip_cmd(config, is_retry)
    # files
    work_dir = utils.safe_makedir(os.path.join(dirs["work"], "align_prep"))
    out_file_1 = os.path.join(work_dir, "%s%s-1.fq.gz" % (os.path.splitext(os.path.basename(bam_file))[0], output_infix))
    out_file_2 = out_file_1.replace("-1.fq.gz", "-2.fq.gz")
    needs_retry = False
    if is_retry or not utils.file_exists(out_file_1):
        # Single-end input produces only one output file.
        if not bam.is_paired(bam_file):
            out_file_2 = None
        with file_transaction(config, out_file_1) as tx_out_file:
            # Remove any partial outputs from a previous failed attempt.
            for f in [tx_out_file, out_file_1, out_file_2]:
                if f and os.path.exists(f):
                    os.remove(f)
            fq1_bgzip_cmd = "%s -c /dev/stdin > %s" % (bgzip, tx_out_file)
            prep_cmd = _seqtk_fastq_prep_cl(data, read_num=0)
            if prep_cmd:
                fq1_bgzip_cmd = prep_cmd + " | " + fq1_bgzip_cmd
            sortprefix = "%s-sort" % os.path.splitext(tx_out_file)[0]
            if bam.is_paired(bam_file):
                prep_cmd = _seqtk_fastq_prep_cl(data, read_num=1)
                fq2_bgzip_cmd = "%s -c /dev/stdin > %s" % (bgzip, out_file_2)
                if prep_cmd:
                    fq2_bgzip_cmd = prep_cmd + " | " + fq2_bgzip_cmd
                # bamtofastq writes reads 1/2 into process substitutions;
                # orphan/secondary reads are discarded to /dev/null.
                out_str = ("F=>({fq1_bgzip_cmd}) F2=>({fq2_bgzip_cmd}) S=/dev/null O=/dev/null "
                           "O2=/dev/null collate=1 colsbs={max_mem}")
            else:
                out_str = "S=>({fq1_bgzip_cmd})"
            bam_file = objectstore.cl_input(bam_file)
            extra_opts = " ".join([str(x) for x in resources.get("options", [])])
            cmd = "{bamtofastq} filename={bam_file} T={sortprefix} {extra_opts} " + out_str
            try:
                do.run(cmd.format(**locals()), "BAM to bgzipped fastq",
                       checks=[do.file_reasonable_size(tx_out_file, bam_file)],
                       log_error=False)
            except subprocess.CalledProcessError as msg:
                # "deflate failed" indicates a compression I/O failure that a
                # single-core rerun typically avoids; retry once.
                if not is_retry and "deflate failed" in str(msg):
                    logger.info("bamtofastq deflate IO failure preparing %s. Retrying with single core." % (bam_file))
                    needs_retry = True
                else:
                    # NOTE(review): stdlib logging requires a msg argument for
                    # Logger.exception(); presumably this `logger` is a
                    # project wrapper that tolerates no-arg calls — confirm.
                    logger.exception()
                    raise
    if needs_retry:
        return _bgzip_from_bam(bam_file, dirs, data, is_retry=True)
    else:
        return [x for x in [out_file_1, out_file_2] if x is not None and utils.file_exists(x)]
[ "def", "_bgzip_from_bam", "(", "bam_file", ",", "dirs", ",", "data", ",", "is_retry", "=", "False", ",", "output_infix", "=", "''", ")", ":", "# tools", "config", "=", "data", "[", "\"config\"", "]", "bamtofastq", "=", "config_utils", ".", "get_program", "...
Create bgzipped fastq files from an input BAM file.
[ "Create", "bgzipped", "fastq", "files", "from", "an", "input", "BAM", "file", "." ]
python
train
ubccr/pinky
pinky/perception/figueras.py
https://github.com/ubccr/pinky/blob/e9d6e8ff72aa7f670b591e3bd3629cb879db1a93/pinky/perception/figueras.py#L180-L234
def toposort(initialAtoms, initialBonds):
    """initialAtoms, initialBonds -> atoms, bonds

    Given the list of atoms and bonds in a ring return the topologically
    sorted atoms and bonds.  That is each atom is connected to the
    following atom and each bond is connected to the following bond in
    the following manner
       a1 - b1 - a2 - b2 - ...

    :raises RingException: if the atoms are not connected in a ring
    """
    atoms = []
    bonds = []

    # Track which atoms remain to be placed and which bonds may be used.
    # The first atom seeds the traversal, so it is excluded from the
    # remaining set up front (the original code deleted it immediately).
    # NOTE: dict.has_key() was removed in Python 3; membership tests with
    # `in` on sets are the portable (and idiomatic) equivalent.
    remaining = set(atom.handle for atom in initialAtoms[1:])
    usable_bonds = set(bond.handle for bond in initialBonds)

    current = initialAtoms[0]
    atoms.append(current)

    # Walk neighbor-to-neighbor until every atom has been visited.
    while remaining:
        for atom in current.oatoms:
            # Both the neighboring atom and the bond to it must belong
            # to the ring for the step to be valid.
            if atom.handle in remaining:
                bond = current.findbond(atom)
                assert bond
                if bond.handle in usable_bonds:
                    atoms.append(atom)
                    bonds.append(bond)
                    remaining.remove(atom.handle)
                    current = atom
                    break
        else:
            # No usable neighbor found: the input is not a single ring.
            raise RingException("Atoms are not in ring")

    assert len(initialAtoms) == len(atoms)
    assert len(bonds) == len(atoms) - 1

    # Close the cycle with the bond from the last atom back to the first.
    lastBond = atoms[0].findbond(atoms[-1])
    assert lastBond
    bonds.append(lastBond)
    return atoms, bonds
[ "def", "toposort", "(", "initialAtoms", ",", "initialBonds", ")", ":", "atoms", "=", "[", "]", "a_append", "=", "atoms", ".", "append", "bonds", "=", "[", "]", "b_append", "=", "bonds", ".", "append", "# for the atom and bond hashes", "# we ignore the first atom...
initialAtoms, initialBonds -> atoms, bonds Given the list of atoms and bonds in a ring return the topologically sorted atoms and bonds. That is each atom is connected to the following atom and each bond is connected to the following bond in the following manner a1 - b1 - a2 - b2 - ...
[ "initialAtoms", "initialBonds", "-", ">", "atoms", "bonds", "Given", "the", "list", "of", "atoms", "and", "bonds", "in", "a", "ring", "return", "the", "topologically", "sorted", "atoms", "and", "bonds", ".", "That", "is", "each", "atom", "is", "connected", ...
python
train
pandas-dev/pandas
pandas/core/indexes/base.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L1426-L1443
def sortlevel(self, level=None, ascending=True, sort_remaining=None):
    """
    For internal compatibility with the Index API.

    Sort the Index. This is for compat with MultiIndex

    Parameters
    ----------
    ascending : boolean, default True
        False to sort in descending order

    level, sort_remaining are compat parameters

    Returns
    -------
    Index
    """
    # `level` and `sort_remaining` exist only for signature compatibility
    # with MultiIndex and are intentionally ignored here.
    sorted_result = self.sort_values(return_indexer=True, ascending=ascending)
    return sorted_result
[ "def", "sortlevel", "(", "self", ",", "level", "=", "None", ",", "ascending", "=", "True", ",", "sort_remaining", "=", "None", ")", ":", "return", "self", ".", "sort_values", "(", "return_indexer", "=", "True", ",", "ascending", "=", "ascending", ")" ]
For internal compatibility with with the Index API. Sort the Index. This is for compat with MultiIndex Parameters ---------- ascending : boolean, default True False to sort in descending order level, sort_remaining are compat parameters Returns ------- Index
[ "For", "internal", "compatibility", "with", "with", "the", "Index", "API", "." ]
python
train
deep-compute/deeputil
deeputil/streamcounter.py
https://github.com/deep-compute/deeputil/blob/9af5702bc3fd990688bf2aed16c20fa104be66df/deeputil/streamcounter.py#L107-L135
def _drop_oldest_chunk(self): ''' To handle the case when the items comming in the chunk is more than the maximum capacity of the chunk. Our intent behind is to remove the oldest chunk. So that the items come flowing in. >>> s = StreamCounter(5,5) >>> data_stream = ['a','b','c','d'] >>> for item in data_stream: ... s.add(item) >>> min(s.chunked_counts.keys()) 0 >>> s.chunked_counts {0: {'a': 1, 'b': 1, 'c': 1, 'd': 1}} >>> data_stream = ['a','b','c','d','a','e','f'] >>> for item in data_stream: ... s.add(item) >>> min(s.chunked_counts.keys()) 2 >>> s.chunked_counts {2: {'f': 1}} ''' chunk_id = min(self.chunked_counts.keys()) chunk = self.chunked_counts.pop(chunk_id) self.n_counts -= len(chunk) for k, v in list(chunk.items()): self.counts[k] -= v self.counts_total -= v
[ "def", "_drop_oldest_chunk", "(", "self", ")", ":", "chunk_id", "=", "min", "(", "self", ".", "chunked_counts", ".", "keys", "(", ")", ")", "chunk", "=", "self", ".", "chunked_counts", ".", "pop", "(", "chunk_id", ")", "self", ".", "n_counts", "-=", "l...
To handle the case when the items comming in the chunk is more than the maximum capacity of the chunk. Our intent behind is to remove the oldest chunk. So that the items come flowing in. >>> s = StreamCounter(5,5) >>> data_stream = ['a','b','c','d'] >>> for item in data_stream: ... s.add(item) >>> min(s.chunked_counts.keys()) 0 >>> s.chunked_counts {0: {'a': 1, 'b': 1, 'c': 1, 'd': 1}} >>> data_stream = ['a','b','c','d','a','e','f'] >>> for item in data_stream: ... s.add(item) >>> min(s.chunked_counts.keys()) 2 >>> s.chunked_counts {2: {'f': 1}}
[ "To", "handle", "the", "case", "when", "the", "items", "comming", "in", "the", "chunk", "is", "more", "than", "the", "maximum", "capacity", "of", "the", "chunk", ".", "Our", "intent", "behind", "is", "to", "remove", "the", "oldest", "chunk", ".", "So", ...
python
train
genepattern/genepattern-python
gp/data.py
https://github.com/genepattern/genepattern-python/blob/9478ea65362b91c72a94f7300c3de8d710bebb71/gp/data.py#L315-L321
def _extract_header_number(lines):
    """
    Extracts the number of header lines declared on the second line of the
    ODF file and returns it as an int.
    """
    header_pair = _extract_header_value(lines[1])
    values = list(header_pair.values())
    # The parsed pair holds a single value: the header-line count.
    return int(values[0])
[ "def", "_extract_header_number", "(", "lines", ")", ":", "pair", "=", "_extract_header_value", "(", "lines", "[", "1", "]", ")", "value_list", "=", "list", "(", "pair", ".", "values", "(", ")", ")", "return", "int", "(", "value_list", "[", "0", "]", ")...
Extracts the number of header lines from the second line of the ODF file
[ "Extracts", "the", "number", "of", "header", "lines", "from", "the", "second", "line", "of", "the", "ODF", "file" ]
python
train
fracpete/python-weka-wrapper3
python/weka/clusterers.py
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/clusterers.py#L78-L88
def update_clusterer(self, inst):
    """
    Updates the clusterer with the instance.

    :param inst: the Instance to update the clusterer with
    :type inst: Instance
    """
    # Guard clause: non-updateable clusterers only log a critical message.
    if not self.is_updateable:
        logger.critical(classes.get_classname(self.jobject) + " is not updateable!")
        return
    javabridge.call(self.jobject, "updateClusterer", "(Lweka/core/Instance;)V", inst.jobject)
[ "def", "update_clusterer", "(", "self", ",", "inst", ")", ":", "if", "self", ".", "is_updateable", ":", "javabridge", ".", "call", "(", "self", ".", "jobject", ",", "\"updateClusterer\"", ",", "\"(Lweka/core/Instance;)V\"", ",", "inst", ".", "jobject", ")", ...
Updates the clusterer with the instance. :param inst: the Instance to update the clusterer with :type inst: Instance
[ "Updates", "the", "clusterer", "with", "the", "instance", "." ]
python
train
jciskey/pygraph
pygraph/functions/planarity/kocay_algorithm.py
https://github.com/jciskey/pygraph/blob/037bb2f32503fecb60d62921f9766d54109f15e2/pygraph/functions/planarity/kocay_algorithm.py#L743-L776
def __get_dfs_data(graph, adj=None):
    """Internal function that calculates the depth-first search of the graph.

    Returns a dictionary with the following data:
        * 'ordering': A dfs-ordering list of nodes
        * 'ordering_lookup': A lookup dict mapping nodes to dfs-ordering
        * 'node_lookup': A lookup dict mapping dfs-ordering to nodes
        * 'edge_lookup': A lookup dict mapping edges as tree-edges or back-edges
        * 'parent_lookup': A lookup dict mapping nodes to their parent node
        * 'children_lookup': A lookup dict mapping nodes to their children
    """
    ordering, parent_lookup, children_lookup = depth_first_search_with_parent_data(
        graph, adjacency_lists=adj)

    # DFS positions are 1-based.
    ordering_lookup = {node: pos for pos, node in enumerate(ordering, start=1)}
    node_lookup = {pos: node for pos, node in enumerate(ordering, start=1)}

    # An edge is a tree edge exactly when one endpoint is the DFS parent
    # of the other; every other edge is a back edge.
    edge_lookup = {}
    for edge_id in graph.get_all_edge_ids():
        node_a, node_b = graph.get_edge(edge_id)['vertices']
        if parent_lookup[node_a] == node_b or parent_lookup[node_b] == node_a:
            edge_lookup[edge_id] = 'tree-edge'
        else:
            edge_lookup[edge_id] = 'backedge'

    return {
        'ordering': ordering,
        'ordering_lookup': ordering_lookup,
        'node_lookup': node_lookup,
        'edge_lookup': edge_lookup,
        'parent_lookup': parent_lookup,
        'children_lookup': children_lookup,
    }
[ "def", "__get_dfs_data", "(", "graph", ",", "adj", "=", "None", ")", ":", "ordering", ",", "parent_lookup", ",", "children_lookup", "=", "depth_first_search_with_parent_data", "(", "graph", ",", "adjacency_lists", "=", "adj", ")", "ordering_lookup", "=", "dict", ...
Internal function that calculates the depth-first search of the graph. Returns a dictionary with the following data: * 'ordering': A dfs-ordering list of nodes * 'ordering_lookup': A lookup dict mapping nodes to dfs-ordering * 'node_lookup': A lookup dict mapping dfs-ordering to nodes * 'edge_lookup': A lookup dict mapping edges as tree-edges or back-edges * 'parent_lookup': A lookup dict mapping nodes to their parent node * 'children_lookup': A lookup dict mapping nodes to their children
[ "Internal", "function", "that", "calculates", "the", "depth", "-", "first", "search", "of", "the", "graph", ".", "Returns", "a", "dictionary", "with", "the", "following", "data", ":", "*", "ordering", ":", "A", "dfs", "-", "ordering", "list", "of", "nodes"...
python
train
sammchardy/python-kucoin
kucoin/client.py
https://github.com/sammchardy/python-kucoin/blob/a4cacde413804784bd313f27a0ad37234888be29/kucoin/client.py#L561-L590
def create_deposit_address(self, currency):
    """Create deposit address of currency for deposit. You can just create one deposit address.

    https://docs.kucoin.com/#create-deposit-address

    :param currency: Name of currency
    :type currency: string

    .. code:: python

        address = client.create_deposit_address('NEO')

    :returns: ApiResponse

    .. code:: python

        {
            "address": "0x78d3ad1c0aa1bf068e19c94a2d7b16c9c0fcd8b1",
            "memo": "5c247c8a03aa677cea2a251d"
        }

    :raises: KucoinResponseException, KucoinAPIException
    """
    payload = {'currency': currency}
    # Signed POST: the second argument marks the request as authenticated.
    return self._post('deposit-addresses', True, data=payload)
[ "def", "create_deposit_address", "(", "self", ",", "currency", ")", ":", "data", "=", "{", "'currency'", ":", "currency", "}", "return", "self", ".", "_post", "(", "'deposit-addresses'", ",", "True", ",", "data", "=", "data", ")" ]
Create deposit address of currency for deposit. You can just create one deposit address. https://docs.kucoin.com/#create-deposit-address :param currency: Name of currency :type currency: string .. code:: python address = client.create_deposit_address('NEO') :returns: ApiResponse .. code:: python { "address": "0x78d3ad1c0aa1bf068e19c94a2d7b16c9c0fcd8b1", "memo": "5c247c8a03aa677cea2a251d" } :raises: KucoinResponseException, KucoinAPIException
[ "Create", "deposit", "address", "of", "currency", "for", "deposit", ".", "You", "can", "just", "create", "one", "deposit", "address", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/lib/pretty.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/lib/pretty.py#L232-L255
def begin_group(self, indent=0, open=''):
    """Open a new layout group.

    On Python versions without the ``with`` statement this is the
    preferred pairing::

        p.begin_group(1, '{')
        ...
        p.end_group(1, '}')

    which is equivalent to ``with p.group(1, '{', '}'):`` on newer
    Pythons.

    The first parameter specifies the indentation for the next line
    (usually the width of the opening text), the second the opening
    text.  All parameters are optional.
    """
    if open:
        self.text(open)
    # New group is one level deeper than the group currently on top.
    parent_depth = self.group_stack[-1].depth
    new_group = Group(parent_depth + 1)
    self.group_stack.append(new_group)
    self.group_queue.enq(new_group)
    self.indentation += indent
[ "def", "begin_group", "(", "self", ",", "indent", "=", "0", ",", "open", "=", "''", ")", ":", "if", "open", ":", "self", ".", "text", "(", "open", ")", "group", "=", "Group", "(", "self", ".", "group_stack", "[", "-", "1", "]", ".", "depth", "+...
Begin a group. If you want support for python < 2.5 which doesn't has the with statement this is the preferred way: p.begin_group(1, '{') ... p.end_group(1, '}') The python 2.5 expression would be this: with p.group(1, '{', '}'): ... The first parameter specifies the indentation for the next line (usually the width of the opening text), the second the opening text. All parameters are optional.
[ "Begin", "a", "group", ".", "If", "you", "want", "support", "for", "python", "<", "2", ".", "5", "which", "doesn", "t", "has", "the", "with", "statement", "this", "is", "the", "preferred", "way", ":" ]
python
test
bcbio/bcbio-nextgen
bcbio/structural/regions.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/regions.py#L285-L303
def _normalize_sv_coverage_gatk(group_id, inputs, backgrounds, work_dir, back_files, out_files):
    """Normalize CNV coverage using panel of normals with GATK's de-noise approaches.
    """
    # Prefer an explicitly configured background CNV reference; otherwise
    # build a panel of normals from the background samples, if any.
    input_backs = {back for back in (dd.get_background_cnv_reference(d, "gatk-cnv") for d in inputs)
                   if back is not None}
    if input_backs:
        assert len(input_backs) == 1, "Multiple backgrounds in group: %s" % list(input_backs)
        pon = input_backs.pop()
    elif backgrounds:
        pon = gatkcnv.create_panel_of_normals(backgrounds, group_id, work_dir)
    else:
        pon = None
    for data in inputs:
        # Each sample gets its own bins directory under the work tree.
        sample_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "structural",
                                                     dd.get_sample_name(data), "bins"))
        denoise_file = gatkcnv.denoise(data, pon, sample_dir)
        out_files[dd.get_sample_name(data)] = denoise_file
        back_files[dd.get_sample_name(data)] = pon
    return back_files, out_files
[ "def", "_normalize_sv_coverage_gatk", "(", "group_id", ",", "inputs", ",", "backgrounds", ",", "work_dir", ",", "back_files", ",", "out_files", ")", ":", "input_backs", "=", "set", "(", "filter", "(", "lambda", "x", ":", "x", "is", "not", "None", ",", "[",...
Normalize CNV coverage using panel of normals with GATK's de-noise approaches.
[ "Normalize", "CNV", "coverage", "using", "panel", "of", "normals", "with", "GATK", "s", "de", "-", "noise", "approaches", "." ]
python
train
stevearc/dql
dql/engine.py
https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L380-L402
def _explain(self, tree):
    """ Set up the engine to do a dry run of a query """
    self._explaining = True
    self._call_list = []
    old_call = self.connection.call

    def fake_call(command, **kwargs):
        """ Replacement for connection.call that logs args """
        # describe_table passes through for real so the planner can still
        # see table schemas; everything else is recorded and aborted.
        if command == "describe_table":
            return old_call(command, **kwargs)
        self._call_list.append((command, kwargs))
        raise ExplainSignal

    self.connection.call = fake_call
    try:
        ret = self._run(tree[1])
        try:
            # Force lazy results to evaluate so their calls get recorded;
            # non-iterable results raise TypeError, which is fine.
            list(ret)
        except TypeError:
            pass
    finally:
        # Always undo the monkey-patch, even if ExplainSignal escapes.
        self.connection.call = old_call
        self._explaining = False
[ "def", "_explain", "(", "self", ",", "tree", ")", ":", "self", ".", "_explaining", "=", "True", "self", ".", "_call_list", "=", "[", "]", "old_call", "=", "self", ".", "connection", ".", "call", "def", "fake_call", "(", "command", ",", "*", "*", "kwa...
Set up the engine to do a dry run of a query
[ "Set", "up", "the", "engine", "to", "do", "a", "dry", "run", "of", "a", "query" ]
python
train
programa-stic/barf-project
barf/core/reil/emulator/cpu.py
https://github.com/programa-stic/barf-project/blob/18ed9e5eace55f7bf6015ec57f037c364099021c/barf/core/reil/emulator/cpu.py#L448-L455
def __execute_str(self, instr):
    """Execute STR instruction: copy operand 0 into operand 2."""
    value = self.read_operand(instr.operands[0])
    self.write_operand(instr.operands[2], value)
    return None
[ "def", "__execute_str", "(", "self", ",", "instr", ")", ":", "op0_val", "=", "self", ".", "read_operand", "(", "instr", ".", "operands", "[", "0", "]", ")", "self", ".", "write_operand", "(", "instr", ".", "operands", "[", "2", "]", ",", "op0_val", "...
Execute STR instruction.
[ "Execute", "STR", "instruction", "." ]
python
train
jciskey/pygraph
pygraph/classes/undirected_graph.py
https://github.com/jciskey/pygraph/blob/037bb2f32503fecb60d62921f9766d54109f15e2/pygraph/classes/undirected_graph.py#L27-L38
def neighbors(self, node_id):
    """Find all the nodes where there is an edge from the specified node
    to that node.  Returns a list of node ids."""
    node = self.get_node(node_id)
    endpoints = set()
    for edge_id in node['edges']:
        a, b = self.get_edge(edge_id)['vertices']
        endpoints.add(a)
        endpoints.add(b)
    # Every incident edge includes the node itself; drop it from the result.
    endpoints.discard(node_id)
    return list(endpoints)
[ "def", "neighbors", "(", "self", ",", "node_id", ")", ":", "node", "=", "self", ".", "get_node", "(", "node_id", ")", "flattened_nodes_list", "=", "[", "]", "for", "a", ",", "b", "in", "[", "self", ".", "get_edge", "(", "edge_id", ")", "[", "'vertice...
Find all the nodes where there is an edge from the specified node to that node. Returns a list of node ids.
[ "Find", "all", "the", "nodes", "where", "there", "is", "an", "edge", "from", "the", "specified", "node", "to", "that", "node", ".", "Returns", "a", "list", "of", "node", "ids", "." ]
python
train
pypa/pipenv
pipenv/vendor/passa/internals/_pip.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/passa/internals/_pip.py#L258-L272
def _suppress_distutils_logs():
    """Hack to hide noise generated by `setup.py develop`.

    There isn't a good way to suppress them now, so let's monkey-patch.
    See https://bugs.python.org/issue25392.
    """
    f = distutils.log.Log._log

    def _log(log, level, msg, args):
        # Let real problems through; drop everything below ERROR.
        if level >= distutils.log.ERROR:
            f(log, level, msg, args)

    distutils.log.Log._log = _log
    try:
        yield
    finally:
        # BUG FIX: restore the original logger even when the wrapped block
        # raises; previously an exception left distutils muted for the
        # rest of the process.
        distutils.log.Log._log = f
[ "def", "_suppress_distutils_logs", "(", ")", ":", "f", "=", "distutils", ".", "log", ".", "Log", ".", "_log", "def", "_log", "(", "log", ",", "level", ",", "msg", ",", "args", ")", ":", "if", "level", ">=", "distutils", ".", "log", ".", "ERROR", ":...
Hack to hide noise generated by `setup.py develop`. There isn't a good way to suppress them now, so let's monky-patch. See https://bugs.python.org/issue25392.
[ "Hack", "to", "hide", "noise", "generated", "by", "setup", ".", "py", "develop", "." ]
python
train
jgillick/LendingClub
lendingclub/__init__.py
https://github.com/jgillick/LendingClub/blob/4495f99fd869810f39c00e02b0f4112c6b210384/lendingclub/__init__.py#L92-L102
def set_logger(self, logger):
    """
    Set a logger to send debug messages to

    Parameters
    ----------
    logger : `Logger <http://docs.python.org/2/library/logging.html>`_
        A python logger used to get debugging output from this module.
    """
    self.__logger = logger
    # Propagate the same logger down to the underlying session.
    self.session.set_logger(logger)
[ "def", "set_logger", "(", "self", ",", "logger", ")", ":", "self", ".", "__logger", "=", "logger", "self", ".", "session", ".", "set_logger", "(", "self", ".", "__logger", ")" ]
Set a logger to send debug messages to Parameters ---------- logger : `Logger <http://docs.python.org/2/library/logging.html>`_ A python logger used to get debugging output from this module.
[ "Set", "a", "logger", "to", "send", "debug", "messages", "to" ]
python
train
dstufft/potpie
potpie/pseudo/types.py
https://github.com/dstufft/potpie/blob/1b12f25b77b8719418f88f49c45920c1eb8ee406/potpie/pseudo/types.py#L47-L55
def _transpose(cls, char):
    """Convert unicode char to something similar to it."""
    try:
        # Map 'A' (ord 65) to index 0; only the mapped window is transposed.
        index = ord(char) - 65
        if index < 0 or index > 56:
            return char
        return cls.UNICODE_MAP[index]
    except UnicodeDecodeError:
        # Undecodable byte input (Python 2): pass through unchanged.
        return char
[ "def", "_transpose", "(", "cls", ",", "char", ")", ":", "try", ":", "loc", "=", "ord", "(", "char", ")", "-", "65", "if", "loc", "<", "0", "or", "loc", ">", "56", ":", "return", "char", "return", "cls", ".", "UNICODE_MAP", "[", "loc", "]", "exc...
Convert unicode char to something similar to it.
[ "Convert", "unicode", "char", "to", "something", "similar", "to", "it", "." ]
python
train
dnmellen/pycolorterm
pycolorterm/pycolorterm.py
https://github.com/dnmellen/pycolorterm/blob/f650eb8dbdce1a283e7b1403be1071b57c4849c6/pycolorterm/pycolorterm.py#L71-L86
def print_pretty(text, **kwargs):
    '''
    Prints using pycolorterm formatting

    :param text: Text with formatting
    :type text: string
    :param kwargs: Keyword args that will be passed to the print function
    :type kwargs: dict

    Example::

        print_pretty('Hello {BG_RED}WORLD{END}')
    '''
    text = _prepare(text)
    # Substitute style placeholders and make {END} behave as "all off";
    # always append a final reset so styling never leaks past this call.
    formatted = text.format(**styles).replace(styles['END'], styles['ALL_OFF'])
    # BUG FIX: **kwargs were documented as being forwarded to print()
    # (e.g. file=, end=) but were silently dropped; forward them.
    print('{}{}'.format(formatted, styles['ALL_OFF']), **kwargs)
[ "def", "print_pretty", "(", "text", ",", "*", "*", "kwargs", ")", ":", "text", "=", "_prepare", "(", "text", ")", "print", "(", "'{}{}'", ".", "format", "(", "text", ".", "format", "(", "*", "*", "styles", ")", ".", "replace", "(", "styles", "[", ...
Prints using pycolorterm formatting :param text: Text with formatting :type text: string :param kwargs: Keyword args that will be passed to the print function :type kwargs: dict Example:: print_pretty('Hello {BG_RED}WORLD{END}')
[ "Prints", "using", "pycolorterm", "formatting" ]
python
train
lreis2415/PyGeoC
pygeoc/TauDEM.py
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/TauDEM.py#L96-L101
def error(msg, log_file=None):
    """Print an error message, optionally append it to *log_file*, and
    raise ``RuntimeError`` so callers cannot continue silently."""
    UtilClass.print_msg('%s%s' % (msg, os.linesep))
    if log_file is not None:
        UtilClass.writelog(log_file, msg, 'append')
    raise RuntimeError(msg)
[ "def", "error", "(", "msg", ",", "log_file", "=", "None", ")", ":", "UtilClass", ".", "print_msg", "(", "msg", "+", "os", ".", "linesep", ")", "if", "log_file", "is", "not", "None", ":", "UtilClass", ".", "writelog", "(", "log_file", ",", "msg", ",",...
Print, output error message and raise RuntimeError.
[ "Print", "output", "error", "message", "and", "raise", "RuntimeError", "." ]
python
train
napalm-automation/napalm
napalm/ios/ios.py
https://github.com/napalm-automation/napalm/blob/c11ae8bb5ce395698704a0051cdf8d144fbb150d/napalm/ios/ios.py#L727-L736
def _send_command_postprocess(output):
    """
    Cleanup actions on send_command() for NAPALM getters.

    Remove "Load for five sec; one minute if in output"
    Remove "Time source is"
    """
    noise_patterns = (r"^Load for five secs.*$", r"^Time source is .*$")
    for pattern in noise_patterns:
        output = re.sub(pattern, "", output, flags=re.M)
    return output.strip()
[ "def", "_send_command_postprocess", "(", "output", ")", ":", "output", "=", "re", ".", "sub", "(", "r\"^Load for five secs.*$\"", ",", "\"\"", ",", "output", ",", "flags", "=", "re", ".", "M", ")", "output", "=", "re", ".", "sub", "(", "r\"^Time source is ...
Cleanup actions on send_command() for NAPALM getters. Remove "Load for five sec; one minute if in output" Remove "Time source is"
[ "Cleanup", "actions", "on", "send_command", "()", "for", "NAPALM", "getters", "." ]
python
train
mkorpela/overrides
overrides/overrides.py
https://github.com/mkorpela/overrides/blob/196c2fa3c79fe7a7d319d2ade25bb25f6d78f1c2/overrides/overrides.py#L126-L155
def _get_base_class_names(frame):
    """ Get baseclass names from the code object """
    co, lasti = frame.f_code, frame.f_lasti
    code = co.co_code

    # Phase 1: collect ('name', x) / ('attr', x) events from the bytecode.
    extends = []
    for op, oparg in op_stream(code, lasti):
        if op in dis.hasconst:
            # A string constant (e.g. a docstring) resets what we've seen.
            if type(co.co_consts[oparg]) == str:
                extends = []
        elif op in dis.hasname:
            opname = dis.opname[op]
            if opname in ('LOAD_NAME', 'LOAD_GLOBAL'):
                extends.append(('name', co.co_names[oparg]))
            if opname == 'LOAD_ATTR':
                extends.append(('attr', co.co_names[oparg]))

    # Phase 2: fold the events into dotted paths; a bare name starts a new
    # path, attribute accesses extend the current one.
    items = []
    previous_item = []
    for kind, symbol in extends:
        if kind == 'name':
            if previous_item:
                items.append(previous_item)
            previous_item = [symbol]
        else:
            previous_item += [symbol]
    if previous_item:
        items.append(previous_item)
    return items
[ "def", "_get_base_class_names", "(", "frame", ")", ":", "co", ",", "lasti", "=", "frame", ".", "f_code", ",", "frame", ".", "f_lasti", "code", "=", "co", ".", "co_code", "extends", "=", "[", "]", "for", "(", "op", ",", "oparg", ")", "in", "op_stream"...
Get baseclass names from the code object
[ "Get", "baseclass", "names", "from", "the", "code", "object" ]
python
train
tjvr/kurt
kurt/scratch14/objtable.py
https://github.com/tjvr/kurt/blob/fcccd80cae11dc233f6dd02b40ec9a388c62f259/kurt/scratch14/objtable.py#L445-L458
def decode_obj_table(table_entries, plugin):
    """Return root of obj table. Converts user-class objects"""
    def convert(entry):
        # Entries that aren't Containers pass through untouched.
        if not isinstance(entry, Container):
            return entry
        assert not hasattr(entry, '__recursion_lock__')
        # Re-key positional values onto the user object's named defaults.
        user_obj_def = plugin.user_objects[entry.classID]
        assert entry.version == user_obj_def.version
        fields = dict(zip(user_obj_def.defaults.keys(), entry.values))
        return Container(class_name=entry.classID, **fields)

    return decode_network([convert(entry) for entry in table_entries])
[ "def", "decode_obj_table", "(", "table_entries", ",", "plugin", ")", ":", "entries", "=", "[", "]", "for", "entry", "in", "table_entries", ":", "if", "isinstance", "(", "entry", ",", "Container", ")", ":", "assert", "not", "hasattr", "(", "entry", ",", "...
Return root of obj table. Converts user-class objects
[ "Return", "root", "of", "obj", "table", ".", "Converts", "user", "-", "class", "objects" ]
python
train
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/werkzeug/urls.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/werkzeug/urls.py#L548-L576
def url_fix(s, charset='utf-8'):
    r"""Sometimes you get an URL by a user that just isn't a real URL because
    it contains unsafe characters like ' ' and so on. This function can fix
    some of the problems in a similar way browsers handle data entered by the
    user:

    >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
    'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)'

    :param s: the string with the URL to fix.
    :param charset: The target charset for the URL if the url was given as
                    unicode string.
    """
    # Switch to unicode and normalize backslashes (invalid in URLs anyway)
    # to forward slashes; this is consistent with what Chrome does.
    s = to_unicode(s, charset, 'replace').replace('\\', '/')

    # Repair malformed Windows file URLs such as file://C:/ or file://C|/.
    if s.startswith('file://') and s[7:8].isalpha() and s[8:10] in (':/', '|/'):
        s = 'file:///' + s[7:]

    url = url_parse(s)
    safe_path = url_quote(url.path, charset, safe='/%+$!*\'(),')
    safe_query = url_quote_plus(url.query, charset, safe=':&%=+$!*\'(),')
    safe_fragment = url_quote_plus(url.fragment, charset, safe=':&%=+$!*\'(),')
    fixed = url_unparse((url.scheme, url.encode_netloc(), safe_path,
                         safe_query, safe_fragment))
    return to_native(fixed)
[ "def", "url_fix", "(", "s", ",", "charset", "=", "'utf-8'", ")", ":", "# First step is to switch to unicode processing and to convert", "# backslashes (which are invalid in URLs anyways) to slashes. This is", "# consistent with what Chrome does.", "s", "=", "to_unicode", "(", "s",...
r"""Sometimes you get an URL by a user that just isn't a real URL because it contains unsafe characters like ' ' and so on. This function can fix some of the problems in a similar way browsers handle data entered by the user: >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)') 'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)' :param s: the string with the URL to fix. :param charset: The target charset for the URL if the url was given as unicode string.
[ "r", "Sometimes", "you", "get", "an", "URL", "by", "a", "user", "that", "just", "isn", "t", "a", "real", "URL", "because", "it", "contains", "unsafe", "characters", "like", "and", "so", "on", ".", "This", "function", "can", "fix", "some", "of", "the", ...
python
test
rossant/ipymd
ipymd/lib/opendocument.py
https://github.com/rossant/ipymd/blob/d87c9ebc59d67fe78b0139ee00e0e5307682e303/ipymd/lib/opendocument.py#L278-L290
def load_styles(path_or_doc):
    """Return a dictionary of all styles contained in an ODF document."""
    if isinstance(path_or_doc, string_types):
        # Given a path: parse the document from disk.
        doc = path_or_doc if False else load(path_or_doc)
    elif isinstance(path_or_doc, ODFDocument):
        # Recover the underlying OpenDocumentText instance.
        doc = path_or_doc._doc
    else:
        doc = path_or_doc
    assert isinstance(doc, OpenDocument), doc
    return {_style_name(style): style for style in doc.styles.childNodes}
[ "def", "load_styles", "(", "path_or_doc", ")", ":", "if", "isinstance", "(", "path_or_doc", ",", "string_types", ")", ":", "doc", "=", "load", "(", "path_or_doc", ")", "else", ":", "# Recover the OpenDocumentText instance.", "if", "isinstance", "(", "path_or_doc",...
Return a dictionary of all styles contained in an ODF document.
[ "Return", "a", "dictionary", "of", "all", "styles", "contained", "in", "an", "ODF", "document", "." ]
python
train
mitsei/dlkit
dlkit/services/authorization.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/authorization.py#L1165-L1173
def use_federated_vault_view(self):
    """Pass through to provider AuthorizationLookupSession.use_federated_vault_view"""
    self._vault_view = FEDERATED
    # self._get_provider_session('authorization_lookup_session') # To make sure the session is tracked
    for session in self._get_provider_sessions():
        try:
            session.use_federated_vault_view()
        except AttributeError:
            # Not every provider session supports vault views; skip those.
            pass
[ "def", "use_federated_vault_view", "(", "self", ")", ":", "self", ".", "_vault_view", "=", "FEDERATED", "# self._get_provider_session('authorization_lookup_session') # To make sure the session is tracked", "for", "session", "in", "self", ".", "_get_provider_sessions", "(", ")",...
Pass through to provider AuthorizationLookupSession.use_federated_vault_view
[ "Pass", "through", "to", "provider", "AuthorizationLookupSession", ".", "use_federated_vault_view" ]
python
train
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/geoff.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/geoff.py#L13-L34
def node2geoff(node_name, properties, encoder):
    """converts a NetworkX node into a Geoff string.

    Parameters
    ----------
    node_name : str or int
        the ID of a NetworkX node
    properties : dict
        a dictionary of node attributes
    encoder : json.JSONEncoder
        an instance of a JSON encoder (e.g. `json.JSONEncoder`)

    Returns
    -------
    geoff : str
        a Geoff string
    """
    # A node without attributes is rendered without the JSON payload.
    if not properties:
        return '({0})'.format(node_name)
    return '({0} {1})'.format(node_name, encoder.encode(properties))
[ "def", "node2geoff", "(", "node_name", ",", "properties", ",", "encoder", ")", ":", "if", "properties", ":", "return", "'({0} {1})'", ".", "format", "(", "node_name", ",", "encoder", ".", "encode", "(", "properties", ")", ")", "else", ":", "return", "'({0}...
converts a NetworkX node into a Geoff string. Parameters ---------- node_name : str or int the ID of a NetworkX node properties : dict a dictionary of node attributes encoder : json.JSONEncoder an instance of a JSON encoder (e.g. `json.JSONEncoder`) Returns ------- geoff : str a Geoff string
[ "converts", "a", "NetworkX", "node", "into", "a", "Geoff", "string", "." ]
python
train
misli/django-cms-articles
cms_articles/signals/title.py
https://github.com/misli/django-cms-articles/blob/d96ac77e049022deb4c70d268e4eab74d175145c/cms_articles/signals/title.py#L15-L26
def pre_delete_title(instance, **kwargs):
    '''Update article.languages when one of its titles is deleted.'''
    article = instance.article
    languages = article.languages.split(',') if article.languages else []
    if instance.language in languages:
        languages.remove(instance.language)
    article.languages = ','.join(languages)
    # Keep the publisher state machine untouched during this save.
    article._publisher_keep_state = True
    article.save(no_signals=True)
[ "def", "pre_delete_title", "(", "instance", ",", "*", "*", "kwargs", ")", ":", "if", "instance", ".", "article", ".", "languages", ":", "languages", "=", "instance", ".", "article", ".", "languages", ".", "split", "(", "','", ")", "else", ":", "languages...
Update article.languages
[ "Update", "article", ".", "languages" ]
python
train
coleifer/walrus
walrus/containers.py
https://github.com/coleifer/walrus/blob/82bf15a6613487b5b5fefeb488f186d7e0106547/walrus/containers.py#L1542-L1556
def incrby(self, fmt, offset, increment, overflow=None):
    """
    Increment a bitfield by a given amount.

    :param fmt: format-string for the bitfield being updated, e.g. u8 for
        an unsigned 8-bit integer.
    :param int offset: offset (in number of bits).
    :param int increment: value to increment the bitfield by.
    :param str overflow: overflow algorithm. Defaults to WRAP, but other
        acceptable values are SAT and FAIL. See the Redis docs for
        descriptions of these algorithms.
    :returns: a :py:class:`BitFieldOperation` instance.
    """
    # Delegate to a fresh BitFieldOperation bound to this key.
    operation = BitFieldOperation(self.database, self.key)
    return operation.incrby(fmt, offset, increment, overflow)
[ "def", "incrby", "(", "self", ",", "fmt", ",", "offset", ",", "increment", ",", "overflow", "=", "None", ")", ":", "bfo", "=", "BitFieldOperation", "(", "self", ".", "database", ",", "self", ".", "key", ")", "return", "bfo", ".", "incrby", "(", "fmt"...
Increment a bitfield by a given amount. :param fmt: format-string for the bitfield being updated, e.g. u8 for an unsigned 8-bit integer. :param int offset: offset (in number of bits). :param int increment: value to increment the bitfield by. :param str overflow: overflow algorithm. Defaults to WRAP, but other acceptable values are SAT and FAIL. See the Redis docs for descriptions of these algorithms. :returns: a :py:class:`BitFieldOperation` instance.
[ "Increment", "a", "bitfield", "by", "a", "given", "amount", "." ]
python
train
mattrobenolt/python-sourcemap
sourcemap/decoder.py
https://github.com/mattrobenolt/python-sourcemap/blob/8d6969a3ce2c6b139c6e81927beed58ae67e840b/sourcemap/decoder.py#L65-L195
def decode(self, source):
    """Decode a source map object into a SourceMapIndex.

    The index is keyed on (dst_line, dst_column) for lookups,
    and a per-row index is kept to help calculate which Token to retrieve.

    For example:
        A minified source file has two rows and two tokens per row.

        # All parsed tokens
        tokens = [
            Token(dst_row=0, dst_col=0),
            Token(dst_row=0, dst_col=5),
            Token(dst_row=1, dst_col=0),
            Token(dst_row=1, dst_col=12),
        ]

        Two-dimensional array of columns -> row
        rows = [
            [0, 5],
            [0, 12],
        ]

        Token lookup, based on location
        index = {
            (0, 0):  tokens[0],
            (0, 5):  tokens[1],
            (1, 0):  tokens[2],
            (1, 12): tokens[3],
        }

    To find the token at (1, 20):
      - Check if there's a direct hit on the index (1, 20) => False
      - Pull rows[1] => [0, 12]
      - bisect_right to find the closest match: bisect_right([0, 12], 20) => 2
      - Fetch the column number before, since we want the column lte to the
        bisect_right: 2-1 => row[2-1] => 12
      - At this point, we know the token location, (1, 12)
      - Pull (1, 12) from index => tokens[3]
    """
    # According to spec (https://docs.google.com/document/d/1U1RGAehQwRypUTovF1KRlpiOFze0b-_2gc6fAH0KY0k/edit#heading=h.h7yy76c5il9v)
    # a SourceMap may be prepended with ")]}'" to cause a Javascript error.
    # If the file starts with that string, ignore the entire first line.
    if source[:4] == ")]}'" or source[:3] == ")]}":
        source = source.split('\n', 1)[1]

    smap = json.loads(source)
    sources = smap['sources']
    sourceRoot = smap.get('sourceRoot')
    names = list(map(text_type, smap['names']))
    mappings = smap['mappings']
    lines = mappings.split(';')

    if sourceRoot is not None:
        sources = list(map(partial(os.path.join, sourceRoot), sources))

    # List of all tokens
    tokens = []

    # line_index is used to identify the closest column when looking up a token
    line_index = []

    # Main index of all tokens, keyed on (line, column)
    index = {}

    # VLQ fields are deltas; these accumulate across segments.
    dst_col, src_id, src_line, src_col, name_id = 0, 0, 0, 0, 0
    for dst_line, line in enumerate(lines):
        # Create list for columns in index
        line_index.append([])

        segments = line.split(',')
        dst_col = 0  # generated columns reset at the start of each line
        for segment in segments:
            if not segment:
                continue
            parse = self.parse_vlq(segment)
            dst_col += parse[0]

            src = None
            name = None
            if len(parse) > 1:
                try:
                    src_id += parse[1]
                    if not 0 <= src_id < len(sources):
                        raise SourceMapDecodeError(
                            "Segment %s references source %d; there are "
                            "%d sources" % (segment, src_id, len(sources))
                        )

                    src = sources[src_id]
                    src_line += parse[2]
                    src_col += parse[3]

                    if len(parse) > 4:
                        name_id += parse[4]
                        if not 0 <= name_id < len(names):
                            raise SourceMapDecodeError(
                                "Segment %s references name %d; there are "
                                "%d names" % (segment, name_id, len(names))
                            )
                        name = names[name_id]
                except IndexError:
                    raise SourceMapDecodeError(
                        "Invalid segment %s, parsed as %r" % (segment, parse)
                    )

            try:
                assert dst_line >= 0, ('dst_line', dst_line)
                assert dst_col >= 0, ('dst_col', dst_col)
                assert src_line >= 0, ('src_line', src_line)
                assert src_col >= 0, ('src_col', src_col)
            except AssertionError as e:
                # BUG FIX: `e.message` is Python 2 only; on Python 3 it
                # raised AttributeError here and masked the real decode
                # error. e.args[0] holds the ('field', value) tuple passed
                # to the assert above.
                field, value = e.args[0]
                raise SourceMapDecodeError(
                    "Segment %s has negative %s (%d), in file %s"
                    % (segment, field, value, src)
                )

            token = Token(dst_line, dst_col, src, src_line, src_col, name)
            tokens.append(token)

            # Insert into main index
            index[(dst_line, dst_col)] = token

            # Insert into specific line index
            line_index[dst_line].append(dst_col)

    return SourceMapIndex(smap, tokens, line_index, index, sources)
[ "def", "decode", "(", "self", ",", "source", ")", ":", "# According to spec (https://docs.google.com/document/d/1U1RGAehQwRypUTovF1KRlpiOFze0b-_2gc6fAH0KY0k/edit#heading=h.h7yy76c5il9v)", "# A SouceMap may be prepended with \")]}'\" to cause a Javascript error.", "# If the file starts with that ...
Decode a source map object into a SourceMapIndex. The index is keyed on (dst_line, dst_column) for lookups, and a per row index is kept to help calculate which Token to retrieve. For example: A minified source file has two rows and two tokens per row. # All parsed tokens tokens = [ Token(dst_row=0, dst_col=0), Token(dst_row=0, dst_col=5), Token(dst_row=1, dst_col=0), Token(dst_row=1, dst_col=12), ] Two dimentional array of columns -> row rows = [ [0, 5], [0, 12], ] Token lookup, based on location index = { (0, 0): tokens[0], (0, 5): tokens[1], (1, 0): tokens[2], (1, 12): tokens[3], } To find the token at (1, 20): - Check if there's a direct hit on the index (1, 20) => False - Pull rows[1] => [0, 12] - bisect_right to find the closest match: bisect_right([0, 12], 20) => 2 - Fetch the column number before, since we want the column lte to the bisect_right: 2-1 => row[2-1] => 12 - At this point, we know the token location, (1, 12) - Pull (1, 12) from index => tokens[3]
[ "Decode", "a", "source", "map", "object", "into", "a", "SourceMapIndex", "." ]
python
train
praekeltfoundation/molo.yourtips
molo/yourtips/templatetags/tip_tags.py
https://github.com/praekeltfoundation/molo.yourtips/blob/8b3e3b1ff52cd4a78ccca5d153b3909a1f21625f/molo/yourtips/templatetags/tip_tags.py#L124-L131
def get_your_tip(context):
    """
    A simple tag to return the YourTips page.

    :param context: takes context
    :return: A YourTip object
    """
    # Search only beneath the current site's root page.
    root = context['request'].site.root_page
    return YourTip.objects.descendant_of(root).live().first()
[ "def", "get_your_tip", "(", "context", ")", ":", "site_main", "=", "context", "[", "'request'", "]", ".", "site", ".", "root_page", "return", "YourTip", ".", "objects", ".", "descendant_of", "(", "site_main", ")", ".", "live", "(", ")", ".", "first", "("...
A simple tag to return the YourTips page. :param context: takes context :return: A YourTip object
[ "A", "simple", "tag", "to", "return", "the", "YourTips", "page", ".", ":", "param", "context", ":", "takes", "context", ":", "return", ":", "A", "YourTip", "object" ]
python
train
BD2KGenomics/toil-lib
src/toil_lib/__init__.py
https://github.com/BD2KGenomics/toil-lib/blob/022a615fc3dc98fc1aaa7bfd232409962ca44fbd/src/toil_lib/__init__.py#L54-L70
def required_length(nmin, nmax):
    """
    For use with argparse's action argument. Allows setting a range for nargs.
    Example: nargs='+', action=required_length(2, 3)

    :param int nmin: Minimum number of arguments
    :param int nmax: Maximum number of arguments
    :return: RequiredLength object
    """
    class RequiredLength(argparse.Action):
        def __call__(self, parser, args, values, option_string=None):
            # Accept only value lists whose length falls inside [nmin, nmax].
            if nmin <= len(values) <= nmax:
                setattr(args, self.dest, values)
            else:
                msg = 'argument "{f}" requires between {nmin} and {nmax} arguments'.format(
                    f=self.dest, nmin=nmin, nmax=nmax)
                raise argparse.ArgumentTypeError(msg)
    return RequiredLength
[ "def", "required_length", "(", "nmin", ",", "nmax", ")", ":", "class", "RequiredLength", "(", "argparse", ".", "Action", ")", ":", "def", "__call__", "(", "self", ",", "parser", ",", "args", ",", "values", ",", "option_string", "=", "None", ")", ":", "...
For use with argparse's action argument. Allows setting a range for nargs. Example: nargs='+', action=required_length(2, 3) :param int nmin: Minimum number of arguments :param int nmax: Maximum number of arguments :return: RequiredLength object
[ "For", "use", "with", "argparse", "s", "action", "argument", ".", "Allows", "setting", "a", "range", "for", "nargs", ".", "Example", ":", "nargs", "=", "+", "action", "=", "required_length", "(", "2", "3", ")" ]
python
test
napalm-automation/napalm-logs
napalm_logs/transport/__init__.py
https://github.com/napalm-automation/napalm-logs/blob/4b89100a6e4f994aa004f3ea42a06dc803a7ccb0/napalm_logs/transport/__init__.py#L50-L60
def get_transport(name):
    '''
    Return the transport class registered under ``name``.

    :raises InvalidTransportException: when no such transport exists.
    '''
    log.debug('Using %s as transport', name)
    try:
        return TRANSPORT_LOOKUP[name]
    except KeyError:
        msg = 'Transport {} is not available. Are the dependencies installed?'.format(name)
        log.error(msg, exc_info=True)
        raise InvalidTransportException(msg)
[ "def", "get_transport", "(", "name", ")", ":", "try", ":", "log", ".", "debug", "(", "'Using %s as transport'", ",", "name", ")", "return", "TRANSPORT_LOOKUP", "[", "name", "]", "except", "KeyError", ":", "msg", "=", "'Transport {} is not available. Are the depend...
Return the transport class.
[ "Return", "the", "transport", "class", "." ]
python
train
tcalmant/python-javaobj
javaobj/core.py
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L559-L570
def _readStreamHeader(self):
    """
    Reads the magic header of a Java serialization stream

    :raise IOError: Invalid magic header (not a Java stream)
    """
    magic, version = self._readStruct(">HH")
    # Both the magic word and the protocol version must match.
    if magic == self.STREAM_MAGIC and version == self.STREAM_VERSION:
        return
    raise IOError(
        "The stream is not java serialized object. "
        "Invalid stream header: {0:04X}{1:04X}".format(magic, version)
    )
[ "def", "_readStreamHeader", "(", "self", ")", ":", "(", "magic", ",", "version", ")", "=", "self", ".", "_readStruct", "(", "\">HH\"", ")", "if", "magic", "!=", "self", ".", "STREAM_MAGIC", "or", "version", "!=", "self", ".", "STREAM_VERSION", ":", "rais...
Reads the magic header of a Java serialization stream :raise IOError: Invalid magic header (not a Java stream)
[ "Reads", "the", "magic", "header", "of", "a", "Java", "serialization", "stream" ]
python
train
googleapis/google-cloud-python
language/google/cloud/language_v1/gapic/language_service_client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/language/google/cloud/language_v1/gapic/language_service_client.py#L169-L231
def analyze_sentiment( self, document, encoding_type=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Analyzes the sentiment of the provided text. Example: >>> from google.cloud import language_v1 >>> >>> client = language_v1.LanguageServiceClient() >>> >>> # TODO: Initialize `document`: >>> document = {} >>> >>> response = client.analyze_sentiment(document) Args: document (Union[dict, ~google.cloud.language_v1.types.Document]): Input document. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.language_v1.types.Document` encoding_type (~google.cloud.language_v1.types.EncodingType): The encoding type used by the API to calculate sentence offsets. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.language_v1.types.AnalyzeSentimentResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
if "analyze_sentiment" not in self._inner_api_calls: self._inner_api_calls[ "analyze_sentiment" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.analyze_sentiment, default_retry=self._method_configs["AnalyzeSentiment"].retry, default_timeout=self._method_configs["AnalyzeSentiment"].timeout, client_info=self._client_info, ) request = language_service_pb2.AnalyzeSentimentRequest( document=document, encoding_type=encoding_type ) return self._inner_api_calls["analyze_sentiment"]( request, retry=retry, timeout=timeout, metadata=metadata )
[ "def", "analyze_sentiment", "(", "self", ",", "document", ",", "encoding_type", "=", "None", ",", "retry", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "timeout", "=", "google", ".", "api_core", ".", "gapic_v1", ".",...
Analyzes the sentiment of the provided text. Example: >>> from google.cloud import language_v1 >>> >>> client = language_v1.LanguageServiceClient() >>> >>> # TODO: Initialize `document`: >>> document = {} >>> >>> response = client.analyze_sentiment(document) Args: document (Union[dict, ~google.cloud.language_v1.types.Document]): Input document. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.language_v1.types.Document` encoding_type (~google.cloud.language_v1.types.EncodingType): The encoding type used by the API to calculate sentence offsets. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.language_v1.types.AnalyzeSentimentResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
[ "Analyzes", "the", "sentiment", "of", "the", "provided", "text", "." ]
python
train
maxpumperla/elephas
elephas/ml/adapter.py
https://github.com/maxpumperla/elephas/blob/84605acdc9564673c487637dcb27f5def128bcc7/elephas/ml/adapter.py#L26-L40
def df_to_simple_rdd(df, categorical=False, nb_classes=None, features_col='features', label_col='label'): """Convert DataFrame into RDD of pairs """ sql_context = df.sql_ctx sql_context.registerDataFrameAsTable(df, "temp_table") selected_df = sql_context.sql( "SELECT {0} AS features, {1} as label from temp_table".format(features_col, label_col)) if isinstance(selected_df.first().features, MLLibVector): lp_rdd = selected_df.rdd.map( lambda row: LabeledPoint(row.label, row.features)) else: lp_rdd = selected_df.rdd.map(lambda row: LabeledPoint( row.label, MLLibVectors.fromML(row.features))) rdd = lp_to_simple_rdd(lp_rdd, categorical, nb_classes) return rdd
[ "def", "df_to_simple_rdd", "(", "df", ",", "categorical", "=", "False", ",", "nb_classes", "=", "None", ",", "features_col", "=", "'features'", ",", "label_col", "=", "'label'", ")", ":", "sql_context", "=", "df", ".", "sql_ctx", "sql_context", ".", "registe...
Convert DataFrame into RDD of pairs
[ "Convert", "DataFrame", "into", "RDD", "of", "pairs" ]
python
train
spyder-ide/spyder
spyder/plugins/editor/widgets/editor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/widgets/editor.py#L1413-L1419
def get_index_from_filename(self, filename): """ Return the position index of a file in the tab bar of the editorstack from its name. """ filenames = [d.filename for d in self.data] return filenames.index(filename)
[ "def", "get_index_from_filename", "(", "self", ",", "filename", ")", ":", "filenames", "=", "[", "d", ".", "filename", "for", "d", "in", "self", ".", "data", "]", "return", "filenames", ".", "index", "(", "filename", ")" ]
Return the position index of a file in the tab bar of the editorstack from its name.
[ "Return", "the", "position", "index", "of", "a", "file", "in", "the", "tab", "bar", "of", "the", "editorstack", "from", "its", "name", "." ]
python
train
unt-libraries/pypairtree
pypairtree/pairtree.py
https://github.com/unt-libraries/pypairtree/blob/2107b46718bbf9ef7ef3d5c63d557d1f772e5d69/pypairtree/pairtree.py#L211-L226
def add_to_pairtree(output_path, meta_id): """Creates pairtree dir structure within pairtree for new element.""" # create the pair path paired_path = pair_tree_creator(meta_id) path_append = '' # for each directory in the pair path for pair_dir in paired_path.split(os.sep): # append the pair path together, one directory at a time path_append = os.path.join(path_append, pair_dir) # append the pair path to the output path combined_path = os.path.join(output_path, path_append) # if the path doesn't already exist, create it if not os.path.exists(combined_path): os.mkdir(combined_path) return combined_path
[ "def", "add_to_pairtree", "(", "output_path", ",", "meta_id", ")", ":", "# create the pair path", "paired_path", "=", "pair_tree_creator", "(", "meta_id", ")", "path_append", "=", "''", "# for each directory in the pair path", "for", "pair_dir", "in", "paired_path", "."...
Creates pairtree dir structure within pairtree for new element.
[ "Creates", "pairtree", "dir", "structure", "within", "pairtree", "for", "new", "element", "." ]
python
train
googleapis/google-cloud-python
videointelligence/google/cloud/videointelligence_v1beta2/gapic/video_intelligence_service_client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/videointelligence/google/cloud/videointelligence_v1beta2/gapic/video_intelligence_service_client.py#L175-L289
def annotate_video( self, input_uri=None, input_content=None, features=None, video_context=None, output_uri=None, location_id=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Performs asynchronous video annotation. Progress and results can be retrieved through the ``google.longrunning.Operations`` interface. ``Operation.metadata`` contains ``AnnotateVideoProgress`` (progress). ``Operation.response`` contains ``AnnotateVideoResponse`` (results). Example: >>> from google.cloud import videointelligence_v1beta2 >>> from google.cloud.videointelligence_v1beta2 import enums >>> >>> client = videointelligence_v1beta2.VideoIntelligenceServiceClient() >>> >>> input_uri = 'gs://demomaker/cat.mp4' >>> features_element = enums.Feature.LABEL_DETECTION >>> features = [features_element] >>> >>> response = client.annotate_video(input_uri=input_uri, features=features) >>> >>> def callback(operation_future): ... # Handle result. ... result = operation_future.result() >>> >>> response.add_done_callback(callback) >>> >>> # Handle metadata. >>> metadata = response.metadata() Args: input_uri (str): Input video location. Currently, only `Google Cloud Storage <https://cloud.google.com/storage/>`__ URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id`` (other URI formats return ``google.rpc.Code.INVALID_ARGUMENT``). For more information, see `Request URIs <https://cloud.google.com/storage/docs/reference-uris>`__. A video URI may include wildcards in ``object-id``, and thus identify multiple videos. Supported wildcards: '\*' to match 0 or more characters; '?' to match 1 character. If unset, the input video should be embedded in the request as ``input_content``. If set, ``input_content`` should be unset. input_content (bytes): The video data bytes. If unset, the input video(s) should be specified via ``input_uri``. If set, ``input_uri`` should be unset. 
features (list[~google.cloud.videointelligence_v1beta2.types.Feature]): Requested video annotation features. video_context (Union[dict, ~google.cloud.videointelligence_v1beta2.types.VideoContext]): Additional video context and/or feature-specific parameters. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.videointelligence_v1beta2.types.VideoContext` output_uri (str): Optional location where the output (in JSON format) should be stored. Currently, only `Google Cloud Storage <https://cloud.google.com/storage/>`__ URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id`` (other URI formats return ``google.rpc.Code.INVALID_ARGUMENT``). For more information, see `Request URIs <https://cloud.google.com/storage/docs/reference-uris>`__. location_id (str): Optional cloud region where annotation should take place. Supported cloud regions: ``us-east1``, ``us-west1``, ``europe-west1``, ``asia-east1``. If no region is specified, a region will be determined based on video file location. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.videointelligence_v1beta2.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
if "annotate_video" not in self._inner_api_calls: self._inner_api_calls[ "annotate_video" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.annotate_video, default_retry=self._method_configs["AnnotateVideo"].retry, default_timeout=self._method_configs["AnnotateVideo"].timeout, client_info=self._client_info, ) request = video_intelligence_pb2.AnnotateVideoRequest( input_uri=input_uri, input_content=input_content, features=features, video_context=video_context, output_uri=output_uri, location_id=location_id, ) operation = self._inner_api_calls["annotate_video"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, video_intelligence_pb2.AnnotateVideoResponse, metadata_type=video_intelligence_pb2.AnnotateVideoProgress, )
[ "def", "annotate_video", "(", "self", ",", "input_uri", "=", "None", ",", "input_content", "=", "None", ",", "features", "=", "None", ",", "video_context", "=", "None", ",", "output_uri", "=", "None", ",", "location_id", "=", "None", ",", "retry", "=", "...
Performs asynchronous video annotation. Progress and results can be retrieved through the ``google.longrunning.Operations`` interface. ``Operation.metadata`` contains ``AnnotateVideoProgress`` (progress). ``Operation.response`` contains ``AnnotateVideoResponse`` (results). Example: >>> from google.cloud import videointelligence_v1beta2 >>> from google.cloud.videointelligence_v1beta2 import enums >>> >>> client = videointelligence_v1beta2.VideoIntelligenceServiceClient() >>> >>> input_uri = 'gs://demomaker/cat.mp4' >>> features_element = enums.Feature.LABEL_DETECTION >>> features = [features_element] >>> >>> response = client.annotate_video(input_uri=input_uri, features=features) >>> >>> def callback(operation_future): ... # Handle result. ... result = operation_future.result() >>> >>> response.add_done_callback(callback) >>> >>> # Handle metadata. >>> metadata = response.metadata() Args: input_uri (str): Input video location. Currently, only `Google Cloud Storage <https://cloud.google.com/storage/>`__ URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id`` (other URI formats return ``google.rpc.Code.INVALID_ARGUMENT``). For more information, see `Request URIs <https://cloud.google.com/storage/docs/reference-uris>`__. A video URI may include wildcards in ``object-id``, and thus identify multiple videos. Supported wildcards: '\*' to match 0 or more characters; '?' to match 1 character. If unset, the input video should be embedded in the request as ``input_content``. If set, ``input_content`` should be unset. input_content (bytes): The video data bytes. If unset, the input video(s) should be specified via ``input_uri``. If set, ``input_uri`` should be unset. features (list[~google.cloud.videointelligence_v1beta2.types.Feature]): Requested video annotation features. video_context (Union[dict, ~google.cloud.videointelligence_v1beta2.types.VideoContext]): Additional video context and/or feature-specific parameters. 
If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.videointelligence_v1beta2.types.VideoContext` output_uri (str): Optional location where the output (in JSON format) should be stored. Currently, only `Google Cloud Storage <https://cloud.google.com/storage/>`__ URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id`` (other URI formats return ``google.rpc.Code.INVALID_ARGUMENT``). For more information, see `Request URIs <https://cloud.google.com/storage/docs/reference-uris>`__. location_id (str): Optional cloud region where annotation should take place. Supported cloud regions: ``us-east1``, ``us-west1``, ``europe-west1``, ``asia-east1``. If no region is specified, a region will be determined based on video file location. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.videointelligence_v1beta2.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
[ "Performs", "asynchronous", "video", "annotation", ".", "Progress", "and", "results", "can", "be", "retrieved", "through", "the", "google", ".", "longrunning", ".", "Operations", "interface", ".", "Operation", ".", "metadata", "contains", "AnnotateVideoProgress", "(...
python
train
openvax/mhctools
mhctools/parsing.py
https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/parsing.py#L224-L260
def parse_netmhcpan28_stdout( stdout, prediction_method_name="netmhcpan", sequence_key_mapping=None): """ # Affinity Threshold for Strong binding peptides 50.000', # Affinity Threshold for Weak binding peptides 500.000', # Rank Threshold for Strong binding peptides 0.500', # Rank Threshold for Weak binding peptides 2.000', ---------------------------------------------------------------------------- pos HLA peptide Identity 1-log50k(aff) Affinity(nM) %Rank BindLevel ---------------------------------------------------------------------------- 0 HLA-A*02:03 QQQQQYFPE id0 0.024 38534.25 50.00 1 HLA-A*02:03 QQQQYFPEI id0 0.278 2461.53 15.00 2 HLA-A*02:03 QQQYFPEIT id0 0.078 21511.53 50.00 3 HLA-A*02:03 QQYFPEITH id0 0.041 32176.84 50.00 4 HLA-A*02:03 QYFPEITHI id0 0.085 19847.09 32.00 5 HLA-A*02:03 YFPEITHII id0 0.231 4123.85 15.00 6 HLA-A*02:03 FPEITHIII id0 0.060 26134.28 50.00 7 HLA-A*02:03 PEITHIIIA id0 0.034 34524.63 50.00 8 HLA-A*02:03 EITHIIIAS id0 0.076 21974.48 50.00 9 HLA-A*02:03 ITHIIIASS id0 0.170 7934.26 32.00 10 HLA-A*02:03 THIIIASSS id0 0.040 32361.18 50.00 11 HLA-A*02:03 HIIIASSSL id0 0.515 189.74 4.00 <= WB """ check_stdout_error(stdout, "NetMHCpan-2.8") return parse_stdout( stdout=stdout, prediction_method_name=prediction_method_name, sequence_key_mapping=sequence_key_mapping, key_index=3, offset_index=0, peptide_index=2, allele_index=1, ic50_index=5, rank_index=6, log_ic50_index=4)
[ "def", "parse_netmhcpan28_stdout", "(", "stdout", ",", "prediction_method_name", "=", "\"netmhcpan\"", ",", "sequence_key_mapping", "=", "None", ")", ":", "check_stdout_error", "(", "stdout", ",", "\"NetMHCpan-2.8\"", ")", "return", "parse_stdout", "(", "stdout", "=",...
# Affinity Threshold for Strong binding peptides 50.000', # Affinity Threshold for Weak binding peptides 500.000', # Rank Threshold for Strong binding peptides 0.500', # Rank Threshold for Weak binding peptides 2.000', ---------------------------------------------------------------------------- pos HLA peptide Identity 1-log50k(aff) Affinity(nM) %Rank BindLevel ---------------------------------------------------------------------------- 0 HLA-A*02:03 QQQQQYFPE id0 0.024 38534.25 50.00 1 HLA-A*02:03 QQQQYFPEI id0 0.278 2461.53 15.00 2 HLA-A*02:03 QQQYFPEIT id0 0.078 21511.53 50.00 3 HLA-A*02:03 QQYFPEITH id0 0.041 32176.84 50.00 4 HLA-A*02:03 QYFPEITHI id0 0.085 19847.09 32.00 5 HLA-A*02:03 YFPEITHII id0 0.231 4123.85 15.00 6 HLA-A*02:03 FPEITHIII id0 0.060 26134.28 50.00 7 HLA-A*02:03 PEITHIIIA id0 0.034 34524.63 50.00 8 HLA-A*02:03 EITHIIIAS id0 0.076 21974.48 50.00 9 HLA-A*02:03 ITHIIIASS id0 0.170 7934.26 32.00 10 HLA-A*02:03 THIIIASSS id0 0.040 32361.18 50.00 11 HLA-A*02:03 HIIIASSSL id0 0.515 189.74 4.00 <= WB
[ "#", "Affinity", "Threshold", "for", "Strong", "binding", "peptides", "50", ".", "000", "#", "Affinity", "Threshold", "for", "Weak", "binding", "peptides", "500", ".", "000", "#", "Rank", "Threshold", "for", "Strong", "binding", "peptides", "0", ".", "500", ...
python
valid
zimeon/iiif
iiif/flask_utils.py
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/flask_utils.py#L250-L268
def file(self): """Filename property for the source image for the current identifier.""" file = None if (self.config.klass_name == 'gen'): for ext in ['.py']: file = os.path.join( self.config.generator_dir, self.identifier + ext) if (os.path.isfile(file)): return file else: for ext in ['.jpg', '.png', '.tif']: file = os.path.join(self.config.image_dir, self.identifier + ext) if (os.path.isfile(file)): return file # failed, show list of available identifiers as error available = "\n ".join(identifiers(self.config)) raise IIIFError(code=404, parameter="identifier", text="Image resource '" + self.identifier + "' not found. Local resources available:" + available + "\n")
[ "def", "file", "(", "self", ")", ":", "file", "=", "None", "if", "(", "self", ".", "config", ".", "klass_name", "==", "'gen'", ")", ":", "for", "ext", "in", "[", "'.py'", "]", ":", "file", "=", "os", ".", "path", ".", "join", "(", "self", ".", ...
Filename property for the source image for the current identifier.
[ "Filename", "property", "for", "the", "source", "image", "for", "the", "current", "identifier", "." ]
python
train
DataDog/integrations-core
mysql/datadog_checks/mysql/mysql.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/mysql/datadog_checks/mysql/mysql.py#L720-L755
def _collect_dict(self, metric_type, field_metric_map, query, db, tags): """ Query status and get a dictionary back. Extract each field out of the dictionary and stuff it in the corresponding metric. query: show status... field_metric_map: {"Seconds_behind_master": "mysqlSecondsBehindMaster"} """ try: with closing(db.cursor()) as cursor: cursor.execute(query) result = cursor.fetchone() if result is not None: for field, metric in list(iteritems(field_metric_map)): # Find the column name in the cursor description to identify the column index # http://www.python.org/dev/peps/pep-0249/ # cursor.description is a tuple of (column_name, ..., ...) try: col_idx = [d[0].lower() for d in cursor.description].index(field.lower()) self.log.debug("Collecting metric: %s" % metric) if result[col_idx] is not None: self.log.debug("Collecting done, value %s" % result[col_idx]) if metric_type == GAUGE: self.gauge(metric, float(result[col_idx]), tags=tags) elif metric_type == RATE: self.rate(metric, float(result[col_idx]), tags=tags) else: self.gauge(metric, float(result[col_idx]), tags=tags) else: self.log.debug("Received value is None for index %d" % col_idx) except ValueError: self.log.exception("Cannot find %s in the columns %s" % (field, cursor.description)) except Exception: self.warning("Error while running %s\n%s" % (query, traceback.format_exc())) self.log.exception("Error while running %s" % query)
[ "def", "_collect_dict", "(", "self", ",", "metric_type", ",", "field_metric_map", ",", "query", ",", "db", ",", "tags", ")", ":", "try", ":", "with", "closing", "(", "db", ".", "cursor", "(", ")", ")", "as", "cursor", ":", "cursor", ".", "execute", "...
Query status and get a dictionary back. Extract each field out of the dictionary and stuff it in the corresponding metric. query: show status... field_metric_map: {"Seconds_behind_master": "mysqlSecondsBehindMaster"}
[ "Query", "status", "and", "get", "a", "dictionary", "back", ".", "Extract", "each", "field", "out", "of", "the", "dictionary", "and", "stuff", "it", "in", "the", "corresponding", "metric", "." ]
python
train
fabric-bolt/fabric-bolt
fabric_bolt/projects/models.py
https://github.com/fabric-bolt/fabric-bolt/blob/0f434783026f1b9ce16a416fa496d76921fe49ca/fabric_bolt/projects/models.py#L215-L226
def get_absolute_url(self): """Determine where I am coming from and where I am going""" # Determine if this configuration is on a stage if self.stage: # Stage specific configurations go back to the stage view url = reverse('projects_stage_view', args=(self.project.pk, self.stage.pk)) else: # Project specific configurations go back to the project page url = self.project.get_absolute_url() return url
[ "def", "get_absolute_url", "(", "self", ")", ":", "# Determine if this configuration is on a stage", "if", "self", ".", "stage", ":", "# Stage specific configurations go back to the stage view", "url", "=", "reverse", "(", "'projects_stage_view'", ",", "args", "=", "(", "...
Determine where I am coming from and where I am going
[ "Determine", "where", "I", "am", "coming", "from", "and", "where", "I", "am", "going" ]
python
train
django-userena-ce/django-userena-ce
userena/contrib/umessages/templatetags/umessages_tags.py
https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/contrib/umessages/templatetags/umessages_tags.py#L64-L85
def get_unread_message_count_between(parser, token): """ Returns the unread message count between two users. Syntax:: {% get_unread_message_count_between [user] and [user] as [var_name] %} Example usage:: {% get_unread_message_count_between funky and wunki as message_count %} """ try: tag_name, arg = token.contents.split(None, 1) except ValueError: raise template.TemplateSyntaxError("%s tag requires arguments" % token.contents.split()[0]) m = re.search(r'(.*?) and (.*?) as (\w+)', arg) if not m: raise template.TemplateSyntaxError("%s tag had invalid arguments" % tag_name) um_from_user, um_to_user, var_name = m.groups() return MessageCount(um_from_user, var_name, um_to_user)
[ "def", "get_unread_message_count_between", "(", "parser", ",", "token", ")", ":", "try", ":", "tag_name", ",", "arg", "=", "token", ".", "contents", ".", "split", "(", "None", ",", "1", ")", "except", "ValueError", ":", "raise", "template", ".", "TemplateS...
Returns the unread message count between two users. Syntax:: {% get_unread_message_count_between [user] and [user] as [var_name] %} Example usage:: {% get_unread_message_count_between funky and wunki as message_count %}
[ "Returns", "the", "unread", "message", "count", "between", "two", "users", "." ]
python
train
kajic/django-model-changes
django_model_changes/changes.py
https://github.com/kajic/django-model-changes/blob/92124ebdf29cba930eb1ced00135823b961041d3/django_model_changes/changes.py#L149-L167
def was_persisted(self): """ Returns true if the instance was persisted (saved) in its old state. Examples:: >>> user = User() >>> user.save() >>> user.was_persisted() False >>> user = User.objects.get(pk=1) >>> user.delete() >>> user.was_persisted() True """ pk_name = self._meta.pk.name return bool(self.old_state()[pk_name])
[ "def", "was_persisted", "(", "self", ")", ":", "pk_name", "=", "self", ".", "_meta", ".", "pk", ".", "name", "return", "bool", "(", "self", ".", "old_state", "(", ")", "[", "pk_name", "]", ")" ]
Returns true if the instance was persisted (saved) in its old state. Examples:: >>> user = User() >>> user.save() >>> user.was_persisted() False >>> user = User.objects.get(pk=1) >>> user.delete() >>> user.was_persisted() True
[ "Returns", "true", "if", "the", "instance", "was", "persisted", "(", "saved", ")", "in", "its", "old", "state", "." ]
python
train
gwastro/pycbc
pycbc/results/legacy_grb.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/results/legacy_grb.py#L311-L357
def write_offsource(page, args, grbtag, onsource=False): """ Write offsource SNR versus time plots to markup.page object page """ th = ['Re-weighted SNR', 'Coherent SNR'] if args.time_slides: if onsource: out_dir = 'ZEROLAG_ALL' else: out_dir = 'ZEROLAG_OFF' else: if onsource: out_dir = 'ALL_TIMES' else: out_dir = 'OFFSOURCE' plot = markup.page() p = "%s/plots_clustered/GRB%s_bestnr_vs_time_noinj.png" % (out_dir, grbtag) plot.a(href=p, title="Detection statistic versus time") plot.img(src=p) plot.a.close() td = [ plot() ] plot = markup.page() p = "%s/plots_clustered/GRB%s_triggers_vs_time_noinj.png" % (out_dir, grbtag) plot.a(href=p, title="Coherent SNR versus time") plot.img(src=p) plot.a.close() td.append(plot()) ifos = [args.ifo_tag[i:i+2] for i in range(0, len(args.ifo_tag), 2)] for ifo in ifos: th.append('%s SNR' % ifo) plot = markup.page() p = "%s/plots_clustered/GRB%s_%s_triggers_vs_time_noinj.png"\ % (out_dir, grbtag, ifo) plot.a(href=p, title="%s SNR versus time" % ifo) plot.img(src=p) plot.a.close() td.append(plot()) page = write_table(page, th, td) return page
[ "def", "write_offsource", "(", "page", ",", "args", ",", "grbtag", ",", "onsource", "=", "False", ")", ":", "th", "=", "[", "'Re-weighted SNR'", ",", "'Coherent SNR'", "]", "if", "args", ".", "time_slides", ":", "if", "onsource", ":", "out_dir", "=", "'Z...
Write offsource SNR versus time plots to markup.page object page
[ "Write", "offsource", "SNR", "versus", "time", "plots", "to", "markup", ".", "page", "object", "page" ]
python
train
mila-iqia/fuel
fuel/bin/fuel_convert.py
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/bin/fuel_convert.py#L24-L98
def main(args=None): """Entry point for `fuel-convert` script. This function can also be imported and used from Python. Parameters ---------- args : iterable, optional (default: None) A list of arguments that will be passed to Fuel's conversion utility. If this argument is not specified, `sys.argv[1:]` will be used. """ built_in_datasets = dict(converters.all_converters) if fuel.config.extra_converters: for name in fuel.config.extra_converters: extra_datasets = dict( importlib.import_module(name).all_converters) if any(key in built_in_datasets for key in extra_datasets.keys()): raise ValueError('extra converters conflict in name with ' 'built-in converters') built_in_datasets.update(extra_datasets) parser = argparse.ArgumentParser( description='Conversion script for built-in datasets.') subparsers = parser.add_subparsers() parent_parser = argparse.ArgumentParser(add_help=False) parent_parser.add_argument( "-d", "--directory", help="directory in which input files reside", type=str, default=os.getcwd()) convert_functions = {} for name, fill_subparser in built_in_datasets.items(): subparser = subparsers.add_parser( name, parents=[parent_parser], help='Convert the {} dataset'.format(name)) subparser.add_argument( "-o", "--output-directory", help="where to save the dataset", type=str, default=os.getcwd(), action=CheckDirectoryAction) subparser.add_argument( "-r", "--output_filename", help="new name of the created dataset", type=str, default=None) # Allows the parser to know which subparser was called. 
subparser.set_defaults(which_=name) convert_functions[name] = fill_subparser(subparser) args = parser.parse_args(args) args_dict = vars(args) if args_dict['output_filename'] is not None and\ os.path.splitext(args_dict['output_filename'])[1] not in\ ('.hdf5', '.hdf', '.h5'): args_dict['output_filename'] += '.hdf5' if args_dict['output_filename'] is None: args_dict.pop('output_filename') convert_function = convert_functions[args_dict.pop('which_')] try: output_paths = convert_function(**args_dict) except MissingInputFiles as e: intro = "The following required files were not found:\n" message = "\n".join([intro] + [" * " + f for f in e.filenames]) message += "\n\nDid you forget to run fuel-download?" parser.error(message) # Tag the newly-created file(s) with H5PYDataset version and command-line # options for output_path in output_paths: h5file = h5py.File(output_path, 'a') interface_version = H5PYDataset.interface_version.encode('utf-8') h5file.attrs['h5py_interface_version'] = interface_version fuel_convert_version = converters.__version__.encode('utf-8') h5file.attrs['fuel_convert_version'] = fuel_convert_version command = [os.path.basename(sys.argv[0])] + sys.argv[1:] h5file.attrs['fuel_convert_command'] = ( ' '.join(command).encode('utf-8')) h5file.flush() h5file.close()
[ "def", "main", "(", "args", "=", "None", ")", ":", "built_in_datasets", "=", "dict", "(", "converters", ".", "all_converters", ")", "if", "fuel", ".", "config", ".", "extra_converters", ":", "for", "name", "in", "fuel", ".", "config", ".", "extra_converter...
Entry point for `fuel-convert` script. This function can also be imported and used from Python. Parameters ---------- args : iterable, optional (default: None) A list of arguments that will be passed to Fuel's conversion utility. If this argument is not specified, `sys.argv[1:]` will be used.
[ "Entry", "point", "for", "fuel", "-", "convert", "script", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xconsoleedit.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xconsoleedit.py#L164-L196
def acceptCompletion(self):
    """
    Accepts the current completion and inserts the code into the edit.

    :return     <bool> accepted
    """
    popup = self._completerTree
    if not popup:
        return False
    popup.hide()

    choice = popup.currentItem()
    if not choice:
        return False

    # work out how much of the partially typed word needs replacing:
    # scan left from the cursor column until a '.' or space boundary
    cursor = self.textCursor()
    line = cursor.block().text()
    stop = cursor.columnNumber()
    start = stop
    while start:
        start -= 1
        if line[start] in ('.', ' '):
            start += 1
            break

    # select the typed fragment, remove it, and insert the completion text
    cursor.setPosition(cursor.position() - (stop - start), cursor.KeepAnchor)
    cursor.removeSelectedText()
    self.insertPlainText(choice.text(0))
    return True
[ "def", "acceptCompletion", "(", "self", ")", ":", "tree", "=", "self", ".", "_completerTree", "if", "not", "tree", ":", "return", "False", "tree", ".", "hide", "(", ")", "item", "=", "tree", ".", "currentItem", "(", ")", "if", "not", "item", ":", "re...
Accepts the current completion and inserts the code into the edit. :return <bool> accepted
[ "Accepts", "the", "current", "completion", "and", "inserts", "the", "code", "into", "the", "edit", ".", ":", "return", "<bool", ">", "accepted" ]
python
train
jwodder/javaproperties
javaproperties/propclass.py
https://github.com/jwodder/javaproperties/blob/8b48f040305217ebeb80c98c4354691bbb01429b/javaproperties/propclass.py#L206-L220
def storeToXML(self, out, comment=None, encoding='UTF-8'):
    """
    Write the `Properties` object's entries (in unspecified order) in XML
    properties format to ``out``.

    :param out: a file-like object to write the properties to
    :type out: binary file-like object
    :param comment: if non-`None`, ``comment`` will be output as a
        ``<comment>`` element before the ``<entry>`` elements
    :type comment: text string or `None`
    :param string encoding: the name of the encoding to use for the XML
        document (also included in the XML declaration)
    :return: `None`
    """
    # Serialization itself is delegated to the module-level XML dumper.
    entries = self.data
    dump_xml(entries, out, encoding=encoding, comment=comment)
[ "def", "storeToXML", "(", "self", ",", "out", ",", "comment", "=", "None", ",", "encoding", "=", "'UTF-8'", ")", ":", "dump_xml", "(", "self", ".", "data", ",", "out", ",", "comment", "=", "comment", ",", "encoding", "=", "encoding", ")" ]
Write the `Properties` object's entries (in unspecified order) in XML properties format to ``out``. :param out: a file-like object to write the properties to :type out: binary file-like object :param comment: if non-`None`, ``comment`` will be output as a ``<comment>`` element before the ``<entry>`` elements :type comment: text string or `None` :param string encoding: the name of the encoding to use for the XML document (also included in the XML declaration) :return: `None`
[ "Write", "the", "Properties", "object", "s", "entries", "(", "in", "unspecified", "order", ")", "in", "XML", "properties", "format", "to", "out", "." ]
python
train
Vital-Fernandez/dazer
bin/lib/Astro_Libraries/f2n.py
https://github.com/Vital-Fernandez/dazer/blob/3c9ae8ae6d40ea33f22cc20dc11365d6d6e65244/bin/lib/Astro_Libraries/f2n.py#L945-L949
def loggray(x, a, b):
    """Auxiliary function that specifies the logarithmic gray scale.
    a and b are the cutoffs."""
    # Map [a, b] linearly onto [10, 1000], then compress with log10 so the
    # result spans [0, 255].
    scaled = 10.0 + 990.0 * (x - float(a)) / (b - a)
    return 255.0 * 0.5 * (np.log10(scaled) - 1.0)
[ "def", "loggray", "(", "x", ",", "a", ",", "b", ")", ":", "linval", "=", "10.0", "+", "990.0", "*", "(", "x", "-", "float", "(", "a", ")", ")", "/", "(", "b", "-", "a", ")", "return", "(", "np", ".", "log10", "(", "linval", ")", "-", "1.0...
Auxiliary function that specifies the logarithmic gray scale. a and b are the cutoffs.
[ "Auxiliary", "function", "that", "specifies", "the", "logarithmic", "gray", "scale", ".", "a", "and", "b", "are", "the", "cutoffs", "." ]
python
train
mlperf/training
translation/tensorflow/transformer/utils/metrics.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/translation/tensorflow/transformer/utils/metrics.py#L222-L285
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
                 use_bp=True):
  """Computes BLEU score of translated segments against one or more references.

  Args:
    reference_corpus: list of references for each translation. Each
        reference should be tokenized into a list of tokens.
    translation_corpus: list of translations to score. Each translation
        should be tokenized into a list of tokens.
    max_order: Maximum n-gram order to use when computing BLEU score.
    use_bp: boolean, whether to apply brevity penalty.

  Returns:
    BLEU score as a ``np.float32`` scalar.
  """
  reference_length = 0
  translation_length = 0
  bp = 1.0
  geo_mean = 0

  # n-gram match statistics aggregated over the whole corpus; index i holds
  # counts for (i+1)-grams.
  matches_by_order = [0] * max_order
  possible_matches_by_order = [0] * max_order
  precisions = []

  for (references, translations) in zip(reference_corpus, translation_corpus):
    reference_length += len(references)
    translation_length += len(translations)
    ref_ngram_counts = _get_ngrams_with_counter(references, max_order)
    translation_ngram_counts = _get_ngrams_with_counter(translations,
                                                        max_order)

    # clipped counts: each translated n-gram is credited at most as many
    # times as it appears in the reference
    overlap = dict((ngram, min(count, translation_ngram_counts[ngram]))
                   for ngram, count in ref_ngram_counts.items())

    for ngram in overlap:
      matches_by_order[len(ngram) - 1] += overlap[ngram]
    for ngram in translation_ngram_counts:
      possible_matches_by_order[len(ngram) - 1] += translation_ngram_counts[
        ngram]

  precisions = [0] * max_order
  smooth = 1.0

  # NOTE(review): `xrange` implies Python 2 (or a six-style shim imported
  # elsewhere in the module) -- confirm before running under Python 3.
  for i in xrange(0, max_order):
    if possible_matches_by_order[i] > 0:
      # NOTE(review): this assignment is redundant -- both branches below
      # overwrite precisions[i]; kept byte-identical here.
      precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[i]
      if matches_by_order[i] > 0:
        precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[
          i]
      else:
        # exponential smoothing for orders with zero matches
        smooth *= 2
        precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])
    else:
      precisions[i] = 0.0

  # geometric mean of the (nonzero) n-gram precisions
  if max(precisions) > 0:
    p_log_sum = sum(math.log(p) for p in precisions if p)
    geo_mean = math.exp(p_log_sum / max_order)

  # brevity penalty: penalize translations shorter than the references
  # NOTE(review): under Python 2 this division is integer division unless
  # true division is imported at module level -- verify.
  if use_bp:
    ratio = translation_length / reference_length
    bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0
  bleu = geo_mean * bp
  return np.float32(bleu)
[ "def", "compute_bleu", "(", "reference_corpus", ",", "translation_corpus", ",", "max_order", "=", "4", ",", "use_bp", "=", "True", ")", ":", "reference_length", "=", "0", "translation_length", "=", "0", "bp", "=", "1.0", "geo_mean", "=", "0", "matches_by_order...
Computes BLEU score of translated segments against one or more references. Args: reference_corpus: list of references for each translation. Each reference should be tokenized into a list of tokens. translation_corpus: list of translations to score. Each translation should be tokenized into a list of tokens. max_order: Maximum n-gram order to use when computing BLEU score. use_bp: boolean, whether to apply brevity penalty. Returns: BLEU score.
[ "Computes", "BLEU", "score", "of", "translated", "segments", "against", "one", "or", "more", "references", "." ]
python
train
CamilleMo/SuperSight
supersight/Main.py
https://github.com/CamilleMo/SuperSight/blob/246ea35f42675801ab54df4ea78e957d592780e0/supersight/Main.py#L456-L465
def add_table(self, dataframe, isStyled = False):
    """This method stores plain html string."""
    # Render HTML either from a Styler passed in directly, or by styling
    # the DataFrame first.
    if isStyled:
        html = dataframe.render()
    else:
        html = dataframe.style.render()
    # Strip newlines and inject bootstrap table classes into the markup.
    html = html.replace("\n", "")
    html = html.replace("<table", """<table class = "table table-sm table-hover" """)
    html = html.replace("<thead>", """<thead class="thead-inverse">""")
    self.table = html
[ "def", "add_table", "(", "self", ",", "dataframe", ",", "isStyled", "=", "False", ")", ":", "if", "isStyled", ":", "table_string", "=", "dataframe", ".", "render", "(", ")", "else", ":", "table_string", "=", "dataframe", ".", "style", ".", "render", "(",...
This method stores plain html string.
[ "This", "method", "stores", "plain", "html", "string", "." ]
python
train
theislab/scanpy
scanpy/tools/_sim.py
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/tools/_sim.py#L538-L616
def set_coupl(self, Coupl=None):
    """ Construct the coupling matrix (and adjacency matrix) from predefined models
        or via sampling.

        Populates ``self.varNames`` and, depending on the model, some of:
        ``self.Coupl``, ``self.boolRules``, ``self.Adj_signed``, ``self.Adj``.

        :param Coupl: optional precomputed coupling matrix (used by the
            'var' family of models); when None and the model is unknown,
            the model is read from file via ``self.read_model()``.
    """
    # variable names are simply stringified indices 0..dim-1
    self.varNames = collections.OrderedDict([(str(i), i) for i in range(self.dim)])
    if (self.model not in self.availModels.keys()
        and Coupl is None):
        self.read_model()
    elif 'var' in self.model:
        # vector auto regressive process
        self.Coupl = Coupl
        self.boolRules = collections.OrderedDict(
            [(s, '') for s in self.varNames.keys()])
        names = list(self.varNames.keys())
        for gp in range(self.dim):
            pas = []
            for g in range(self.dim):
                # NOTE(review): np.abs wraps the comparison, not the
                # coefficient -- likely intended np.abs(...) > 1e-10; kept
                # byte-identical here, confirm upstream.
                if np.abs(self.Coupl[gp,g] > 1e-10):
                    pas.append(names[g])
            # boolean rule: "parent1 or parent2 or ..."
            self.boolRules[
                names[gp]] = ''.join(pas[:1] + [' or ' + pa for pa in pas[1:]])
        self.Adj_signed = np.sign(Coupl)
    elif self.model in ['6','7','8','9','10']:
        # randomly grown binary-tree topologies rooted at two sink nodes
        self.Adj_signed = np.zeros((self.dim,self.dim))
        n_sinknodes = 2
        # sinknodes = np.random.choice(np.arange(0,self.dim),
        #                              size=n_sinknodes,replace=False)
        sinknodes = np.array([0,1])
        # assume sinknodes have feeback
        self.Adj_signed[sinknodes,sinknodes] = np.ones(n_sinknodes)
        # # allow negative feedback
        # if self.model == 10:
        #     plus_minus = (np.random.randint(0,2,n_sinknodes) - 0.5)*2
        #     self.Adj_signed[sinknodes,sinknodes] = plus_minus
        leafnodes = np.array(sinknodes)
        availnodes = np.array([i for i in range(self.dim) if i not in sinknodes])
        # settings.m(0,leafnodes,availnodes)
        while len(availnodes) != 0:
            # pick a random leaf as parent
            parent_idx = np.random.choice(np.arange(0,len(leafnodes)),
                                          size=1,replace=False)
            parent = leafnodes[parent_idx]
            # attach two random unused nodes as its children
            children_ids = np.random.choice(np.arange(0,len(availnodes)),
                                            size=2,replace=False)
            children = availnodes[children_ids]
            settings.m(0,parent,children)
            self.Adj_signed[children,parent] = np.ones(2)
            # models 8-10 add (mutual) inhibition between siblings
            if self.model == 8:
                self.Adj_signed[children[0],children[1]] = -1
            if self.model in [9,10]:
                self.Adj_signed[children[0],children[1]] = -1
                self.Adj_signed[children[1],children[0]] = -1
            # update leafnodes
            leafnodes = np.delete(leafnodes,parent_idx)
            leafnodes = np.append(leafnodes,children)
            # update availnodes
            availnodes = np.delete(availnodes,children_ids)
            # settings.m(0,availnodes)
            # settings.m(0,leafnodes)
            # settings.m(0,self.Adj)
            # settings.m(0,'-')
    else:
        # random sampling: each gene is independent with prob. p_indep,
        # otherwise it receives up to maxnpar parents
        self.Adj = np.zeros((self.dim,self.dim))
        for i in range(self.dim):
            indep = np.random.binomial(1,self.p_indep)
            if indep == 0:
                # this number includes parents (other variables)
                # and the variable itself, therefore its
                # self.maxnpar+2 in the following line
                nr = np.random.randint(1,self.maxnpar+2)
                j_par = np.random.choice(np.arange(0,self.dim),
                                         size=nr,replace=False)
                self.Adj[i,j_par] = 1
            else:
                self.Adj[i,i] = 1
    # self.Adj = np.abs(np.array(self.Adj_signed))
[ "def", "set_coupl", "(", "self", ",", "Coupl", "=", "None", ")", ":", "self", ".", "varNames", "=", "collections", ".", "OrderedDict", "(", "[", "(", "str", "(", "i", ")", ",", "i", ")", "for", "i", "in", "range", "(", "self", ".", "dim", ")", ...
Construct the coupling matrix (and adjacancy matrix) from predefined models or via sampling.
[ "Construct", "the", "coupling", "matrix", "(", "and", "adjacancy", "matrix", ")", "from", "predefined", "models", "or", "via", "sampling", "." ]
python
train
AguaClara/aguaclara
aguaclara/research/environmental_processes_analysis.py
https://github.com/AguaClara/aguaclara/blob/8dd4e734768b166a7fc2b60388a24df2f93783fc/aguaclara/research/environmental_processes_analysis.py#L402-L435
def Solver_AD_Pe(t_data, C_data, theta_guess, C_bar_guess): """Use non-linear least squares to fit the function Tracer_AD_Pe(t_seconds, t_bar, C_bar, Pe) to reactor data. :param t_data: Array of times with units :type t_data: float list :param C_data: Array of tracer concentration data with units :type C_data: float list :param theta_guess: Estimate of time spent in one CMFR with units. :type theta_guess: float :param C_bar_guess: Estimate of average concentration with units ((mass of tracer)/(volume of one CMFR)) :type C_bar_guess: float :return: tuple of * **theta** (*float*)- Residence time in seconds * **C_bar** (*float*) - Average concentration with same units as C_bar_guess * **Pe** (*float*) - Peclet number that best fits the data """ #remove time=0 data to eliminate divide by zero error t_data = t_data[1:-1] C_data = C_data[1:-1] C_unitless = C_data.magnitude C_units = str(C_bar_guess.units) t_seconds = (t_data.to(u.s)).magnitude # assume that a guess of 1 reactor in series is close enough to get a solution p0 = [theta_guess.to(u.s).magnitude, C_bar_guess.magnitude,5] popt, pcov = curve_fit(Tracer_AD_Pe, t_seconds, C_unitless, p0, bounds=(0.01,np.inf)) Solver_theta = popt[0]*u.s Solver_C_bar = popt[1]*u(C_units) Solver_Pe = popt[2] Reactor_results = collections.namedtuple('Reactor_results', 'theta C_bar Pe') AD = Reactor_results(theta=Solver_theta, C_bar=Solver_C_bar, Pe=Solver_Pe) return AD
[ "def", "Solver_AD_Pe", "(", "t_data", ",", "C_data", ",", "theta_guess", ",", "C_bar_guess", ")", ":", "#remove time=0 data to eliminate divide by zero error", "t_data", "=", "t_data", "[", "1", ":", "-", "1", "]", "C_data", "=", "C_data", "[", "1", ":", "-", ...
Use non-linear least squares to fit the function Tracer_AD_Pe(t_seconds, t_bar, C_bar, Pe) to reactor data. :param t_data: Array of times with units :type t_data: float list :param C_data: Array of tracer concentration data with units :type C_data: float list :param theta_guess: Estimate of time spent in one CMFR with units. :type theta_guess: float :param C_bar_guess: Estimate of average concentration with units ((mass of tracer)/(volume of one CMFR)) :type C_bar_guess: float :return: tuple of * **theta** (*float*)- Residence time in seconds * **C_bar** (*float*) - Average concentration with same units as C_bar_guess * **Pe** (*float*) - Peclet number that best fits the data
[ "Use", "non", "-", "linear", "least", "squares", "to", "fit", "the", "function", "Tracer_AD_Pe", "(", "t_seconds", "t_bar", "C_bar", "Pe", ")", "to", "reactor", "data", "." ]
python
train
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/functionprofiler.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/functionprofiler.py#L144-L152
def browseprofilegui(profilelog):
    ''' Browse interactively a profile log in GUI using RunSnakeRun and SquareMap '''
    # Import lazily: runsnakerun needs wxPython, which may be unavailable.
    # RunSnakeRun is only used for GUI visualisation, never for profiling
    # itself (pstats remains available for console browsing).
    from runsnakerun import runsnake
    viewer = runsnake.RunSnakeRunApp(0)
    viewer.OnInit(profilelog)
    viewer.MainLoop()
[ "def", "browseprofilegui", "(", "profilelog", ")", ":", "from", "runsnakerun", "import", "runsnake", "# runsnakerun needs wxPython lib, if it's not available then we can pass if we don't want a GUI. RunSnakeRun is only used for GUI visualisation, not for profiling (and you can still use pstats f...
Browse interactively a profile log in GUI using RunSnakeRun and SquareMap
[ "Browse", "interactively", "a", "profile", "log", "in", "GUI", "using", "RunSnakeRun", "and", "SquareMap" ]
python
train
bitesofcode/projexui
projexui/widgets/xtreewidget/xtreewidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xtreewidget/xtreewidget.py#L2038-L2068
def startDrag( self, supportedActions ):
    """
    Starts a new drag event for this tree widget.  Overloading from the
    default QTreeWidget class to define a better pixmap option when
    dragging many items.

    :param      supportedActions | <QtCore.Qt.DragActions>
    """
    if ( not self.useDragPixmaps() ):
        return super(XTreeWidget, self).startDrag(supportedActions)

    # BUG FIX: the original used filter(), whose result on Python 3 is a
    # lazy filter object that is always truthy, so the empty-selection
    # guard below could never fire.  A list comprehension behaves the
    # same on both Python 2 and 3.
    items = [item for item in self.selectedItems()
             if item.flags() & QtCore.Qt.ItemIsDragEnabled]
    if ( not items ):
        return

    data = self.mimeData(items)
    if ( not data ):
        return

    # choose the pixmap based on how many items are being dragged
    if ( len(items) > 1 ):
        pixmap = self.dragMultiPixmap()
    else:
        pixmap = self.dragSinglePixmap()

    # create the drag event
    drag = QtGui.QDrag(self)
    drag.setMimeData(data)
    drag.setPixmap(pixmap)
    drag.exec_(supportedActions, QtCore.Qt.MoveAction)
[ "def", "startDrag", "(", "self", ",", "supportedActions", ")", ":", "if", "(", "not", "self", ".", "useDragPixmaps", "(", ")", ")", ":", "return", "super", "(", "XTreeWidget", ",", "self", ")", ".", "startDrag", "(", "supportedActions", ")", "filt", "=",...
Starts a new drag event for this tree widget. Overloading from the default QTreeWidget class to define a better pixmap option when dragging many items. :param supportedActions | <QtCore.Qt.DragActions>
[ "Starts", "a", "new", "drag", "event", "for", "this", "tree", "widget", ".", "Overloading", "from", "the", "default", "QTreeWidget", "class", "to", "define", "a", "better", "pixmap", "option", "when", "dragging", "many", "items", ".", ":", "param", "supporte...
python
train
praekeltfoundation/seed-message-sender
message_sender/tasks.py
https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/tasks.py#L118-L124
def fire_metric(metric_name, metric_value):
    """ Fires a metric using the MetricsApiClient """
    # Metrics are always reported as floats, keyed by their name.
    value = float(metric_value)
    metric_client.fire_metrics(**{metric_name: value})
    return "Fired metric <{}> with value <{}>".format(metric_name, value)
[ "def", "fire_metric", "(", "metric_name", ",", "metric_value", ")", ":", "metric_value", "=", "float", "(", "metric_value", ")", "metric", "=", "{", "metric_name", ":", "metric_value", "}", "metric_client", ".", "fire_metrics", "(", "*", "*", "metric", ")", ...
Fires a metric using the MetricsApiClient
[ "Fires", "a", "metric", "using", "the", "MetricsApiClient" ]
python
train
jplusplus/statscraper
statscraper/scrapers/CranesScraper.py
https://github.com/jplusplus/statscraper/blob/932ec048b23d15b3dbdaf829facc55fd78ec0109/statscraper/scrapers/CranesScraper.py#L18-L29
def _fetch_dimensions(self, dataset):
    """ Declaring available dimensions like this is not mandatory,
    but nice, especially if they differ from dataset to dataset.

    If you are using a built in datatype, you can specify the dialect
    you are expecting, to have values normalized. This scraper will
    look for Swedish month names (e.g. 'Januari'), but return them
    according to the Statscraper standard ('january').
    """
    # Day of month, then month (normalized from Swedish names), then year.
    yield Dimension(u"date", label="Day of the month")
    yield Dimension(u"month", dialect="swedish", datatype="month")
    yield Dimension(u"year", datatype="year")
[ "def", "_fetch_dimensions", "(", "self", ",", "dataset", ")", ":", "yield", "Dimension", "(", "u\"date\"", ",", "label", "=", "\"Day of the month\"", ")", "yield", "Dimension", "(", "u\"month\"", ",", "datatype", "=", "\"month\"", ",", "dialect", "=", "\"swedi...
Declaring available dimensions like this is not mandatory, but nice, especially if they differ from dataset to dataset. If you are using a built in datatype, you can specify the dialect you are expecting, to have values normalized. This scraper will look for Swedish month names (e.g. 'Januari'), but return them according to the Statscraper standard ('january').
[ "Declaring", "available", "dimensions", "like", "this", "is", "not", "mandatory", "but", "nice", "especially", "if", "they", "differ", "from", "dataset", "to", "dataset", "." ]
python
train
jupyter-widgets/jupyterlab-sidecar
setupbase.py
https://github.com/jupyter-widgets/jupyterlab-sidecar/blob/8889d09f1a0933e2cbee06d4874f720b075b29e8/setupbase.py#L295-L308
def recursive_mtime(path, newest=True):
    """Gets the newest/oldest mtime for all files in a directory.

    Parameters
    ----------
    path : str
        A file or directory path.  For a file, its own mtime is returned.
    newest : bool
        If True return the newest mtime found, otherwise the oldest.

    Returns
    -------
    The extreme mtime, or None if the directory contains no files.
    """
    if os.path.isfile(path):
        return mtime(path)
    current_extreme = None
    for dirname, dirnames, filenames in os.walk(path, topdown=False):
        for filename in filenames:
            mt = mtime(pjoin(dirname, filename))
            # BUG FIX: the previous `(current_extreme or mt)` test treated a
            # legitimate mtime of exactly 0 as "unset"; use an explicit
            # None check instead.
            if current_extreme is None:
                current_extreme = mt
            elif newest and mt >= current_extreme:
                current_extreme = mt
            elif not newest and mt <= current_extreme:
                current_extreme = mt
    return current_extreme
[ "def", "recursive_mtime", "(", "path", ",", "newest", "=", "True", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "return", "mtime", "(", "path", ")", "current_extreme", "=", "None", "for", "dirname", ",", "dirnames", ",", "...
Gets the newest/oldest mtime for all files in a directory.
[ "Gets", "the", "newest", "/", "oldest", "mtime", "for", "all", "files", "in", "a", "directory", "." ]
python
test
saltstack/salt
salt/modules/virt.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/virt.py#L3937-L4016
def revert_snapshot(name, vm_snapshot=None, cleanup=False, **kwargs):
    '''
    Revert snapshot to the previous from current (if available) or to the specific.

    :param name: domain name
    :param vm_snapshot: name of the snapshot to revert
    :param cleanup: Remove all newer than reverted snapshots. Values: True or False (default False).
    :param connection: libvirt connection URI, overriding defaults

        .. versionadded:: 2019.2.0
    :param username: username to connect with, overriding defaults

        .. versionadded:: 2019.2.0
    :param password: password to connect with, overriding defaults

        .. versionadded:: 2019.2.0

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' virt.revert <domain>
        salt '*' virt.revert <domain> <snapshot>
    '''
    ret = dict()
    conn = __get_conn(**kwargs)
    domain = _get_domain(conn, name)
    snapshots = domain.listAllSnapshots()

    # Sort snapshots newest-first by their creation timestamp.
    _snapshots = list()
    for snap_obj in snapshots:
        _snapshots.append({'idx': _parse_snapshot_description(snap_obj, unix_time=True)['created'],
                           'ptr': snap_obj})
    snapshots = [w_ptr['ptr'] for w_ptr in sorted(_snapshots, key=lambda item: item['idx'], reverse=True)]
    del _snapshots

    if not snapshots:
        conn.close()
        raise CommandExecutionError('No snapshots found')
    elif len(snapshots) == 1:
        conn.close()
        raise CommandExecutionError('Cannot revert to itself: only one snapshot is available.')

    # Pick either the named snapshot, or (by default) the one just before
    # the currently active snapshot.
    snap = None
    for p_snap in snapshots:
        if not vm_snapshot:
            if p_snap.isCurrent() and snapshots[snapshots.index(p_snap) + 1:]:
                snap = snapshots[snapshots.index(p_snap) + 1:][0]
                break
        elif p_snap.getName() == vm_snapshot:
            snap = p_snap
            break

    if not snap:
        conn.close()
        # BUG FIX: the original referenced an undefined name `snapshot`
        # here, which raised NameError instead of the intended message.
        raise CommandExecutionError(
            vm_snapshot and 'Snapshot "{0}" not found'.format(vm_snapshot) or 'No more previous snapshots available')
    elif snap.isCurrent():
        conn.close()
        raise CommandExecutionError('Cannot revert to the currently running snapshot.')

    domain.revertToSnapshot(snap)
    ret['reverted'] = snap.getName()

    if cleanup:
        # Delete every snapshot newer than the one we reverted to.
        delete = list()
        for p_snap in snapshots:
            if p_snap.getName() != snap.getName():
                delete.append(p_snap.getName())
                p_snap.delete()
            else:
                break
        ret['deleted'] = delete
    else:
        ret['deleted'] = 'N/A'

    conn.close()

    return ret
[ "def", "revert_snapshot", "(", "name", ",", "vm_snapshot", "=", "None", ",", "cleanup", "=", "False", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "dict", "(", ")", "conn", "=", "__get_conn", "(", "*", "*", "kwargs", ")", "domain", "=", "_get_domai...
Revert snapshot to the previous from current (if available) or to the specific. :param name: domain name :param vm_snapshot: name of the snapshot to revert :param cleanup: Remove all newer than reverted snapshots. Values: True or False (default False). :param connection: libvirt connection URI, overriding defaults .. versionadded:: 2019.2.0 :param username: username to connect with, overriding defaults .. versionadded:: 2019.2.0 :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' virt.revert <domain> salt '*' virt.revert <domain> <snapshot>
[ "Revert", "snapshot", "to", "the", "previous", "from", "current", "(", "if", "available", ")", "or", "to", "the", "specific", "." ]
python
train
saltstack/salt
salt/modules/zcbuildout.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/zcbuildout.py#L468-L553
def upgrade_bootstrap(directory='.',
                      onlyif=None,
                      unless=None,
                      runas=None,
                      env=(),
                      offline=False,
                      buildout_ver=None):
    '''
    Upgrade current bootstrap.py with the last released one.

    Indeed, when we first run a buildout, a common source of problem
    is to have a locally stale bootstrap, we just try to grab a new copy

    directory
        directory to execute in

    offline
        are we executing buildout in offline mode

    buildout_ver
        forcing to use a specific buildout version (1 | 2)

    onlyif
        Only execute cmd if statement on the host return 0

    unless
        Do not execute cmd if statement on the host return 0

    CLI Example:

    .. code-block:: bash

        salt '*' buildout.upgrade_bootstrap /srv/mybuildout
    '''
    if buildout_ver:
        booturl = _URL_VERSIONS[buildout_ver]
    else:
        buildout_ver = _get_buildout_ver(directory)
        booturl = _get_bootstrap_url(directory)
    LOG.debug('Using {0}'.format(booturl))  # pylint: disable=str-format-in-logging
    # try to download an up-to-date bootstrap
    # set defaulttimeout
    # and add possible content
    directory = os.path.abspath(directory)
    b_py = os.path.join(directory, 'bootstrap.py')
    comment = ''
    # BUG FIX: initialise before the try block -- if reading the current
    # bootstrap raised, the except handler below hit a NameError when
    # checking `oldcontent`.
    oldcontent = ''
    try:
        oldcontent = _get_bootstrap_content(directory)
        dbuild = _dot_buildout(directory)
        data = oldcontent
        updated = False
        dled = False
        if not offline:
            try:
                if not os.path.isdir(dbuild):
                    os.makedirs(dbuild)
                # only try to download once per buildout checkout
                with salt.utils.files.fopen(os.path.join(
                        dbuild,
                        '{0}.updated_bootstrap'.format(buildout_ver))):
                    pass
            except (OSError, IOError):
                LOG.info('Bootstrap updated from repository')
                data = _urlopen(booturl).read()
                updated = True
                dled = True
        # ensure the bootstrap sets a socket timeout so it cannot hang
        if 'socket.setdefaulttimeout' not in data:
            updated = True
            ldata = data.splitlines()
            ldata.insert(1, 'import socket;socket.setdefaulttimeout(2)')
            data = '\n'.join(ldata)
        if updated:
            comment = 'Bootstrap updated'
            with salt.utils.files.fopen(b_py, 'w') as fic:
                fic.write(salt.utils.stringutils.to_str(data))
        if dled:
            # marker file so we do not re-download for this checkout
            with salt.utils.files.fopen(os.path.join(dbuild,
                                                     '{0}.updated_bootstrap'.format(
                                                         buildout_ver)), 'w') as afic:
                afic.write('foo')
    except (OSError, IOError):
        # best effort: restore the previous bootstrap if we managed to read it
        if oldcontent:
            with salt.utils.files.fopen(b_py, 'w') as fic:
                fic.write(salt.utils.stringutils.to_str(oldcontent))

    return {'comment': comment}
[ "def", "upgrade_bootstrap", "(", "directory", "=", "'.'", ",", "onlyif", "=", "None", ",", "unless", "=", "None", ",", "runas", "=", "None", ",", "env", "=", "(", ")", ",", "offline", "=", "False", ",", "buildout_ver", "=", "None", ")", ":", "if", ...
Upgrade current bootstrap.py with the last released one. Indeed, when we first run a buildout, a common source of problem is to have a locally stale bootstrap, we just try to grab a new copy directory directory to execute in offline are we executing buildout in offline mode buildout_ver forcing to use a specific buildout version (1 | 2) onlyif Only execute cmd if statement on the host return 0 unless Do not execute cmd if statement on the host return 0 CLI Example: .. code-block:: bash salt '*' buildout.upgrade_bootstrap /srv/mybuildout
[ "Upgrade", "current", "bootstrap", ".", "py", "with", "the", "last", "released", "one", "." ]
python
train
Alignak-monitoring/alignak
alignak/objects/item.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/item.py#L1092-L1101
def old_properties_names_to_new(self):  # pragma: no cover, never called
    """Convert old Nagios2 names to Nagios3 new names

    Delegates the conversion to every item and template held by this
    container.

    TODO: still useful?

    :return: None
    """
    # Walk items first, then templates, converting each in turn.
    everything = list(self.items.values()) + list(self.templates.values())
    for element in everything:
        element.old_properties_names_to_new()
[ "def", "old_properties_names_to_new", "(", "self", ")", ":", "# pragma: no cover, never called", "for", "i", "in", "itertools", ".", "chain", "(", "iter", "(", "list", "(", "self", ".", "items", ".", "values", "(", ")", ")", ")", ",", "iter", "(", "list", ...
Convert old Nagios2 names to Nagios3 new names TODO: still useful? :return: None
[ "Convert", "old", "Nagios2", "names", "to", "Nagios3", "new", "names" ]
python
train
tnkteja/myhelp
virtualEnvironment/lib/python2.7/site-packages/pip/vcs/git.py
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/pip/vcs/git.py#L143-L160
def get_refs(self, location):
    """Return map of named refs (branches or tags) to commit hashes."""
    output = call_subprocess([self.cmd, 'show-ref'],
                             show_stdout=False, cwd=location)
    refs = {}
    # remote branches, local branches and tags are all collected
    prefixes = ('refs/remotes/', 'refs/heads/', 'refs/tags/')
    for line in output.strip().splitlines():
        commit, ref = line.split(' ', 1)
        ref = ref.strip()
        for prefix in prefixes:
            if ref.startswith(prefix):
                refs[ref[len(prefix):]] = commit.strip()
                break
    return refs
[ "def", "get_refs", "(", "self", ",", "location", ")", ":", "output", "=", "call_subprocess", "(", "[", "self", ".", "cmd", ",", "'show-ref'", "]", ",", "show_stdout", "=", "False", ",", "cwd", "=", "location", ")", "rv", "=", "{", "}", "for", "line",...
Return map of named refs (branches or tags) to commit hashes.
[ "Return", "map", "of", "named", "refs", "(", "branches", "or", "tags", ")", "to", "commit", "hashes", "." ]
python
test
geertj/gruvi
lib/gruvi/callbacks.py
https://github.com/geertj/gruvi/blob/1d77ca439600b6ea7a19aa1ee85dca0f3be3f3f8/lib/gruvi/callbacks.py#L66-L79
def pop_callback(obj):
    """Pop a single callback."""
    pending = obj._callbacks
    if not pending:
        return
    if isinstance(pending, Node):
        # Exactly one callback is registered; it is stored inline.
        entry = pending
        obj._callbacks = None
    else:
        # Multiple callbacks live in a linked list; detach the head and
        # clear the slot once the list is exhausted.
        entry = pending.first
        pending.remove(entry)
        if not pending:
            obj._callbacks = None
    return entry.data, entry.extra
[ "def", "pop_callback", "(", "obj", ")", ":", "callbacks", "=", "obj", ".", "_callbacks", "if", "not", "callbacks", ":", "return", "if", "isinstance", "(", "callbacks", ",", "Node", ")", ":", "node", "=", "callbacks", "obj", ".", "_callbacks", "=", "None"...
Pop a single callback.
[ "Pop", "a", "single", "callback", "." ]
python
train
csparpa/pyowm
pyowm/weatherapi25/location.py
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/weatherapi25/location.py#L140-L160
def _to_DOM(self): """ Dumps object data to a fully traversable DOM representation of the object. :returns: a ``xml.etree.Element`` object """ root_node = ET.Element("location") name_node = ET.SubElement(root_node, "name") name_node.text = self._name coords_node = ET.SubElement(root_node, "coordinates") lon_node = ET.SubElement(coords_node, "lon") lon_node.text = str(self._lon) lat_node = ET.SubElement(coords_node, "lat") lat_node.text = str(self._lat) id_node = ET.SubElement(root_node, "ID") id_node.text = str(self._ID) country_node = ET.SubElement(root_node, "country") country_node.text = self._country return root_node
[ "def", "_to_DOM", "(", "self", ")", ":", "root_node", "=", "ET", ".", "Element", "(", "\"location\"", ")", "name_node", "=", "ET", ".", "SubElement", "(", "root_node", ",", "\"name\"", ")", "name_node", ".", "text", "=", "self", ".", "_name", "coords_nod...
Dumps object data to a fully traversable DOM representation of the object. :returns: a ``xml.etree.Element`` object
[ "Dumps", "object", "data", "to", "a", "fully", "traversable", "DOM", "representation", "of", "the", "object", "." ]
python
train
pip-services3-python/pip-services3-commons-python
pip_services3_commons/errors/ApplicationException.py
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/errors/ApplicationException.py#L217-L234
def wrap_exception(exception, cause): """ Wraps another exception into specified application exception object. If original exception is of ApplicationException type it is returned without changes. Otherwise the original error is set as a cause to specified ApplicationException object. :param exception: an ApplicationException object to wrap the cause :param cause: an original error object :return: an original or newly created ApplicationException """ if isinstance(cause, ApplicationException): return cause exception.with_cause(cause) return exception
[ "def", "wrap_exception", "(", "exception", ",", "cause", ")", ":", "if", "isinstance", "(", "cause", ",", "ApplicationException", ")", ":", "return", "cause", "exception", ".", "with_cause", "(", "cause", ")", "return", "exception" ]
Wraps another exception into specified application exception object. If original exception is of ApplicationException type it is returned without changes. Otherwise the original error is set as a cause to specified ApplicationException object. :param exception: an ApplicationException object to wrap the cause :param cause: an original error object :return: an original or newly created ApplicationException
[ "Wraps", "another", "exception", "into", "specified", "application", "exception", "object", "." ]
python
train
skggm/skggm
inverse_covariance/quic_graph_lasso.py
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/quic_graph_lasso.py#L308-L359
def fit(self, X, y=None, **fit_params): """Fits the inverse covariance model according to the given training data and parameters. Parameters ----------- X : 2D ndarray, shape (n_features, n_features) Input data. Returns ------- self """ # quic-specific outputs self.opt_ = None self.cputime_ = None self.iters_ = None self.duality_gap_ = None # these must be updated upon self.fit() self.sample_covariance_ = None self.lam_scale_ = None self.is_fitted_ = False self.path_ = _validate_path(self.path) X = check_array(X, ensure_min_features=2, estimator=self) X = as_float_array(X, copy=False, force_all_finite=False) self.init_coefs(X) if self.method == "quic": ( self.precision_, self.covariance_, self.opt_, self.cputime_, self.iters_, self.duality_gap_, ) = quic( self.sample_covariance_, self.lam * self.lam_scale_, mode=self.mode, tol=self.tol, max_iter=self.max_iter, Theta0=self.Theta0, Sigma0=self.Sigma0, path=self.path_, msg=self.verbose, ) else: raise NotImplementedError("Only method='quic' has been implemented.") self.is_fitted_ = True return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ",", "*", "*", "fit_params", ")", ":", "# quic-specific outputs", "self", ".", "opt_", "=", "None", "self", ".", "cputime_", "=", "None", "self", ".", "iters_", "=", "None", "self", ".", "...
Fits the inverse covariance model according to the given training data and parameters. Parameters ----------- X : 2D ndarray, shape (n_features, n_features) Input data. Returns ------- self
[ "Fits", "the", "inverse", "covariance", "model", "according", "to", "the", "given", "training", "data", "and", "parameters", "." ]
python
train
cloudmesh/cloudmesh-common
cloudmesh/common/Printer.py
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/Printer.py#L229-L300
def dict_table(cls, d, order=None, header=None, sort_keys=True, show_none="", max_width=40): """prints a pretty table from an dict of dicts :param d: A a dict with dicts of the same type. Each key will be a column :param order: The order in which the columns are printed. The order is specified by the key names of the dict. :param header: The Header of each of the columns :type header: A list of string :param sort_keys: Key(s) of the dict to be used for sorting. This specify the column(s) in the table for sorting. :type sort_keys: string or a tuple of string (for sorting with multiple columns) :param show_none: prints None if True for None values otherwise "" :type show_none: bool :param max_width: maximum width for a cell :type max_width: int """ def _keys(): all_keys = [] for e in d: keys = d[e].keys() all_keys.extend(keys) return list(set(all_keys)) # noinspection PyBroadException def _get(item, key): try: tmp = str(d[item][key]) if tmp == "None": tmp = show_none except: tmp = ' ' return tmp if d is None or d == {}: return None if order is None: order = _keys() if header is None and order is not None: header = order elif header is None: header = _keys() x = PrettyTable(header) x.max_width = max_width if sort_keys: if type(sort_keys) is str: sorted_list = sorted(d, key=lambda x: d[x][sort_keys]) elif type(sort_keys) == tuple: sorted_list = sorted(d, key=lambda x: tuple( [d[x][sort_key] for sort_key in sort_keys])) else: sorted_list = d else: sorted_list = d for element in sorted_list: values = [] for key in order: values.append(_get(element, key)) x.add_row(values) x.align = "l" return x
[ "def", "dict_table", "(", "cls", ",", "d", ",", "order", "=", "None", ",", "header", "=", "None", ",", "sort_keys", "=", "True", ",", "show_none", "=", "\"\"", ",", "max_width", "=", "40", ")", ":", "def", "_keys", "(", ")", ":", "all_keys", "=", ...
prints a pretty table from an dict of dicts :param d: A a dict with dicts of the same type. Each key will be a column :param order: The order in which the columns are printed. The order is specified by the key names of the dict. :param header: The Header of each of the columns :type header: A list of string :param sort_keys: Key(s) of the dict to be used for sorting. This specify the column(s) in the table for sorting. :type sort_keys: string or a tuple of string (for sorting with multiple columns) :param show_none: prints None if True for None values otherwise "" :type show_none: bool :param max_width: maximum width for a cell :type max_width: int
[ "prints", "a", "pretty", "table", "from", "an", "dict", "of", "dicts", ":", "param", "d", ":", "A", "a", "dict", "with", "dicts", "of", "the", "same", "type", ".", "Each", "key", "will", "be", "a", "column", ":", "param", "order", ":", "The", "orde...
python
train
HewlettPackard/python-hpOneView
hpOneView/resources/facilities/racks.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/facilities/racks.py#L91-L102
def get_device_topology(self, id_or_uri): """ Retrieves the topology information for the rack resource specified by ID or URI. Args: id_or_uri: Can be either the resource ID or the resource URI. Return: dict: Device topology. """ uri = self._client.build_uri(id_or_uri) + "/deviceTopology" return self._client.get(uri)
[ "def", "get_device_topology", "(", "self", ",", "id_or_uri", ")", ":", "uri", "=", "self", ".", "_client", ".", "build_uri", "(", "id_or_uri", ")", "+", "\"/deviceTopology\"", "return", "self", ".", "_client", ".", "get", "(", "uri", ")" ]
Retrieves the topology information for the rack resource specified by ID or URI. Args: id_or_uri: Can be either the resource ID or the resource URI. Return: dict: Device topology.
[ "Retrieves", "the", "topology", "information", "for", "the", "rack", "resource", "specified", "by", "ID", "or", "URI", "." ]
python
train
pycontribs/pyrax
pyrax/cloudloadbalancers.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/cloudloadbalancers.py#L1169-L1180
def get_device(self): """ Returns a reference to the device that is represented by this node. Returns None if no such device can be determined. """ addr = self.address servers = [server for server in pyrax.cloudservers.list() if addr in server.networks.get("private", "")] try: return servers[0] except IndexError: return None
[ "def", "get_device", "(", "self", ")", ":", "addr", "=", "self", ".", "address", "servers", "=", "[", "server", "for", "server", "in", "pyrax", ".", "cloudservers", ".", "list", "(", ")", "if", "addr", "in", "server", ".", "networks", ".", "get", "("...
Returns a reference to the device that is represented by this node. Returns None if no such device can be determined.
[ "Returns", "a", "reference", "to", "the", "device", "that", "is", "represented", "by", "this", "node", ".", "Returns", "None", "if", "no", "such", "device", "can", "be", "determined", "." ]
python
train
mikeboers/Flask-ACL
flask_acl/extension.py
https://github.com/mikeboers/Flask-ACL/blob/7339b89f96ad8686d1526e25c138244ad912e12d/flask_acl/extension.py#L101-L114
def permission_set(self, name, func=None): """Define a new permission set (directly, or as a decorator). E.g.:: @authz.permission_set('HTTP') def is_http_perm(perm): return perm.startswith('http.') """ if func is None: return functools.partial(self.predicate, name) self.permission_sets[name] = func return func
[ "def", "permission_set", "(", "self", ",", "name", ",", "func", "=", "None", ")", ":", "if", "func", "is", "None", ":", "return", "functools", ".", "partial", "(", "self", ".", "predicate", ",", "name", ")", "self", ".", "permission_sets", "[", "name",...
Define a new permission set (directly, or as a decorator). E.g.:: @authz.permission_set('HTTP') def is_http_perm(perm): return perm.startswith('http.')
[ "Define", "a", "new", "permission", "set", "(", "directly", "or", "as", "a", "decorator", ")", "." ]
python
train
kakwa/ldapcherry
ldapcherry/ppolicy/__init__.py
https://github.com/kakwa/ldapcherry/blob/b5e7cb6a44065abc30d164e72981b3713a172dda/ldapcherry/ppolicy/__init__.py#L42-L59
def get_param(self, param, default=None): """ Get a parameter in config (handle default value) :param param: name of the parameter to recover :type param: string :param default: the default value, raises an exception if param is not in configuration and default is None (which is the default value). :type default: string or None :rtype: the value of the parameter or the default value if not set in configuration """ if param in self.config: return self.config[param] elif default is not None: return default else: raise MissingParameter('ppolicy', param)
[ "def", "get_param", "(", "self", ",", "param", ",", "default", "=", "None", ")", ":", "if", "param", "in", "self", ".", "config", ":", "return", "self", ".", "config", "[", "param", "]", "elif", "default", "is", "not", "None", ":", "return", "default...
Get a parameter in config (handle default value) :param param: name of the parameter to recover :type param: string :param default: the default value, raises an exception if param is not in configuration and default is None (which is the default value). :type default: string or None :rtype: the value of the parameter or the default value if not set in configuration
[ "Get", "a", "parameter", "in", "config", "(", "handle", "default", "value", ")" ]
python
train
ihgazni2/elist
elist/elist.py
https://github.com/ihgazni2/elist/blob/8c07b5029bda34ead60ce10335ceb145f209263c/elist/elist.py#L4768-L4779
def diff_indexes(l1,l2): ''' from elist.elist import * l1 = [1,2,3,5] l2 = [0,2,3,4] diff_indexes(l1,l2) ''' rslt = [] for i in range(0,l1.__len__()): if(l1[i]!=l2[i]): rslt.append(i) return(rslt)
[ "def", "diff_indexes", "(", "l1", ",", "l2", ")", ":", "rslt", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "l1", ".", "__len__", "(", ")", ")", ":", "if", "(", "l1", "[", "i", "]", "!=", "l2", "[", "i", "]", ")", ":", "rslt",...
from elist.elist import * l1 = [1,2,3,5] l2 = [0,2,3,4] diff_indexes(l1,l2)
[ "from", "elist", ".", "elist", "import", "*", "l1", "=", "[", "1", "2", "3", "5", "]", "l2", "=", "[", "0", "2", "3", "4", "]", "diff_indexes", "(", "l1", "l2", ")" ]
python
valid
ChristianTremblay/BAC0
BAC0/core/devices/Device.py
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/devices/Device.py#L484-L527
def _buildPointList(self): """ Upon connection to build the device point list and properties. """ try: self.properties.pss.value = self.properties.network.read( "{} device {} protocolServicesSupported".format( self.properties.address, self.properties.device_id ) ) except NoResponseFromController as error: self._log.error("Controller not found, aborting. ({})".format(error)) return ("Not Found", "", [], []) except SegmentationNotSupported as error: self._log.warning("Segmentation not supported") self.segmentation_supported = False self.new_state(DeviceDisconnected) self.properties.name = self.properties.network.read( "{} device {} objectName".format( self.properties.address, self.properties.device_id ) ) self._log.info( "Device {}:[{}] found... building points list".format( self.properties.device_id, self.properties.name ) ) try: self.properties.objects_list, self.points, self.trendlogs = self._discoverPoints( self.custom_object_list ) if self.properties.pollDelay > 0: self.poll(delay=self.properties.pollDelay) except NoResponseFromController as error: self._log.error("Cannot retrieve object list, disconnecting...") self.segmentation_supported = False self.new_state(DeviceDisconnected) except IndexError as error: self._log.error("Device creation failed... disconnecting") self.new_state(DeviceDisconnected)
[ "def", "_buildPointList", "(", "self", ")", ":", "try", ":", "self", ".", "properties", ".", "pss", ".", "value", "=", "self", ".", "properties", ".", "network", ".", "read", "(", "\"{} device {} protocolServicesSupported\"", ".", "format", "(", "self", ".",...
Upon connection to build the device point list and properties.
[ "Upon", "connection", "to", "build", "the", "device", "point", "list", "and", "properties", "." ]
python
train
apple/turicreate
src/unity/python/turicreate/toolkits/_image_feature_extractor.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_image_feature_extractor.py#L185-L224
def get_coreml_model(self, mode = 'classifier'): """ Parameters ---------- mode: str ('classifier', 'regressor' or None) Mode of the converted coreml model. When mode = 'classifier', a NeuralNetworkClassifier spec will be constructed. When mode = 'regressor', a NeuralNetworkRegressor spec will be constructed. Returns ------- model: MLModel Return the underlying model. """ import mxnet as _mx from ._mxnet import _mxnet_utils from ._mxnet._mxnet_to_coreml import _mxnet_converter (sym, arg_params, aux_params) = self.ptModel.mxmodel fe_mxmodel = self.ptModel.mxmodel if self.ptModel.is_feature_layer_final: feature_layer_size = self.ptModel.feature_layer_size num_dummy_classes = 10 feature_layer_sym = sym.get_children()[0] fc_symbol = _mx.symbol.FullyConnected(feature_layer_sym, num_hidden=num_dummy_classes) prob = _mx.symbol.SoftmaxOutput(fc_symbol, name = sym.name, attr=sym.attr_dict()[sym.name]) arg_params['%s_weight' % fc_symbol.name] = _mx.ndarray.zeros((num_dummy_classes, feature_layer_size)) arg_params['%s_bias' % fc_symbol.name] = _mx.ndarray.zeros((num_dummy_classes)) fe_mxmodel = (prob, arg_params, aux_params) model = MXFeatureExtractor._get_mx_module(fe_mxmodel, self.data_layer, self.ptModel.output_layer, _mxnet_utils.get_mxnet_context(max_devices=1), self.image_shape, label_layer = self.ptModel.label_layer) preprocessor_args = {'image_input_names': [self.data_layer]} return _mxnet_converter.convert(model, mode = 'classifier', input_shape=[(self.data_layer, (1, ) + self.image_shape)], class_labels = list(map(str, range(self.ptModel.num_classes))), preprocessor_args = preprocessor_args, verbose = False)
[ "def", "get_coreml_model", "(", "self", ",", "mode", "=", "'classifier'", ")", ":", "import", "mxnet", "as", "_mx", "from", ".", "_mxnet", "import", "_mxnet_utils", "from", ".", "_mxnet", ".", "_mxnet_to_coreml", "import", "_mxnet_converter", "(", "sym", ",", ...
Parameters ---------- mode: str ('classifier', 'regressor' or None) Mode of the converted coreml model. When mode = 'classifier', a NeuralNetworkClassifier spec will be constructed. When mode = 'regressor', a NeuralNetworkRegressor spec will be constructed. Returns ------- model: MLModel Return the underlying model.
[ "Parameters", "----------", "mode", ":", "str", "(", "classifier", "regressor", "or", "None", ")", "Mode", "of", "the", "converted", "coreml", "model", ".", "When", "mode", "=", "classifier", "a", "NeuralNetworkClassifier", "spec", "will", "be", "constructed", ...
python
train
DolphDev/ezurl
ezurl/__init__.py
https://github.com/DolphDev/ezurl/blob/deaa755db2c0532c237f9eb4192aa51c7e928a07/ezurl/__init__.py#L123-L129
def page(self, *args): """ Pages takes *args and adds pages in order """ for arg in args: self.__pages__.append(arg) return self
[ "def", "page", "(", "self", ",", "*", "args", ")", ":", "for", "arg", "in", "args", ":", "self", ".", "__pages__", ".", "append", "(", "arg", ")", "return", "self" ]
Pages takes *args and adds pages in order
[ "Pages", "takes", "*", "args", "and", "adds", "pages", "in", "order" ]
python
train
rueckstiess/mtools
mtools/util/grouping.py
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/grouping.py#L23-L50
def add(self, item, group_by=None): """General purpose class to group items by certain criteria.""" key = None if not group_by: group_by = self.group_by if group_by: # if group_by is a function, use it with item as argument if hasattr(group_by, '__call__'): key = group_by(item) # if the item has attribute of group_by as string, use that as key elif isinstance(group_by, str) and hasattr(item, group_by): key = getattr(item, group_by) else: key = None # try to match str(item) with regular expression if isinstance(group_by, str): match = re.search(group_by, str(item)) if match: if len(match.groups()) > 0: key = match.group(1) else: key = match.group() self.groups.setdefault(key, list()).append(item)
[ "def", "add", "(", "self", ",", "item", ",", "group_by", "=", "None", ")", ":", "key", "=", "None", "if", "not", "group_by", ":", "group_by", "=", "self", ".", "group_by", "if", "group_by", ":", "# if group_by is a function, use it with item as argument", "if"...
General purpose class to group items by certain criteria.
[ "General", "purpose", "class", "to", "group", "items", "by", "certain", "criteria", "." ]
python
train
tcalmant/ipopo
pelix/shell/parser.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/shell/parser.py#L684-L697
def var_unset(session, name): """ Unsets the given variable """ name = name.strip() try: session.unset(name) except KeyError: session.write_line("Unknown variable: {0}", name) return False else: session.write_line("Variable {0} unset.", name) return None
[ "def", "var_unset", "(", "session", ",", "name", ")", ":", "name", "=", "name", ".", "strip", "(", ")", "try", ":", "session", ".", "unset", "(", "name", ")", "except", "KeyError", ":", "session", ".", "write_line", "(", "\"Unknown variable: {0}\"", ",",...
Unsets the given variable
[ "Unsets", "the", "given", "variable" ]
python
train
bfrog/whizzer
whizzer/rpc/msgpackrpc.py
https://github.com/bfrog/whizzer/blob/a1e43084b3ac8c1f3fb4ada081777cdbf791fd77/whizzer/rpc/msgpackrpc.py#L126-L142
def request(self, msgtype, msgid, method, params=[]): """Handle an incoming call request.""" result = None error = None exception = None try: result = self.dispatch.call(method, params) except Exception as e: error = (e.__class__.__name__, str(e)) exception = e if isinstance(result, Deferred): result.add_callback(self._result, msgid) result.add_errback(self._error, msgid) else: self.send_response(msgid, error, result)
[ "def", "request", "(", "self", ",", "msgtype", ",", "msgid", ",", "method", ",", "params", "=", "[", "]", ")", ":", "result", "=", "None", "error", "=", "None", "exception", "=", "None", "try", ":", "result", "=", "self", ".", "dispatch", ".", "cal...
Handle an incoming call request.
[ "Handle", "an", "incoming", "call", "request", "." ]
python
train
delfick/harpoon
harpoon/option_spec/harpoon_specs.py
https://github.com/delfick/harpoon/blob/a2d39311d6127b7da2e15f40468bf320d598e461/harpoon/option_spec/harpoon_specs.py#L142-L157
def wait_condition_spec(self): """Spec for a wait_condition block""" from harpoon.option_spec import image_objs formatted_string = formatted(string_spec(), formatter=MergedOptionStringFormatter) return create_spec(image_objs.WaitCondition , harpoon = formatted(overridden("{harpoon}"), formatter=MergedOptionStringFormatter) , timeout = defaulted(integer_spec(), 300) , wait_between_attempts = defaulted(float_spec(), 5) , greps = optional_spec(dictof(formatted_string, formatted_string)) , command = optional_spec(listof(formatted_string)) , port_open = optional_spec(listof(integer_spec())) , file_value = optional_spec(dictof(formatted_string, formatted_string)) , curl_result = optional_spec(dictof(formatted_string, formatted_string)) , file_exists = optional_spec(listof(formatted_string)) )
[ "def", "wait_condition_spec", "(", "self", ")", ":", "from", "harpoon", ".", "option_spec", "import", "image_objs", "formatted_string", "=", "formatted", "(", "string_spec", "(", ")", ",", "formatter", "=", "MergedOptionStringFormatter", ")", "return", "create_spec"...
Spec for a wait_condition block
[ "Spec", "for", "a", "wait_condition", "block" ]
python
train
potash/drain
drain/aggregation.py
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/aggregation.py#L121-L151
def select(self, df, args, inplace=False): """ After joining, selects a subset of arguments df: the result of a call to self.join(left) args: a collcetion of arguments to select, as accepted by drain.util.list_expand: - a tuple corresponding to concat_args, e.g. [('District', '12h'), ('Distict', '24h')] - a dict to be exanded into the above, e.g. {'District': ['12h', '24h']} """ if self.prefix is None: raise ValueError('Cannot do selection on an Aggregation without a prefix') # run list_expand and ensure all args to tuples for validation args = [tuple(i) for i in util.list_expand(args)] # check that the args passed are valid for a in args: has_arg = False for argument in self.arguments: if a == tuple(argument[k] for k in self.concat_args): has_arg = True break if not has_arg: raise ValueError('Invalid argument for selection: %s' % str(a)) df = data.select_features( df, exclude=[self.prefix + '_.*'], include=map(lambda a: self.args_prefix(a) + '.*', args), inplace=inplace) return df
[ "def", "select", "(", "self", ",", "df", ",", "args", ",", "inplace", "=", "False", ")", ":", "if", "self", ".", "prefix", "is", "None", ":", "raise", "ValueError", "(", "'Cannot do selection on an Aggregation without a prefix'", ")", "# run list_expand and ensure...
After joining, selects a subset of arguments df: the result of a call to self.join(left) args: a collcetion of arguments to select, as accepted by drain.util.list_expand: - a tuple corresponding to concat_args, e.g. [('District', '12h'), ('Distict', '24h')] - a dict to be exanded into the above, e.g. {'District': ['12h', '24h']}
[ "After", "joining", "selects", "a", "subset", "of", "arguments", "df", ":", "the", "result", "of", "a", "call", "to", "self", ".", "join", "(", "left", ")", "args", ":", "a", "collcetion", "of", "arguments", "to", "select", "as", "accepted", "by", "dra...
python
train
bcbio/bcbio-nextgen
bcbio/variation/mutect.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/mutect.py#L191-L217
def _SID_call_prep(align_bams, items, ref_file, assoc_files, region=None, out_file=None): """Preparation work for SomaticIndelDetector. """ base_config = items[0]["config"] for x in align_bams: bam.index(x, base_config) params = ["-R", ref_file, "-T", "SomaticIndelDetector", "-U", "ALLOW_N_CIGAR_READS"] # Limit per base read start count to between 200-10000, i.e. from any base # can no more 10000 new reads begin. # Further, limit maxNumberOfReads accordingly, otherwise SID discards # windows for high coverage panels. paired = vcfutils.get_paired_bams(align_bams, items) params += ["--read_filter", "NotPrimaryAlignment"] params += ["-I:tumor", paired.tumor_bam] min_af = float(get_in(paired.tumor_config, ("algorithm", "min_allele_fraction"), 10)) / 100.0 if paired.normal_bam is not None: params += ["-I:normal", paired.normal_bam] # notice there must be at least 4 reads of coverage in normal params += ["--filter_expressions", "T_COV<6||N_COV<4||T_INDEL_F<%s||T_INDEL_CF<0.7" % min_af] else: params += ["--unpaired"] params += ["--filter_expressions", "COV<6||INDEL_F<%s||INDEL_CF<0.7" % min_af] if region: params += ["-L", bamprep.region_to_gatk(region), "--interval_set_rule", "INTERSECTION"] return params
[ "def", "_SID_call_prep", "(", "align_bams", ",", "items", ",", "ref_file", ",", "assoc_files", ",", "region", "=", "None", ",", "out_file", "=", "None", ")", ":", "base_config", "=", "items", "[", "0", "]", "[", "\"config\"", "]", "for", "x", "in", "al...
Preparation work for SomaticIndelDetector.
[ "Preparation", "work", "for", "SomaticIndelDetector", "." ]
python
train
tensorflow/probability
tensorflow_probability/python/optimizer/differential_evolution.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/differential_evolution.py#L605-L671
def _binary_crossover(population, population_size, mutants, crossover_prob, seed): """Performs recombination by binary crossover for the current population. Let v_i denote the i'th component of the member v and m_i the corresponding component of the mutant vector corresponding to v. Then the crossed over vector w_i is determined by setting w_i = (m_i with probability=crossover_prob else v_i). In addition, DE requires that at least one of the components is crossed over (otherwise we end up with no change). This is done by choosing on index say k randomly where a force crossover is performed (i.e. w_k = m_k). This is the scheme implemented in this function. Args: population: A Python list of `Tensor`s where each `Tensor` in the list must be of rank at least 1 and all the elements must have a common first dimension. The base population to cross over. population_size: A scalar integer `Tensor`. The number of elements in the population (i.e. size of the first dimension of any member of `population`). mutants: A Python list of `Tensor`s with the same structure as `population`. The mutated population. crossover_prob: A postive real scalar `Tensor` bounded above by 1.0. The probability of a crossover being performed for each axis. seed: `int` or None. The random seed for this `Op`. If `None`, no seed is applied. Returns: A list of `Tensor`s of the same structure, dtype and shape as `population`. The recombined population. 
""" sizes = [tf.cast(tf.size(input=x), dtype=tf.float64) for x in population] seed_stream = distributions.SeedStream(seed, salt='binary_crossover') force_crossover_group = distributions.Categorical(sizes).sample( [population_size, 1], seed=seed_stream()) recombinants = [] for i, population_part in enumerate(population): pop_part_flat = tf.reshape(population_part, [population_size, -1]) mutant_part_flat = tf.reshape(mutants[i], [population_size, -1]) part_size = tf.size(input=population_part) // population_size force_crossovers = tf.one_hot( tf.random.uniform([population_size], minval=0, maxval=part_size, dtype=tf.int32, seed=seed_stream()), part_size, on_value=True, off_value=False, dtype=tf.bool) # Tensor of shape [population_size, size] group_mask = tf.math.equal(force_crossover_group, i) force_crossovers &= group_mask do_binary_crossover = tf.random.uniform( [population_size, part_size], dtype=crossover_prob.dtype.base_dtype, seed=seed_stream()) < crossover_prob do_binary_crossover |= force_crossovers recombinant_flat = tf.where( do_binary_crossover, x=mutant_part_flat, y=pop_part_flat) recombinant = tf.reshape(recombinant_flat, tf.shape(input=population_part)) recombinants.append(recombinant) return recombinants
[ "def", "_binary_crossover", "(", "population", ",", "population_size", ",", "mutants", ",", "crossover_prob", ",", "seed", ")", ":", "sizes", "=", "[", "tf", ".", "cast", "(", "tf", ".", "size", "(", "input", "=", "x", ")", ",", "dtype", "=", "tf", "...
Performs recombination by binary crossover for the current population. Let v_i denote the i'th component of the member v and m_i the corresponding component of the mutant vector corresponding to v. Then the crossed over vector w_i is determined by setting w_i = (m_i with probability=crossover_prob else v_i). In addition, DE requires that at least one of the components is crossed over (otherwise we end up with no change). This is done by choosing on index say k randomly where a force crossover is performed (i.e. w_k = m_k). This is the scheme implemented in this function. Args: population: A Python list of `Tensor`s where each `Tensor` in the list must be of rank at least 1 and all the elements must have a common first dimension. The base population to cross over. population_size: A scalar integer `Tensor`. The number of elements in the population (i.e. size of the first dimension of any member of `population`). mutants: A Python list of `Tensor`s with the same structure as `population`. The mutated population. crossover_prob: A postive real scalar `Tensor` bounded above by 1.0. The probability of a crossover being performed for each axis. seed: `int` or None. The random seed for this `Op`. If `None`, no seed is applied. Returns: A list of `Tensor`s of the same structure, dtype and shape as `population`. The recombined population.
[ "Performs", "recombination", "by", "binary", "crossover", "for", "the", "current", "population", "." ]
python
test
hydraplatform/hydra-base
hydra_base/db/model.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/db/model.py#L254-L266
def check_user(self, user_id): """ Check whether this user can read this dataset """ if self.hidden == 'N': return True for owner in self.owners: if int(owner.user_id) == int(user_id): if owner.view == 'Y': return True return False
[ "def", "check_user", "(", "self", ",", "user_id", ")", ":", "if", "self", ".", "hidden", "==", "'N'", ":", "return", "True", "for", "owner", "in", "self", ".", "owners", ":", "if", "int", "(", "owner", ".", "user_id", ")", "==", "int", "(", "user_i...
Check whether this user can read this dataset
[ "Check", "whether", "this", "user", "can", "read", "this", "dataset" ]
python
train
timkpaine/pyEX
pyEX/stocks.py
https://github.com/timkpaine/pyEX/blob/91cf751dafdb208a0c8b5377945e5808b99f94ba/pyEX/stocks.py#L92-L127
def batchDF(symbols, fields=None, range_='1m', last=10, token='', version=''): '''Batch several data requests into one invocation https://iexcloud.io/docs/api/#batch-requests Args: symbols (list); List of tickers to request fields (list); List of fields to request range_ (string); Date range for chart last (int); token (string); Access token version (string); API version Returns: DataFrame: results in json ''' x = batch(symbols, fields, range_, last, token, version) ret = {} if isinstance(symbols, str): for field in x.keys(): ret[field] = _MAPPING[field](x[field]) else: for symbol in x.keys(): for field in x[symbol].keys(): if field not in ret: ret[field] = pd.DataFrame() dat = x[symbol][field] dat = _MAPPING[field](dat) dat['symbol'] = symbol ret[field] = pd.concat([ret[field], dat], sort=True) return ret
[ "def", "batchDF", "(", "symbols", ",", "fields", "=", "None", ",", "range_", "=", "'1m'", ",", "last", "=", "10", ",", "token", "=", "''", ",", "version", "=", "''", ")", ":", "x", "=", "batch", "(", "symbols", ",", "fields", ",", "range_", ",", ...
Batch several data requests into one invocation https://iexcloud.io/docs/api/#batch-requests Args: symbols (list); List of tickers to request fields (list); List of fields to request range_ (string); Date range for chart last (int); token (string); Access token version (string); API version Returns: DataFrame: results in json
[ "Batch", "several", "data", "requests", "into", "one", "invocation" ]
python
valid
thiagopbueno/rddl2tf
rddl2tf/fluentshape.py
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluentshape.py#L90-L125
def broadcast(cls, shape1: 'TensorFluentShape', shape2: 'TensorFluentShape') -> Tuple[Reshaping, Reshaping]: '''It broadcasts the fluent shapes if any input is in batch mode. It handles input shapes in different modes, expanding its dimensions if necessary. It outputs a tuple with new shapes. If no input shape is in batch mode, return (None, None). If an input shape does not need to be changed, return None. Args: shape1: A fluent's shape. shape2: A fluent's shape. Returns: A pair of new shapes. ''' reshape_1, reshape_2 = None, None if not (shape1._batch or shape2._batch): return reshape_1, reshape_2 size_1, size_2 = shape1.fluent_size, shape2.fluent_size size_diff = abs(size_1 - size_2) if size_diff == 0: return reshape_1, reshape_2 if size_2 > size_1 and not (size_1 == 0 and not shape1._batch): reshape_1 = [1] * size_diff + list(shape1.fluent_shape) if shape1._batch: reshape_1 = [shape1.batch_size] + reshape_1 elif size_1 > size_2 and not (size_2 == 0 and not shape2._batch): reshape_2 = [1] * size_diff + list(shape2.fluent_shape) if shape2._batch: reshape_2 = [shape2.batch_size] + reshape_2 return reshape_1, reshape_2
[ "def", "broadcast", "(", "cls", ",", "shape1", ":", "'TensorFluentShape'", ",", "shape2", ":", "'TensorFluentShape'", ")", "->", "Tuple", "[", "Reshaping", ",", "Reshaping", "]", ":", "reshape_1", ",", "reshape_2", "=", "None", ",", "None", "if", "not", "(...
It broadcasts the fluent shapes if any input is in batch mode. It handles input shapes in different modes, expanding its dimensions if necessary. It outputs a tuple with new shapes. If no input shape is in batch mode, return (None, None). If an input shape does not need to be changed, return None. Args: shape1: A fluent's shape. shape2: A fluent's shape. Returns: A pair of new shapes.
[ "It", "broadcasts", "the", "fluent", "shapes", "if", "any", "input", "is", "in", "batch", "mode", "." ]
python
train
Murali-group/halp
halp/algorithms/directed_paths.py
https://github.com/Murali-group/halp/blob/6eb27466ba84e2281e18f93b62aae5efb21ef8b3/halp/algorithms/directed_paths.py#L101-L187
def _x_visit(H, source_node, b_visit): """General form of the B-Visit algorithm, extended to also perform an implicit F-Visit if the b_visit flag is not set (providing better time/memory performance than explcitily taking the hypergraph's symmetric image and then performing the B-Visit on that). Refer to 'b_visit's or 'f_visit's documentation for more details. :param H: the hypergraph to perform the 'B-Visit' algorithm on. :param source_node: the initial node to begin traversal from. :param b_visit: boolean flag representing whether a B-Visit should be performed (vs an F-Visit). :returns: set -- nodes that were x-visited in this traversal. dict -- mapping from each node visited to the ID of the hyperedge that preceeded it in this traversal. dict -- mapping from each hyperedge ID to the node that preceeded it in this traversal. dict -- mapping from each node to an integer representing the cardinality of the path from the source node to that node. :raises: TypeError -- Algorithm only applicable to directed hypergraphs """ if not isinstance(H, DirectedHypergraph): raise TypeError("Algorithm only applicable to directed hypergraphs") # If the b_visit flag is set, perform a traditional B-Visit if b_visit: forward_star = H.get_forward_star hyperedge_tail = H.get_hyperedge_tail hyperedge_head = H.get_hyperedge_head # If the b_visit flag is not set, implicitly perform an F-Visit by # implicitly taking the symmetric image (what the 'else' statement # is for) and then performing a traditional B-Visit else: forward_star = H.get_backward_star hyperedge_tail = H.get_hyperedge_head hyperedge_head = H.get_hyperedge_tail node_set = H.get_node_set() # Pv keeps track of the ID of the hyperedge that directely # preceeded each node in the traversal Pv = {node: None for node in node_set} # v keeps track of the cardinality of the path from the source node to # any other B-connected node ('inf' cardinality for non-B-connected nodes) v = {node: float("inf") for node in node_set} 
v[source_node] = 0 hyperedge_id_set = H.get_hyperedge_id_set() # Pe keeps track of the node that directedly preceeded # each hyperedge in the traversal Pe = {hyperedge_id: None for hyperedge_id in hyperedge_id_set} # k keeps track of how many nodes in the tail of each hyperedge are # B-connected (when all nodes in a tail are B-connected, that hyperedge # can then be traversed) k = {hyperedge_id: 0 for hyperedge_id in hyperedge_id_set} # Explicitly tracks the set of B-visited nodes x_visited_nodes = set([source_node]) Q = Queue() Q.put(source_node) while not Q.empty(): current_node = Q.get() # At current_node, we can traverse each hyperedge in its forward star for hyperedge_id in forward_star(current_node): # Since we're arrived at a new node, we increment # k[hyperedge_id] to indicate that we've reached 1 new # node in this hyperedge's tail k[hyperedge_id] += 1 # Traverse this hyperedge only when we have reached all the nodes # in its tail (i.e., when k[hyperedge_id] == |T(hyperedge_id)|) if k[hyperedge_id] == len(hyperedge_tail(hyperedge_id)): Pe[hyperedge_id] = current_node # Traversing the hyperedge yields the set of head nodes of # the hyperedge; B-visit each head node for head_node in hyperedge_head(hyperedge_id): if head_node in x_visited_nodes: continue Pv[head_node] = hyperedge_id Q.put(head_node) v[head_node] = v[Pe[hyperedge_id]] + 1 x_visited_nodes.add(head_node) return x_visited_nodes, Pv, Pe, v
[ "def", "_x_visit", "(", "H", ",", "source_node", ",", "b_visit", ")", ":", "if", "not", "isinstance", "(", "H", ",", "DirectedHypergraph", ")", ":", "raise", "TypeError", "(", "\"Algorithm only applicable to directed hypergraphs\"", ")", "# If the b_visit flag is set,...
General form of the B-Visit algorithm, extended to also perform an implicit F-Visit if the b_visit flag is not set (providing better time/memory performance than explcitily taking the hypergraph's symmetric image and then performing the B-Visit on that). Refer to 'b_visit's or 'f_visit's documentation for more details. :param H: the hypergraph to perform the 'B-Visit' algorithm on. :param source_node: the initial node to begin traversal from. :param b_visit: boolean flag representing whether a B-Visit should be performed (vs an F-Visit). :returns: set -- nodes that were x-visited in this traversal. dict -- mapping from each node visited to the ID of the hyperedge that preceeded it in this traversal. dict -- mapping from each hyperedge ID to the node that preceeded it in this traversal. dict -- mapping from each node to an integer representing the cardinality of the path from the source node to that node. :raises: TypeError -- Algorithm only applicable to directed hypergraphs
[ "General", "form", "of", "the", "B", "-", "Visit", "algorithm", "extended", "to", "also", "perform", "an", "implicit", "F", "-", "Visit", "if", "the", "b_visit", "flag", "is", "not", "set", "(", "providing", "better", "time", "/", "memory", "performance", ...
python
train
wal-e/wal-e
wal_e/pep3143daemon/daemon.py
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/pep3143daemon/daemon.py#L309-L333
def close_filenos(preserve): """ Close unprotected file descriptors Close all open file descriptors that are not in preserve. If ulimit -nofile is "unlimited", all is defined filenos <= 4096, else all is <= the output of resource.getrlimit(). :param preserve: set with protected files :type preserve: set :return: None """ maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1] if maxfd == resource.RLIM_INFINITY: maxfd = 4096 for fileno in range(maxfd): if fileno not in preserve: try: os.close(fileno) except OSError as err: if not err.errno == errno.EBADF: raise DaemonError( 'Failed to close file descriptor {0}: {1}' .format(fileno, err))
[ "def", "close_filenos", "(", "preserve", ")", ":", "maxfd", "=", "resource", ".", "getrlimit", "(", "resource", ".", "RLIMIT_NOFILE", ")", "[", "1", "]", "if", "maxfd", "==", "resource", ".", "RLIM_INFINITY", ":", "maxfd", "=", "4096", "for", "fileno", "...
Close unprotected file descriptors Close all open file descriptors that are not in preserve. If ulimit -nofile is "unlimited", all is defined filenos <= 4096, else all is <= the output of resource.getrlimit(). :param preserve: set with protected files :type preserve: set :return: None
[ "Close", "unprotected", "file", "descriptors" ]
python
train
abau171/highfive
highfive/master.py
https://github.com/abau171/highfive/blob/07b3829331072035ab100d1d66deca3e8f3f372a/highfive/master.py#L193-L207
def close(self): """ Starts closing the HighFive master. The server will be closed and all queued job sets will be cancelled. """ if self._closed: return self._closed = True self._server.close() self._manager.close() for worker in self._workers: worker.close()
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "_closed", ":", "return", "self", ".", "_closed", "=", "True", "self", ".", "_server", ".", "close", "(", ")", "self", ".", "_manager", ".", "close", "(", ")", "for", "worker", "in", "self",...
Starts closing the HighFive master. The server will be closed and all queued job sets will be cancelled.
[ "Starts", "closing", "the", "HighFive", "master", ".", "The", "server", "will", "be", "closed", "and", "all", "queued", "job", "sets", "will", "be", "cancelled", "." ]
python
test
pandas-dev/pandas
pandas/core/dtypes/common.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L375-L405
def is_period(arr): """ Check whether an array-like is a periodical index. .. deprecated:: 0.24.0 Parameters ---------- arr : array-like The array-like to check. Returns ------- boolean Whether or not the array-like is a periodical index. Examples -------- >>> is_period([1, 2, 3]) False >>> is_period(pd.Index([1, 2, 3])) False >>> is_period(pd.PeriodIndex(["2017-01-01"], freq="D")) True """ warnings.warn("'is_period' is deprecated and will be removed in a future " "version. Use 'is_period_dtype' or is_period_arraylike' " "instead.", FutureWarning, stacklevel=2) return isinstance(arr, ABCPeriodIndex) or is_period_arraylike(arr)
[ "def", "is_period", "(", "arr", ")", ":", "warnings", ".", "warn", "(", "\"'is_period' is deprecated and will be removed in a future \"", "\"version. Use 'is_period_dtype' or is_period_arraylike' \"", "\"instead.\"", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "r...
Check whether an array-like is a periodical index. .. deprecated:: 0.24.0 Parameters ---------- arr : array-like The array-like to check. Returns ------- boolean Whether or not the array-like is a periodical index. Examples -------- >>> is_period([1, 2, 3]) False >>> is_period(pd.Index([1, 2, 3])) False >>> is_period(pd.PeriodIndex(["2017-01-01"], freq="D")) True
[ "Check", "whether", "an", "array", "-", "like", "is", "a", "periodical", "index", "." ]
python
train
ladybug-tools/ladybug
ladybug/wea.py
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/wea.py#L177-L216
def from_epw_file(cls, epwfile, timestep=1): """Create a wea object using the solar irradiance values in an epw file. Args: epwfile: Full path to epw weather file. timestep: An optional integer to set the number of time steps per hour. Default is 1 for one value per hour. Note that this input will only do a linear interpolation over the data in the EPW file. While such linear interpolations are suitable for most thermal simulations, where thermal lag "smooths over" the effect of momentary increases in solar energy, it is not recommended for daylight simulations, where momentary increases in solar energy can mean the difference between glare and visual comfort. """ is_leap_year = False # epw file is always for 8760 hours epw = EPW(epwfile) direct_normal, diffuse_horizontal = \ cls._get_data_collections(epw.direct_normal_radiation.values, epw.diffuse_horizontal_radiation.values, epw.metadata, 1, is_leap_year) if timestep != 1: print ("Note: timesteps greater than 1 on epw-generated Wea's \n" + "are suitable for thermal models but are not recommended \n" + "for daylight models.") # interpolate the data direct_normal = direct_normal.interpolate_to_timestep(timestep) diffuse_horizontal = diffuse_horizontal.interpolate_to_timestep(timestep) # create sunpath to check if the sun is up at a given timestep sp = Sunpath.from_location(epw.location) # add correct values to the emply data collection for i, dt in enumerate(cls._get_datetimes(timestep, is_leap_year)): # set irradiance values to 0 when the sun is not up sun = sp.calculate_sun_from_date_time(dt) if sun.altitude < 0: direct_normal[i] = 0 diffuse_horizontal[i] = 0 return cls(epw.location, direct_normal, diffuse_horizontal, timestep, is_leap_year)
[ "def", "from_epw_file", "(", "cls", ",", "epwfile", ",", "timestep", "=", "1", ")", ":", "is_leap_year", "=", "False", "# epw file is always for 8760 hours", "epw", "=", "EPW", "(", "epwfile", ")", "direct_normal", ",", "diffuse_horizontal", "=", "cls", ".", "...
Create a wea object using the solar irradiance values in an epw file. Args: epwfile: Full path to epw weather file. timestep: An optional integer to set the number of time steps per hour. Default is 1 for one value per hour. Note that this input will only do a linear interpolation over the data in the EPW file. While such linear interpolations are suitable for most thermal simulations, where thermal lag "smooths over" the effect of momentary increases in solar energy, it is not recommended for daylight simulations, where momentary increases in solar energy can mean the difference between glare and visual comfort.
[ "Create", "a", "wea", "object", "using", "the", "solar", "irradiance", "values", "in", "an", "epw", "file", "." ]
python
train
apache/incubator-mxnet
python/mxnet/executor_manager.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor_manager.py#L279-L282
def forward(self, is_train=False): """Perform a forward pass on each executor.""" for texec in self.train_execs: texec.forward(is_train=is_train)
[ "def", "forward", "(", "self", ",", "is_train", "=", "False", ")", ":", "for", "texec", "in", "self", ".", "train_execs", ":", "texec", ".", "forward", "(", "is_train", "=", "is_train", ")" ]
Perform a forward pass on each executor.
[ "Perform", "a", "forward", "pass", "on", "each", "executor", "." ]
python
train