repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
ericmjl/nxviz
nxviz/polcart.py
https://github.com/ericmjl/nxviz/blob/6ea5823a8030a686f165fbe37d7a04d0f037ecc9/nxviz/polcart.py#L25-L40
def to_polar(x, y, theta_units="radians"): """ Converts cartesian x, y to polar r, theta. """ assert theta_units in [ "radians", "degrees", ], "kwarg theta_units must specified in radians or degrees" theta = atan2(y, x) r = sqrt(x ** 2 + y ** 2) if theta_units == "degrees": theta = to_degrees(theta) return r, theta
[ "def", "to_polar", "(", "x", ",", "y", ",", "theta_units", "=", "\"radians\"", ")", ":", "assert", "theta_units", "in", "[", "\"radians\"", ",", "\"degrees\"", ",", "]", ",", "\"kwarg theta_units must specified in radians or degrees\"", "theta", "=", "atan2", "(",...
Converts cartesian x, y to polar r, theta.
[ "Converts", "cartesian", "x", "y", "to", "polar", "r", "theta", "." ]
python
train
i3visio/osrframework
osrframework/thirdparties/pipl_com/lib/fields.py
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/thirdparties/pipl_com/lib/fields.py#L762-L767
def from_dict(cls, d): """Extend Field.from_dict and also load the name from the dict.""" relationship = super(cls, cls).from_dict(d) if relationship.name is not None: relationship.name = Name.from_dict(relationship.name) return relationship
[ "def", "from_dict", "(", "cls", ",", "d", ")", ":", "relationship", "=", "super", "(", "cls", ",", "cls", ")", ".", "from_dict", "(", "d", ")", "if", "relationship", ".", "name", "is", "not", "None", ":", "relationship", ".", "name", "=", "Name", "...
Extend Field.from_dict and also load the name from the dict.
[ "Extend", "Field", ".", "from_dict", "and", "also", "load", "the", "name", "from", "the", "dict", "." ]
python
train
EliotBerriot/lifter
lifter/backends/base.py
https://github.com/EliotBerriot/lifter/blob/9b4394b476cddd952b2af9540affc03f2977163d/lifter/backends/base.py#L20-L31
def setup_fields(attrs): """ Collect all fields declared on the class and remove them from attrs """ fields = {} iterator = list(attrs.items()) for key, value in iterator: if not isinstance(value, Field): continue fields[key] = value del attrs[key] return fields
[ "def", "setup_fields", "(", "attrs", ")", ":", "fields", "=", "{", "}", "iterator", "=", "list", "(", "attrs", ".", "items", "(", ")", ")", "for", "key", ",", "value", "in", "iterator", ":", "if", "not", "isinstance", "(", "value", ",", "Field", ")...
Collect all fields declared on the class and remove them from attrs
[ "Collect", "all", "fields", "declared", "on", "the", "class", "and", "remove", "them", "from", "attrs" ]
python
train
stephantul/reach
reach/reach.py
https://github.com/stephantul/reach/blob/e5ed0cc895d17429e797c6d7dd57bce82ff00d5d/reach/reach.py#L358-L409
def threshold(self, items, threshold=.5, batch_size=100, show_progressbar=False, return_names=True): """ Return all items whose similarity is higher than threshold. Parameters ---------- items : list of objects or a single object. The items to get the most similar items to. threshold : float, optional, default .5 The radius within which to retrieve items. batch_size : int, optional, default 100. The batch size to use. 100 is a good default option. Increasing the batch size may increase the speed. show_progressbar : bool, optional, default False Whether to show a progressbar. return_names : bool, optional, default True Whether to return the item names, or just the distances. Returns ------- sim : array For each items in the input the num most similar items are returned in the form of (NAME, DISTANCE) tuples. If return_names is false, the returned list just contains distances. """ # This line allows users to input single items. # We used to rely on string identities, but we now also allow # anything hashable as keys. # Might fail if a list of passed items is also in the vocabulary. # but I can't think of cases when this would happen, and what # user expectations are. try: if items in self.items: items = [items] except TypeError: pass x = np.stack([self.norm_vectors[self.items[x]] for x in items]) result = self._threshold_batch(x, batch_size, threshold, show_progressbar, return_names) # list call consumes the generator. return [x[1:] for x in result]
[ "def", "threshold", "(", "self", ",", "items", ",", "threshold", "=", ".5", ",", "batch_size", "=", "100", ",", "show_progressbar", "=", "False", ",", "return_names", "=", "True", ")", ":", "# This line allows users to input single items.", "# We used to rely on str...
Return all items whose similarity is higher than threshold. Parameters ---------- items : list of objects or a single object. The items to get the most similar items to. threshold : float, optional, default .5 The radius within which to retrieve items. batch_size : int, optional, default 100. The batch size to use. 100 is a good default option. Increasing the batch size may increase the speed. show_progressbar : bool, optional, default False Whether to show a progressbar. return_names : bool, optional, default True Whether to return the item names, or just the distances. Returns ------- sim : array For each items in the input the num most similar items are returned in the form of (NAME, DISTANCE) tuples. If return_names is false, the returned list just contains distances.
[ "Return", "all", "items", "whose", "similarity", "is", "higher", "than", "threshold", "." ]
python
train
tylertreat/BigQuery-Python
bigquery/client.py
https://github.com/tylertreat/BigQuery-Python/blob/88d99de42d954d49fc281460068f0e95003da098/bigquery/client.py#L554-L572
def check_table(self, dataset, table, project_id=None): """Check to see if a table exists. Parameters ---------- dataset : str The dataset to check table : str The name of the table project_id: str, optional The project the table is in Returns ------- bool True if table exists, else False """ table = self.get_table(dataset, table, project_id) return bool(table)
[ "def", "check_table", "(", "self", ",", "dataset", ",", "table", ",", "project_id", "=", "None", ")", ":", "table", "=", "self", ".", "get_table", "(", "dataset", ",", "table", ",", "project_id", ")", "return", "bool", "(", "table", ")" ]
Check to see if a table exists. Parameters ---------- dataset : str The dataset to check table : str The name of the table project_id: str, optional The project the table is in Returns ------- bool True if table exists, else False
[ "Check", "to", "see", "if", "a", "table", "exists", "." ]
python
train
PMBio/limix-backup
limix/deprecated/utils/plot.py
https://github.com/PMBio/limix-backup/blob/1e201fdb5c694d0d5506f207f3de65d8ef66146c/limix/deprecated/utils/plot.py#L27-L111
def plot_manhattan(posCum,pv,chromBounds=None, thr=None,qv=None,lim=None,xticklabels=True, alphaNS=0.1,alphaS=0.5,colorNS='DarkBlue',colorS='Orange',plt=None,thr_plotting=None,labelS=None,labelNS=None): """ This script makes a manhattan plot ------------------------------------------- posCum cumulative position pv pvalues chromBounds chrom boundaries (optionally). If not supplied, everything will be plotted into a single chromosome qv qvalues if provided, threshold for significance is set on qvalues but pvalues are plotted thr threshold for significance default: 0.01 bonferroni correceted significance levels if qvs are not specified, or 0.01 on qvs if qvs specified lim top limit on y-axis if not provided, -1.2*log(pv.min()) is taken xticklabels if true, xtick labels are printed alphaNS transparency of non-significant SNPs alphaS transparency of significant SNPs plt matplotlib.axes.AxesSubplot, the target handle for this figure (otherwise current axes) thr_plotting plot only P-values that are smaller than thr_plotting to speed up plotting labelS optional plotting label (significant loci) labelNS optional plotting label (non significnat loci) """ if plt is None: plt = pl.gca() if thr==None: thr = 0.01/float(posCum.shape[0]) if lim==None: lim=-1.2*sp.log10(sp.minimum(pv.min(),thr)) if chromBounds is None: chromBounds = sp.array([[0,posCum.max()]]) else: chromBounds = sp.concatenate([chromBounds,sp.array([posCum.max()])]) n_chroms = chromBounds.shape[0] for chrom_i in range(0,n_chroms-1,2): pl.fill_between(posCum,0,lim,where=(posCum>chromBounds[chrom_i]) & (posCum<chromBounds[chrom_i+1]),facecolor='LightGray',linewidth=0,alpha=0.5) if thr_plotting is not None: if pv is not None: i_small = pv<thr_plotting elif qv is not None: i_small = qv<thr_plotting if qv is not None: qv = qv[i_small] if pv is not None: pv = pv[i_small] if posCum is not None: posCum=posCum[i_small] if qv==None: Isign = pv<thr else: Isign = qv<thr 
pl.plot(posCum[~Isign],-sp.log10(pv[~Isign]),'.',color=colorNS,ms=5,alpha=alphaNS,label=labelNS) pl.plot(posCum[Isign], -sp.log10(pv[Isign]), '.',color=colorS,ms=5,alpha=alphaS,label=labelS) if qv is not None: pl.plot([0,posCum.max()],[-sp.log10(thr),-sp.log10(thr)],'--',color='Gray') pl.ylim(0,lim) pl.ylabel('-log$_{10}$pv') pl.xlim(0,posCum.max()) xticks = sp.array([chromBounds[i:i+2].mean() for i in range(chromBounds.shape[0]-1)]) plt.set_xticks(xticks) pl.xticks(fontsize=6) if xticklabels: plt.set_xticklabels(sp.arange(1,n_chroms+1)) pl.xlabel('genetic position') else: plt.set_xticklabels([]) plt.spines["right"].set_visible(False) plt.spines["top"].set_visible(False) plt.xaxis.set_ticks_position('bottom') plt.yaxis.set_ticks_position('left')
[ "def", "plot_manhattan", "(", "posCum", ",", "pv", ",", "chromBounds", "=", "None", ",", "thr", "=", "None", ",", "qv", "=", "None", ",", "lim", "=", "None", ",", "xticklabels", "=", "True", ",", "alphaNS", "=", "0.1", ",", "alphaS", "=", "0.5", ",...
This script makes a manhattan plot ------------------------------------------- posCum cumulative position pv pvalues chromBounds chrom boundaries (optionally). If not supplied, everything will be plotted into a single chromosome qv qvalues if provided, threshold for significance is set on qvalues but pvalues are plotted thr threshold for significance default: 0.01 bonferroni correceted significance levels if qvs are not specified, or 0.01 on qvs if qvs specified lim top limit on y-axis if not provided, -1.2*log(pv.min()) is taken xticklabels if true, xtick labels are printed alphaNS transparency of non-significant SNPs alphaS transparency of significant SNPs plt matplotlib.axes.AxesSubplot, the target handle for this figure (otherwise current axes) thr_plotting plot only P-values that are smaller than thr_plotting to speed up plotting labelS optional plotting label (significant loci) labelNS optional plotting label (non significnat loci)
[ "This", "script", "makes", "a", "manhattan", "plot", "-------------------------------------------", "posCum", "cumulative", "position", "pv", "pvalues", "chromBounds", "chrom", "boundaries", "(", "optionally", ")", ".", "If", "not", "supplied", "everything", "will", "...
python
train
saltstack/salt
salt/pillar/cmd_json.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/cmd_json.py#L20-L31
def ext_pillar(minion_id, # pylint: disable=W0613 pillar, # pylint: disable=W0613 command): ''' Execute a command and read the output as JSON ''' try: command = command.replace('%s', minion_id) return salt.utils.json.loads(__salt__['cmd.run'](command)) except Exception: log.critical('JSON data from %s failed to parse', command) return {}
[ "def", "ext_pillar", "(", "minion_id", ",", "# pylint: disable=W0613", "pillar", ",", "# pylint: disable=W0613", "command", ")", ":", "try", ":", "command", "=", "command", ".", "replace", "(", "'%s'", ",", "minion_id", ")", "return", "salt", ".", "utils", "."...
Execute a command and read the output as JSON
[ "Execute", "a", "command", "and", "read", "the", "output", "as", "JSON" ]
python
train
nugget/python-insteonplm
insteonplm/tools.py
https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/tools.py#L334-L338
def kpl_on(self, address, group): """Get the status of a KPL button.""" addr = Address(address) device = self.plm.devices[addr.id] device.states[group].on()
[ "def", "kpl_on", "(", "self", ",", "address", ",", "group", ")", ":", "addr", "=", "Address", "(", "address", ")", "device", "=", "self", ".", "plm", ".", "devices", "[", "addr", ".", "id", "]", "device", ".", "states", "[", "group", "]", ".", "o...
Get the status of a KPL button.
[ "Get", "the", "status", "of", "a", "KPL", "button", "." ]
python
train
secdev/scapy
scapy/contrib/macsec.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/contrib/macsec.py#L180-L207
def decrypt(self, orig_pkt, assoclen=None): """decrypt a MACsec frame for this Secure Association""" hdr = copy.deepcopy(orig_pkt) del hdr[MACsec].payload pktlen = len(orig_pkt) if self.send_sci: hdrlen = NOSCI_LEN + SCI_LEN else: hdrlen = NOSCI_LEN if assoclen is None or not self.do_encrypt: if self.do_encrypt: assoclen = hdrlen else: assoclen = pktlen - self.icvlen iv = self.make_iv(hdr) assoc, ct, icv = MACsecSA.split_pkt(orig_pkt, assoclen, self.icvlen) decryptor = Cipher( algorithms.AES(self.key), modes.GCM(iv, icv), backend=default_backend() ).decryptor() decryptor.authenticate_additional_data(assoc) pt = assoc[hdrlen:assoclen] pt += decryptor.update(ct) pt += decryptor.finalize() hdr[MACsec].type = struct.unpack('!H', pt[0:2])[0] hdr[MACsec].payload = Raw(pt[2:]) return hdr
[ "def", "decrypt", "(", "self", ",", "orig_pkt", ",", "assoclen", "=", "None", ")", ":", "hdr", "=", "copy", ".", "deepcopy", "(", "orig_pkt", ")", "del", "hdr", "[", "MACsec", "]", ".", "payload", "pktlen", "=", "len", "(", "orig_pkt", ")", "if", "...
decrypt a MACsec frame for this Secure Association
[ "decrypt", "a", "MACsec", "frame", "for", "this", "Secure", "Association" ]
python
train
saltstack/salt
salt/modules/napalm_mod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/napalm_mod.py#L555-L578
def netmiko_multi_call(*methods, **kwargs): ''' .. versionadded:: 2019.2.0 Execute a list of arbitrary Netmiko methods, passing the authentication details from the existing NAPALM connection. methods List of dictionaries with the following keys: - ``name``: the name of the Netmiko function to invoke. - ``args``: list of arguments to send to the ``name`` method. - ``kwargs``: key-value arguments to send to the ``name`` method. CLI Example: .. code-block:: bash salt '*' napalm.netmiko_multi_call "{'name': 'send_command', 'args': ['show version']}" "{'name': 'send_command', 'args': ['show interfaces']}" ''' netmiko_kwargs = netmiko_args() kwargs.update(netmiko_kwargs) return __salt__['netmiko.multi_call'](*methods, **kwargs)
[ "def", "netmiko_multi_call", "(", "*", "methods", ",", "*", "*", "kwargs", ")", ":", "netmiko_kwargs", "=", "netmiko_args", "(", ")", "kwargs", ".", "update", "(", "netmiko_kwargs", ")", "return", "__salt__", "[", "'netmiko.multi_call'", "]", "(", "*", "meth...
.. versionadded:: 2019.2.0 Execute a list of arbitrary Netmiko methods, passing the authentication details from the existing NAPALM connection. methods List of dictionaries with the following keys: - ``name``: the name of the Netmiko function to invoke. - ``args``: list of arguments to send to the ``name`` method. - ``kwargs``: key-value arguments to send to the ``name`` method. CLI Example: .. code-block:: bash salt '*' napalm.netmiko_multi_call "{'name': 'send_command', 'args': ['show version']}" "{'name': 'send_command', 'args': ['show interfaces']}"
[ "..", "versionadded", "::", "2019", ".", "2", ".", "0" ]
python
train
drhagen/parsita
parsita/util.py
https://github.com/drhagen/parsita/blob/d97414a05541f48231381f607d1d2e6b50781d39/parsita/util.py#L47-L69
def unsplat(f: Callable[[Iterable], A]) -> Callable[..., A]: """Convert a function taking a single iterable argument into a function taking multiple arguments. Args: f: Any function taking a single iterable argument Returns: A function that accepts multiple arguments. Each argument of this function is passed as an element of an iterable to ``f``. Example: $ def f(a): $ return a[0] + a[1] + a[2] $ $ f([1, 2, 3]) # 6 $ g = unsplat(f) $ g(1, 2, 3) # 6 """ def unsplatted(*args): return f(args) return unsplatted
[ "def", "unsplat", "(", "f", ":", "Callable", "[", "[", "Iterable", "]", ",", "A", "]", ")", "->", "Callable", "[", "...", ",", "A", "]", ":", "def", "unsplatted", "(", "*", "args", ")", ":", "return", "f", "(", "args", ")", "return", "unsplatted"...
Convert a function taking a single iterable argument into a function taking multiple arguments. Args: f: Any function taking a single iterable argument Returns: A function that accepts multiple arguments. Each argument of this function is passed as an element of an iterable to ``f``. Example: $ def f(a): $ return a[0] + a[1] + a[2] $ $ f([1, 2, 3]) # 6 $ g = unsplat(f) $ g(1, 2, 3) # 6
[ "Convert", "a", "function", "taking", "a", "single", "iterable", "argument", "into", "a", "function", "taking", "multiple", "arguments", "." ]
python
test
RazerM/bucketcache
shovel/version.py
https://github.com/RazerM/bucketcache/blob/8d9b163b73da8c498793cce2f22f6a7cbe524d94/shovel/version.py#L75-L82
def tag(): """Tag current version.""" if check_unstaged(): raise EnvironmentError('There are staged changes, abort.') with open(str(INIT_PATH)) as f: metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", f.read())) version = metadata['version'] check_output(['git', 'tag', version, '-m', 'Release v{}'.format(version)])
[ "def", "tag", "(", ")", ":", "if", "check_unstaged", "(", ")", ":", "raise", "EnvironmentError", "(", "'There are staged changes, abort.'", ")", "with", "open", "(", "str", "(", "INIT_PATH", ")", ")", "as", "f", ":", "metadata", "=", "dict", "(", "re", "...
Tag current version.
[ "Tag", "current", "version", "." ]
python
train
Karaage-Cluster/karaage
karaage/datastores/__init__.py
https://github.com/Karaage-Cluster/karaage/blob/2f4c8b4e2d728b3fcbb151160c49000f1c04f5c9/karaage/datastores/__init__.py#L265-L271
def add_accounts_to_group(accounts_query, group): """ Add accounts to group. """ query = accounts_query.filter(date_deleted__isnull=True) for account in query: add_account_to_group(account, group)
[ "def", "add_accounts_to_group", "(", "accounts_query", ",", "group", ")", ":", "query", "=", "accounts_query", ".", "filter", "(", "date_deleted__isnull", "=", "True", ")", "for", "account", "in", "query", ":", "add_account_to_group", "(", "account", ",", "group...
Add accounts to group.
[ "Add", "accounts", "to", "group", "." ]
python
train
hotzenklotz/pybeerxml
pybeerxml/hop.py
https://github.com/hotzenklotz/pybeerxml/blob/e9cf8d6090b1e01e5bbb101e255792b134affbe0/pybeerxml/hop.py#L18-L32
def bitterness(self, ibu_method, early_og, batch_size): "Calculate bitterness based on chosen method" if ibu_method == "tinseth": bitterness = 1.65 * math.pow(0.000125, early_og - 1.0) * ((1 - math.pow(math.e, -0.04 * self.time)) / 4.15) * ((self.alpha / 100.0 * self.amount * 1000000) / batch_size) * self.utilization_factor() elif ibu_method == "rager": utilization = 18.11 + 13.86 * math.tanh((self.time - 31.32) / 18.27) adjustment = max(0, (early_og - 1.050) / 0.2) bitterness = self.amount * 100 * utilization * self.utilization_factor() * self.alpha / (batch_size * (1 + adjustment)) else: raise Exception("Unknown IBU method %s!" % ibu_method) return bitterness
[ "def", "bitterness", "(", "self", ",", "ibu_method", ",", "early_og", ",", "batch_size", ")", ":", "if", "ibu_method", "==", "\"tinseth\"", ":", "bitterness", "=", "1.65", "*", "math", ".", "pow", "(", "0.000125", ",", "early_og", "-", "1.0", ")", "*", ...
Calculate bitterness based on chosen method
[ "Calculate", "bitterness", "based", "on", "chosen", "method" ]
python
train
jaraco/keyring
keyring/backend.py
https://github.com/jaraco/keyring/blob/71c798378e365286b7cc03c06e4d7d24c7de8fc4/keyring/backend.py#L120-L136
def get_credential(self, service, username): """Gets the username and password for the service. Returns a Credential instance. The *username* argument is optional and may be omitted by the caller or ignored by the backend. Callers must use the returned username. """ # The default implementation requires a username here. if username is not None: password = self.get_password(service, username) if password is not None: return credentials.SimpleCredential( username, password, ) return None
[ "def", "get_credential", "(", "self", ",", "service", ",", "username", ")", ":", "# The default implementation requires a username here.", "if", "username", "is", "not", "None", ":", "password", "=", "self", ".", "get_password", "(", "service", ",", "username", ")...
Gets the username and password for the service. Returns a Credential instance. The *username* argument is optional and may be omitted by the caller or ignored by the backend. Callers must use the returned username.
[ "Gets", "the", "username", "and", "password", "for", "the", "service", ".", "Returns", "a", "Credential", "instance", "." ]
python
valid
fhs/pyhdf
pyhdf/V.py
https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/V.py#L822-L849
def delete(self, num_name): """Delete from the HDF file the vgroup identified by its reference number or its name. Args:: num_name either the reference number or the name of the vgroup to delete Returns:: None C library equivalent : Vdelete """ try: vg = self.attach(num_name, 1) except HDF4Error as msg: raise HDF4Error("delete: no such vgroup") # ATTENTION: The HDF documentation says that the vgroup_id # is passed to Vdelete(). This is wrong. # The vgroup reference number must instead be passed. refnum = vg._refnum vg.detach() _checkErr('delete', _C.Vdelete(self._hdf_inst._id, refnum), "error deleting vgroup")
[ "def", "delete", "(", "self", ",", "num_name", ")", ":", "try", ":", "vg", "=", "self", ".", "attach", "(", "num_name", ",", "1", ")", "except", "HDF4Error", "as", "msg", ":", "raise", "HDF4Error", "(", "\"delete: no such vgroup\"", ")", "# ATTENTION: The ...
Delete from the HDF file the vgroup identified by its reference number or its name. Args:: num_name either the reference number or the name of the vgroup to delete Returns:: None C library equivalent : Vdelete
[ "Delete", "from", "the", "HDF", "file", "the", "vgroup", "identified", "by", "its", "reference", "number", "or", "its", "name", "." ]
python
train
galaxy-genome-annotation/python-apollo
arrow/commands/annotations/update_dbxref.py
https://github.com/galaxy-genome-annotation/python-apollo/blob/2bc9991302abe4402ec2885dcaac35915475b387/arrow/commands/annotations/update_dbxref.py#L25-L32
def cli(ctx, feature_id, old_db, old_accession, new_db, new_accession, organism="", sequence=""): """Delete a dbxref from a feature Output: A standard apollo feature dictionary ({"features": [{...}]}) """ return ctx.gi.annotations.update_dbxref(feature_id, old_db, old_accession, new_db, new_accession, organism=organism, sequence=sequence)
[ "def", "cli", "(", "ctx", ",", "feature_id", ",", "old_db", ",", "old_accession", ",", "new_db", ",", "new_accession", ",", "organism", "=", "\"\"", ",", "sequence", "=", "\"\"", ")", ":", "return", "ctx", ".", "gi", ".", "annotations", ".", "update_dbxr...
Delete a dbxref from a feature Output: A standard apollo feature dictionary ({"features": [{...}]})
[ "Delete", "a", "dbxref", "from", "a", "feature" ]
python
train
joshspeagle/dynesty
priors.py
https://github.com/joshspeagle/dynesty/blob/9e482aafeb5cf84bedb896fa6f07a761d917983e/priors.py#L70-L77
def update(self, **kwargs): """Update `params` values using alias. """ for k in self.prior_params: try: self.params[k] = kwargs[self.alias[k]] except(KeyError): pass
[ "def", "update", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "k", "in", "self", ".", "prior_params", ":", "try", ":", "self", ".", "params", "[", "k", "]", "=", "kwargs", "[", "self", ".", "alias", "[", "k", "]", "]", "except", "(", ...
Update `params` values using alias.
[ "Update", "params", "values", "using", "alias", "." ]
python
train
twisted/axiom
axiom/scheduler.py
https://github.com/twisted/axiom/blob/7de70bc8fe1bb81f9c2339fba8daec9eb2e92b68/axiom/scheduler.py#L351-L357
def migrateUp(self): """ Recreate the hooks in the site store to trigger this SubScheduler. """ te = self.store.findFirst(TimedEvent, sort=TimedEvent.time.descending) if te is not None: self._transientSchedule(te.time, None)
[ "def", "migrateUp", "(", "self", ")", ":", "te", "=", "self", ".", "store", ".", "findFirst", "(", "TimedEvent", ",", "sort", "=", "TimedEvent", ".", "time", ".", "descending", ")", "if", "te", "is", "not", "None", ":", "self", ".", "_transientSchedule...
Recreate the hooks in the site store to trigger this SubScheduler.
[ "Recreate", "the", "hooks", "in", "the", "site", "store", "to", "trigger", "this", "SubScheduler", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/ultratb.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/ultratb.py#L218-L235
def fix_frame_records_filenames(records): """Try to fix the filenames in each record from inspect.getinnerframes(). Particularly, modules loaded from within zip files have useless filenames attached to their code object, and inspect.getinnerframes() just uses it. """ fixed_records = [] for frame, filename, line_no, func_name, lines, index in records: # Look inside the frame's globals dictionary for __file__, which should # be better. better_fn = frame.f_globals.get('__file__', None) if isinstance(better_fn, str): # Check the type just in case someone did something weird with # __file__. It might also be None if the error occurred during # import. filename = better_fn fixed_records.append((frame, filename, line_no, func_name, lines, index)) return fixed_records
[ "def", "fix_frame_records_filenames", "(", "records", ")", ":", "fixed_records", "=", "[", "]", "for", "frame", ",", "filename", ",", "line_no", ",", "func_name", ",", "lines", ",", "index", "in", "records", ":", "# Look inside the frame's globals dictionary for __f...
Try to fix the filenames in each record from inspect.getinnerframes(). Particularly, modules loaded from within zip files have useless filenames attached to their code object, and inspect.getinnerframes() just uses it.
[ "Try", "to", "fix", "the", "filenames", "in", "each", "record", "from", "inspect", ".", "getinnerframes", "()", "." ]
python
test
helixyte/everest
everest/repositories/state.py
https://github.com/helixyte/everest/blob/70c9b93c3061db5cb62428349d18b8fb8566411b/everest/repositories/state.py#L58-L73
def manage(cls, entity, unit_of_work): """ Manages the given entity under the given Unit Of Work. If `entity` is already managed by the given Unit Of Work, nothing is done. :raises ValueError: If the given entity is already under management by a different Unit Of Work. """ if hasattr(entity, '__everest__'): if not unit_of_work is entity.__everest__.unit_of_work: raise ValueError('Trying to register an entity that has been ' 'registered with another session!') else: entity.__everest__ = cls(entity, unit_of_work)
[ "def", "manage", "(", "cls", ",", "entity", ",", "unit_of_work", ")", ":", "if", "hasattr", "(", "entity", ",", "'__everest__'", ")", ":", "if", "not", "unit_of_work", "is", "entity", ".", "__everest__", ".", "unit_of_work", ":", "raise", "ValueError", "("...
Manages the given entity under the given Unit Of Work. If `entity` is already managed by the given Unit Of Work, nothing is done. :raises ValueError: If the given entity is already under management by a different Unit Of Work.
[ "Manages", "the", "given", "entity", "under", "the", "given", "Unit", "Of", "Work", "." ]
python
train
nutechsoftware/alarmdecoder
examples/socket_example.py
https://github.com/nutechsoftware/alarmdecoder/blob/b0c014089e24455228cb4402cf30ba98157578cd/examples/socket_example.py#L9-L25
def main(): """ Example application that opens a device that has been exposed to the network with ser2sock or similar serial-to-IP software. """ try: # Retrieve an AD2 device that has been exposed with ser2sock on localhost:10000. device = AlarmDecoder(SocketDevice(interface=(HOSTNAME, PORT))) # Set up an event handler and open the device device.on_message += handle_message with device.open(): while True: time.sleep(1) except Exception as ex: print('Exception:', ex)
[ "def", "main", "(", ")", ":", "try", ":", "# Retrieve an AD2 device that has been exposed with ser2sock on localhost:10000.", "device", "=", "AlarmDecoder", "(", "SocketDevice", "(", "interface", "=", "(", "HOSTNAME", ",", "PORT", ")", ")", ")", "# Set up an event handl...
Example application that opens a device that has been exposed to the network with ser2sock or similar serial-to-IP software.
[ "Example", "application", "that", "opens", "a", "device", "that", "has", "been", "exposed", "to", "the", "network", "with", "ser2sock", "or", "similar", "serial", "-", "to", "-", "IP", "software", "." ]
python
train
angr/angr
angr/keyed_region.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/keyed_region.py#L340-L402
def __store(self, stored_object, overwrite=False): """ Store a variable into the storage. :param StoredObject stored_object: The descriptor describing start address and the variable. :param bool overwrite: Whether existing objects should be overwritten or not. True to make a strong update, False to make a weak update. :return: None """ start = stored_object.start object_size = stored_object.size end = start + object_size # region items in the middle overlapping_items = list(self._storage.irange(start, end-1)) # is there a region item that begins before the start and overlaps with this variable? floor_key, floor_item = self._get_container(start) if floor_item is not None and floor_key not in overlapping_items: # insert it into the beginning overlapping_items.insert(0, floor_key) # scan through the entire list of region items, split existing regions and insert new regions as needed to_update = {start: RegionObject(start, object_size, {stored_object})} last_end = start for floor_key in overlapping_items: item = self._storage[floor_key] if item.start < start: # we need to break this item into two a, b = item.split(start) if overwrite: b.set_object(stored_object) else: self._add_object_with_check(b, stored_object) to_update[a.start] = a to_update[b.start] = b last_end = b.end elif item.start > last_end: # there is a gap between the last item and the current item # fill in the gap new_item = RegionObject(last_end, item.start - last_end, {stored_object}) to_update[new_item.start] = new_item last_end = new_item.end elif item.end > end: # we need to split this item into two a, b = item.split(end) if overwrite: a.set_object(stored_object) else: self._add_object_with_check(a, stored_object) to_update[a.start] = a to_update[b.start] = b last_end = b.end else: if overwrite: item.set_object(stored_object) else: self._add_object_with_check(item, stored_object) to_update[item.start] = item self._storage.update(to_update)
[ "def", "__store", "(", "self", ",", "stored_object", ",", "overwrite", "=", "False", ")", ":", "start", "=", "stored_object", ".", "start", "object_size", "=", "stored_object", ".", "size", "end", "=", "start", "+", "object_size", "# region items in the middle",...
Store a variable into the storage. :param StoredObject stored_object: The descriptor describing start address and the variable. :param bool overwrite: Whether existing objects should be overwritten or not. True to make a strong update, False to make a weak update. :return: None
[ "Store", "a", "variable", "into", "the", "storage", "." ]
python
train
Parsl/parsl
parsl/dataflow/dflow.py
https://github.com/Parsl/parsl/blob/d7afb3bc37f50dcf224ae78637944172edb35dac/parsl/dataflow/dflow.py#L792-L857
def cleanup(self): """DataFlowKernel cleanup. This involves killing resources explicitly and sending die messages to IPP workers. If the executors are managed (created by the DFK), then we call scale_in on each of the executors and call executor.shutdown. Otherwise, we do nothing, and executor cleanup is left to the user. """ logger.info("DFK cleanup initiated") # this check won't detect two DFK cleanups happening from # different threads extremely close in time because of # non-atomic read/modify of self.cleanup_called if self.cleanup_called: raise Exception("attempt to clean up DFK when it has already been cleaned-up") self.cleanup_called = True self.log_task_states() # Checkpointing takes priority over the rest of the tasks # checkpoint if any valid checkpoint method is specified if self.checkpoint_mode is not None: self.checkpoint() if self._checkpoint_timer: logger.info("Stopping checkpoint timer") self._checkpoint_timer.close() # Send final stats self.usage_tracker.send_message() self.usage_tracker.close() logger.info("Terminating flow_control and strategy threads") self.flowcontrol.close() for executor in self.executors.values(): if executor.managed: if executor.scaling_enabled: job_ids = executor.provider.resources.keys() executor.scale_in(len(job_ids)) executor.shutdown() self.time_completed = datetime.datetime.now() if self.monitoring: self.monitoring.send(MessageType.WORKFLOW_INFO, {'tasks_failed_count': self.tasks_failed_count, 'tasks_completed_count': self.tasks_completed_count, "time_began": self.time_began, 'time_completed': self.time_completed, 'workflow_duration': (self.time_completed - self.time_began).total_seconds(), 'run_id': self.run_id, 'rundir': self.run_dir}) self.monitoring.close() """ if self.logging_server is not None: self.logging_server.terminate() self.logging_server.join() if self.web_app is not None: self.web_app.terminate() self.web_app.join() """ logger.info("DFK cleanup complete")
[ "def", "cleanup", "(", "self", ")", ":", "logger", ".", "info", "(", "\"DFK cleanup initiated\"", ")", "# this check won't detect two DFK cleanups happening from", "# different threads extremely close in time because of", "# non-atomic read/modify of self.cleanup_called", "if", "self...
DataFlowKernel cleanup. This involves killing resources explicitly and sending die messages to IPP workers. If the executors are managed (created by the DFK), then we call scale_in on each of the executors and call executor.shutdown. Otherwise, we do nothing, and executor cleanup is left to the user.
[ "DataFlowKernel", "cleanup", "." ]
python
valid
carpedm20/ndrive
ndrive/models.py
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/models.py#L687-L712
def getMusicAlbumList(self, tagtype = 0, startnum = 0, pagingrow = 100): """GetMusicAlbumList Args: tagtype = ??? startnum pagingrow Returns: ??? False: Failed to get property """ url = nurls['setProperty'] data = {'userid': self.user_id, 'useridx': self.useridx, 'tagtype': tagtype, 'startnum': startnum, 'pagingrow': pagingrow, } r = self.session.post(url = url, data = data) return resultManager(r.text)
[ "def", "getMusicAlbumList", "(", "self", ",", "tagtype", "=", "0", ",", "startnum", "=", "0", ",", "pagingrow", "=", "100", ")", ":", "url", "=", "nurls", "[", "'setProperty'", "]", "data", "=", "{", "'userid'", ":", "self", ".", "user_id", ",", "'us...
GetMusicAlbumList Args: tagtype = ??? startnum pagingrow Returns: ??? False: Failed to get property
[ "GetMusicAlbumList" ]
python
train
dpkp/kafka-python
kafka/consumer/simple.py
https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/consumer/simple.py#L188-L258
def seek(self, offset, whence=None, partition=None): """ Alter the current offset in the consumer, similar to fseek Arguments: offset: how much to modify the offset whence: where to modify it from, default is None * None is an absolute offset * 0 is relative to the earliest available offset (head) * 1 is relative to the current offset * 2 is relative to the latest known offset (tail) partition: modify which partition, default is None. If partition is None, would modify all partitions. """ if whence is None: # set an absolute offset if partition is None: for tmp_partition in self.offsets: self.offsets[tmp_partition] = offset else: self.offsets[partition] = offset elif whence == 1: # relative to current position if partition is None: for tmp_partition, _offset in self.offsets.items(): self.offsets[tmp_partition] = _offset + offset else: self.offsets[partition] += offset elif whence in (0, 2): # relative to beginning or end reqs = [] deltas = {} if partition is None: # divide the request offset by number of partitions, # distribute the remained evenly (delta, rem) = divmod(offset, len(self.offsets)) for tmp_partition, r in izip_longest(self.offsets.keys(), repeat(1, rem), fillvalue=0): deltas[tmp_partition] = delta + r for tmp_partition in self.offsets.keys(): if whence == 0: reqs.append(OffsetRequestPayload(self.topic, tmp_partition, -2, 1)) elif whence == 2: reqs.append(OffsetRequestPayload(self.topic, tmp_partition, -1, 1)) else: pass else: deltas[partition] = offset if whence == 0: reqs.append(OffsetRequestPayload(self.topic, partition, -2, 1)) elif whence == 2: reqs.append(OffsetRequestPayload(self.topic, partition, -1, 1)) else: pass resps = self.client.send_offset_request(reqs) for resp in resps: self.offsets[resp.partition] = \ resp.offsets[0] + deltas[resp.partition] else: raise ValueError('Unexpected value for `whence`, %d' % (whence,)) # Reset queue and fetch offsets since they are invalid self.fetch_offsets = self.offsets.copy() self.count_since_commit += 
1 if self.auto_commit: self.commit() self.queue = queue.Queue()
[ "def", "seek", "(", "self", ",", "offset", ",", "whence", "=", "None", ",", "partition", "=", "None", ")", ":", "if", "whence", "is", "None", ":", "# set an absolute offset", "if", "partition", "is", "None", ":", "for", "tmp_partition", "in", "self", "."...
Alter the current offset in the consumer, similar to fseek Arguments: offset: how much to modify the offset whence: where to modify it from, default is None * None is an absolute offset * 0 is relative to the earliest available offset (head) * 1 is relative to the current offset * 2 is relative to the latest known offset (tail) partition: modify which partition, default is None. If partition is None, would modify all partitions.
[ "Alter", "the", "current", "offset", "in", "the", "consumer", "similar", "to", "fseek" ]
python
train
SmokinCaterpillar/pypet
pypet/storageservice.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/storageservice.py#L4909-L4933
def _prm_read_pandas(self, pd_node, full_name): """Reads a DataFrame from dis. :param pd_node: hdf5 node storing the pandas DataFrame :param full_name: Full name of the parameter or result whose data is to be loaded :return: Data to load """ try: name = pd_node._v_name pathname = pd_node._v_pathname pandas_store = self._hdf5store pandas_data = pandas_store.get(pathname) return pandas_data except: self._logger.error('Failed loading `%s` of `%s`.' % (pd_node._v_name, full_name)) raise
[ "def", "_prm_read_pandas", "(", "self", ",", "pd_node", ",", "full_name", ")", ":", "try", ":", "name", "=", "pd_node", ".", "_v_name", "pathname", "=", "pd_node", ".", "_v_pathname", "pandas_store", "=", "self", ".", "_hdf5store", "pandas_data", "=", "panda...
Reads a DataFrame from dis. :param pd_node: hdf5 node storing the pandas DataFrame :param full_name: Full name of the parameter or result whose data is to be loaded :return: Data to load
[ "Reads", "a", "DataFrame", "from", "dis", "." ]
python
test
pydata/numexpr
numexpr/necompiler.py
https://github.com/pydata/numexpr/blob/364bac13d84524e0e01db892301b2959d822dcff/numexpr/necompiler.py#L450-L471
def setRegisterNumbersForTemporaries(ast, start): """Assign register numbers for temporary registers, keeping track of aliases and handling immediate operands. """ seen = 0 signature = '' aliases = [] for node in ast.postorderWalk(): if node.astType == 'alias': aliases.append(node) node = node.value if node.reg.immediate: node.reg.n = node.value continue reg = node.reg if reg.n is None: reg.n = start + seen seen += 1 signature += reg.node.typecode() for node in aliases: node.reg = node.value.reg return start + seen, signature
[ "def", "setRegisterNumbersForTemporaries", "(", "ast", ",", "start", ")", ":", "seen", "=", "0", "signature", "=", "''", "aliases", "=", "[", "]", "for", "node", "in", "ast", ".", "postorderWalk", "(", ")", ":", "if", "node", ".", "astType", "==", "'al...
Assign register numbers for temporary registers, keeping track of aliases and handling immediate operands.
[ "Assign", "register", "numbers", "for", "temporary", "registers", "keeping", "track", "of", "aliases", "and", "handling", "immediate", "operands", "." ]
python
train
ulule/django-linguist
linguist/mixins.py
https://github.com/ulule/django-linguist/blob/d2b95a6ab921039d56d5eeb352badfe5be9e8f77/linguist/mixins.py#L276-L297
def with_translations(self, **kwargs): """ Prefetches translations. Takes three optional keyword arguments: * ``field_names``: ``field_name`` values for SELECT IN * ``languages``: ``language`` values for SELECT IN * ``chunks_length``: fetches IDs by chunk """ force = kwargs.pop("force", False) if self._prefetch_translations_done and force is False: return self self._prefetched_translations_cache = utils.get_grouped_translations( self, **kwargs ) self._prefetch_translations_done = True return self._clone()
[ "def", "with_translations", "(", "self", ",", "*", "*", "kwargs", ")", ":", "force", "=", "kwargs", ".", "pop", "(", "\"force\"", ",", "False", ")", "if", "self", ".", "_prefetch_translations_done", "and", "force", "is", "False", ":", "return", "self", "...
Prefetches translations. Takes three optional keyword arguments: * ``field_names``: ``field_name`` values for SELECT IN * ``languages``: ``language`` values for SELECT IN * ``chunks_length``: fetches IDs by chunk
[ "Prefetches", "translations", "." ]
python
train
ozgur/python-firebase
firebase/firebase.py
https://github.com/ozgur/python-firebase/blob/6b96b326f6d8f477503ca42fdfbd81bcbe1f9e0d/firebase/firebase.py#L331-L342
def post_async(self, url, data, callback=None, params=None, headers=None): """ Asynchronous POST request with the process pool. """ params = params or {} headers = headers or {} endpoint = self._build_endpoint_url(url, None) self._authenticate(params, headers) data = json.dumps(data, cls=JSONEncoder) process_pool.apply_async(make_post_request, args=(endpoint, data, params, headers), callback=callback)
[ "def", "post_async", "(", "self", ",", "url", ",", "data", ",", "callback", "=", "None", ",", "params", "=", "None", ",", "headers", "=", "None", ")", ":", "params", "=", "params", "or", "{", "}", "headers", "=", "headers", "or", "{", "}", "endpoin...
Asynchronous POST request with the process pool.
[ "Asynchronous", "POST", "request", "with", "the", "process", "pool", "." ]
python
valid
novopl/peltak
src/peltak/core/context.py
https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/core/context.py#L92-L113
def set(self, name, value): """ Set context value. Args: name (str): The name of the context value to change. value (Any): The new value for the selected context value """ curr = self.values parts = name.split('.') for i, part in enumerate(parts[:-1]): try: curr = curr.setdefault(part, {}) except AttributeError: raise InvalidPath('.'.join(parts[:i + 1])) try: curr[parts[-1]] = value except TypeError: raise InvalidPath('.'.join(parts[:-1]))
[ "def", "set", "(", "self", ",", "name", ",", "value", ")", ":", "curr", "=", "self", ".", "values", "parts", "=", "name", ".", "split", "(", "'.'", ")", "for", "i", ",", "part", "in", "enumerate", "(", "parts", "[", ":", "-", "1", "]", ")", "...
Set context value. Args: name (str): The name of the context value to change. value (Any): The new value for the selected context value
[ "Set", "context", "value", "." ]
python
train
log2timeline/plaso
plaso/cli/helpers/xlsx_output.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/cli/helpers/xlsx_output.py#L26-L50
def AddArguments(cls, argument_group): """Adds command line arguments the helper supports to an argument group. This function takes an argument parser or an argument group object and adds to it all the command line arguments this helper supports. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group. """ argument_group.add_argument( '--fields', dest='fields', type=str, action='store', default=cls._DEFAULT_FIELDS, help=( 'Defines which fields should be included in the output.')) argument_group.add_argument( '--additional_fields', dest='additional_fields', type=str, action='store', default='', help=( 'Defines extra fields to be included in the output, in addition to' ' the default fields, which are {0:s}.'.format( cls._DEFAULT_FIELDS))) argument_group.add_argument( '--timestamp_format', dest='timestamp_format', type=str, action='store', default=cls._DEFAULT_TIMESTAMP_FORMAT, help=( 'Set the timestamp format that will be used in the datetime' 'column of the XLSX spreadsheet.'))
[ "def", "AddArguments", "(", "cls", ",", "argument_group", ")", ":", "argument_group", ".", "add_argument", "(", "'--fields'", ",", "dest", "=", "'fields'", ",", "type", "=", "str", ",", "action", "=", "'store'", ",", "default", "=", "cls", ".", "_DEFAULT_F...
Adds command line arguments the helper supports to an argument group. This function takes an argument parser or an argument group object and adds to it all the command line arguments this helper supports. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group.
[ "Adds", "command", "line", "arguments", "the", "helper", "supports", "to", "an", "argument", "group", "." ]
python
train
learningequality/ricecooker
ricecooker/classes/questions.py
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/classes/questions.py#L442-L460
def validate(self): """ validate: Makes sure single selection question is valid Args: None Returns: boolean indicating if single selection question is valid """ try: assert self.question_type == exercises.SINGLE_SELECTION, "Assumption Failed: Question should be single selection type" assert len(self.answers) > 0, "Assumption Failed: Multiple selection question should have answers" correct_answers = 0 for a in self.answers: assert 'answer' in a and isinstance(a['answer'], str), "Assumption Failed: Answer in answer list is not a string" assert 'correct' in a and isinstance(a['correct'], bool), "Assumption Failed: Correct indicator is not a boolean in answer list" correct_answers += 1 if a['correct'] else 0 assert correct_answers == 1, "Assumption Failed: Single selection question should have only one correct answer" for h in self.hints: assert isinstance(h, str), "Assumption Failed: Hint in hints list is not a string" return super(SingleSelectQuestion, self).validate() except AssertionError as ae: raise InvalidQuestionException("Invalid question: {0}".format(self.__dict__))
[ "def", "validate", "(", "self", ")", ":", "try", ":", "assert", "self", ".", "question_type", "==", "exercises", ".", "SINGLE_SELECTION", ",", "\"Assumption Failed: Question should be single selection type\"", "assert", "len", "(", "self", ".", "answers", ")", ">", ...
validate: Makes sure single selection question is valid Args: None Returns: boolean indicating if single selection question is valid
[ "validate", ":", "Makes", "sure", "single", "selection", "question", "is", "valid", "Args", ":", "None", "Returns", ":", "boolean", "indicating", "if", "single", "selection", "question", "is", "valid" ]
python
train
MeaningCloud/meaningcloud-python
meaningcloud/SentimentResponse.py
https://github.com/MeaningCloud/meaningcloud-python/blob/1dd76ecabeedd80c9bb14a1716d39657d645775f/meaningcloud/SentimentResponse.py#L177-L197
def scoreTagToString(self, scoreTag): """ :param scoreTag: :return: """ scoreTagToString = "" if scoreTag == "P+": scoreTagToString = 'strong positive' elif scoreTag == "P": scoreTagToString = 'positive' elif scoreTag == "NEU": scoreTagToString = 'neutral' elif scoreTag == "N": scoreTagToString = 'negative' elif scoreTag == "N+": scoreTagToString = 'strong negative' elif scoreTag == "NONE": scoreTagToString = 'no sentiment' return scoreTagToString
[ "def", "scoreTagToString", "(", "self", ",", "scoreTag", ")", ":", "scoreTagToString", "=", "\"\"", "if", "scoreTag", "==", "\"P+\"", ":", "scoreTagToString", "=", "'strong positive'", "elif", "scoreTag", "==", "\"P\"", ":", "scoreTagToString", "=", "'positive'", ...
:param scoreTag: :return:
[ ":", "param", "scoreTag", ":", ":", "return", ":" ]
python
train
materialsproject/pymatgen
pymatgen/io/abinit/abiobjects.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/abiobjects.py#L1304-L1328
def to_abivars(self): """Returns a dictionary with the abinit variables.""" abivars = dict( gwcalctyp=self.gwcalctyp, ecuteps=self.ecuteps, ecutsigx=self.ecutsigx, symsigma=self.symsigma, gw_qprange=self.gw_qprange, gwpara=self.gwpara, optdriver=self.optdriver, nband=self.nband #"ecutwfn" : self.ecutwfn, #"kptgw" : self.kptgw, #"nkptgw" : self.nkptgw, #"bdgw" : self.bdgw, ) # FIXME: problem with the spin #assert len(self.bdgw) == self.nkptgw # ppmodel variables if self.use_ppmodel: abivars.update(self.ppmodel.to_abivars()) return abivars
[ "def", "to_abivars", "(", "self", ")", ":", "abivars", "=", "dict", "(", "gwcalctyp", "=", "self", ".", "gwcalctyp", ",", "ecuteps", "=", "self", ".", "ecuteps", ",", "ecutsigx", "=", "self", ".", "ecutsigx", ",", "symsigma", "=", "self", ".", "symsigm...
Returns a dictionary with the abinit variables.
[ "Returns", "a", "dictionary", "with", "the", "abinit", "variables", "." ]
python
train
collectiveacuity/labPack
labpack/storage/google/drive.py
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/google/drive.py#L273-L291
def _get_space(self): ''' a helper method to retrieve id of drive space ''' title = '%s._space_id' % self.__class__.__name__ list_kwargs = { 'q': "'%s' in parents" % self.drive_space, 'spaces': self.drive_space, 'fields': 'files(name, parents)', 'pageSize': 1 } try: response = self.drive.list(**list_kwargs).execute() except: raise DriveConnectionError(title) for file in response.get('files',[]): self.space_id = file.get('parents')[0] break return self.space_id
[ "def", "_get_space", "(", "self", ")", ":", "title", "=", "'%s._space_id'", "%", "self", ".", "__class__", ".", "__name__", "list_kwargs", "=", "{", "'q'", ":", "\"'%s' in parents\"", "%", "self", ".", "drive_space", ",", "'spaces'", ":", "self", ".", "dri...
a helper method to retrieve id of drive space
[ "a", "helper", "method", "to", "retrieve", "id", "of", "drive", "space" ]
python
train
alexa/alexa-skills-kit-sdk-for-python
ask-sdk-core/ask_sdk_core/utils/request_util.py
https://github.com/alexa/alexa-skills-kit-sdk-for-python/blob/097b6406aa12d5ca0b825b00c936861b530cbf39/ask-sdk-core/ask_sdk_core/utils/request_util.py#L65-L85
def get_intent_name(handler_input): # type: (HandlerInput) -> AnyStr """Return the name of the intent request. The method retrieves the intent ``name`` from the input request, only if the input request is an :py:class:`ask_sdk_model.intent_request.IntentRequest`. If the input is not an IntentRequest, a :py:class:`TypeError` is raised. :param handler_input: The handler input instance that is generally passed in the sdk's request and exception components :type handler_input: ask_sdk_core.handler_input.HandlerInput :return: Name of the intent request :rtype: str :raises: TypeError """ request = handler_input.request_envelope.request if isinstance(request, IntentRequest): return request.intent.name raise TypeError("The provided request is not an IntentRequest")
[ "def", "get_intent_name", "(", "handler_input", ")", ":", "# type: (HandlerInput) -> AnyStr", "request", "=", "handler_input", ".", "request_envelope", ".", "request", "if", "isinstance", "(", "request", ",", "IntentRequest", ")", ":", "return", "request", ".", "int...
Return the name of the intent request. The method retrieves the intent ``name`` from the input request, only if the input request is an :py:class:`ask_sdk_model.intent_request.IntentRequest`. If the input is not an IntentRequest, a :py:class:`TypeError` is raised. :param handler_input: The handler input instance that is generally passed in the sdk's request and exception components :type handler_input: ask_sdk_core.handler_input.HandlerInput :return: Name of the intent request :rtype: str :raises: TypeError
[ "Return", "the", "name", "of", "the", "intent", "request", "." ]
python
train
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAUtil/QADateTools.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAUtil/QADateTools.py#L47-L64
def QA_util_getBetweenQuarter(begin_date, end_date): """ #加上每季度的起始日期、结束日期 """ quarter_list = {} month_list = QA_util_getBetweenMonth(begin_date, end_date) for value in month_list: tempvalue = value.split("-") year = tempvalue[0] if tempvalue[1] in ['01', '02', '03']: quarter_list[year + "Q1"] = ['%s-01-01' % year, '%s-03-31' % year] elif tempvalue[1] in ['04', '05', '06']: quarter_list[year + "Q2"] = ['%s-04-01' % year, '%s-06-30' % year] elif tempvalue[1] in ['07', '08', '09']: quarter_list[year + "Q3"] = ['%s-07-31' % year, '%s-09-30' % year] elif tempvalue[1] in ['10', '11', '12']: quarter_list[year + "Q4"] = ['%s-10-01' % year, '%s-12-31' % year] return(quarter_list)
[ "def", "QA_util_getBetweenQuarter", "(", "begin_date", ",", "end_date", ")", ":", "quarter_list", "=", "{", "}", "month_list", "=", "QA_util_getBetweenMonth", "(", "begin_date", ",", "end_date", ")", "for", "value", "in", "month_list", ":", "tempvalue", "=", "va...
#加上每季度的起始日期、结束日期
[ "#加上每季度的起始日期、结束日期" ]
python
train
rosenbrockc/fortpy
fortpy/config.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/config.py#L173-L186
def _load_mapping(self, tag, ssh=False): """Extracts all the alternate module name mappings to be considered. :arg tag: the ET tag for the <mappings> element.""" mappings = {} for mapping in tag: if mapping.tag == "map": mappings[mapping.attrib["module"]] = mapping.attrib["file"] if ssh == False: self._vardict["mappings"] = mappings else: self._vardict["ssh.mappings"] = mappings
[ "def", "_load_mapping", "(", "self", ",", "tag", ",", "ssh", "=", "False", ")", ":", "mappings", "=", "{", "}", "for", "mapping", "in", "tag", ":", "if", "mapping", ".", "tag", "==", "\"map\"", ":", "mappings", "[", "mapping", ".", "attrib", "[", "...
Extracts all the alternate module name mappings to be considered. :arg tag: the ET tag for the <mappings> element.
[ "Extracts", "all", "the", "alternate", "module", "name", "mappings", "to", "be", "considered", "." ]
python
train
hobson/aima
aima/search.py
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/search.py#L571-L574
def conflicted(self, state, row, col): "Would placing a queen at (row, col) conflict with anything?" return any(self.conflict(row, col, state[c], c) for c in range(col))
[ "def", "conflicted", "(", "self", ",", "state", ",", "row", ",", "col", ")", ":", "return", "any", "(", "self", ".", "conflict", "(", "row", ",", "col", ",", "state", "[", "c", "]", ",", "c", ")", "for", "c", "in", "range", "(", "col", ")", "...
Would placing a queen at (row, col) conflict with anything?
[ "Would", "placing", "a", "queen", "at", "(", "row", "col", ")", "conflict", "with", "anything?" ]
python
valid
kiwi0fruit/sugartex
sugartex/sugartex_filter.py
https://github.com/kiwi0fruit/sugartex/blob/9eb13703cb02d3e2163c9c5f29df280f6bf49cec/sugartex/sugartex_filter.py#L216-L227
def spec(self, postf_un_ops: str) -> list: """Return prefix unary operators list""" spec = [(l + op, {'pat': self.pat(pat), 'postf': self.postf(r, postf_un_ops), 'regex': None}) for op, pat in self.styles.items() for l, r in self.brackets] spec[0][1]['regex'] = self.regex_pat.format( _ops_regex(l for l, r in self.brackets), _ops_regex(self.styles.keys()) ) return spec
[ "def", "spec", "(", "self", ",", "postf_un_ops", ":", "str", ")", "->", "list", ":", "spec", "=", "[", "(", "l", "+", "op", ",", "{", "'pat'", ":", "self", ".", "pat", "(", "pat", ")", ",", "'postf'", ":", "self", ".", "postf", "(", "r", ",",...
Return prefix unary operators list
[ "Return", "prefix", "unary", "operators", "list" ]
python
train
joke2k/faker
faker/providers/geo/__init__.py
https://github.com/joke2k/faker/blob/965824b61132e52d92d1a6ce470396dbbe01c96c/faker/providers/geo/__init__.py#L1011-L1017
def location_on_land(self, coords_only=False): """Returns a random tuple specifying a coordinate set guaranteed to exist on land. Format is `(latitude, longitude, place name, two-letter country code, timezone)` Pass `coords_only` to return coordinates without metadata. """ place = self.random_element(self.land_coords) return (place[0], place[1]) if coords_only else place
[ "def", "location_on_land", "(", "self", ",", "coords_only", "=", "False", ")", ":", "place", "=", "self", ".", "random_element", "(", "self", ".", "land_coords", ")", "return", "(", "place", "[", "0", "]", ",", "place", "[", "1", "]", ")", "if", "coo...
Returns a random tuple specifying a coordinate set guaranteed to exist on land. Format is `(latitude, longitude, place name, two-letter country code, timezone)` Pass `coords_only` to return coordinates without metadata.
[ "Returns", "a", "random", "tuple", "specifying", "a", "coordinate", "set", "guaranteed", "to", "exist", "on", "land", ".", "Format", "is", "(", "latitude", "longitude", "place", "name", "two", "-", "letter", "country", "code", "timezone", ")", "Pass", "coord...
python
train
ewels/MultiQC
multiqc/modules/slamdunk/slamdunk.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/slamdunk/slamdunk.py#L319-L362
def slamdunkGeneralStatsTable(self): """ Take the parsed summary stats from Slamdunk and add it to the basic stats table at the top of the report """ headers = OrderedDict() headers['counted'] = { 'title': '{} Counted'.format(config.read_count_prefix), 'description': '# reads counted within 3\'UTRs ({})'.format(config.read_count_desc), 'shared_key': 'read_count', 'min': 0, 'format': '{:,.2f}', 'scale': 'YlGn', 'modify': lambda x: float(x) * config.read_count_multiplier, } headers['retained'] = { 'title': '{} Retained'.format(config.read_count_prefix), 'description': '# retained reads after filtering ({})'.format(config.read_count_desc), 'shared_key': 'read_count', 'min': 0, 'format': '{:,.2f}', 'scale': 'YlGn', 'modify': lambda x: float(x) * config.read_count_multiplier, } headers['mapped'] = { 'title': '{} Mapped'.format(config.read_count_prefix), 'description': '# mapped reads ({})'.format(config.read_count_desc), 'shared_key': 'read_count', 'min': 0, 'format': '{:,.2f}', 'scale': 'YlGn', 'modify': lambda x: float(x) * config.read_count_multiplier, } headers['sequenced'] = { 'title': '{} Sequenced'.format(config.read_count_prefix), 'description': '# sequenced reads ({})'.format(config.read_count_desc), 'shared_key': 'read_count', 'min': 0, 'format': '{:,.2f}', 'scale': 'YlGn', 'modify': lambda x: float(x) * config.read_count_multiplier, } self.general_stats_addcols(self.slamdunk_data, headers)
[ "def", "slamdunkGeneralStatsTable", "(", "self", ")", ":", "headers", "=", "OrderedDict", "(", ")", "headers", "[", "'counted'", "]", "=", "{", "'title'", ":", "'{} Counted'", ".", "format", "(", "config", ".", "read_count_prefix", ")", ",", "'description'", ...
Take the parsed summary stats from Slamdunk and add it to the basic stats table at the top of the report
[ "Take", "the", "parsed", "summary", "stats", "from", "Slamdunk", "and", "add", "it", "to", "the", "basic", "stats", "table", "at", "the", "top", "of", "the", "report" ]
python
train
sosy-lab/benchexec
benchexec/model.py
https://github.com/sosy-lab/benchexec/blob/44428f67f41384c03aea13e7e25f884764653617/benchexec/model.py#L436-L494
def extract_runs_from_xml(self, sourcefilesTagList, global_required_files_pattern): ''' This function builds a list of SourcefileSets (containing filename with options). The files and their options are taken from the list of sourcefilesTags. ''' base_dir = self.benchmark.base_dir # runs are structured as sourcefile sets, one set represents one sourcefiles tag blocks = [] for index, sourcefilesTag in enumerate(sourcefilesTagList): sourcefileSetName = sourcefilesTag.get("name") matchName = sourcefileSetName or str(index) if self.benchmark.config.selected_sourcefile_sets \ and not any(util.wildcard_match(matchName, sourcefile_set) for sourcefile_set in self.benchmark.config.selected_sourcefile_sets): continue required_files_pattern = global_required_files_pattern.union( set(tag.text for tag in sourcefilesTag.findall('requiredfiles'))) # get lists of filenames task_def_files = self.get_task_def_files_from_xml(sourcefilesTag, base_dir) # get file-specific options for filenames fileOptions = util.get_list_from_xml(sourcefilesTag) propertyfile = util.text_or_none(util.get_single_child_from_xml(sourcefilesTag, PROPERTY_TAG)) # some runs need more than one sourcefile, # the first sourcefile is a normal 'include'-file, we use its name as identifier # for logfile and result-category all other files are 'append'ed. 
appendFileTags = sourcefilesTag.findall("append") currentRuns = [] for identifier in task_def_files: if identifier.endswith('.yml'): if appendFileTags: raise BenchExecException( "Cannot combine <append> and task-definition files in the same <tasks> tag.") run = self.create_run_from_task_definition( identifier, fileOptions, propertyfile, required_files_pattern) else: run = self.create_run_for_input_file( identifier, fileOptions, propertyfile, required_files_pattern, appendFileTags) if run: currentRuns.append(run) # add runs for cases without source files for run in sourcefilesTag.findall("withoutfile"): currentRuns.append(Run(run.text, [], fileOptions, self, propertyfile, required_files_pattern)) blocks.append(SourcefileSet(sourcefileSetName, index, currentRuns)) if self.benchmark.config.selected_sourcefile_sets: for selected in self.benchmark.config.selected_sourcefile_sets: if not any(util.wildcard_match(sourcefile_set.real_name, selected) for sourcefile_set in blocks): logging.warning( 'The selected tasks "%s" are not present in the input file, ' 'skipping them.', selected) return blocks
[ "def", "extract_runs_from_xml", "(", "self", ",", "sourcefilesTagList", ",", "global_required_files_pattern", ")", ":", "base_dir", "=", "self", ".", "benchmark", ".", "base_dir", "# runs are structured as sourcefile sets, one set represents one sourcefiles tag", "blocks", "=",...
This function builds a list of SourcefileSets (containing filename with options). The files and their options are taken from the list of sourcefilesTags.
[ "This", "function", "builds", "a", "list", "of", "SourcefileSets", "(", "containing", "filename", "with", "options", ")", ".", "The", "files", "and", "their", "options", "are", "taken", "from", "the", "list", "of", "sourcefilesTags", "." ]
python
train
eandersson/amqpstorm
amqpstorm/management/queue.py
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/management/queue.py#L15-L32
def get(self, queue, virtual_host='/'): """Get Queue details. :param queue: Queue name :param str virtual_host: Virtual host name :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :rtype: dict """ virtual_host = quote(virtual_host, '') return self.http_client.get( API_QUEUE % ( virtual_host, queue ) )
[ "def", "get", "(", "self", ",", "queue", ",", "virtual_host", "=", "'/'", ")", ":", "virtual_host", "=", "quote", "(", "virtual_host", ",", "''", ")", "return", "self", ".", "http_client", ".", "get", "(", "API_QUEUE", "%", "(", "virtual_host", ",", "q...
Get Queue details. :param queue: Queue name :param str virtual_host: Virtual host name :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :rtype: dict
[ "Get", "Queue", "details", "." ]
python
train
jsommers/switchyard
switchyard/lib/address/__init__.py
https://github.com/jsommers/switchyard/blob/fdcb3869c937dcedbd6ea7a7822ebd412bf1e2b0/switchyard/lib/address/__init__.py#L59-L72
def isBridgeFiltered (self): """ Checks if address is an IEEE 802.1D MAC Bridge Filtered MAC Group Address This range is 01-80-C2-00-00-00 to 01-80-C2-00-00-0F. MAC frames that have a destination MAC address within this range are not relayed by bridges conforming to IEEE 802.1D """ return ((self.__value[0] == 0x01) and (self.__value[1] == 0x80) and (self.__value[2] == 0xC2) and (self.__value[3] == 0x00) and (self.__value[4] == 0x00) and (self.__value[5] <= 0x0F))
[ "def", "isBridgeFiltered", "(", "self", ")", ":", "return", "(", "(", "self", ".", "__value", "[", "0", "]", "==", "0x01", ")", "and", "(", "self", ".", "__value", "[", "1", "]", "==", "0x80", ")", "and", "(", "self", ".", "__value", "[", "2", ...
Checks if address is an IEEE 802.1D MAC Bridge Filtered MAC Group Address This range is 01-80-C2-00-00-00 to 01-80-C2-00-00-0F. MAC frames that have a destination MAC address within this range are not relayed by bridges conforming to IEEE 802.1D
[ "Checks", "if", "address", "is", "an", "IEEE", "802", ".", "1D", "MAC", "Bridge", "Filtered", "MAC", "Group", "Address" ]
python
train
notanumber/xapian-haystack
xapian_backend.py
https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L1394-L1410
def _filter_contains(self, term, field_name, field_type, is_not): """ Splits the sentence in terms and join them with OR, using stemmed and un-stemmed. Assumes term is not a list. """ if field_type == 'text': term_list = term.split() else: term_list = [term] query = self._or_query(term_list, field_name, field_type) if is_not: return xapian.Query(xapian.Query.OP_AND_NOT, self._all_query(), query) else: return query
[ "def", "_filter_contains", "(", "self", ",", "term", ",", "field_name", ",", "field_type", ",", "is_not", ")", ":", "if", "field_type", "==", "'text'", ":", "term_list", "=", "term", ".", "split", "(", ")", "else", ":", "term_list", "=", "[", "term", "...
Splits the sentence in terms and join them with OR, using stemmed and un-stemmed. Assumes term is not a list.
[ "Splits", "the", "sentence", "in", "terms", "and", "join", "them", "with", "OR", "using", "stemmed", "and", "un", "-", "stemmed", "." ]
python
train
sosy-lab/benchexec
benchexec/resources.py
https://github.com/sosy-lab/benchexec/blob/44428f67f41384c03aea13e7e25f884764653617/benchexec/resources.py#L213-L241
def get_memory_banks_per_run(coreAssignment, cgroups): """Get an assignment of memory banks to runs that fits to the given coreAssignment, i.e., no run is allowed to use memory that is not local (on the same NUMA node) to one of its CPU cores.""" try: # read list of available memory banks allMems = set(cgroups.read_allowed_memory_banks()) result = [] for cores in coreAssignment: mems = set() for core in cores: coreDir = '/sys/devices/system/cpu/cpu{0}/'.format(core) mems.update(_get_memory_banks_listed_in_dir(coreDir)) allowedMems = sorted(mems.intersection(allMems)) logging.debug("Memory banks for cores %s are %s, of which we can use %s.", cores, list(mems), allowedMems) result.append(allowedMems) assert len(result) == len(coreAssignment) if any(result) and os.path.isdir('/sys/devices/system/node/'): return result else: # All runs get the empty list of memory regions # because this system has no NUMA support return None except ValueError as e: sys.exit("Could not read memory information from kernel: {0}".format(e))
[ "def", "get_memory_banks_per_run", "(", "coreAssignment", ",", "cgroups", ")", ":", "try", ":", "# read list of available memory banks", "allMems", "=", "set", "(", "cgroups", ".", "read_allowed_memory_banks", "(", ")", ")", "result", "=", "[", "]", "for", "cores"...
Get an assignment of memory banks to runs that fits to the given coreAssignment, i.e., no run is allowed to use memory that is not local (on the same NUMA node) to one of its CPU cores.
[ "Get", "an", "assignment", "of", "memory", "banks", "to", "runs", "that", "fits", "to", "the", "given", "coreAssignment", "i", ".", "e", ".", "no", "run", "is", "allowed", "to", "use", "memory", "that", "is", "not", "local", "(", "on", "the", "same", ...
python
train
xen/webcraft
webcraft/apiview.py
https://github.com/xen/webcraft/blob/74ff1e5b253048d9260446bfbc95de2e402a8005/webcraft/apiview.py#L10-L15
def alchemyencoder(obj): """JSON encoder function for SQLAlchemy special classes.""" if isinstance(obj, datetime.date): return obj.isoformat() elif isinstance(obj, decimal.Decimal): return float(obj)
[ "def", "alchemyencoder", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "datetime", ".", "date", ")", ":", "return", "obj", ".", "isoformat", "(", ")", "elif", "isinstance", "(", "obj", ",", "decimal", ".", "Decimal", ")", ":", "return", ...
JSON encoder function for SQLAlchemy special classes.
[ "JSON", "encoder", "function", "for", "SQLAlchemy", "special", "classes", "." ]
python
train
pypa/pipenv
pipenv/patched/notpip/_vendor/pkg_resources/__init__.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_vendor/pkg_resources/__init__.py#L658-L664
def run_script(self, requires, script_name): """Locate distribution for `requires` and run `script_name` script""" ns = sys._getframe(1).f_globals name = ns['__name__'] ns.clear() ns['__name__'] = name self.require(requires)[0].run_script(script_name, ns)
[ "def", "run_script", "(", "self", ",", "requires", ",", "script_name", ")", ":", "ns", "=", "sys", ".", "_getframe", "(", "1", ")", ".", "f_globals", "name", "=", "ns", "[", "'__name__'", "]", "ns", ".", "clear", "(", ")", "ns", "[", "'__name__'", ...
Locate distribution for `requires` and run `script_name` script
[ "Locate", "distribution", "for", "requires", "and", "run", "script_name", "script" ]
python
train
Guake/guake
guake/prefs.py
https://github.com/Guake/guake/blob/4153ef38f9044cbed6494075fce80acd5809df2b/guake/prefs.py#L544-L548
def on_window_height_value_changed(self, hscale): """Changes the value of window_height in dconf """ val = hscale.get_value() self.settings.general.set_int('window-height', int(val))
[ "def", "on_window_height_value_changed", "(", "self", ",", "hscale", ")", ":", "val", "=", "hscale", ".", "get_value", "(", ")", "self", ".", "settings", ".", "general", ".", "set_int", "(", "'window-height'", ",", "int", "(", "val", ")", ")" ]
Changes the value of window_height in dconf
[ "Changes", "the", "value", "of", "window_height", "in", "dconf" ]
python
train
ardydedase/pycouchbase
couchbase-python-cffi/couchbase_ffi/executors.py
https://github.com/ardydedase/pycouchbase/blob/6f010b4d2ef41aead2366878d0cf0b1284c0db0e/couchbase-python-cffi/couchbase_ffi/executors.py#L71-L94
def get_option(name, key_options, global_options, default=None): """ Search the key-specific options and the global options for a given setting. Either dictionary may be None. This will first search the key settings and then the global settings. :param name: The setting to search for :param key_options: The item specific settings :param global_options: General method parameters :return: The option, if found, or None """ if key_options: try: return key_options[name] except KeyError: pass if global_options: try: return global_options[name] except KeyError: pass return default
[ "def", "get_option", "(", "name", ",", "key_options", ",", "global_options", ",", "default", "=", "None", ")", ":", "if", "key_options", ":", "try", ":", "return", "key_options", "[", "name", "]", "except", "KeyError", ":", "pass", "if", "global_options", ...
Search the key-specific options and the global options for a given setting. Either dictionary may be None. This will first search the key settings and then the global settings. :param name: The setting to search for :param key_options: The item specific settings :param global_options: General method parameters :return: The option, if found, or None
[ "Search", "the", "key", "-", "specific", "options", "and", "the", "global", "options", "for", "a", "given", "setting", ".", "Either", "dictionary", "may", "be", "None", "." ]
python
train
hfurubotten/enturclient
enturclient/api.py
https://github.com/hfurubotten/enturclient/blob/8230f9e9cf5b3a4911e860bc8cbe621231aa5ae4/enturclient/api.py#L75-L108
async def expand_all_quays(self) -> None: """Find all quays from stop places.""" if not self.stops: return headers = {'ET-Client-Name': self._client_name} request = { 'query': GRAPHQL_STOP_TO_QUAY_TEMPLATE, 'variables': { 'stops': self.stops, 'omitNonBoarding': self.omit_non_boarding } } with async_timeout.timeout(10): resp = await self.web_session.post(RESOURCE, json=request, headers=headers) if resp.status != 200: _LOGGER.error( "Error connecting to Entur, response http status code: %s", resp.status) return None result = await resp.json() if 'errors' in result: return for stop_place in result['data']['stopPlaces']: if len(stop_place['quays']) > 1: for quay in stop_place['quays']: if quay['estimatedCalls']: self.quays.append(quay['id'])
[ "async", "def", "expand_all_quays", "(", "self", ")", "->", "None", ":", "if", "not", "self", ".", "stops", ":", "return", "headers", "=", "{", "'ET-Client-Name'", ":", "self", ".", "_client_name", "}", "request", "=", "{", "'query'", ":", "GRAPHQL_STOP_TO...
Find all quays from stop places.
[ "Find", "all", "quays", "from", "stop", "places", "." ]
python
train
JukeboxPipeline/jukebox-core
src/jukeboxcore/addons/guerilla/guerillamgmt.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/addons/guerilla/guerillamgmt.py#L715-L731
def setup_prjs_page(self, ): """Create and set the model on the projects page :returns: None :rtype: None :raises: None """ self.prjs_tablev.horizontalHeader().setResizeMode(QtGui.QHeaderView.ResizeToContents) log.debug("Loading projects for projects page.") rootdata = treemodel.ListItemData(['Name', 'Short', 'Path', 'Created', 'Semester', 'Status', 'Resolution', 'FPS', 'Scale']) rootitem = treemodel.TreeItem(rootdata) prjs = djadapter.projects.all() for prj in prjs: prjdata = djitemdata.ProjectItemData(prj) treemodel.TreeItem(prjdata, rootitem) self.prjs_model = treemodel.TreeModel(rootitem) self.prjs_tablev.setModel(self.prjs_model)
[ "def", "setup_prjs_page", "(", "self", ",", ")", ":", "self", ".", "prjs_tablev", ".", "horizontalHeader", "(", ")", ".", "setResizeMode", "(", "QtGui", ".", "QHeaderView", ".", "ResizeToContents", ")", "log", ".", "debug", "(", "\"Loading projects for projects ...
Create and set the model on the projects page :returns: None :rtype: None :raises: None
[ "Create", "and", "set", "the", "model", "on", "the", "projects", "page" ]
python
train
bioasp/caspo
travis-ci/upload.py
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/travis-ci/upload.py#L11-L23
def artifact_already_exists(cli, meta, owner): """ Checks to see whether the built recipe (aka distribution) already exists on the owner/user's binstar account. """ distro_name = '{}/{}.tar.bz2'.format(conda.config.subdir, meta.dist()) try: dist_info = cli.distribution(owner, meta.name(), meta.version(), distro_name) except binstar_client.errors.NotFound: dist_info = {} return bool(dist_info)
[ "def", "artifact_already_exists", "(", "cli", ",", "meta", ",", "owner", ")", ":", "distro_name", "=", "'{}/{}.tar.bz2'", ".", "format", "(", "conda", ".", "config", ".", "subdir", ",", "meta", ".", "dist", "(", ")", ")", "try", ":", "dist_info", "=", ...
Checks to see whether the built recipe (aka distribution) already exists on the owner/user's binstar account.
[ "Checks", "to", "see", "whether", "the", "built", "recipe", "(", "aka", "distribution", ")", "already", "exists", "on", "the", "owner", "/", "user", "s", "binstar", "account", "." ]
python
train
crypto101/merlyn
merlyn/exercise.py
https://github.com/crypto101/merlyn/blob/0f313210b9ea5385cc2e5b725dc766df9dc3284d/merlyn/exercise.py#L115-L124
def solveAndNotify(self, request): """Notifies the owner of the current request (so, the user doing the exercise) that they've solved the exercise, and mark it as solved in the database. """ remote = request.transport.remote withThisIdentifier = Exercise.identifier == self.exerciseIdentifier exercise = self.store.findUnique(Exercise, withThisIdentifier) solveAndNotify(remote, exercise)
[ "def", "solveAndNotify", "(", "self", ",", "request", ")", ":", "remote", "=", "request", ".", "transport", ".", "remote", "withThisIdentifier", "=", "Exercise", ".", "identifier", "==", "self", ".", "exerciseIdentifier", "exercise", "=", "self", ".", "store",...
Notifies the owner of the current request (so, the user doing the exercise) that they've solved the exercise, and mark it as solved in the database.
[ "Notifies", "the", "owner", "of", "the", "current", "request", "(", "so", "the", "user", "doing", "the", "exercise", ")", "that", "they", "ve", "solved", "the", "exercise", "and", "mark", "it", "as", "solved", "in", "the", "database", "." ]
python
train
inveniosoftware/invenio-access
invenio_access/ext.py
https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/ext.py#L106-L112
def load_entry_point_actions(self, entry_point_group): """Load actions from an entry point group. :param entry_point_group: The entrypoint for extensions. """ for ep in pkg_resources.iter_entry_points(group=entry_point_group): self.register_action(ep.load())
[ "def", "load_entry_point_actions", "(", "self", ",", "entry_point_group", ")", ":", "for", "ep", "in", "pkg_resources", ".", "iter_entry_points", "(", "group", "=", "entry_point_group", ")", ":", "self", ".", "register_action", "(", "ep", ".", "load", "(", ")"...
Load actions from an entry point group. :param entry_point_group: The entrypoint for extensions.
[ "Load", "actions", "from", "an", "entry", "point", "group", "." ]
python
train
trehn/termdown
termdown.py
https://github.com/trehn/termdown/blob/aa0c4e39d9864fd1466ef9d76947fb93d0cf5be2/termdown.py#L70-L95
def draw_text(stdscr, text, color=0, fallback=None, title=None): """ Draws text in the given color. Duh. """ if fallback is None: fallback = text y, x = stdscr.getmaxyx() if title: title = pad_to_size(title, x, 1) if "\n" in title.rstrip("\n"): # hack to get more spacing between title and body for figlet title += "\n" * 5 text = title + "\n" + pad_to_size(text, x, len(text.split("\n"))) lines = pad_to_size(text, x, y).rstrip("\n").split("\n") try: for i, line in enumerate(lines): stdscr.insstr(i, 0, line, curses.color_pair(color)) except: lines = pad_to_size(fallback, x, y).rstrip("\n").split("\n") try: for i, line in enumerate(lines[:]): stdscr.insstr(i, 0, line, curses.color_pair(color)) except: pass stdscr.refresh()
[ "def", "draw_text", "(", "stdscr", ",", "text", ",", "color", "=", "0", ",", "fallback", "=", "None", ",", "title", "=", "None", ")", ":", "if", "fallback", "is", "None", ":", "fallback", "=", "text", "y", ",", "x", "=", "stdscr", ".", "getmaxyx", ...
Draws text in the given color. Duh.
[ "Draws", "text", "in", "the", "given", "color", ".", "Duh", "." ]
python
train
JdeRobot/base
src/drivers/drone/cmdvel.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/drone/cmdvel.py#L230-L241
def sendCMD (self, vel): ''' Sends CMDVel. @param vel: CMDVel to publish @type vel: CMDVel ''' self.lock.acquire() self.vel = vel self.lock.release()
[ "def", "sendCMD", "(", "self", ",", "vel", ")", ":", "self", ".", "lock", ".", "acquire", "(", ")", "self", ".", "vel", "=", "vel", "self", ".", "lock", ".", "release", "(", ")" ]
Sends CMDVel. @param vel: CMDVel to publish @type vel: CMDVel
[ "Sends", "CMDVel", "." ]
python
train
BlueBrain/hpcbench
hpcbench/toolbox/slurm/cluster.py
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/toolbox/slurm/cluster.py#L57-L66
def reservations(self): """get nodes of every reservations""" command = [SINFO, '--reservation'] output = subprocess.check_output(command, env=SINFO_ENV) output = output.decode() it = iter(output.splitlines()) next(it) for line in it: rsv = Reservation.from_sinfo(line) yield rsv.name, rsv
[ "def", "reservations", "(", "self", ")", ":", "command", "=", "[", "SINFO", ",", "'--reservation'", "]", "output", "=", "subprocess", ".", "check_output", "(", "command", ",", "env", "=", "SINFO_ENV", ")", "output", "=", "output", ".", "decode", "(", ")"...
get nodes of every reservations
[ "get", "nodes", "of", "every", "reservations" ]
python
train
kensho-technologies/graphql-compiler
graphql_compiler/compiler/ir_lowering_gremlin/__init__.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/ir_lowering_gremlin/__init__.py#L13-L53
def lower_ir(ir_blocks, query_metadata_table, type_equivalence_hints=None): """Lower the IR into an IR form that can be represented in Gremlin queries. Args: ir_blocks: list of IR blocks to lower into Gremlin-compatible form query_metadata_table: QueryMetadataTable object containing all metadata collected during query processing, including location metadata (e.g. which locations are folded or optional). type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union. Used as a workaround for GraphQL's lack of support for inheritance across "types" (i.e. non-interfaces), as well as a workaround for Gremlin's total lack of inheritance-awareness. The key-value pairs in the dict specify that the "key" type is equivalent to the "value" type, i.e. that the GraphQL type or interface in the key is the most-derived common supertype of every GraphQL type in the "value" GraphQL union. Recursive expansion of type equivalence hints is not performed, and only type-level correctness of this argument is enforced. See README.md for more details on everything this parameter does. ***** Be very careful with this option, as bad input here will lead to incorrect output queries being generated. ***** Returns: list of IR blocks suitable for outputting as Gremlin """ sanity_check_ir_blocks_from_frontend(ir_blocks, query_metadata_table) ir_blocks = lower_context_field_existence(ir_blocks, query_metadata_table) ir_blocks = optimize_boolean_expression_comparisons(ir_blocks) if type_equivalence_hints: ir_blocks = lower_coerce_type_block_type_data(ir_blocks, type_equivalence_hints) ir_blocks = lower_coerce_type_blocks(ir_blocks) ir_blocks = rewrite_filters_in_optional_blocks(ir_blocks) ir_blocks = merge_consecutive_filter_clauses(ir_blocks) ir_blocks = lower_folded_outputs(ir_blocks) return ir_blocks
[ "def", "lower_ir", "(", "ir_blocks", ",", "query_metadata_table", ",", "type_equivalence_hints", "=", "None", ")", ":", "sanity_check_ir_blocks_from_frontend", "(", "ir_blocks", ",", "query_metadata_table", ")", "ir_blocks", "=", "lower_context_field_existence", "(", "ir_...
Lower the IR into an IR form that can be represented in Gremlin queries. Args: ir_blocks: list of IR blocks to lower into Gremlin-compatible form query_metadata_table: QueryMetadataTable object containing all metadata collected during query processing, including location metadata (e.g. which locations are folded or optional). type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union. Used as a workaround for GraphQL's lack of support for inheritance across "types" (i.e. non-interfaces), as well as a workaround for Gremlin's total lack of inheritance-awareness. The key-value pairs in the dict specify that the "key" type is equivalent to the "value" type, i.e. that the GraphQL type or interface in the key is the most-derived common supertype of every GraphQL type in the "value" GraphQL union. Recursive expansion of type equivalence hints is not performed, and only type-level correctness of this argument is enforced. See README.md for more details on everything this parameter does. ***** Be very careful with this option, as bad input here will lead to incorrect output queries being generated. ***** Returns: list of IR blocks suitable for outputting as Gremlin
[ "Lower", "the", "IR", "into", "an", "IR", "form", "that", "can", "be", "represented", "in", "Gremlin", "queries", "." ]
python
train
AirtestProject/Poco
poco/acceleration.py
https://github.com/AirtestProject/Poco/blob/2c559a586adf3fd11ee81cabc446d4d3f6f2d119/poco/acceleration.py#L18-L69
def dismiss(self, targets, exit_when=None, sleep_interval=0.5, appearance_timeout=20, timeout=120): """ Automatically dismiss the target objects Args: targets (:obj:`list`): list of poco objects to be dropped exit_when: termination condition, default is None which means to automatically exit when list of ``targets`` is empty sleep_interval: time interval between each actions for the given targets, default is 0.5s appearance_timeout: time interval to wait for given target to appear on the screen, automatically exit when timeout, default is 20s timeout: dismiss function timeout, default is 120s Raises: PocoTargetTimeout: when dismiss time interval timeout, under normal circumstances, this should not happen and if happens, it will be reported """ try: self.wait_for_any(targets, timeout=appearance_timeout) except PocoTargetTimeout: # here returns only when timeout # 仅当超时时自动退出 warnings.warn('Waiting timeout when trying to dismiss something before them appear. Targets are {}' .encode('utf-8').format(targets)) return start_time = time.time() while True: no_target = True for t in targets: if t.exists(): try: for n in t: try: n.click(sleep_interval=sleep_interval) no_target = False except: pass except: # Catch the NodeHasBeenRemoved exception if some node was removed over the above iteration # and just ignore as this will not affect the result. # 遍历(__iter__: for n in t)过程中如果节点正好被移除了,可能会报远程节点被移除的异常 # 这个报错忽略就行 pass time.sleep(sleep_interval) should_exit = exit_when() if exit_when else False if no_target or should_exit: return if time.time() - start_time > timeout: raise PocoTargetTimeout('dismiss', targets)
[ "def", "dismiss", "(", "self", ",", "targets", ",", "exit_when", "=", "None", ",", "sleep_interval", "=", "0.5", ",", "appearance_timeout", "=", "20", ",", "timeout", "=", "120", ")", ":", "try", ":", "self", ".", "wait_for_any", "(", "targets", ",", "...
Automatically dismiss the target objects Args: targets (:obj:`list`): list of poco objects to be dropped exit_when: termination condition, default is None which means to automatically exit when list of ``targets`` is empty sleep_interval: time interval between each actions for the given targets, default is 0.5s appearance_timeout: time interval to wait for given target to appear on the screen, automatically exit when timeout, default is 20s timeout: dismiss function timeout, default is 120s Raises: PocoTargetTimeout: when dismiss time interval timeout, under normal circumstances, this should not happen and if happens, it will be reported
[ "Automatically", "dismiss", "the", "target", "objects" ]
python
train
gitpython-developers/GitPython
git/db.py
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/db.py#L40-L43
def stream(self, sha): """For now, all lookup is done by git itself""" hexsha, typename, size, stream = self._git.stream_object_data(bin_to_hex(sha)) return OStream(hex_to_bin(hexsha), typename, size, stream)
[ "def", "stream", "(", "self", ",", "sha", ")", ":", "hexsha", ",", "typename", ",", "size", ",", "stream", "=", "self", ".", "_git", ".", "stream_object_data", "(", "bin_to_hex", "(", "sha", ")", ")", "return", "OStream", "(", "hex_to_bin", "(", "hexsh...
For now, all lookup is done by git itself
[ "For", "now", "all", "lookup", "is", "done", "by", "git", "itself" ]
python
train
brbsix/pip-utils
pip_utils/outdated.py
https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/outdated.py#L76-L86
def _build_package_finder(options, index_urls, session): """ Create a package finder appropriate to this list command. """ return PackageFinder( find_links=options.get('find_links'), index_urls=index_urls, allow_all_prereleases=options.get('pre'), trusted_hosts=options.get('trusted_hosts'), session=session, )
[ "def", "_build_package_finder", "(", "options", ",", "index_urls", ",", "session", ")", ":", "return", "PackageFinder", "(", "find_links", "=", "options", ".", "get", "(", "'find_links'", ")", ",", "index_urls", "=", "index_urls", ",", "allow_all_prereleases", "...
Create a package finder appropriate to this list command.
[ "Create", "a", "package", "finder", "appropriate", "to", "this", "list", "command", "." ]
python
train
saltstack/salt
salt/modules/github.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/github.py#L1277-L1309
def remove_team(name, profile="github"): ''' Remove a github team. name The name of the team to be removed. profile The name of the profile configuration to use. Defaults to ``github``. CLI Example: .. code-block:: bash salt myminion github.remove_team 'team_name' .. versionadded:: 2016.11.0 ''' team_info = get_team(name, profile=profile) if not team_info: log.error('Team %s to be removed does not exist.', name) return False try: client = _get_client(profile) organization = client.get_organization( _get_config_value(profile, 'org_name') ) team = organization.get_team(team_info['id']) team.delete() return list_teams(ignore_cache=True, profile=profile).get(name) is None except github.GithubException: log.exception('Error deleting a team') return False
[ "def", "remove_team", "(", "name", ",", "profile", "=", "\"github\"", ")", ":", "team_info", "=", "get_team", "(", "name", ",", "profile", "=", "profile", ")", "if", "not", "team_info", ":", "log", ".", "error", "(", "'Team %s to be removed does not exist.'", ...
Remove a github team. name The name of the team to be removed. profile The name of the profile configuration to use. Defaults to ``github``. CLI Example: .. code-block:: bash salt myminion github.remove_team 'team_name' .. versionadded:: 2016.11.0
[ "Remove", "a", "github", "team", "." ]
python
train
hyperledger/sawtooth-core
validator/sawtooth_validator/journal/publisher.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/journal/publisher.py#L242-L253
def pending_batch_info(self): """Returns a tuple of the current size of the pending batch queue and the current queue limit. """ c_length = ctypes.c_int(0) c_limit = ctypes.c_int(0) self._call( 'pending_batch_info', ctypes.byref(c_length), ctypes.byref(c_limit)) return (c_length.value, c_limit.value)
[ "def", "pending_batch_info", "(", "self", ")", ":", "c_length", "=", "ctypes", ".", "c_int", "(", "0", ")", "c_limit", "=", "ctypes", ".", "c_int", "(", "0", ")", "self", ".", "_call", "(", "'pending_batch_info'", ",", "ctypes", ".", "byref", "(", "c_l...
Returns a tuple of the current size of the pending batch queue and the current queue limit.
[ "Returns", "a", "tuple", "of", "the", "current", "size", "of", "the", "pending", "batch", "queue", "and", "the", "current", "queue", "limit", "." ]
python
train
mitsei/dlkit
dlkit/json_/assessment/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/sessions.py#L3411-L3428
def get_item_ids_by_bank(self, bank_id): """Gets the list of ``Item`` ``Ids`` associated with a ``Bank``. arg: bank_id (osid.id.Id): ``Id`` of the ``Bank`` return: (osid.id.IdList) - list of related item ``Ids`` raise: NotFound - ``bank_id`` is not found raise: NullArgument - ``bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceBinSession.get_resource_ids_by_bin id_list = [] for item in self.get_items_by_bank(bank_id): id_list.append(item.get_id()) return IdList(id_list)
[ "def", "get_item_ids_by_bank", "(", "self", ",", "bank_id", ")", ":", "# Implemented from template for", "# osid.resource.ResourceBinSession.get_resource_ids_by_bin", "id_list", "=", "[", "]", "for", "item", "in", "self", ".", "get_items_by_bank", "(", "bank_id", ")", "...
Gets the list of ``Item`` ``Ids`` associated with a ``Bank``. arg: bank_id (osid.id.Id): ``Id`` of the ``Bank`` return: (osid.id.IdList) - list of related item ``Ids`` raise: NotFound - ``bank_id`` is not found raise: NullArgument - ``bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "list", "of", "Item", "Ids", "associated", "with", "a", "Bank", "." ]
python
train
fboender/ansible-cmdb
lib/mako/util.py
https://github.com/fboender/ansible-cmdb/blob/ebd960ac10684e8c9ec2b12751bba2c4c9504ab7/lib/mako/util.py#L255-L263
def sorted_dict_repr(d): """repr() a dictionary with the keys in order. Used by the lexer unit test to compare parse trees based on strings. """ keys = list(d.keys()) keys.sort() return "{" + ", ".join(["%r: %r" % (k, d[k]) for k in keys]) + "}"
[ "def", "sorted_dict_repr", "(", "d", ")", ":", "keys", "=", "list", "(", "d", ".", "keys", "(", ")", ")", "keys", ".", "sort", "(", ")", "return", "\"{\"", "+", "\", \"", ".", "join", "(", "[", "\"%r: %r\"", "%", "(", "k", ",", "d", "[", "k", ...
repr() a dictionary with the keys in order. Used by the lexer unit test to compare parse trees based on strings.
[ "repr", "()", "a", "dictionary", "with", "the", "keys", "in", "order", "." ]
python
train
google/gin-config
gin/config_parser.py
https://github.com/google/gin-config/blob/17a170e0a6711005d1c78e67cf493dc44674d44f/gin/config_parser.py#L297-L354
def _parse_selector(self, scoped=True, allow_periods_in_scope=False):
  """Parse a (possibly scoped) selector.

  A selector is a sequence of one or more valid Python-style identifiers
  separated by periods (see also `SelectorMap`). A scoped selector is a
  selector that may be preceded by scope names (separated by slashes).

  Args:
    scoped: Whether scopes are allowed.
    allow_periods_in_scope: Whether to allow period characters in the scope
      names preceding the selector.

  Returns:
    The parsed selector (as a string).

  Raises:
    SyntaxError: If the scope or selector is malformatted.
  """
  # A selector must begin with an identifier token.
  if self._current_token.kind != tokenize.NAME:
    self._raise_syntax_error('Unexpected token.')

  # Remember where the selector starts in the raw line so we can later
  # compare against the untokenized text (whitespace detection below).
  begin_line_num = self._current_token.begin[0]
  begin_char_num = self._current_token.begin[1]
  end_char_num = self._current_token.end[1]
  line = self._current_token.line

  selector_parts = []
  # This accepts an alternating sequence of NAME and '/' or '.' tokens.
  # step_parity == 0 expects a NAME, step_parity == 1 expects a separator.
  step_parity = 0
  while (step_parity == 0 and self._current_token.kind == tokenize.NAME or
         step_parity == 1 and self._current_token.value in ('/', '.')):
    selector_parts.append(self._current_token.value)
    step_parity = not step_parity
    end_char_num = self._current_token.end[1]
    self._advance_one_token()
  self._skip_whitespace_and_comments()

  # Due to tokenization, most whitespace has been stripped already. To prevent
  # whitespace inside the scoped selector, we verify that it matches an
  # untokenized version of the selector obtained from the first through last
  # character positions of the consumed tokens in the line being parsed.
  scoped_selector = ''.join(selector_parts)
  untokenized_scoped_selector = line[begin_char_num:end_char_num]
  # Also check that it's properly formatted (e.g., no consecutive slashes).
  scope_re = IDENTIFIER_RE
  if allow_periods_in_scope:
    scope_re = MODULE_RE
  selector_re = MODULE_RE

  # Every slash-separated part except the last is a scope; the last part is
  # the selector itself.
  scope_parts = scoped_selector.split('/')
  valid_format = all(scope_re.match(scope) for scope in scope_parts[:-1])
  valid_format &= bool(selector_re.match(scope_parts[-1]))
  # If scopes are disallowed, there must be exactly one part (no slashes).
  valid_format &= bool(scoped or len(scope_parts) == 1)

  if untokenized_scoped_selector != scoped_selector or not valid_format:
    location = (self._filename, begin_line_num, begin_char_num + 1, line)
    self._raise_syntax_error('Malformatted scope or selector.', location)

  return scoped_selector
[ "def", "_parse_selector", "(", "self", ",", "scoped", "=", "True", ",", "allow_periods_in_scope", "=", "False", ")", ":", "if", "self", ".", "_current_token", ".", "kind", "!=", "tokenize", ".", "NAME", ":", "self", ".", "_raise_syntax_error", "(", "'Unexpec...
Parse a (possibly scoped) selector. A selector is a sequence of one or more valid Python-style identifiers separated by periods (see also `SelectorMap`). A scoped selector is a selector that may be preceded by scope names (separated by slashes). Args: scoped: Whether scopes are allowed. allow_periods_in_scope: Whether to allow period characters in the scope names preceding the selector. Returns: The parsed selector (as a string). Raises: SyntaxError: If the scope or selector is malformatted.
[ "Parse", "a", "(", "possibly", "scoped", ")", "selector", "." ]
python
test
spacetelescope/pysynphot
pysynphot/refs.py
https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/pysynphot/refs.py#L135-L216
def setref(graphtable=None, comptable=None, thermtable=None, area=None,
           waveset=None):
    """Set default graph and component tables, primary area, and wavelength set.

    This is similar to setting ``refdata`` in IRAF STSDAS SYNPHOT.
    If all parameters set to `None`, they are reverted to software default.
    If any of the parameters are not `None`, they are set to desired values
    while the rest (if any) remain at current setting.

    Parameters
    ----------
    graphtable, comptable, thermtable : str or `None`
        Graph, component, and thermal table names, respectively, for
        `~pysynphot.observationmode` throughput look-up.
        Do not use "*" wildcard.

    area : float or `None`
        Telescope collecting area, i.e., the primary mirror, in
        :math:`\\textnormal{cm}^{2}`.

    waveset : tuple or `None`
        Parameters for :func:`set_default_waveset` as follow:

        * ``(minwave, maxwave, num)`` - This assumes log scale.
        * ``(minwave, maxwave, num, 'log')``
        * ``(minwave, maxwave, num, 'linear')``

    Raises
    ------
    ValueError
        Invalid ``waveset`` parameters.

    """
    # Module-level state: the lookup caches are cleared on every call so
    # tables are re-read under the new settings.
    global GRAPHTABLE, COMPTABLE, THERMTABLE, PRIMARY_AREA, GRAPHDICT, COMPDICT, THERMDICT
    GRAPHDICT = {}
    COMPDICT = {}
    THERMDICT = {}

    # Check for all None, which means reset.
    # NOTE(review): this relies on waveset being hashable (a tuple) — a list
    # argument would raise TypeError here; confirm callers always pass tuples.
    kwds = set([graphtable, comptable, thermtable, area, waveset])
    if kwds == set([None]):
        # then we should reset everything.
        _set_default_refdata()
        return

    # Otherwise, check them all separately.
    # irafconvert() expands IRAF-style path names into OS paths.
    if graphtable is not None:
        GRAPHTABLE = irafconvert(graphtable)

    if comptable is not None:
        COMPTABLE = irafconvert(comptable)

    if thermtable is not None:
        THERMTABLE = irafconvert(thermtable)

    # Area is a bit different: no conversion needed, stored as-is.
    if area is not None:
        PRIMARY_AREA = area

    if waveset is not None:
        if len(waveset) not in (3, 4):
            raise ValueError('waveset tuple must contain 3 or 4 values')

        minwave = waveset[0]
        maxwave = waveset[1]
        num = waveset[2]

        # A 3-tuple defaults to log spacing; a 4-tuple spells it out.
        if len(waveset) == 3:
            log = True
        elif len(waveset) == 4:
            if waveset[3].lower() == 'log':
                log = True
            elif waveset[3].lower() == 'linear':
                log = False
            else:
                raise ValueError('fourth waveset option must be "log" or "linear"')

        set_default_waveset(minwave, maxwave, num, log=log)

    # That's it.
    return
[ "def", "setref", "(", "graphtable", "=", "None", ",", "comptable", "=", "None", ",", "thermtable", "=", "None", ",", "area", "=", "None", ",", "waveset", "=", "None", ")", ":", "global", "GRAPHTABLE", ",", "COMPTABLE", ",", "THERMTABLE", ",", "PRIMARY_AR...
Set default graph and component tables, primary area, and wavelength set. This is similar to setting ``refdata`` in IRAF STSDAS SYNPHOT. If all parameters set to `None`, they are reverted to software default. If any of the parameters are not `None`, they are set to desired values while the rest (if any) remain at current setting. Parameters ---------- graphtable, comptable, thermtable : str or `None` Graph, component, and thermal table names, respectively, for `~pysynphot.observationmode` throughput look-up. Do not use "*" wildcard. area : float or `None` Telescope collecting area, i.e., the primary mirror, in :math:`\\textnormal{cm}^{2}`. waveset : tuple or `None` Parameters for :func:`set_default_waveset` as follow: * ``(minwave, maxwave, num)`` - This assumes log scale. * ``(minwave, maxwave, num, 'log')`` * ``(minwave, maxwave, num, 'linear')`` Raises ------ ValueError Invalid ``waveset`` parameters.
[ "Set", "default", "graph", "and", "component", "tables", "primary", "area", "and", "wavelength", "set", "." ]
python
train
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sortedlist.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sortedlist.py#L244-L296
def _delete(self, pos, idx):
    """Delete the item at the given (pos, idx).

    Combines lists that are less than half the load level.

    Updates the index when the sublist length is more than half the load
    level. This requires decrementing the nodes in a traversal from the leaf
    node to the root. For an example traversal see self._loc.
    """
    _maxes, _lists, _index = self._maxes, self._lists, self._index

    lists_pos = _lists[pos]

    del lists_pos[idx]
    self._len -= 1

    len_lists_pos = len(lists_pos)

    if len_lists_pos > self._half:
        # Sublist is still above half load: refresh its cached maximum and,
        # if the positional index has been built, walk from this leaf up to
        # the root decrementing each node's count by one.
        _maxes[pos] = lists_pos[-1]

        if _index:
            child = self._offset + pos
            while child > 0:
                _index[child] -= 1
                child = (child - 1) >> 1
            _index[0] -= 1
    elif len(_lists) > 1:
        # Sublist fell below half load and there is a neighbor to merge with.
        # Merge into the previous sublist (or, for pos == 0, treat the next
        # sublist as "pos" and merge it into this one).
        if not pos:
            pos += 1

        prev = pos - 1
        _lists[prev].extend(_lists[pos])
        _maxes[prev] = _lists[prev][-1]

        del _maxes[pos]
        del _lists[pos]
        # The positional index is invalidated by the merge; it is rebuilt
        # lazily on demand.
        del _index[:]

        # The merged sublist may now exceed the load; split if necessary.
        self._expand(prev)
    elif len_lists_pos:
        # Only one sublist remains and it is still non-empty.
        _maxes[pos] = lists_pos[-1]
    else:
        # The last element of the only sublist was removed: drop the sublist
        # and invalidate the index entirely.
        del _maxes[pos]
        del _lists[pos]
        del _index[:]
[ "def", "_delete", "(", "self", ",", "pos", ",", "idx", ")", ":", "_maxes", ",", "_lists", ",", "_index", "=", "self", ".", "_maxes", ",", "self", ".", "_lists", ",", "self", ".", "_index", "lists_pos", "=", "_lists", "[", "pos", "]", "del", "lists_...
Delete the item at the given (pos, idx). Combines lists that are less than half the load level. Updates the index when the sublist length is more than half the load level. This requires decrementing the nodes in a traversal from the leaf node to the root. For an example traversal see self._loc.
[ "Delete", "the", "item", "at", "the", "given", "(", "pos", "idx", ")", "." ]
python
train
splunk/splunk-sdk-python
examples/genevents.py
https://github.com/splunk/splunk-sdk-python/blob/a245a4eeb93b3621730418008e31715912bcdcd8/examples/genevents.py#L58-L106
def feed_index(service, opts):
    """Feed the named index in a specific manner.

    The first positional argument names the target Splunk index; the
    ``ingest`` keyword selects the delivery mechanism: "stream" (attached
    receiver stream), "submit" (one HTTP submit per event), or anything else
    for a raw TCP input socket. Writes 10 bunches of 5000 generated events,
    sleeping one second between bunches; a Ctrl-C stops early and reports
    the last event written.
    """
    indexname = opts.args[0]
    itype = opts.kwargs['ingest']

    # get index handle
    try:
        index = service.indexes[indexname]
    except KeyError:
        print("Index %s not found" % indexname)
        return

    if itype in ["stream", "submit"]:
        # Attached stream is reused for both modes; "submit" below goes
        # through index.submit() instead, so the stream is only written to
        # in "stream" mode.
        stream = index.attach()
    else:
        # create a tcp input if one doesn't exist
        input_host = opts.kwargs.get("inputhost", SPLUNK_HOST)
        input_port = int(opts.kwargs.get("inputport", SPLUNK_PORT))
        input_name = "tcp:%s" % (input_port)
        if input_name not in service.inputs.list():
            service.inputs.create("tcp", input_port, index=indexname)

        # connect to socket
        ingest = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        ingest.connect((input_host, input_port))

    count = 0
    lastevent = ""
    try:
        for i in range(0, 10):
            for j in range(0, 5000):
                # Each event carries a timestamp plus its bunch/sequence ids.
                lastevent = "%s: event bunch %d, number %d\n" % \
                            (datetime.datetime.now().isoformat(), i, j)

                if itype == "stream":
                    stream.write(lastevent + "\n")
                elif itype == "submit":
                    index.submit(lastevent + "\n")
                else:
                    # NOTE(review): under Python 3 socket.send() requires
                    # bytes — this str argument suggests Python 2 heritage;
                    # verify before reuse.
                    ingest.send(lastevent + "\n")
                count = count + 1
            print("submitted %d events, sleeping 1 second" % count)
            time.sleep(1)
    except KeyboardInterrupt:
        print("^C detected, last event written:")
        print(lastevent)
[ "def", "feed_index", "(", "service", ",", "opts", ")", ":", "indexname", "=", "opts", ".", "args", "[", "0", "]", "itype", "=", "opts", ".", "kwargs", "[", "'ingest'", "]", "# get index handle", "try", ":", "index", "=", "service", ".", "indexes", "[",...
Feed the named index in a specific manner.
[ "Feed", "the", "named", "index", "in", "a", "specific", "manner", "." ]
python
train
tensorflow/mesh
mesh_tensorflow/transformer/dataset.py
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/dataset.py#L493-L601
def _pack_with_tf_ops(dataset, keys, length):
  """Helper-function for packing a dataset which has already been batched.

  See pack_dataset()

  Uses tf.while_loop. Slow.

  Args:
    dataset: a dataset containing padded batches of examples.
    keys: a list of strings
    length: an integer

  Returns:
    a dataset.
  """
  # Template for a fresh (empty) packed example: one empty int32 vector per
  # key plus a parallel "<key>_position" vector of within-example offsets.
  empty_example = {}
  for k in keys:
    empty_example[k] = tf.zeros([0], dtype=tf.int32)
    empty_example[k + "_position"] = tf.zeros([0], dtype=tf.int32)
  keys_etc = empty_example.keys()

  def write_packed_example(partial, outputs):
    # Flush `partial` into the TensorArrays, right-padding each key to
    # `length`, and return a fresh empty partial example.
    new_partial = empty_example.copy()
    new_outputs = {}
    for k in keys_etc:
      new_outputs[k] = outputs[k].write(
          outputs[k].size(),
          tf.pad(partial[k], [[0, length - tf.size(partial[k])]]))
    return new_partial, new_outputs

  def map_fn(x):
    """Internal function to flat_map over.

    Consumes a batch of input examples and produces a variable number of
    output examples.

    Args:
      x: a single example

    Returns:
      a tf.data.Dataset
    """
    partial = empty_example.copy()
    i = tf.zeros([], dtype=tf.int32)
    dynamic_batch_size = tf.shape(x[keys[0]])[0]
    outputs = {}
    for k in keys:
      outputs[k] = tf.TensorArray(
          tf.int32, size=0, dynamic_size=True, element_shape=[length])
      outputs[k + "_position"] = tf.TensorArray(
          tf.int32, size=0, dynamic_size=True, element_shape=[length])

    def cond_fn(i, partial, outputs):
      del partial, outputs
      return i < dynamic_batch_size

    def body_fn(i, partial, outputs):
      """Body function for while_loop.

      Args:
        i: integer scalar
        partial: dictionary of Tensor (partially-constructed example)
        outputs: dictionary of TensorArray

      Returns:
        A triple containing the new values of the inputs.
      """
      can_append = True
      one_example = {}
      for k in keys:
        # Strip the zero padding: keep only as many leading entries as there
        # are nonzero values (assumes 0 is padding, not a real token id).
        val = tf.cast(x[k][i], tf.int32)
        val = val[:tf.reduce_sum(tf.cast(tf.not_equal(val, 0), tf.int32))]
        one_example[k] = val
      for k in keys:
        # The example fits only if it fits for EVERY key.
        can_append = tf.logical_and(
            can_append,
            tf.less_equal(
                tf.size(partial[k]) + tf.size(one_example[k]), length))

      def false_fn():
        return write_packed_example(partial, outputs)

      def true_fn():
        return partial, outputs

      # If the next example does not fit, flush the current partial first.
      partial, outputs = tf.cond(can_append, true_fn, false_fn)
      new_partial = {}
      for k in keys:
        new_seq = one_example[k][:length]
        new_seq_len = tf.size(new_seq)
        new_partial[k] = tf.concat([partial[k], new_seq], 0)
        # Positions restart at 0 for each appended example.
        new_partial[k + "_position"] = tf.concat(
            [partial[k + "_position"],
             tf.range(new_seq_len, dtype=tf.int32)], 0)
      partial = new_partial
      return i+1, partial, outputs

    i, partial, outputs = tf.while_loop(
        cond_fn, body_fn, (i, partial, outputs),
        back_prop=False,
        shape_invariants=(
            tf.TensorShape([]),
            {k: tf.TensorShape([None]) for k in keys_etc},
            {k: tf.TensorShape(None) for k in keys_etc},
        ))
    # Flush the final partially-filled example.
    partial, outputs = write_packed_example(partial, outputs)
    packed = {k: outputs[k].stack() for k in keys_etc}
    for k in keys:
      # Segmentation ids: cumulative count of position-resets (== example
      # index within the pack, 1-based), zeroed out on padding.
      packed[k + "_segmentation"] = (
          tf.cumsum(
              tf.cast(tf.equal(packed[k + "_position"], 0), tf.int32), axis=1) *
          tf.cast(tf.not_equal(packed[k], 0), tf.int32))
    return packed

  dataset = dataset.map(map_fn,
                        num_parallel_calls=tf.data.experimental.AUTOTUNE)
  return dataset.flat_map(tf.data.Dataset.from_tensor_slices)
[ "def", "_pack_with_tf_ops", "(", "dataset", ",", "keys", ",", "length", ")", ":", "empty_example", "=", "{", "}", "for", "k", "in", "keys", ":", "empty_example", "[", "k", "]", "=", "tf", ".", "zeros", "(", "[", "0", "]", ",", "dtype", "=", "tf", ...
Helper-function for packing a dataset which has already been batched. See pack_dataset() Uses tf.while_loop. Slow. Args: dataset: a dataset containing padded batches of examples. keys: a list of strings length: an integer Returns: a dataset.
[ "Helper", "-", "function", "for", "packing", "a", "dataset", "which", "has", "already", "been", "batched", "." ]
python
train
python-openxml/python-docx
docx/text/run.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/text/run.py#L28-L47
def add_break(self, break_type=WD_BREAK.LINE):
    """
    Add a break element of *break_type* to this run. *break_type* can
    take the values `WD_BREAK.LINE`, `WD_BREAK.PAGE`, and
    `WD_BREAK.COLUMN` where `WD_BREAK` is imported from `docx.enum.text`.
    *break_type* defaults to `WD_BREAK.LINE`.
    """
    # Map each break enum member to the (type, clear) attribute pair the
    # <w:br> element should carry; None means "leave the attribute unset".
    break_attrs = {
        WD_BREAK.LINE:             (None,           None),
        WD_BREAK.PAGE:             ('page',         None),
        WD_BREAK.COLUMN:           ('column',       None),
        WD_BREAK.LINE_CLEAR_LEFT:  ('textWrapping', 'left'),
        WD_BREAK.LINE_CLEAR_RIGHT: ('textWrapping', 'right'),
        WD_BREAK.LINE_CLEAR_ALL:   ('textWrapping', 'all'),
    }
    br_type, br_clear = break_attrs[break_type]
    br = self._r.add_br()
    if br_type is not None:
        br.type = br_type
    if br_clear is not None:
        br.clear = br_clear
[ "def", "add_break", "(", "self", ",", "break_type", "=", "WD_BREAK", ".", "LINE", ")", ":", "type_", ",", "clear", "=", "{", "WD_BREAK", ".", "LINE", ":", "(", "None", ",", "None", ")", ",", "WD_BREAK", ".", "PAGE", ":", "(", "'page'", ",", "None",...
Add a break element of *break_type* to this run. *break_type* can take the values `WD_BREAK.LINE`, `WD_BREAK.PAGE`, and `WD_BREAK.COLUMN` where `WD_BREAK` is imported from `docx.enum.text`. *break_type* defaults to `WD_BREAK.LINE`.
[ "Add", "a", "break", "element", "of", "*", "break_type", "*", "to", "this", "run", ".", "*", "break_type", "*", "can", "take", "the", "values", "WD_BREAK", ".", "LINE", "WD_BREAK", ".", "PAGE", "and", "WD_BREAK", ".", "COLUMN", "where", "WD_BREAK", "is",...
python
train
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_antenna.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_antenna.py#L20-L28
def cmd_antenna(self, args):
    '''set gcs location'''
    # With exactly two arguments, treat them as (lat, lon) and store them.
    if len(args) == 2:
        self.gcs_location = (float(args[0]), float(args[1]))
        return
    # Otherwise just report the current setting.
    if self.gcs_location is None:
        print("GCS location not set")
    else:
        print("GCS location %s" % str(self.gcs_location))
[ "def", "cmd_antenna", "(", "self", ",", "args", ")", ":", "if", "len", "(", "args", ")", "!=", "2", ":", "if", "self", ".", "gcs_location", "is", "None", ":", "print", "(", "\"GCS location not set\"", ")", "else", ":", "print", "(", "\"GCS location %s\""...
set gcs location
[ "set", "gcs", "location" ]
python
train
proycon/pynlpl
pynlpl/formats/folia.py
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L722-L730
def description(self):
    """Obtain the description associated with the element.

    Raises:
        :class:`NoSuchAnnotation` if there is no associated description."""
    # Yield the value of the first Description child, if any exists.
    for value in (child.value for child in self if isinstance(child, Description)):
        return value
    raise NoSuchAnnotation
[ "def", "description", "(", "self", ")", ":", "for", "e", "in", "self", ":", "if", "isinstance", "(", "e", ",", "Description", ")", ":", "return", "e", ".", "value", "raise", "NoSuchAnnotation" ]
Obtain the description associated with the element. Raises: :class:`NoSuchAnnotation` if there is no associated description.
[ "Obtain", "the", "description", "associated", "with", "the", "element", "." ]
python
train
LionelAuroux/pyrser
pyrser/type_system/scope.py
https://github.com/LionelAuroux/pyrser/blob/f153a97ef2b6bf915a1ed468c0252a9a59b754d5/pyrser/type_system/scope.py#L191-L199
def intersection_update(self, oset: Scope) -> Scope:
    """ Update Set with common values of another Set """
    # Iterate over a snapshot of the keys so we may delete while looping.
    for key in list(self._hsig.keys()):
        if key in oset:
            # Shared key: take the other scope's value.
            self._hsig[key] = oset.get(key)
        else:
            # Key absent from the other scope: drop it here.
            del self._hsig[key]
    return self
[ "def", "intersection_update", "(", "self", ",", "oset", ":", "Scope", ")", "->", "Scope", ":", "keys", "=", "list", "(", "self", ".", "_hsig", ".", "keys", "(", ")", ")", "for", "k", "in", "keys", ":", "if", "k", "not", "in", "oset", ":", "del", ...
Update Set with common values of another Set
[ "Update", "Set", "with", "common", "values", "of", "another", "Set" ]
python
test
materialsproject/pymatgen
pymatgen/phonon/bandstructure.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/phonon/bandstructure.py#L427-L505
def as_phononwebsite(self):
    """
    Return a dictionary with the phononwebsite format:
    http://henriquemiranda.github.io/phononwebsite
    """
    d = {}

    # define the lattice
    d["lattice"] = self.structure.lattice._matrix.tolist()

    # define atoms: cartesian coords, fractional coords and species symbols
    atom_pos_car = []
    atom_pos_red = []
    atom_types = []
    for site in self.structure.sites:
        atom_pos_car.append(site.coords.tolist())
        atom_pos_red.append(site.frac_coords.tolist())
        atom_types.append(site.species_string)

    # default for now
    d["repetitions"] = get_reasonable_repetitions(len(atom_pos_car))

    d["natoms"] = len(atom_pos_car)
    d["atom_pos_car"] = atom_pos_car
    d["atom_pos_red"] = atom_pos_red
    d["atom_types"] = atom_types
    d["atom_numbers"] = self.structure.atomic_numbers
    d["formula"] = self.structure.formula
    d["name"] = self.structure.formula

    # get qpoints (fractional coordinates)
    qpoints = []
    for q in self.qpoints:
        qpoints.append(list(q.frac_coords))
    d["qpoints"] = qpoints

    # get labels: map qpoint index -> high-symmetry label
    hsq_dict = collections.OrderedDict()
    for nq, q in enumerate(self.qpoints):
        if q.label is not None:
            hsq_dict[nq] = q.label

    # get distances along the band path; a pair of adjacent labelled
    # qpoints with different labels marks a discontinuity (branch jump)
    dist = 0
    nqstart = 0
    distances = [dist]
    line_breaks = []
    for nq in range(1, len(qpoints)):
        q1 = np.array(qpoints[nq])
        q2 = np.array(qpoints[nq - 1])
        # detect jumps
        if ((nq in hsq_dict) and (nq - 1 in hsq_dict)):
            if (hsq_dict[nq] != hsq_dict[nq - 1]):
                # merge the two labels as "A|B" on the earlier point
                hsq_dict[nq - 1] += "|" + hsq_dict[nq]
            del hsq_dict[nq]
            line_breaks.append((nqstart, nq))
            nqstart = nq
        else:
            dist += np.linalg.norm(q1 - q2)
        distances.append(dist)
    line_breaks.append((nqstart, len(qpoints)))
    d["distances"] = distances
    d["line_breaks"] = line_breaks
    d["highsym_qpts"] = list(hsq_dict.items())

    # eigenvalues, converted from THz to cm^-1
    thz2cm1 = 33.35641
    bands = self.bands.copy() * thz2cm1
    d["eigenvalues"] = bands.T.tolist()

    # eigenvectors, normalized to the first displacement and split into
    # (real, imag) as the website format expects
    eigenvectors = self.eigendisplacements.copy()
    eigenvectors /= np.linalg.norm(eigenvectors[0, 0])
    eigenvectors = eigenvectors.swapaxes(0, 1)
    eigenvectors = np.array([eigenvectors.real, eigenvectors.imag])
    eigenvectors = np.rollaxis(eigenvectors, 0, 5)
    d["vectors"] = eigenvectors.tolist()

    return d
[ "def", "as_phononwebsite", "(", "self", ")", ":", "d", "=", "{", "}", "#define the lattice", "d", "[", "\"lattice\"", "]", "=", "self", ".", "structure", ".", "lattice", ".", "_matrix", ".", "tolist", "(", ")", "#define atoms", "atom_pos_car", "=", "[", ...
Return a dictionary with the phononwebsite format: http://henriquemiranda.github.io/phononwebsite
[ "Return", "a", "dictionary", "with", "the", "phononwebsite", "format", ":", "http", ":", "//", "henriquemiranda", ".", "github", ".", "io", "/", "phononwebsite" ]
python
train
log2timeline/plaso
plaso/output/xlsx.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/output/xlsx.py#L61-L87
def _FormatDateTime(self, event):
  """Formats the date to a datetime object without timezone information.

  Note: timezone information must be removed due to lack of support
  by xlsxwriter and Excel.

  Args:
    event (EventObject): event.

  Returns:
    datetime.datetime|str: date and time value or a string containing
        "ERROR" on OverflowError.
  """
  try:
    datetime_object = datetime.datetime(
        1970, 1, 1, 0, 0, 0, 0, tzinfo=pytz.UTC)
    datetime_object += datetime.timedelta(microseconds=event.timestamp)
    # astimezone() returns a new datetime and does not mutate in place; the
    # previous code discarded its return value, so the configured output
    # timezone was never applied and UTC wall time was always returned.
    datetime_object = datetime_object.astimezone(
        self._output_mediator.timezone)
    return datetime_object.replace(tzinfo=None)

  except (OverflowError, ValueError) as exception:
    self._ReportEventError(event, (
        'unable to copy timestamp: {0!s} to a human readable date and time '
        'with error: {1!s}. Defaulting to: "ERROR"').format(
            event.timestamp, exception))
    return 'ERROR'
[ "def", "_FormatDateTime", "(", "self", ",", "event", ")", ":", "try", ":", "datetime_object", "=", "datetime", ".", "datetime", "(", "1970", ",", "1", ",", "1", ",", "0", ",", "0", ",", "0", ",", "0", ",", "tzinfo", "=", "pytz", ".", "UTC", ")", ...
Formats the date to a datetime object without timezone information. Note: timezone information must be removed due to lack of support by xlsxwriter and Excel. Args: event (EventObject): event. Returns: datetime.datetime|str: date and time value or a string containing "ERROR" on OverflowError.
[ "Formats", "the", "date", "to", "a", "datetime", "object", "without", "timezone", "information", "." ]
python
train
pypa/pipenv
pipenv/vendor/distlib/_backport/tarfile.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1485-L1494
def _proc_gnusparse_00(self, next, pax_headers, buf):
    """Process a GNU tar extended sparse header, version 0.0.
    """
    # Each sparse segment is described by an offset/numbytes record pair
    # in the pax buffer; collect both series and zip them into segments.
    offsets = [int(match.group(1))
               for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf)]
    numbytes = [int(match.group(1))
                for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf)]
    next.sparse = list(zip(offsets, numbytes))
[ "def", "_proc_gnusparse_00", "(", "self", ",", "next", ",", "pax_headers", ",", "buf", ")", ":", "offsets", "=", "[", "]", "for", "match", "in", "re", ".", "finditer", "(", "br\"\\d+ GNU.sparse.offset=(\\d+)\\n\"", ",", "buf", ")", ":", "offsets", ".", "ap...
Process a GNU tar extended sparse header, version 0.0.
[ "Process", "a", "GNU", "tar", "extended", "sparse", "header", "version", "0", ".", "0", "." ]
python
train
DataONEorg/d1_python
lib_common/src/d1_common/bagit.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/bagit.py#L78-L103
def create_bagit_stream(dir_name, payload_info_list):
    """Create a stream containing a BagIt zip archive.

    Args:
      dir_name : str
        The name of the root directory in the zip file, under which all the files
        are placed (avoids "zip bombs").

      payload_info_list: list
        List of payload_info_dict, each dict describing a file.

        - keys: pid, filename, iter, checksum, checksum_algorithm
        - If the filename is None, the pid is used for the filename.

    Returns:
      zipstream.ZipFile: a streaming zip object; nothing is written to disk
      here — the caller iterates the returned object to produce the bytes.
    """
    zip_file = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_DEFLATED)
    # Normalize/record the archive paths for every payload entry.
    _add_path(dir_name, payload_info_list)
    # Payload files first; their aggregate size and count feed the tag files.
    payload_byte_count, payload_file_count = _add_payload_files(
        zip_file, payload_info_list
    )
    # BagIt tag files (bagit.txt, bag-info.txt, ...), then the manifests
    # covering payload and tag files, per the BagIt layout.
    tag_info_list = _add_tag_files(
        zip_file, dir_name, payload_info_list, payload_byte_count, payload_file_count
    )
    _add_manifest_files(zip_file, dir_name, payload_info_list, tag_info_list)
    _add_tag_manifest_file(zip_file, dir_name, tag_info_list)
    return zip_file
[ "def", "create_bagit_stream", "(", "dir_name", ",", "payload_info_list", ")", ":", "zip_file", "=", "zipstream", ".", "ZipFile", "(", "mode", "=", "'w'", ",", "compression", "=", "zipstream", ".", "ZIP_DEFLATED", ")", "_add_path", "(", "dir_name", ",", "payloa...
Create a stream containing a BagIt zip archive. Args: dir_name : str The name of the root directory in the zip file, under which all the files are placed (avoids "zip bombs"). payload_info_list: list List of payload_info_dict, each dict describing a file. - keys: pid, filename, iter, checksum, checksum_algorithm - If the filename is None, the pid is used for the filename.
[ "Create", "a", "stream", "containing", "a", "BagIt", "zip", "archive", "." ]
python
train
jrief/djangocms-cascade
cmsplugin_cascade/mixins.py
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/mixins.py#L27-L40
def get_css_classes(cls, instance):
    """
    Returns a list of CSS classes to be added as class="..." to the current HTML tag.
    """
    _unset = object()
    classes = []
    # A class-level default CSS class is honoured even if it is None/empty,
    # matching hasattr() semantics.
    default_class = getattr(cls, 'default_css_class', _unset)
    if default_class is not _unset:
        classes.append(default_class)
    # Each declared attribute maps to a glossary entry that may hold either
    # a single class name or a list of them.
    for attr_name in getattr(cls, 'default_css_attributes', []):
        value = instance.glossary.get(attr_name)
        if isinstance(value, six.string_types):
            classes.append(value)
        elif isinstance(value, list):
            classes.extend(value)
    return classes
[ "def", "get_css_classes", "(", "cls", ",", "instance", ")", ":", "css_classes", "=", "[", "]", "if", "hasattr", "(", "cls", ",", "'default_css_class'", ")", ":", "css_classes", ".", "append", "(", "cls", ".", "default_css_class", ")", "for", "attr", "in", ...
Returns a list of CSS classes to be added as class="..." to the current HTML tag.
[ "Returns", "a", "list", "of", "CSS", "classes", "to", "be", "added", "as", "class", "=", "...", "to", "the", "current", "HTML", "tag", "." ]
python
train
lowandrew/OLCTools
spadespipeline/GeneSeekr.py
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/GeneSeekr.py#L51-L118
def filterunique(self):
    """
    Filters multiple BLAST hits in a common region of the genome. Leaves only the best hit
    """
    for sample in self.metadata:
        # Initialise variables
        sample[self.analysistype].blastresults = list()
        resultdict = dict()
        rowdict = dict()
        try:
            # Iterate through all the contigs, which had BLAST hits
            for contig in sample[self.analysistype].queryranges:
                # Find all the locations in each contig that correspond to the BLAST hits
                for location in sample[self.analysistype].queryranges[contig]:
                    # Extract the BLAST result dictionary for the contig
                    for row in sample[self.analysistype].results[contig]:
                        # Initialise variable to reduce the number of times row['value'] needs to be typed
                        # NOTE(review): this rebinds the outer `contig` loop
                        # variable to the row's query id — apparently relied
                        # upon below, but worth confirming it is intentional.
                        contig = row['query_id']
                        high = row['high']
                        low = row['low']
                        percentidentity = row['percentidentity']
                        # Join the two ranges in the location list with a comma
                        locstr = ','.join([str(x) for x in location])
                        # Create a set of the location of all the base pairs between the low and high (-1) e.g.
                        # [6, 10] would give 6, 7, 8, 9, but NOT 10. This turns out to be useful, as there are
                        # genes located back-to-back in the genome e.g. strB and strA, with locations of
                        # 2557,3393 and 3393,4196, respectively. By not including 3393 in the strB
                        # calculations, I don't have to worry about this single bp overlap
                        loc = set(range(low, high))
                        # Use a set intersection to determine whether the current result overlaps with location
                        # This will allow all the hits to be grouped together based on their location
                        if loc.intersection(set(range(location[0], location[1]))):
                            # Populate the grouped hits for each location
                            try:
                                resultdict[contig][locstr].append(percentidentity)
                                rowdict[contig][locstr].append(row)
                            # Initialise and populate the lists of the nested dictionary
                            except KeyError:
                                try:
                                    resultdict[contig][locstr] = list()
                                    resultdict[contig][locstr].append(percentidentity)
                                    rowdict[contig][locstr] = list()
                                    rowdict[contig][locstr].append(row)
                                # As this is a nested dictionary, it needs to be initialised here
                                except KeyError:
                                    resultdict[contig] = dict()
                                    resultdict[contig][locstr] = list()
                                    resultdict[contig][locstr].append(percentidentity)
                                    rowdict[contig] = dict()
                                    rowdict[contig][locstr] = list()
                                    rowdict[contig][locstr].append(row)
        except KeyError:
            # Samples without query ranges/results for this analysis type are
            # silently skipped (best-effort behaviour preserved).
            pass
        # Find the best hit for each location based on percent identity
        for contig in resultdict:
            # Do not allow the same gene to be added to the dictionary more than once
            genes = list()
            for location in resultdict[contig]:
                # Initialise a variable to determine whether there is already a best hit found for the location
                multiple = False
                # Iterate through the BLAST results to find the best hit
                for row in rowdict[contig][location]:
                    # Add the best hit to the .blastresults attribute of the object
                    if row['percentidentity'] == max(resultdict[contig][location]) and not multiple \
                            and row['subject_id'] not in genes:
                        sample[self.analysistype].blastresults.append(row)
                        genes.append(row['subject_id'])
                        multiple = True
[ "def", "filterunique", "(", "self", ")", ":", "for", "sample", "in", "self", ".", "metadata", ":", "# Initialise variables", "sample", "[", "self", ".", "analysistype", "]", ".", "blastresults", "=", "list", "(", ")", "resultdict", "=", "dict", "(", ")", ...
Filters multiple BLAST hits in a common region of the genome. Leaves only the best hit
[ "Filters", "multiple", "BLAST", "hits", "in", "a", "common", "region", "of", "the", "genome", ".", "Leaves", "only", "the", "best", "hit" ]
python
train
1flow/python-ftr
ftr/extractor.py
https://github.com/1flow/python-ftr/blob/90a2108c5ee005f1bf66dbe8cce68f2b7051b839/ftr/extractor.py#L134-L165
def _tidy(self, html, smart_tidy):
    """ Tidy HTML if we have a tidy method.

    This fixes problems with some sites which would otherwise trouble
    DOMDocument's HTML parsing. Although sometimes it makes the problem
    worse, which is why we can override it in site config files.

    Side effects: sets ``self.html`` (always) and ``self.tidied`` (only
    when tidying actually ran).
    """
    # Tidy only when the site config asks for it, tidylib imported
    # successfully, and the caller did not disable smart tidying.
    if self.config.tidy and tidylib and smart_tidy:
        try:
            document, errors = tidylib.tidy_document(html, self.tidy_config)
        except UnicodeDecodeError:
            # For some reason, pytidylib fails to decode, whereas the
            # original html content converts perfectly manually.
            document, errors = tidylib.tidy_document(html.encode('utf-8'),
                                                     self.tidy_config)
            document = document.decode('utf-8')

        # if errors:
        #     LOGGER.debug(u'Ignored errors returned by tidylib: %s',
        #                  errors)

        self.tidied = True
        self.html = document
        LOGGER.info(u'Tidied document.')
    else:
        # Tidying skipped: store the document unchanged.
        self.html = html
[ "def", "_tidy", "(", "self", ",", "html", ",", "smart_tidy", ")", ":", "if", "self", ".", "config", ".", "tidy", "and", "tidylib", "and", "smart_tidy", ":", "try", ":", "document", ",", "errors", "=", "tidylib", ".", "tidy_document", "(", "html", ",", ...
Tidy HTML if we have a tidy method. This fixes problems with some sites which would otherwise trouble DOMDocument's HTML parsing. Although sometimes it makes the problem worse, which is why we can override it in site config files.
[ "Tidy", "HTML", "if", "we", "have", "a", "tidy", "method", "." ]
python
train
Qiskit/qiskit-terra
qiskit/quantum_info/operators/channel/transformations.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/quantum_info/operators/channel/transformations.py#L244-L254
def _stinespring_to_choi(data, input_dim, output_dim): """Transform Stinespring representation to Choi representation.""" trace_dim = data[0].shape[0] // output_dim stine_l = np.reshape(data[0], (output_dim, trace_dim, input_dim)) if data[1] is None: stine_r = stine_l else: stine_r = np.reshape(data[1], (output_dim, trace_dim, input_dim)) return np.reshape( np.einsum('iAj,kAl->jilk', stine_l, stine_r.conj()), 2 * [input_dim * output_dim])
[ "def", "_stinespring_to_choi", "(", "data", ",", "input_dim", ",", "output_dim", ")", ":", "trace_dim", "=", "data", "[", "0", "]", ".", "shape", "[", "0", "]", "//", "output_dim", "stine_l", "=", "np", ".", "reshape", "(", "data", "[", "0", "]", ","...
Transform Stinespring representation to Choi representation.
[ "Transform", "Stinespring", "representation", "to", "Choi", "representation", "." ]
python
test
hyperledger/indy-plenum
plenum/server/node.py
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/node.py#L1727-L1739
async def serviceViewChangerInbox(self, limit: int = None) -> int: """ Service at most `limit` number of messages from the view_changer's outBox. :return: the number of messages successfully serviced. """ msgCount = 0 while self.msgsToViewChanger and (not limit or msgCount < limit): msgCount += 1 msg = self.msgsToViewChanger.popleft() self.view_changer.inBox.append(msg) await self.view_changer.serviceQueues(limit) return msgCount
[ "async", "def", "serviceViewChangerInbox", "(", "self", ",", "limit", ":", "int", "=", "None", ")", "->", "int", ":", "msgCount", "=", "0", "while", "self", ".", "msgsToViewChanger", "and", "(", "not", "limit", "or", "msgCount", "<", "limit", ")", ":", ...
Service at most `limit` number of messages from the view_changer's outBox. :return: the number of messages successfully serviced.
[ "Service", "at", "most", "limit", "number", "of", "messages", "from", "the", "view_changer", "s", "outBox", "." ]
python
train
phoebe-project/phoebe2
phoebe/parameters/parameters.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/parameters/parameters.py#L1608-L1641
def get_or_create(self, qualifier, new_parameter, **kwargs): """ Get a :class:`Parameter` from the ParameterSet, if it does not exist, create and attach it. Note: running this on a ParameterSet that is NOT a :class:`phoebe.frontend.bundle.Bundle`, will NOT add the Parameter to the bundle, but only the temporary ParameterSet :parameter str qualifier: the qualifier of the :class:`Parameter` (note, not the twig) :parameter new_parameter: the parameter to attach if no result is found :type new_parameter: :class:`Parameter` :parameter **kwargs: meta-tags to search - will also be applied to new_parameter if it is attached. :return: Parameter, created :rtype: :class:`Parameter`, bool :raises ValueError: if more than 1 result was found using the search criteria. """ ps = self.filter_or_get(qualifier=qualifier, **kwargs) if isinstance(ps, Parameter): return ps, False elif len(ps): # TODO: custom exception? raise ValueError("more than 1 result was found") else: self._attach_params(ParameterSet([new_parameter]), **kwargs) logger.debug("creating and attaching new parameter: {}".format(new_parameter.qualifier)) return self.filter_or_get(qualifier=qualifier, **kwargs), True
[ "def", "get_or_create", "(", "self", ",", "qualifier", ",", "new_parameter", ",", "*", "*", "kwargs", ")", ":", "ps", "=", "self", ".", "filter_or_get", "(", "qualifier", "=", "qualifier", ",", "*", "*", "kwargs", ")", "if", "isinstance", "(", "ps", ",...
Get a :class:`Parameter` from the ParameterSet, if it does not exist, create and attach it. Note: running this on a ParameterSet that is NOT a :class:`phoebe.frontend.bundle.Bundle`, will NOT add the Parameter to the bundle, but only the temporary ParameterSet :parameter str qualifier: the qualifier of the :class:`Parameter` (note, not the twig) :parameter new_parameter: the parameter to attach if no result is found :type new_parameter: :class:`Parameter` :parameter **kwargs: meta-tags to search - will also be applied to new_parameter if it is attached. :return: Parameter, created :rtype: :class:`Parameter`, bool :raises ValueError: if more than 1 result was found using the search criteria.
[ "Get", "a", ":", "class", ":", "Parameter", "from", "the", "ParameterSet", "if", "it", "does", "not", "exist", "create", "and", "attach", "it", "." ]
python
train
mlperf/training
reinforcement/tensorflow/minigo/selfplay.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/reinforcement/tensorflow/minigo/selfplay.py#L49-L107
def play(network): """Plays out a self-play match, returning a MCTSPlayer object containing: - the final position - the n x 362 tensor of floats representing the mcts search probabilities - the n-ary tensor of floats representing the original value-net estimate where n is the number of moves in the game """ readouts = FLAGS.num_readouts # defined in strategies.py # Disable resign in 5% of games if random.random() < FLAGS.resign_disable_pct: resign_threshold = -1.0 else: resign_threshold = None player = MCTSPlayer(network, resign_threshold=resign_threshold) player.initialize_game() # Must run this once at the start to expand the root node. first_node = player.root.select_leaf() prob, val = network.run(first_node.position) first_node.incorporate_results(prob, val, first_node) while True: start = time.time() player.root.inject_noise() current_readouts = player.root.N # we want to do "X additional readouts", rather than "up to X readouts". while player.root.N < current_readouts + readouts: player.tree_search() if FLAGS.verbose >= 3: print(player.root.position) print(player.root.describe()) if player.should_resign(): player.set_result(-1 * player.root.position.to_play, was_resign=True) break move = player.pick_move() player.play_move(move) if player.root.is_done(): player.set_result(player.root.position.result(), was_resign=False) break if (FLAGS.verbose >= 2) or (FLAGS.verbose >= 1 and player.root.position.n % 10 == 9): print("Q: {:.5f}".format(player.root.Q)) dur = time.time() - start print("%d: %d readouts, %.3f s/100. (%.2f sec)" % ( player.root.position.n, readouts, dur / readouts * 100.0, dur), flush=True) if FLAGS.verbose >= 3: print("Played >>", coords.to_gtp(coords.from_flat(player.root.fmove))) if FLAGS.verbose >= 2: utils.dbg("%s: %.3f" % (player.result_string, player.root.Q)) utils.dbg(player.root.position, player.root.position.score()) return player
[ "def", "play", "(", "network", ")", ":", "readouts", "=", "FLAGS", ".", "num_readouts", "# defined in strategies.py", "# Disable resign in 5% of games", "if", "random", ".", "random", "(", ")", "<", "FLAGS", ".", "resign_disable_pct", ":", "resign_threshold", "=", ...
Plays out a self-play match, returning a MCTSPlayer object containing: - the final position - the n x 362 tensor of floats representing the mcts search probabilities - the n-ary tensor of floats representing the original value-net estimate where n is the number of moves in the game
[ "Plays", "out", "a", "self", "-", "play", "match", "returning", "a", "MCTSPlayer", "object", "containing", ":", "-", "the", "final", "position", "-", "the", "n", "x", "362", "tensor", "of", "floats", "representing", "the", "mcts", "search", "probabilities", ...
python
train
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/_backends/mds/apis/subscriptions_api.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/mds/apis/subscriptions_api.py#L604-L623
def get_pre_subscriptions(self, **kwargs): # noqa: E501 """Get pre-subscriptions # noqa: E501 You can retrieve the pre-subscription data with the GET operation. The server returns with the same JSON structure as described above. If there are no pre-subscribed resources, it returns with an empty array. **Example usage:** curl -X GET https://api.us-east-1.mbedcloud.com/v2/subscriptions -H 'authorization: Bearer {api-key}' # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.get_pre_subscriptions(asynchronous=True) >>> result = thread.get() :param asynchronous bool :return: PresubscriptionArray If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.get_pre_subscriptions_with_http_info(**kwargs) # noqa: E501 else: (data) = self.get_pre_subscriptions_with_http_info(**kwargs) # noqa: E501 return data
[ "def", "get_pre_subscriptions", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'asynchronous'", ")", ":", "return", "self", ".", "get_pre_subscripti...
Get pre-subscriptions # noqa: E501 You can retrieve the pre-subscription data with the GET operation. The server returns with the same JSON structure as described above. If there are no pre-subscribed resources, it returns with an empty array. **Example usage:** curl -X GET https://api.us-east-1.mbedcloud.com/v2/subscriptions -H 'authorization: Bearer {api-key}' # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.get_pre_subscriptions(asynchronous=True) >>> result = thread.get() :param asynchronous bool :return: PresubscriptionArray If the method is called asynchronously, returns the request thread.
[ "Get", "pre", "-", "subscriptions", "#", "noqa", ":", "E501" ]
python
train
yyuu/botornado
boto/auth.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/auth.py#L223-L237
def string_to_sign(self, http_request): """ Return the canonical StringToSign as well as a dict containing the original version of all headers that were included in the StringToSign. """ headers_to_sign = self.headers_to_sign(http_request) canonical_headers = self.canonical_headers(headers_to_sign) string_to_sign = '\n'.join([http_request.method, http_request.path, '', canonical_headers, '', http_request.body]) return string_to_sign, headers_to_sign
[ "def", "string_to_sign", "(", "self", ",", "http_request", ")", ":", "headers_to_sign", "=", "self", ".", "headers_to_sign", "(", "http_request", ")", "canonical_headers", "=", "self", ".", "canonical_headers", "(", "headers_to_sign", ")", "string_to_sign", "=", "...
Return the canonical StringToSign as well as a dict containing the original version of all headers that were included in the StringToSign.
[ "Return", "the", "canonical", "StringToSign", "as", "well", "as", "a", "dict", "containing", "the", "original", "version", "of", "all", "headers", "that", "were", "included", "in", "the", "StringToSign", "." ]
python
train
nickmckay/LiPD-utilities
Python/lipd/doi_resolver.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/doi_resolver.py#L125-L147
def illegal_doi(self, doi_string): """ DOI string did not match the regex. Determine what the data is. :param doi_string: (str) Malformed DOI string :return: None """ logger_doi_resolver.info("enter illegal_doi") # Ignores empty or irrelevant strings (blank, spaces, na, nan, ', others) if len(doi_string) > 5: # NOAA string if 'noaa' in doi_string.lower(): self.noaa_citation(doi_string) # Paragraph citation / Manual citation elif doi_string.count(' ') > 3: self.root_dict['pub'][0]['citation'] = doi_string # Strange Links or Other, send to quarantine else: logger_doi_resolver.warn("illegal_doi: bad doi string: {}".format(doi_string)) logger_doi_resolver.info("exit illegal_doi") return
[ "def", "illegal_doi", "(", "self", ",", "doi_string", ")", ":", "logger_doi_resolver", ".", "info", "(", "\"enter illegal_doi\"", ")", "# Ignores empty or irrelevant strings (blank, spaces, na, nan, ', others)", "if", "len", "(", "doi_string", ")", ">", "5", ":", "# NOA...
DOI string did not match the regex. Determine what the data is. :param doi_string: (str) Malformed DOI string :return: None
[ "DOI", "string", "did", "not", "match", "the", "regex", ".", "Determine", "what", "the", "data", "is", ".", ":", "param", "doi_string", ":", "(", "str", ")", "Malformed", "DOI", "string", ":", "return", ":", "None" ]
python
train
python-diamond/Diamond
src/diamond/handler/rabbitmq_pubsub.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/diamond/handler/rabbitmq_pubsub.py#L87-L97
def get_default_config_help(self): """ Returns the help text for the configuration options for this handler """ config = super(rmqHandler, self).get_default_config_help() config.update({ 'server': '', 'rmq_exchange': '', }) return config
[ "def", "get_default_config_help", "(", "self", ")", ":", "config", "=", "super", "(", "rmqHandler", ",", "self", ")", ".", "get_default_config_help", "(", ")", "config", ".", "update", "(", "{", "'server'", ":", "''", ",", "'rmq_exchange'", ":", "''", ",",...
Returns the help text for the configuration options for this handler
[ "Returns", "the", "help", "text", "for", "the", "configuration", "options", "for", "this", "handler" ]
python
train
helixyte/everest
everest/repositories/uow.py
https://github.com/helixyte/everest/blob/70c9b93c3061db5cb62428349d18b8fb8566411b/everest/repositories/uow.py#L31-L40
def register_new(self, entity_class, entity): """ Registers the given entity for the given class as NEW. :raises ValueError: If the given entity already holds state that was created by another Unit Of Work. """ EntityState.manage(entity, self) EntityState.get_state(entity).status = ENTITY_STATUS.NEW self.__entity_set_map[entity_class].add(entity)
[ "def", "register_new", "(", "self", ",", "entity_class", ",", "entity", ")", ":", "EntityState", ".", "manage", "(", "entity", ",", "self", ")", "EntityState", ".", "get_state", "(", "entity", ")", ".", "status", "=", "ENTITY_STATUS", ".", "NEW", "self", ...
Registers the given entity for the given class as NEW. :raises ValueError: If the given entity already holds state that was created by another Unit Of Work.
[ "Registers", "the", "given", "entity", "for", "the", "given", "class", "as", "NEW", "." ]
python
train
getpelican/pelican-plugins
filetime_from_git/actions.py
https://github.com/getpelican/pelican-plugins/blob/cfc7a3f224f1743063b034561f89a6a712d13587/filetime_from_git/actions.py#L84-L89
def update_hash_from_str(hsh, str_input): """ Convert a str to object supporting buffer API and update a hash with it. """ byte_input = str(str_input).encode("UTF-8") hsh.update(byte_input)
[ "def", "update_hash_from_str", "(", "hsh", ",", "str_input", ")", ":", "byte_input", "=", "str", "(", "str_input", ")", ".", "encode", "(", "\"UTF-8\"", ")", "hsh", ".", "update", "(", "byte_input", ")" ]
Convert a str to object supporting buffer API and update a hash with it.
[ "Convert", "a", "str", "to", "object", "supporting", "buffer", "API", "and", "update", "a", "hash", "with", "it", "." ]
python
train
vintasoftware/django-role-permissions
rolepermissions/utils.py
https://github.com/vintasoftware/django-role-permissions/blob/28924361e689e994e0c3575e18104a1a5abd8de6/rolepermissions/utils.py#L16-L26
def camelToSnake(s): """ https://gist.github.com/jaytaylor/3660565 Is it ironic that this function is written in camel case, yet it converts to snake case? hmm.. """ _underscorer1 = re.compile(r'(.)([A-Z][a-z]+)') _underscorer2 = re.compile('([a-z0-9])([A-Z])') subbed = _underscorer1.sub(r'\1_\2', s) return _underscorer2.sub(r'\1_\2', subbed).lower()
[ "def", "camelToSnake", "(", "s", ")", ":", "_underscorer1", "=", "re", ".", "compile", "(", "r'(.)([A-Z][a-z]+)'", ")", "_underscorer2", "=", "re", ".", "compile", "(", "'([a-z0-9])([A-Z])'", ")", "subbed", "=", "_underscorer1", ".", "sub", "(", "r'\\1_\\2'", ...
https://gist.github.com/jaytaylor/3660565 Is it ironic that this function is written in camel case, yet it converts to snake case? hmm..
[ "https", ":", "//", "gist", ".", "github", ".", "com", "/", "jaytaylor", "/", "3660565", "Is", "it", "ironic", "that", "this", "function", "is", "written", "in", "camel", "case", "yet", "it", "converts", "to", "snake", "case?", "hmm", ".." ]
python
train
dpgaspar/Flask-AppBuilder
flask_appbuilder/baseviews.py
https://github.com/dpgaspar/Flask-AppBuilder/blob/c293734c1b86e176a3ba57ee2deab6676d125576/flask_appbuilder/baseviews.py#L1067-L1123
def _edit(self, pk): """ Edit function logic, override to implement different logic returns Edit widget and related list or None """ is_valid_form = True pages = get_page_args() page_sizes = get_page_size_args() orders = get_order_args() get_filter_args(self._filters) exclude_cols = self._filters.get_relation_cols() item = self.datamodel.get(pk, self._base_filters) if not item: abort(404) # convert pk to correct type, if pk is non string type. pk = self.datamodel.get_pk_value(item) if request.method == "POST": form = self.edit_form.refresh(request.form) # fill the form with the suppressed cols, generated from exclude_cols self._fill_form_exclude_cols(exclude_cols, form) # trick to pass unique validation form._id = pk if form.validate(): self.process_form(form, False) form.populate_obj(item) try: self.pre_update(item) except Exception as e: flash(str(e), "danger") else: if self.datamodel.edit(item): self.post_update(item) flash(*self.datamodel.message) finally: return None else: is_valid_form = False else: # Only force form refresh for select cascade events form = self.edit_form.refresh(obj=item) # Perform additional actions to pre-fill the edit form. self.prefill_form(form, pk) widgets = self._get_edit_widget(form=form, exclude_cols=exclude_cols) widgets = self._get_related_views_widgets( item, filters={}, orders=orders, pages=pages, page_sizes=page_sizes, widgets=widgets, ) if is_valid_form: self.update_redirect() return widgets
[ "def", "_edit", "(", "self", ",", "pk", ")", ":", "is_valid_form", "=", "True", "pages", "=", "get_page_args", "(", ")", "page_sizes", "=", "get_page_size_args", "(", ")", "orders", "=", "get_order_args", "(", ")", "get_filter_args", "(", "self", ".", "_fi...
Edit function logic, override to implement different logic returns Edit widget and related list or None
[ "Edit", "function", "logic", "override", "to", "implement", "different", "logic", "returns", "Edit", "widget", "and", "related", "list", "or", "None" ]
python
train
jadolg/rocketchat_API
rocketchat_API/rocketchat.py
https://github.com/jadolg/rocketchat_API/blob/f220d094434991cb9892418245f054ea06f28aad/rocketchat_API/rocketchat.py#L511-L513
def groups_rename(self, room_id, name, **kwargs): """Changes the name of the private group.""" return self.__call_api_post('groups.rename', roomId=room_id, name=name, kwargs=kwargs)
[ "def", "groups_rename", "(", "self", ",", "room_id", ",", "name", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "__call_api_post", "(", "'groups.rename'", ",", "roomId", "=", "room_id", ",", "name", "=", "name", ",", "kwargs", "=", "kwargs", ...
Changes the name of the private group.
[ "Changes", "the", "name", "of", "the", "private", "group", "." ]
python
train
cmbruns/pyopenvr
src/openvr/__init__.py
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L6051-L6057
def getSkeletalReferenceTransforms(self, action, eTransformSpace, eReferencePose, unTransformArrayCount): """Fills the given buffer with the transforms for a specific static skeletal reference pose""" fn = self.function_table.getSkeletalReferenceTransforms pTransformArray = VRBoneTransform_t() result = fn(action, eTransformSpace, eReferencePose, byref(pTransformArray), unTransformArrayCount) return result, pTransformArray
[ "def", "getSkeletalReferenceTransforms", "(", "self", ",", "action", ",", "eTransformSpace", ",", "eReferencePose", ",", "unTransformArrayCount", ")", ":", "fn", "=", "self", ".", "function_table", ".", "getSkeletalReferenceTransforms", "pTransformArray", "=", "VRBoneTr...
Fills the given buffer with the transforms for a specific static skeletal reference pose
[ "Fills", "the", "given", "buffer", "with", "the", "transforms", "for", "a", "specific", "static", "skeletal", "reference", "pose" ]
python
train
opereto/pyopereto
pyopereto/client.py
https://github.com/opereto/pyopereto/blob/16ca987738a7e1b82b52b0b099794a74ed557223/pyopereto/client.py#L856-L874
def modify_agent(self, agent_id, **kwargs): ''' modify_agent(self, agent_id, **kwargs) | Modifies agent information (like name) :Parameters: * *agent_id* (`string`) -- Identifier of an existing agent :Example: .. code-block:: python opereto_client = OperetoClient() opereto_client.modify_agent('agentId', name='my new name') ''' request_data = {'id': agent_id} request_data.update(**kwargs) return self._call_rest_api('post', '/agents'+'', data=request_data, error='Failed to modify agent [%s]'%agent_id)
[ "def", "modify_agent", "(", "self", ",", "agent_id", ",", "*", "*", "kwargs", ")", ":", "request_data", "=", "{", "'id'", ":", "agent_id", "}", "request_data", ".", "update", "(", "*", "*", "kwargs", ")", "return", "self", ".", "_call_rest_api", "(", "...
modify_agent(self, agent_id, **kwargs) | Modifies agent information (like name) :Parameters: * *agent_id* (`string`) -- Identifier of an existing agent :Example: .. code-block:: python opereto_client = OperetoClient() opereto_client.modify_agent('agentId', name='my new name')
[ "modify_agent", "(", "self", "agent_id", "**", "kwargs", ")" ]
python
train
Azure/msrestazure-for-python
msrestazure/azure_exceptions.py
https://github.com/Azure/msrestazure-for-python/blob/5f99262305692525d03ca87d2c5356b05c5aa874/msrestazure/azure_exceptions.py#L119-L141
def message(self, value): """Attempt to deconstruct error message to retrieve further error data. """ try: import ast value = ast.literal_eval(value) except (SyntaxError, TypeError, ValueError): pass try: value = value.get('value', value) msg_data = value.split('\n') self._message = msg_data[0] except AttributeError: self._message = value return try: self.request_id = msg_data[1].partition(':')[2] time_str = msg_data[2].partition(':') self.error_time = Deserializer.deserialize_iso( "".join(time_str[2:])) except (IndexError, DeserializationError): pass
[ "def", "message", "(", "self", ",", "value", ")", ":", "try", ":", "import", "ast", "value", "=", "ast", ".", "literal_eval", "(", "value", ")", "except", "(", "SyntaxError", ",", "TypeError", ",", "ValueError", ")", ":", "pass", "try", ":", "value", ...
Attempt to deconstruct error message to retrieve further error data.
[ "Attempt", "to", "deconstruct", "error", "message", "to", "retrieve", "further", "error", "data", "." ]
python
train