| repo | path | url | code | code_tokens | docstring | docstring_tokens | language | partition |
|---|---|---|---|---|---|---|---|---|
fabioz/PyDev.Debugger | pydevd_attach_to_process/winappdbg/thread.py | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/thread.py#L1977-L1983 | def clear_threads(self):
"""
Clears the threads snapshot.
"""
for aThread in compat.itervalues(self.__threadDict):
aThread.clear()
self.__threadDict = dict() | [
"def",
"clear_threads",
"(",
"self",
")",
":",
"for",
"aThread",
"in",
"compat",
".",
"itervalues",
"(",
"self",
".",
"__threadDict",
")",
":",
"aThread",
".",
"clear",
"(",
")",
"self",
".",
"__threadDict",
"=",
"dict",
"(",
")"
] | Clears the threads snapshot. | [
"Clears",
"the",
"threads",
"snapshot",
"."
] | python | train |
Avsecz/kopt | kopt/hyopt.py | https://github.com/Avsecz/kopt/blob/fe4f929c8938590845306a759547daa5ba8bd7a9/kopt/hyopt.py#L129-L143 | def best_trial_tid(self, rank=0):
"""Get tid of the best trial
rank=0 means the best model
rank=1 means second best
...
"""
candidates = [t for t in self.trials
if t['result']['status'] == STATUS_OK]
if len(candidates) == 0:
return None
losses = [float(t['result']['loss']) for t in candidates]
assert not np.any(np.isnan(losses))
lid = np.where(np.argsort(losses).argsort() == rank)[0][0]
return candidates[lid]["tid"] | [
"def",
"best_trial_tid",
"(",
"self",
",",
"rank",
"=",
"0",
")",
":",
"candidates",
"=",
"[",
"t",
"for",
"t",
"in",
"self",
".",
"trials",
"if",
"t",
"[",
"'result'",
"]",
"[",
"'status'",
"]",
"==",
"STATUS_OK",
"]",
"if",
"len",
"(",
"candidate... | Get tid of the best trial
rank=0 means the best model
rank=1 means second best
... | [
"Get",
"tid",
"of",
"the",
"best",
"trial"
] | python | train |
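The double ``argsort`` in ``best_trial_tid`` above is a compact rank computation; a minimal standalone sketch of the same trick, assuming only ``numpy``:

```python
import numpy as np

losses = np.array([0.30, 0.10, 0.50, 0.20])
# argsort twice: the second argsort yields each element's rank (0 = smallest)
ranks = np.argsort(losses).argsort()
second_best = np.where(ranks == 1)[0][0]  # index of the rank-1 (second best) loss
assert losses[second_best] == 0.20
```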
sci-bots/mpm | mpm/api.py | https://github.com/sci-bots/mpm/blob/a69651cda4b37ee6b17df4fe0809249e7f4dc536/mpm/api.py#L57-L91 | def _save_action(extra_context=None):
'''
Save list of revisions for active Conda environment.
.. versionchanged:: 0.18
Compress action revision files using ``bz2`` to save disk space.
Parameters
----------
extra_context : dict, optional
Extra content to store in stored action revision.
Returns
-------
path_helpers.path, dict
Path to which action was written and action object, including list of
revisions for active Conda environment.
'''
# Get list of revisions to Conda environment since creation.
revisions_js = ch.conda_exec('list', '--revisions', '--json',
verbose=False)
revisions = json.loads(revisions_js)
# Save list of revisions to `/etc/microdrop/plugins/actions/rev<rev>.json`
# See [wheeler-microfluidics/microdrop#200][i200].
#
# [i200]: https://github.com/wheeler-microfluidics/microdrop/issues/200
action = extra_context.copy() if extra_context else {}
action['revisions'] = revisions
action_path = (MICRODROP_CONDA_ACTIONS
.joinpath('rev{}.json.bz2'.format(revisions[-1]['rev'])))
action_path.parent.makedirs_p()
# Compress action file using bz2 to save disk space.
with bz2.BZ2File(action_path, mode='w') as output:
json.dump(action, output, indent=2)
return action_path, action | [
"def",
"_save_action",
"(",
"extra_context",
"=",
"None",
")",
":",
"# Get list of revisions to Conda environment since creation.",
"revisions_js",
"=",
"ch",
".",
"conda_exec",
"(",
"'list'",
",",
"'--revisions'",
",",
"'--json'",
",",
"verbose",
"=",
"False",
")",
... | Save list of revisions for active Conda environment.
.. versionchanged:: 0.18
Compress action revision files using ``bz2`` to save disk space.
Parameters
----------
extra_context : dict, optional
Extra content to store in stored action revision.
Returns
-------
path_helpers.path, dict
Path to which action was written and action object, including list of
revisions for active Conda environment. | [
"Save",
"list",
"of",
"revisions",
"revisions",
"for",
"active",
"Conda",
"environment",
"."
] | python | train |
Opentrons/opentrons | api/src/opentrons/protocol_api/contexts.py | https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/protocol_api/contexts.py#L650-L683 | def mix(self,
repetitions: int = 1,
volume: float = None,
location: Union[types.Location, Well] = None,
rate: float = 1.0) -> 'InstrumentContext':
"""
Mix a volume of liquid (uL) using this pipette.
If no location is specified, the pipette will mix from its current
position. If no Volume is passed, 'mix' will default to its max_volume.
:param repetitions: how many times the pipette should mix (default: 1)
:param volume: number of microlitres to mix (default: self.max_volume)
:param location: a Well or a position relative to well.
e.g, `plate.rows()[0][0].bottom()`
(types.Location type).
:param rate: Set plunger speed for this mix, where,
speed = rate * (aspirate_speed or dispense_speed)
:raises NoTipAttachedError: If no tip is attached to the pipette.
:returns: This instance
"""
self._log.debug(
'mixing {}uL with {} repetitions in {} at rate={}'.format(
volume, repetitions,
location if location else 'current position', rate))
if not self.hw_pipette['has_tip']:
raise hc.NoTipAttachedError('Pipette has no tip. Aborting mix()')
self.aspirate(volume, location, rate)
while repetitions - 1 > 0:
self.dispense(volume, rate=rate)
self.aspirate(volume, rate=rate)
repetitions -= 1
self.dispense(volume, rate=rate)
return self | [
"def",
"mix",
"(",
"self",
",",
"repetitions",
":",
"int",
"=",
"1",
",",
"volume",
":",
"float",
"=",
"None",
",",
"location",
":",
"Union",
"[",
"types",
".",
"Location",
",",
"Well",
"]",
"=",
"None",
",",
"rate",
":",
"float",
"=",
"1.0",
")"... | Mix a volume of liquid (uL) using this pipette.
If no location is specified, the pipette will mix from its current
position. If no Volume is passed, 'mix' will default to its max_volume.
:param repetitions: how many times the pipette should mix (default: 1)
:param volume: number of microlitres to mix (default: self.max_volume)
:param location: a Well or a position relative to well.
e.g, `plate.rows()[0][0].bottom()`
(types.Location type).
:param rate: Set plunger speed for this mix, where,
speed = rate * (aspirate_speed or dispense_speed)
:raises NoTipAttachedError: If no tip is attached to the pipette.
:returns: This instance | [
"Mix",
"a",
"volume",
"of",
"liquid",
"(",
"uL",
")",
"using",
"this",
"pipette",
".",
"If",
"no",
"location",
"is",
"specified",
"the",
"pipette",
"will",
"mix",
"from",
"its",
"current",
"position",
".",
"If",
"no",
"Volume",
"is",
"passed",
"mix",
"... | python | train |
pydata/xarray | xarray/core/computation.py | https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/computation.py#L663-L680 | def apply_array_ufunc(func, *args, dask='forbidden'):
"""Apply a ndarray level function over ndarray objects."""
if any(isinstance(arg, dask_array_type) for arg in args):
if dask == 'forbidden':
raise ValueError('apply_ufunc encountered a dask array on an '
'argument, but handling for dask arrays has not '
'been enabled. Either set the ``dask`` argument '
'or load your data into memory first with '
'``.load()`` or ``.compute()``')
elif dask == 'parallelized':
raise ValueError("cannot use dask='parallelized' for apply_ufunc "
'unless at least one input is an xarray object')
elif dask == 'allowed':
pass
else:
raise ValueError('unknown setting for dask array handling: {}'
.format(dask))
return func(*args) | [
"def",
"apply_array_ufunc",
"(",
"func",
",",
"*",
"args",
",",
"dask",
"=",
"'forbidden'",
")",
":",
"if",
"any",
"(",
"isinstance",
"(",
"arg",
",",
"dask_array_type",
")",
"for",
"arg",
"in",
"args",
")",
":",
"if",
"dask",
"==",
"'forbidden'",
":",... | Apply a ndarray level function over ndarray objects. | [
"Apply",
"a",
"ndarray",
"level",
"function",
"over",
"ndarray",
"objects",
"."
] | python | train |
allelos/vectors | vectors/vectors.py | https://github.com/allelos/vectors/blob/55db2a7e489ae5f4380e70b3c5b7a6ce39de5cee/vectors/vectors.py#L130-L134 | def sum(self, vector):
"""Return a Vector instance as the vector sum of two vectors."""
return self.from_list(
[x + vector.vector[i] for i, x in enumerate(self.to_list())]
) | [
"def",
"sum",
"(",
"self",
",",
"vector",
")",
":",
"return",
"self",
".",
"from_list",
"(",
"[",
"x",
"+",
"vector",
".",
"vector",
"[",
"i",
"]",
"for",
"i",
",",
"x",
"in",
"self",
".",
"to_list",
"(",
")",
"]",
")"
] | Return a Vector instance as the vector sum of two vectors. | [
"Return",
"a",
"Vector",
"instance",
"as",
"the",
"vector",
"sum",
"of",
"two",
"vectors",
"."
] | python | train |
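The comprehension only works once each component is paired with its index via ``enumerate``; a plain-list illustration of the element-wise sum:

```python
a = [1.0, 2.0, 3.0]
b = [4.0, 5.0, 6.0]
# pair each component of `a` with its index, then add the matching component of `b`
summed = [x + b[i] for i, x in enumerate(a)]
assert summed == [5.0, 7.0, 9.0]
```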
timothydmorton/isochrones | isochrones/starmodel_old.py | https://github.com/timothydmorton/isochrones/blob/d84495573044c66db2fd6b959fe69e370757ea14/isochrones/starmodel_old.py#L1668-L1711 | def maxlike(self,nseeds=50):
"""Returns the best-fit parameters, choosing the best of multiple starting guesses
:param nseeds: (optional)
Number of starting guesses, uniformly distributed throughout
allowed ranges. Default=50.
:return:
list of best-fit parameters: ``[mA,mB,mC,age,feh,[distance,A_V]]``.
Note that distance and A_V values will be meaningless unless
magnitudes are present in ``self.properties``.
"""
mA_0,age0,feh0 = self.ic.random_points(nseeds)
mB_0,foo1,foo2 = self.ic.random_points(nseeds)
mC_0,foo3,foo4 = self.ic.random_points(nseeds)
m_all = np.sort(np.array([mA_0, mB_0, mC_0]), axis=0)
mA_0, mB_0, mC_0 = (m_all[0,:], m_all[1,:], m_all[2,:])
d0 = 10**(rand.uniform(0,np.log10(self.max_distance),size=nseeds))
AV0 = rand.uniform(0,self.maxAV,size=nseeds)
costs = np.zeros(nseeds)
if self.fit_for_distance:
pfits = np.zeros((nseeds,7))
else:
pfits = np.zeros((nseeds,5))
def fn(p): #fmin is a function *minimizer*
return -1*self.lnpost(p)
for i,mA,mB,mC,age,feh,d,AV in zip(range(nseeds),
mA_0,mB_0,mC_0,age0,feh0,d0,AV0):
if self.fit_for_distance:
pfit = scipy.optimize.fmin(fn,[mA,mB,mC,age,feh,d,AV],disp=False)
else:
pfit = scipy.optimize.fmin(fn,[mA,mB,mC,age,feh],disp=False)
pfits[i,:] = pfit
costs[i] = self.lnpost(pfit)
return pfits[np.argmax(costs),:] | [
"def",
"maxlike",
"(",
"self",
",",
"nseeds",
"=",
"50",
")",
":",
"mA_0",
",",
"age0",
",",
"feh0",
"=",
"self",
".",
"ic",
".",
"random_points",
"(",
"nseeds",
")",
"mB_0",
",",
"foo1",
",",
"foo2",
"=",
"self",
".",
"ic",
".",
"random_points",
... | Returns the best-fit parameters, choosing the best of multiple starting guesses
:param nseeds: (optional)
Number of starting guesses, uniformly distributed throughout
allowed ranges. Default=50.
:return:
list of best-fit parameters: ``[mA,mB,mC,age,feh,[distance,A_V]]``.
Note that distance and A_V values will be meaningless unless
magnitudes are present in ``self.properties``. | [
"Returns",
"the",
"best",
"-",
"fit",
"parameters",
"choosing",
"the",
"best",
"of",
"multiple",
"starting",
"guesses"
] | python | train |
bjmorgan/lattice_mc | lattice_mc/simulation.py | https://github.com/bjmorgan/lattice_mc/blob/7fa7be85f2f23a2d8dfd0830ecdb89d0dbf2bfd5/lattice_mc/simulation.py#L135-L150 | def is_initialised( self ):
"""
Check whether the simulation has been initialised.
Args:
None
Returns:
None
"""
if not self.lattice:
raise AttributeError('Running a simulation needs the lattice to be initialised')
if not self.atoms:
raise AttributeError('Running a simulation needs the atoms to be initialised')
if not self.number_of_jumps and not self.for_time:
raise AttributeError('Running a simulation needs number_of_jumps or for_time to be set') | [
"def",
"is_initialised",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"lattice",
":",
"raise",
"AttributeError",
"(",
"'Running a simulation needs the lattice to be initialised'",
")",
"if",
"not",
"self",
".",
"atoms",
":",
"raise",
"AttributeError",
"(",
"'R... | Check whether the simulation has been initialised.
Args:
None
Returns:
None | [
"Check",
"whether",
"the",
"simulation",
"has",
"been",
"initialised",
"."
] | python | train |
tdryer/hangups | hangups/conversation_event.py | https://github.com/tdryer/hangups/blob/85c0bf0a57698d077461283895707260f9dbf931/hangups/conversation_event.py#L168-L171 | def segments(self):
"""List of :class:`ChatMessageSegment` in message (:class:`list`)."""
seg_list = self._event.chat_message.message_content.segment
return [ChatMessageSegment.deserialize(seg) for seg in seg_list] | [
"def",
"segments",
"(",
"self",
")",
":",
"seg_list",
"=",
"self",
".",
"_event",
".",
"chat_message",
".",
"message_content",
".",
"segment",
"return",
"[",
"ChatMessageSegment",
".",
"deserialize",
"(",
"seg",
")",
"for",
"seg",
"in",
"seg_list",
"]"
] | List of :class:`ChatMessageSegment` in message (:class:`list`). | [
"List",
"of",
":",
"class",
":",
"ChatMessageSegment",
"in",
"message",
"(",
":",
"class",
":",
"list",
")",
"."
] | python | valid |
trp07/messages | messages/telegram.py | https://github.com/trp07/messages/blob/7789ebc960335a59ea5d319fceed3dd349023648/messages/telegram.py#L146-L161 | def _send_content(self, method="/sendMessage"):
"""send via HTTP Post."""
url = self.base_url + method
try:
resp = requests.post(url, json=self.message)
resp.raise_for_status()
except requests.exceptions.HTTPError as e:
raise MessageSendError(e)
if self.verbose:
if method == "/sendMessage":
content_type = "Message body"
elif method == "/sendDocument":
content_type = "Attachment: " + self.message["document"]
print(timestamp(), content_type, "sent.") | [
"def",
"_send_content",
"(",
"self",
",",
"method",
"=",
"\"/sendMessage\"",
")",
":",
"url",
"=",
"self",
".",
"base_url",
"+",
"method",
"try",
":",
"resp",
"=",
"requests",
".",
"post",
"(",
"url",
",",
"json",
"=",
"self",
".",
"message",
")",
"r... | send via HTTP Post. | [
"send",
"via",
"HTTP",
"Post",
"."
] | python | test |
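The send path above is the usual ``requests`` POST-then-raise pattern; a minimal standalone sketch (URL and payload are placeholders):

```python
import requests

def post_json(url, payload):
    resp = requests.post(url, json=payload)
    resp.raise_for_status()  # turn HTTP 4xx/5xx responses into exceptions
    return resp.json()

# e.g. post_json("https://api.telegram.org/bot<token>/sendMessage",
#                {"chat_id": 123, "text": "hello"})
```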
Clinical-Genomics/scout | scout/parse/omim.py | https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/parse/omim.py#L321-L364 | def get_mim_phenotypes(genemap_lines):
"""Get a dictionary with phenotypes
Use the mim numbers for phenotypes as keys and phenotype information as
values.
Args:
genemap_lines(iterable(str))
Returns:
phenotypes_found(dict): A dictionary with mim_numbers as keys and
dictionaries with phenotype information as values.
{
'description': str, # Description of the phenotype
'hgnc_symbols': set(), # Associated hgnc symbols
'inheritance': set(), # Associated phenotypes
'mim_number': int, # mim number of phenotype
}
"""
# Set with all omim numbers that are phenotypes
# Parsed from mim2gene.txt
phenotype_mims = set()
phenotypes_found = {}
# Genemap is a file with one entry per gene.
# Each line hold a lot of information and in specific it
# has information about the phenotypes that a gene is associated with
# From this source we collect inheritane patterns and what hgnc symbols
# a phenotype is associated with
for entry in parse_genemap2(genemap_lines):
hgnc_symbol = entry['hgnc_symbol']
for phenotype in entry['phenotypes']:
mim_nr = phenotype['mim_number']
if mim_nr in phenotypes_found:
phenotype_entry = phenotypes_found[mim_nr]
phenotype_entry['inheritance'] = phenotype_entry['inheritance'].union(phenotype['inheritance'])
phenotype_entry['hgnc_symbols'].add(hgnc_symbol)
else:
phenotype['hgnc_symbols'] = set([hgnc_symbol])
phenotypes_found[mim_nr] = phenotype
return phenotypes_found | [
"def",
"get_mim_phenotypes",
"(",
"genemap_lines",
")",
":",
"# Set with all omim numbers that are phenotypes",
"# Parsed from mim2gene.txt",
"phenotype_mims",
"=",
"set",
"(",
")",
"phenotypes_found",
"=",
"{",
"}",
"# Genemap is a file with one entry per gene.",
"# Each line ho... | Get a dictionary with phenotypes
Use the mim numbers for phenotypes as keys and phenotype information as
values.
Args:
genemap_lines(iterable(str))
Returns:
phenotypes_found(dict): A dictionary with mim_numbers as keys and
dictionaries with phenotype information as values.
{
'description': str, # Description of the phenotype
'hgnc_symbols': set(), # Associated hgnc symbols
'inheritance': set(), # Associated phenotypes
'mim_number': int, # mim number of phenotype
} | [
"Get",
"a",
"dictionary",
"with",
"phenotypes",
"Use",
"the",
"mim",
"numbers",
"for",
"phenotypes",
"as",
"keys",
"and",
"phenotype",
"information",
"as",
"values",
"."
] | python | test |
blockstack/blockstack-core | blockstack/blockstackd.py | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L95-L133 | def get_bitcoind( new_bitcoind_opts=None, reset=False, new=False ):
"""
Get or instantiate our bitcoind client.
Optionally re-set the bitcoind options.
"""
global bitcoind
if reset:
bitcoind = None
elif not new and bitcoind is not None:
return bitcoind
if new or bitcoind is None:
if new_bitcoind_opts is not None:
set_bitcoin_opts( new_bitcoind_opts )
bitcoin_opts = get_bitcoin_opts()
new_bitcoind = None
try:
try:
new_bitcoind = virtualchain.connect_bitcoind( bitcoin_opts )
except KeyError, ke:
log.exception(ke)
log.error("Invalid configuration: %s" % bitcoin_opts)
return None
if new:
return new_bitcoind
else:
# save for subsequent reuse
bitcoind = new_bitcoind
return bitcoind
except Exception, e:
log.exception( e )
return None | [
"def",
"get_bitcoind",
"(",
"new_bitcoind_opts",
"=",
"None",
",",
"reset",
"=",
"False",
",",
"new",
"=",
"False",
")",
":",
"global",
"bitcoind",
"if",
"reset",
":",
"bitcoind",
"=",
"None",
"elif",
"not",
"new",
"and",
"bitcoind",
"is",
"not",
"None",... | Get or instantiate our bitcoind client.
Optionally re-set the bitcoind options. | [
"Get",
"or",
"instantiate",
"our",
"bitcoind",
"client",
".",
"Optionally",
"re",
"-",
"set",
"the",
"bitcoind",
"options",
"."
] | python | train |
saltstack/salt | salt/modules/boto_elb.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_elb.py#L332-L354 | def delete_listeners(name, ports, region=None, key=None, keyid=None,
profile=None):
'''
Delete listeners on an ELB.
CLI example:
.. code-block:: bash
salt myminion boto_elb.delete_listeners myelb '[80,443]'
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if isinstance(ports, six.string_types):
ports = salt.utils.json.loads(ports)
try:
conn.delete_load_balancer_listeners(name, ports)
log.info('Deleted ELB listeners on %s', name)
return True
except boto.exception.BotoServerError as error:
log.error('Failed to delete ELB listeners on %s: %s', name, error,
exc_info_on_loglevel=logging.DEBUG)
return False | [
"def",
"delete_listeners",
"(",
"name",
",",
"ports",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"k... | Delete listeners on an ELB.
CLI example:
.. code-block:: bash
salt myminion boto_elb.delete_listeners myelb '[80,443]' | [
"Delete",
"listeners",
"on",
"an",
"ELB",
"."
] | python | train |
brbsix/subnuker | subnuker.py | https://github.com/brbsix/subnuker/blob/a94260a6e84b790a9e39e0b1793443ffd4e1f496/subnuker.py#L99-L117 | def open(self):
"""Open the subtitle file into an Aeidon project."""
try:
self.project.open_main(self.filename)
except UnicodeDecodeError:
with open(self.filename, 'rb') as openfile:
encoding = get_encoding(openfile.read())
try:
self.project.open_main(self.filename, encoding)
except UnicodeDecodeError:
LOGGER.error("'%s' encountered a fatal encoding error",
self.filename)
sys.exit(1)
except: # pylint: disable=W0702
open_error(self.filename)
except: # pylint: disable=W0702
open_error(self.filename) | [
"def",
"open",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"project",
".",
"open_main",
"(",
"self",
".",
"filename",
")",
"except",
"UnicodeDecodeError",
":",
"with",
"open",
"(",
"self",
".",
"filename",
",",
"'rb'",
")",
"as",
"openfile",
":",
... | Open the subtitle file into an Aeidon project. | [
"Open",
"the",
"subtitle",
"file",
"into",
"an",
"Aeidon",
"project",
"."
] | python | train |
brocade/pynos | pynos/versions/ver_7/ver_7_1_0/yang/brocade_vswitch.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_vswitch.py#L323-L334 | def get_vnetwork_vms_output_instance_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vms = ET.Element("get_vnetwork_vms")
config = get_vnetwork_vms
output = ET.SubElement(get_vnetwork_vms, "output")
instance_id = ET.SubElement(output, "instance-id")
instance_id.text = kwargs.pop('instance_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"get_vnetwork_vms_output_instance_id",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"get_vnetwork_vms",
"=",
"ET",
".",
"Element",
"(",
"\"get_vnetwork_vms\"",
")",
"config",
"=",
"get_vnet... | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | python | train |
Azure/azure-sdk-for-python | azure-servicemanagement-legacy/azure/servicemanagement/servicemanagementservice.py | https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicemanagement-legacy/azure/servicemanagement/servicemanagementservice.py#L2290-L2340 | def update_os_image_from_image_reference(self, image_name, os_image):
'''
Updates metadata elements from a given OS image reference.
image_name:
The name of the image to update.
os_image:
An instance of OSImage class.
os_image.label: Optional. Specifies an identifier for the image.
os_image.description: Optional. Specifies the description of the image.
os_image.language: Optional. Specifies the language of the image.
os_image.image_family:
Optional. Specifies a value that can be used to group VM Images.
os_image.recommended_vm_size:
Optional. Specifies the size to use for the Virtual Machine that
is created from the VM Image.
os_image.eula:
Optional. Specifies the End User License Agreement that is
associated with the image. The value for this element is a string,
but it is recommended that the value be a URL that points to a EULA.
os_image.icon_uri:
Optional. Specifies the URI to the icon that is displayed for the
image in the Management Portal.
os_image.small_icon_uri:
Optional. Specifies the URI to the small icon that is displayed for
the image in the Management Portal.
os_image.privacy_uri:
Optional. Specifies the URI that points to a document that contains
the privacy policy related to the image.
os_image.published_date:
Optional. Specifies the date when the image was added to the image
repository.
os.image.media_link:
Required: Specifies the location of the blob in Windows Azure
blob store where the media for the image is located. The blob
location must belong to a storage account in the subscription
specified by the <subscription-id> value in the operation call.
Example:
http://example.blob.core.windows.net/disks/mydisk.vhd
os_image.name:
Specifies a name for the OS image that Windows Azure uses to
identify the image when creating one or more VM Roles.
os_image.os:
The operating system type of the OS image. Possible values are:
Linux, Windows
'''
_validate_not_none('image_name', image_name)
_validate_not_none('os_image', os_image)
return self._perform_put(self._get_image_path(image_name),
_XmlSerializer.update_os_image_to_xml(os_image), as_async=True
) | [
"def",
"update_os_image_from_image_reference",
"(",
"self",
",",
"image_name",
",",
"os_image",
")",
":",
"_validate_not_none",
"(",
"'image_name'",
",",
"image_name",
")",
"_validate_not_none",
"(",
"'os_image'",
",",
"os_image",
")",
"return",
"self",
".",
"_perfo... | Updates metadata elements from a given OS image reference.
image_name:
The name of the image to update.
os_image:
An instance of OSImage class.
os_image.label: Optional. Specifies an identifier for the image.
os_image.description: Optional. Specifies the description of the image.
os_image.language: Optional. Specifies the language of the image.
os_image.image_family:
Optional. Specifies a value that can be used to group VM Images.
os_image.recommended_vm_size:
Optional. Specifies the size to use for the Virtual Machine that
is created from the VM Image.
os_image.eula:
Optional. Specifies the End User License Agreement that is
associated with the image. The value for this element is a string,
but it is recommended that the value be a URL that points to a EULA.
os_image.icon_uri:
Optional. Specifies the URI to the icon that is displayed for the
image in the Management Portal.
os_image.small_icon_uri:
Optional. Specifies the URI to the small icon that is displayed for
the image in the Management Portal.
os_image.privacy_uri:
Optional. Specifies the URI that points to a document that contains
the privacy policy related to the image.
os_image.published_date:
Optional. Specifies the date when the image was added to the image
repository.
os.image.media_link:
Required: Specifies the location of the blob in Windows Azure
blob store where the media for the image is located. The blob
location must belong to a storage account in the subscription
specified by the <subscription-id> value in the operation call.
Example:
http://example.blob.core.windows.net/disks/mydisk.vhd
os_image.name:
Specifies a name for the OS image that Windows Azure uses to
identify the image when creating one or more VM Roles.
os_image.os:
The operating system type of the OS image. Possible values are:
Linux, Windows | [
"Updates",
"metadata",
"elements",
"from",
"a",
"given",
"OS",
"image",
"reference",
"."
] | python | test |
fabioz/PyDev.Debugger | third_party/isort_container/isort/isort.py | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/isort_container/isort/isort.py#L322-L327 | def _add_comments(self, comments, original_string=""):
"""
Returns a string with comments added
"""
return comments and "{0} # {1}".format(self._strip_comments(original_string)[0],
"; ".join(comments)) or original_string | [
"def",
"_add_comments",
"(",
"self",
",",
"comments",
",",
"original_string",
"=",
"\"\"",
")",
":",
"return",
"comments",
"and",
"\"{0} # {1}\"",
".",
"format",
"(",
"self",
".",
"_strip_comments",
"(",
"original_string",
")",
"[",
"0",
"]",
",",
"\"; \"",... | Returns a string with comments added | [
"Returns",
"a",
"string",
"with",
"comments",
"added"
] | python | train |
thespacedoctor/rockAtlas | rockAtlas/positions/pyephemPositions.py | https://github.com/thespacedoctor/rockAtlas/blob/062ecaa95ab547efda535aa33165944f13c621de/rockAtlas/positions/pyephemPositions.py#L93-L141 | def get(self, singleSnapshot=False):
"""
*generate the pyephem positions*
**Key Arguments:**
- ``singleSnapshot`` -- just extract positions for a single pyephem snapshot (used for unit testing)
**Return:**
- ``None``
"""
self.log.info('starting the ``get`` method')
global xephemOE
global tileSide
global magLimit
# GRAB PARAMETERS FROM SETTINGS FILE
tileSide = float(self.settings["pyephem"]["atlas exposure match side"])
magLimit = float(self.settings["pyephem"]["magnitude limit"])
snapshotsRequired = 1
while snapshotsRequired > 0:
nextMjds, exposures, snapshotsRequired = self._get_exposures_requiring_pyephem_positions(
concurrentSnapshots=int(self.settings["pyephem"]["batch size"]))
print "There are currently %(snapshotsRequired)s more pyephem snapshots required " % locals()
if snapshotsRequired == 0:
return
if len(xephemOE) == 0:
xephemOE = self._get_xephem_orbital_elements()
# DEFINE AN INPUT ARRAY
magLimit = self.settings["pyephem"]["magnitude limit"]
pyephemDB = fmultiprocess(log=self.log, function=_generate_pyephem_snapshot, timeout=300,
inputArray=nextMjds, magLimit=magLimit)
matchedObjects = []
for p, e, m in zip(pyephemDB, exposures, nextMjds):
matchedObjects.append(
self._match_pyephem_snapshot_to_atlas_exposures(p, e, m))
self._add_matched_objects_to_database(matchedObjects)
self._update_database_flag(exposures)
if singleSnapshot:
snapshotsRequired = 0
self.log.info('completed the ``get`` method')
return None | [
"def",
"get",
"(",
"self",
",",
"singleSnapshot",
"=",
"False",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"'starting the ``get`` method'",
")",
"global",
"xephemOE",
"global",
"tileSide",
"global",
"magLimit",
"# GRAB PARAMETERS FROM SETTINGS FILE",
"tileSide"... | *geneate the pyephem positions*
**Key Arguments:**
- ``singleSnapshot`` -- just extract positions for a single pyephem snapshot (used for unit testing)
**Return:**
- ``None`` | [
"*",
"geneate",
"the",
"pyephem",
"positions",
"*"
] | python | train |
opendatateam/udata | udata/models/owned.py | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/models/owned.py#L41-L68 | def owned_pre_save(sender, document, **kwargs):
'''
Owned mongoengine.pre_save signal handler
Need to fetch original owner before the new one erase it.
'''
if not isinstance(document, Owned):
return
changed_fields = getattr(document, '_changed_fields', [])
if 'organization' in changed_fields:
if document.owner:
# Change from owner to organization
document._previous_owner = document.owner
document.owner = None
else:
# Change from org to another
# Need to fetch previous value in base
original = sender.objects.only('organization').get(pk=document.pk)
document._previous_owner = original.organization
elif 'owner' in changed_fields:
if document.organization:
# Change from organization to owner
document._previous_owner = document.organization
document.organization = None
else:
# Change from owner to another
# Need to fetch previous value in base
original = sender.objects.only('owner').get(pk=document.pk)
document._previous_owner = original.owner | [
"def",
"owned_pre_save",
"(",
"sender",
",",
"document",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"isinstance",
"(",
"document",
",",
"Owned",
")",
":",
"return",
"changed_fields",
"=",
"getattr",
"(",
"document",
",",
"'_changed_fields'",
",",
"[",... | Owned mongoengine.pre_save signal handler
Need to fetch original owner before the new one erase it. | [
"Owned",
"mongoengine",
".",
"pre_save",
"signal",
"handler",
"Need",
"to",
"fetch",
"original",
"owner",
"before",
"the",
"new",
"one",
"erase",
"it",
"."
] | python | train |
hazelcast/hazelcast-python-client | hazelcast/proxy/multi_map.py | https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/proxy/multi_map.py#L298-L320 | def try_lock(self, key, lease_time=-1, timeout=-1):
"""
Tries to acquire the lock for the specified key. When the lock is not available,
* If timeout is not provided, the current thread doesn't wait and returns ``false`` immediately.
* If a timeout is provided, the current thread becomes disabled for thread scheduling purposes and lies
dormant until one of the following happens:
* the lock is acquired by the current thread, or
* the specified waiting time elapses.
If lease_time is provided, lock will be released after this time elapses.
:param key: (object), key to lock in this map.
:param lease_time: (int), time in seconds to wait before releasing the lock (optional).
:param timeout: (int), maximum time in seconds to wait for the lock (optional).
:return: (bool), ``true`` if the lock was acquired, ``false`` otherwise.
"""
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(multi_map_try_lock_codec, key_data, key=key_data,
thread_id=thread_id(), lease=to_millis(lease_time),
timeout=to_millis(timeout),
reference_id=self.reference_id_generator.get_and_increment()) | [
"def",
"try_lock",
"(",
"self",
",",
"key",
",",
"lease_time",
"=",
"-",
"1",
",",
"timeout",
"=",
"-",
"1",
")",
":",
"check_not_none",
"(",
"key",
",",
"\"key can't be None\"",
")",
"key_data",
"=",
"self",
".",
"_to_data",
"(",
"key",
")",
"return",... | Tries to acquire the lock for the specified key. When the lock is not available,
* If timeout is not provided, the current thread doesn't wait and returns ``false`` immediately.
* If a timeout is provided, the current thread becomes disabled for thread scheduling purposes and lies
dormant until one of the following happens:
* the lock is acquired by the current thread, or
* the specified waiting time elapses.
If lease_time is provided, lock will be released after this time elapses.
:param key: (object), key to lock in this map.
:param lease_time: (int), time in seconds to wait before releasing the lock (optional).
:param timeout: (int), maximum time in seconds to wait for the lock (optional).
:return: (bool), ``true`` if the lock was acquired, ``false`` otherwise. | [
"Tries",
"to",
"acquire",
"the",
"lock",
"for",
"the",
"specified",
"key",
".",
"When",
"the",
"lock",
"is",
"not",
"available"
] | python | train |
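A usage sketch for the ``try_lock`` semantics described above; the client construction and map name are illustrative assumptions, and the ``.result()`` calls reflect the future-returning proxy API suggested by the snippet:

```python
import hazelcast

client = hazelcast.HazelcastClient()
multi_map = client.get_multi_map("my-multi-map")
# wait up to 10 s for the lock, then hold it for at most a 5 s lease
if multi_map.try_lock("key", lease_time=5, timeout=10).result():
    try:
        multi_map.put("key", "value").result()
    finally:
        multi_map.unlock("key").result()
```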
kragniz/python-etcd3 | etcd3/client.py | https://github.com/kragniz/python-etcd3/blob/0adb14840d4a6011a2023a13f07e247e4c336a80/etcd3/client.py#L620-L624 | def watch_prefix(self, key_prefix, **kwargs):
"""Watches a range of keys with a prefix."""
kwargs['range_end'] = \
utils.increment_last_byte(utils.to_bytes(key_prefix))
return self.watch(key_prefix, **kwargs) | [
"def",
"watch_prefix",
"(",
"self",
",",
"key_prefix",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'range_end'",
"]",
"=",
"utils",
".",
"increment_last_byte",
"(",
"utils",
".",
"to_bytes",
"(",
"key_prefix",
")",
")",
"return",
"self",
".",
"wat... | Watches a range of keys with a prefix. | [
"Watches",
"a",
"range",
"of",
"keys",
"with",
"a",
"prefix",
"."
] | python | train |
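The prefix watch reduces to a half-open key range; a sketch of what an ``increment_last_byte`` helper like the one used above plausibly does (naive version that assumes the prefix's last byte is below 0xff):

```python
def increment_last_byte(byte_string):
    # b'/foo' -> b'/fop', so the range [b'/foo', b'/fop') covers exactly
    # the keys that start with the prefix b'/foo'
    array = bytearray(byte_string)
    array[-1] += 1  # naive: would overflow if the last byte were 0xff
    return bytes(array)

assert increment_last_byte(b'/foo') == b'/fop'
```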
GoogleCloudPlatform/datastore-ndb-python | ndb/msgprop.py | https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/msgprop.py#L248-L299 | def _make_model_class(message_type, indexed_fields, **props):
"""Construct a Model subclass corresponding to a Message subclass.
Args:
message_type: A Message subclass.
indexed_fields: A list of dotted and undotted field names.
**props: Additional properties with which to seed the class.
Returns:
A Model subclass whose properties correspond to those fields of
message_type whose field name is listed in indexed_fields, plus
the properties specified by the **props arguments. For dotted
field names, a StructuredProperty is generated using a Model
subclass created by a recursive call.
Raises:
Whatever _analyze_indexed_fields() raises.
ValueError if a field name conflicts with a name in **props.
ValueError if a field name is not valid field of message_type.
ValueError if an undotted field name designates a MessageField.
"""
analyzed = _analyze_indexed_fields(indexed_fields)
for field_name, sub_fields in analyzed.iteritems():
if field_name in props:
raise ValueError('field name %s is reserved' % field_name)
try:
field = message_type.field_by_name(field_name)
except KeyError:
raise ValueError('Message type %s has no field named %s' %
(message_type.__name__, field_name))
if isinstance(field, messages.MessageField):
if not sub_fields:
raise ValueError(
'MessageField %s cannot be indexed, only sub-fields' % field_name)
sub_model_class = _make_model_class(field.type, sub_fields)
prop = model.StructuredProperty(sub_model_class, field_name,
repeated=field.repeated)
else:
if sub_fields is not None:
raise ValueError(
'Unstructured field %s cannot have indexed sub-fields' % field_name)
if isinstance(field, messages.EnumField):
prop = EnumProperty(field.type, field_name, repeated=field.repeated)
elif isinstance(field, messages.BytesField):
prop = model.BlobProperty(field_name,
repeated=field.repeated, indexed=True)
else:
# IntegerField, FloatField, BooleanField, StringField.
prop = model.GenericProperty(field_name, repeated=field.repeated)
props[field_name] = prop
return model.MetaModel('_%s__Model' % message_type.__name__,
(model.Model,), props) | [
"def",
"_make_model_class",
"(",
"message_type",
",",
"indexed_fields",
",",
"*",
"*",
"props",
")",
":",
"analyzed",
"=",
"_analyze_indexed_fields",
"(",
"indexed_fields",
")",
"for",
"field_name",
",",
"sub_fields",
"in",
"analyzed",
".",
"iteritems",
"(",
")"... | Construct a Model subclass corresponding to a Message subclass.
Args:
message_type: A Message subclass.
indexed_fields: A list of dotted and undotted field names.
**props: Additional properties with which to seed the class.
Returns:
A Model subclass whose properties correspond to those fields of
message_type whose field name is listed in indexed_fields, plus
the properties specified by the **props arguments. For dotted
field names, a StructuredProperty is generated using a Model
subclass created by a recursive call.
Raises:
Whatever _analyze_indexed_fields() raises.
ValueError if a field name conflicts with a name in **props.
ValueError if a field name is not valid field of message_type.
ValueError if an undotted field name designates a MessageField. | [
"Construct",
"a",
"Model",
"subclass",
"corresponding",
"to",
"a",
"Message",
"subclass",
"."
] | python | train |
summa-tx/riemann | riemann/simple.py | https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/simple.py#L104-L116 | def unsigned_input(outpoint, redeem_script=None, sequence=None):
'''
Outpoint, byte-like, int -> TxIn
'''
if redeem_script is not None and sequence is None:
sequence = guess_sequence(redeem_script)
if sequence is None:
sequence = 0xFFFFFFFE
return tb.make_legacy_input(
outpoint=outpoint,
stack_script=b'',
redeem_script=b'',
sequence=sequence) | [
"def",
"unsigned_input",
"(",
"outpoint",
",",
"redeem_script",
"=",
"None",
",",
"sequence",
"=",
"None",
")",
":",
"if",
"redeem_script",
"is",
"not",
"None",
"and",
"sequence",
"is",
"None",
":",
"sequence",
"=",
"guess_sequence",
"(",
"redeem_script",
")... | Outpoint, byte-like, int -> TxIn | [
"Outpoint",
"byte",
"-",
"like",
"int",
"-",
">",
"TxIn"
] | python | train |
pgmpy/pgmpy | pgmpy/models/MarkovModel.py | https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/models/MarkovModel.py#L623-L677 | def to_bayesian_model(self):
"""
Creates a Bayesian Model which is a minimum I-Map for this markov model.
The ordering of parents may not remain constant. It would depend on the
ordering of variable in the junction tree (which is not constant) all the
time.
Examples
--------
>>> from pgmpy.models import MarkovModel
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> mm = MarkovModel()
>>> mm.add_nodes_from(['x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7'])
>>> mm.add_edges_from([('x1', 'x3'), ('x1', 'x4'), ('x2', 'x4'),
... ('x2', 'x5'), ('x3', 'x6'), ('x4', 'x6'),
... ('x4', 'x7'), ('x5', 'x7')])
>>> phi = [DiscreteFactor(edge, [2, 2], np.random.rand(4)) for edge in mm.edges()]
>>> mm.add_factors(*phi)
>>> bm = mm.to_bayesian_model()
"""
from pgmpy.models import BayesianModel
bm = BayesianModel()
var_clique_dict = defaultdict(tuple)
var_order = []
# Create a junction tree from the markov model.
# Creation of clique tree involves triangulation, finding maximal cliques
# and creating a tree from these cliques
junction_tree = self.to_junction_tree()
# create an ordering of the nodes based on the ordering of the clique
# in which it appeared first
root_node = next(iter(junction_tree.nodes()))
bfs_edges = nx.bfs_edges(junction_tree, root_node)
for node in root_node:
var_clique_dict[node] = root_node
var_order.append(node)
for edge in bfs_edges:
clique_node = edge[1]
for node in clique_node:
if not var_clique_dict[node]:
var_clique_dict[node] = clique_node
var_order.append(node)
# create a bayesian model by adding edges from parent of node to node as
# par(x_i) = (var(c_k) - x_i) \cap {x_1, ..., x_{i-1}}
for node_index in range(len(var_order)):
node = var_order[node_index]
node_parents = (set(var_clique_dict[node]) - set([node])).intersection(
set(var_order[:node_index]))
bm.add_edges_from([(parent, node) for parent in node_parents])
# TODO : Convert factor into CPDs
return bm | [
"def",
"to_bayesian_model",
"(",
"self",
")",
":",
"from",
"pgmpy",
".",
"models",
"import",
"BayesianModel",
"bm",
"=",
"BayesianModel",
"(",
")",
"var_clique_dict",
"=",
"defaultdict",
"(",
"tuple",
")",
"var_order",
"=",
"[",
"]",
"# Create a junction tree fr... | Creates a Bayesian Model which is a minimum I-Map for this markov model.
The ordering of parents may not remain constant. It would depend on the
ordering of variable in the junction tree (which is not constant) all the
time.
Examples
--------
>>> from pgmpy.models import MarkovModel
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> mm = MarkovModel()
>>> mm.add_nodes_from(['x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7'])
>>> mm.add_edges_from([('x1', 'x3'), ('x1', 'x4'), ('x2', 'x4'),
... ('x2', 'x5'), ('x3', 'x6'), ('x4', 'x6'),
... ('x4', 'x7'), ('x5', 'x7')])
>>> phi = [DiscreteFactor(edge, [2, 2], np.random.rand(4)) for edge in mm.edges()]
>>> mm.add_factors(*phi)
>>> bm = mm.to_bayesian_model() | [
"Creates",
"a",
"Bayesian",
"Model",
"which",
"is",
"a",
"minimum",
"I",
"-",
"Map",
"for",
"this",
"markov",
"model",
"."
] | python | train |
apple/turicreate | deps/src/libxml2-2.9.1/python/libxml2.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L4858-L4863 | def schemaValidateDoc(self, ctxt):
"""Validate a document tree in memory. """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlSchemaValidateDoc(ctxt__o, self._o)
return ret | [
"def",
"schemaValidateDoc",
"(",
"self",
",",
"ctxt",
")",
":",
"if",
"ctxt",
"is",
"None",
":",
"ctxt__o",
"=",
"None",
"else",
":",
"ctxt__o",
"=",
"ctxt",
".",
"_o",
"ret",
"=",
"libxml2mod",
".",
"xmlSchemaValidateDoc",
"(",
"ctxt__o",
",",
"self",
... | Validate a document tree in memory. | [
"Validate",
"a",
"document",
"tree",
"in",
"memory",
"."
] | python | train |
kivy/python-for-android | pythonforandroid/recipe.py | https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/recipe.py#L255-L264 | def get_build_container_dir(self, arch):
'''Given the arch name, returns the directory where it will be
built.
This returns a different directory depending on what
alternative or optional dependencies are being built.
'''
dir_name = self.get_dir_name()
return join(self.ctx.build_dir, 'other_builds',
dir_name, '{}__ndk_target_{}'.format(arch, self.ctx.ndk_api)) | [
"def",
"get_build_container_dir",
"(",
"self",
",",
"arch",
")",
":",
"dir_name",
"=",
"self",
".",
"get_dir_name",
"(",
")",
"return",
"join",
"(",
"self",
".",
"ctx",
".",
"build_dir",
",",
"'other_builds'",
",",
"dir_name",
",",
"'{}__ndk_target_{}'",
"."... | Given the arch name, returns the directory where it will be
built.
This returns a different directory depending on what
alternative or optional dependencies are being built. | [
"Given",
"the",
"arch",
"name",
"returns",
"the",
"directory",
"where",
"it",
"will",
"be",
"built",
"."
] | python | train |
wesyoung/pyzyre | czmq/_czmq_ctypes.py | https://github.com/wesyoung/pyzyre/blob/22d4c757acefcfdb700d3802adaf30b402bb9eea/czmq/_czmq_ctypes.py#L4931-L4936 | def set_argsx(self, arguments, *args):
"""
Setup the command line arguments, the first item must be an (absolute) filename
to run. Variadic function, must be NULL terminated.
"""
return lib.zproc_set_argsx(self._as_parameter_, arguments, *args) | [
"def",
"set_argsx",
"(",
"self",
",",
"arguments",
",",
"*",
"args",
")",
":",
"return",
"lib",
".",
"zproc_set_argsx",
"(",
"self",
".",
"_as_parameter_",
",",
"arguments",
",",
"*",
"args",
")"
] | Setup the command line arguments, the first item must be an (absolute) filename
to run. Variadic function, must be NULL terminated. | [
"Setup",
"the",
"command",
"line",
"arguments",
"the",
"first",
"item",
"must",
"be",
"an",
"(",
"absolute",
")",
"filename",
"to",
"run",
".",
"Variadic",
"function",
"must",
"be",
"NULL",
"terminated",
"."
] | python | train |
thautwarm/Redy | Redy/Collections/Traversal.py | https://github.com/thautwarm/Redy/blob/8beee5c5f752edfd2754bb1e6b5f4acb016a7770/Redy/Collections/Traversal.py#L81-L94 | def each_do(action: Callable[[T], None]):
"""
>>> from Redy.Collections import Traversal, Flow
>>> lst: Iterable[int] = [1, 2, 3]
>>> def action(x: int) -> None: print(x)
>>> x = Flow(lst)[Traversal.each_do(action)]
>>> assert x.unbox is None
"""
def inner(collection: ActualIterable[T]):
for each in collection:
action(each)
return inner | [
"def",
"each_do",
"(",
"action",
":",
"Callable",
"[",
"[",
"T",
"]",
",",
"None",
"]",
")",
":",
"def",
"inner",
"(",
"collection",
":",
"ActualIterable",
"[",
"T",
"]",
")",
":",
"for",
"each",
"in",
"collection",
":",
"action",
"(",
"each",
")",... | >>> from Redy.Collections import Traversal, Flow
>>> lst: Iterable[int] = [1, 2, 3]
>>> def action(x: int) -> None: print(x)
>>> x = Flow(lst)[Traversal.each_do(action)]
>>> assert x.unbox is None | [
">>>",
"from",
"Redy",
".",
"Collections",
"import",
"Traversal",
"Flow",
">>>",
"lst",
":",
"Iterable",
"[",
"int",
"]",
"=",
"[",
"1",
"2",
"3",
"]",
">>>",
"def",
"action",
"(",
"x",
":",
"int",
")",
"-",
">",
"None",
":",
"print",
"(",
"x",
... | python | train |
eumis/pyviews | pyviews/core/xml.py | https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/core/xml.py#L111-L123 | def parse(self, xml_file, view_name=None) -> XmlNode:
"""Parses xml file with xml_path and returns XmlNode"""
self._setup_parser()
try:
self._view_name = view_name
self._parser.ParseFile(xml_file)
except ExpatError as error:
# pylint: disable=E1101
raise XmlError(errors.messages[error.code], ViewInfo(view_name, error.lineno))
root = self._root
self._reset()
return root | [
"def",
"parse",
"(",
"self",
",",
"xml_file",
",",
"view_name",
"=",
"None",
")",
"->",
"XmlNode",
":",
"self",
".",
"_setup_parser",
"(",
")",
"try",
":",
"self",
".",
"_view_name",
"=",
"view_name",
"self",
".",
"_parser",
".",
"ParseFile",
"(",
"xml... | Parses xml file with xml_path and returns XmlNode | [
"Parses",
"xml",
"file",
"with",
"xml_path",
"and",
"returns",
"XmlNode"
] | python | train |
PyCQA/astroid | astroid/scoped_nodes.py | https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/scoped_nodes.py#L1719-L1750 | def _is_metaclass(klass, seen=None):
""" Return if the given class can be
used as a metaclass.
"""
if klass.name == "type":
return True
if seen is None:
seen = set()
for base in klass.bases:
try:
for baseobj in base.infer():
baseobj_name = baseobj.qname()
if baseobj_name in seen:
continue
else:
seen.add(baseobj_name)
if isinstance(baseobj, bases.Instance):
# not abstract
return False
if baseobj is util.Uninferable:
continue
if baseobj is klass:
continue
if not isinstance(baseobj, ClassDef):
continue
if baseobj._type == "metaclass":
return True
if _is_metaclass(baseobj, seen):
return True
except exceptions.InferenceError:
continue
return False | [
"def",
"_is_metaclass",
"(",
"klass",
",",
"seen",
"=",
"None",
")",
":",
"if",
"klass",
".",
"name",
"==",
"\"type\"",
":",
"return",
"True",
"if",
"seen",
"is",
"None",
":",
"seen",
"=",
"set",
"(",
")",
"for",
"base",
"in",
"klass",
".",
"bases"... | Return if the given class can be
used as a metaclass. | [
"Return",
"if",
"the",
"given",
"class",
"can",
"be",
"used",
"as",
"a",
"metaclass",
"."
] | python | train |
ejeschke/ginga | ginga/web/pgw/ipg.py | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/web/pgw/ipg.py#L107-L114 | def load_hdu(self, hdu):
"""
Load an HDU into the viewer.
"""
image = AstroImage.AstroImage(logger=self.logger)
image.load_hdu(hdu)
self.set_image(image) | [
"def",
"load_hdu",
"(",
"self",
",",
"hdu",
")",
":",
"image",
"=",
"AstroImage",
".",
"AstroImage",
"(",
"logger",
"=",
"self",
".",
"logger",
")",
"image",
".",
"load_hdu",
"(",
"hdu",
")",
"self",
".",
"set_image",
"(",
"image",
")"
] | Load an HDU into the viewer. | [
"Load",
"an",
"HDU",
"into",
"the",
"viewer",
"."
] | python | train |
sentinel-hub/eo-learn | ml_tools/eolearn/ml_tools/classifier.py | https://github.com/sentinel-hub/eo-learn/blob/b8c390b9f553c561612fe9eb64e720611633a035/ml_tools/eolearn/ml_tools/classifier.py#L72-L91 | def _check_image(self, X):
"""
Checks the image size and its compatibility with classifier's receptive field.
At this moment it is required that image size = K * receptive_field. This will
be relaxed in future with the introduction of padding.
"""
if (len(X.shape) < 3) or (len(X.shape) > 4):
raise ValueError('Input has to have shape [n_samples, n_pixels_y, n_pixels_x] '
'or [n_samples, n_pixels_y, n_pixels_x, n_bands].')
self._samples = X.shape[0]
self._image_size = X.shape[1:3]
if (self._image_size[0] % self.receptive_field[0]) or (self._image_size[1] % self.receptive_field[1]):
raise ValueError('Image (%d,%d) and receptive fields (%d,%d) mismatch.\n'
'Resize your image to be divisible by the receptive field.'
% (self._image_size[0], self._image_size[1], self.receptive_field[0],
self.receptive_field[1]))
"def",
"_check_image",
"(",
"self",
",",
"X",
")",
":",
"if",
"(",
"len",
"(",
"X",
".",
"shape",
")",
"<",
"3",
")",
"or",
"(",
"len",
"(",
"X",
".",
"shape",
")",
">",
"4",
")",
":",
"raise",
"ValueError",
"(",
"'Input has to have shape [n_sample... | Checks the image size and its compatibility with classifier's receptive field.
At this moment it is required that image size = K * receptive_field. This will
be relaxed in future with the introduction of padding. | [
"Checks",
"the",
"image",
"size",
"and",
"its",
"compatibility",
"with",
"classifier",
"s",
"receptive",
"field",
".",
"At",
"this",
"moment",
"it",
"is",
"required",
"that",
"image",
"size",
"=",
"K",
"*",
"receptive_field",
".",
"This",
"will",
"be",
"re... | python | train |
UCSBarchlab/PyRTL | pyrtl/helperfuncs.py | https://github.com/UCSBarchlab/PyRTL/blob/0988e5c9c10ededd5e1f58d5306603f9edf4b3e2/pyrtl/helperfuncs.py#L200-L223 | def val_to_signed_integer(value, bitwidth):
""" Return value as intrepreted as a signed integer under twos complement.
:param value: a python integer holding the value to convert
:param bitwidth: the length of the integer in bits to assume for conversion
Given an unsigned integer (not a wirevector!) convert that to a signed
integer. This is useful for printing and interpreting values which are
negative numbers in twos complement. ::
val_to_signed_integer(0xff, 8) == -1
"""
if isinstance(value, WireVector) or isinstance(bitwidth, WireVector):
raise PyrtlError('inputs must not be wirevectors')
if bitwidth < 1:
raise PyrtlError('bitwidth must be a positive integer')
neg_mask = 1 << (bitwidth - 1)
neg_part = value & neg_mask
pos_mask = neg_mask - 1
pos_part = value & pos_mask
return pos_part - neg_part | [
"def",
"val_to_signed_integer",
"(",
"value",
",",
"bitwidth",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"WireVector",
")",
"or",
"isinstance",
"(",
"bitwidth",
",",
"WireVector",
")",
":",
"raise",
"PyrtlError",
"(",
"'inputs must not be wirevectors'",
"... | Return value as intrepreted as a signed integer under twos complement.
:param value: a python integer holding the value to convert
:param bitwidth: the length of the integer in bits to assume for conversion
Given an unsigned integer (not a wirevector!) convert that to a signed
integer. This is useful for printing and interpreting values which are
negative numbers in twos complement. ::
val_to_signed_integer(0xff, 8) == -1 | [
"Return",
"value",
"as",
"intrepreted",
"as",
"a",
"signed",
"integer",
"under",
"twos",
"complement",
"."
] | python | train |
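A few worked values for the two's-complement conversion defined above:

```python
assert val_to_signed_integer(0xff, 8) == -1    # 0b11111111 -> -1
assert val_to_signed_integer(0x7f, 8) == 127   # sign bit clear: value unchanged
assert val_to_signed_integer(0x80, 8) == -128  # only the sign bit set
assert val_to_signed_integer(0xfe, 8) == -2    # pos_part 126 minus neg_part 128
```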
rsgalloway/grit | grit/repo/version.py | https://github.com/rsgalloway/grit/blob/e6434ad8a1f4ac5d0903ebad630c81f8a5164d78/grit/repo/version.py#L51-L60 | def iteritems(self):
"""Generator that yields Items"""
if self.type in ['blob']:
return
for path, mode, sha in self.tree.iteritems():
item = Item(self, sha, path, mode)
yield item
for i in item.iteritems():
yield i | [
"def",
"iteritems",
"(",
"self",
")",
":",
"if",
"self",
".",
"type",
"in",
"[",
"'blob'",
"]",
":",
"raise",
"StopIteration",
"for",
"path",
",",
"mode",
",",
"sha",
"in",
"self",
".",
"tree",
".",
"iteritems",
"(",
")",
":",
"item",
"=",
"Item",
... | Generator that yields Items | [
"Generator",
"that",
"yields",
"Items"
] | python | train |
lablup/backend.ai-client-py | src/ai/backend/client/kernel.py | https://github.com/lablup/backend.ai-client-py/blob/a063d774fea6f4350b89498c40d3c837ec3029a7/src/ai/backend/client/kernel.py#L307-L361 | async def upload(self, files: Sequence[Union[str, Path]],
basedir: Union[str, Path] = None,
show_progress: bool = False):
'''
Uploads the given list of files to the compute session.
You may refer them in the batch-mode execution or from the code
executed in the server afterwards.
:param files: The list of file paths in the client-side.
If the paths include directories, the location of them in the compute
session is calculated from the relative path to *basedir* and all
intermediate parent directories are automatically created if not exists.
For example, if a file path is ``/home/user/test/data.txt`` (or
``test/data.txt``) where *basedir* is ``/home/user`` (or the current
working directory is ``/home/user``), the uploaded file is located at
``/home/work/test/data.txt`` in the compute session container.
:param basedir: The directory prefix where the files reside.
The default value is the current working directory.
:param show_progress: Displays a progress bar during uploads.
'''
params = {}
if self.owner_access_key:
params['owner_access_key'] = self.owner_access_key
base_path = (Path.cwd() if basedir is None
else Path(basedir).resolve())
files = [Path(file).resolve() for file in files]
total_size = 0
for file_path in files:
total_size += file_path.stat().st_size
tqdm_obj = tqdm(desc='Uploading files',
unit='bytes', unit_scale=True,
total=total_size,
disable=not show_progress)
with tqdm_obj:
attachments = []
for file_path in files:
try:
attachments.append(AttachedFile(
str(file_path.relative_to(base_path)),
ProgressReportingReader(str(file_path),
tqdm_instance=tqdm_obj),
'application/octet-stream',
))
except ValueError:
msg = 'File "{0}" is outside of the base directory "{1}".' \
.format(file_path, base_path)
raise ValueError(msg) from None
rqst = Request(self.session,
'POST', '/kernel/{}/upload'.format(self.kernel_id),
params=params)
rqst.attach_files(attachments)
async with rqst.fetch() as resp:
return resp | [
"async",
"def",
"upload",
"(",
"self",
",",
"files",
":",
"Sequence",
"[",
"Union",
"[",
"str",
",",
"Path",
"]",
"]",
",",
"basedir",
":",
"Union",
"[",
"str",
",",
"Path",
"]",
"=",
"None",
",",
"show_progress",
":",
"bool",
"=",
"False",
")",
... | Uploads the given list of files to the compute session.
You may refer them in the batch-mode execution or from the code
executed in the server afterwards.
:param files: The list of file paths in the client-side.
If the paths include directories, the location of them in the compute
session is calculated from the relative path to *basedir* and all
intermediate parent directories are automatically created if not exists.
For example, if a file path is ``/home/user/test/data.txt`` (or
``test/data.txt``) where *basedir* is ``/home/user`` (or the current
working directory is ``/home/user``), the uploaded file is located at
``/home/work/test/data.txt`` in the compute session container.
:param basedir: The directory prefix where the files reside.
The default value is the current working directory.
:param show_progress: Displays a progress bar during uploads. | [
"Uploads",
"the",
"given",
"list",
"of",
"files",
"to",
"the",
"compute",
"session",
".",
"You",
"may",
"refer",
"them",
"in",
"the",
"batch",
"-",
"mode",
"execution",
"or",
"from",
"the",
"code",
"executed",
"in",
"the",
"server",
"afterwards",
"."
] | python | train |
10gen/mongo-orchestration | mongo_orchestration/process.py | https://github.com/10gen/mongo-orchestration/blob/81fd2224205922ea2178b08190b53a33aec47261/mongo_orchestration/process.py#L81-L92 | def __check_port(self, port):
"""check port status
return True if port is free, False otherwise
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind((_host(), port))
return True
except socket.error:
return False
finally:
s.close() | [
"def",
"__check_port",
"(",
"self",
",",
"port",
")",
":",
"s",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"try",
":",
"s",
".",
"bind",
"(",
"(",
"_host",
"(",
")",
",",
"port",
")",
")",... | check port status
return True if port is free, False otherwise | [
"check",
"port",
"status",
"return",
"True",
"if",
"port",
"is",
"free",
"False",
"else"
] | python | train |
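A self-contained sketch of the same bind-based availability check, with ``_host()`` replaced by an explicit address for illustration:

```python
import socket

def port_is_free(host, port):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind((host, port))  # bind succeeds only if nothing else holds the port
        return True
    except socket.error:
        return False
    finally:
        s.close()

print(port_is_free("127.0.0.1", 27017))
```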
yougov/mongo-connector | mongo_connector/oplog_manager.py | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/oplog_manager.py#L736-L754 | def _get_oplog_timestamp(self, newest_entry):
"""Return the timestamp of the latest or earliest entry in the oplog.
"""
sort_order = pymongo.DESCENDING if newest_entry else pymongo.ASCENDING
curr = (
self.oplog.find({"op": {"$ne": "n"}}).sort("$natural", sort_order).limit(-1)
)
try:
ts = next(curr)["ts"]
except StopIteration:
LOG.debug("OplogThread: oplog is empty.")
return None
LOG.debug(
"OplogThread: %s oplog entry has timestamp %s."
% ("Newest" if newest_entry else "Oldest", ts)
)
return ts | [
"def",
"_get_oplog_timestamp",
"(",
"self",
",",
"newest_entry",
")",
":",
"sort_order",
"=",
"pymongo",
".",
"DESCENDING",
"if",
"newest_entry",
"else",
"pymongo",
".",
"ASCENDING",
"curr",
"=",
"(",
"self",
".",
"oplog",
".",
"find",
"(",
"{",
"\"op\"",
... | Return the timestamp of the latest or earliest entry in the oplog. | [
"Return",
"the",
"timestamp",
"of",
"the",
"latest",
"or",
"earliest",
"entry",
"in",
"the",
"oplog",
"."
] | python | train |
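A hedged sketch of the same query outside the class, assuming a replica-set `mongod` is reachable on `localhost:27017` (standalone servers have no `local.oplog.rs`):

```python
import pymongo

client = pymongo.MongoClient("localhost", 27017)
oplog = client.local["oplog.rs"]

# limit(-1) asks for at most one document in a single batch, so next()
# yields the newest non-noop entry under a $natural descending sort.
cursor = (oplog.find({"op": {"$ne": "n"}})
               .sort("$natural", pymongo.DESCENDING)
               .limit(-1))
try:
    print(next(cursor)["ts"])
except StopIteration:
    print("oplog is empty")
```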
shoebot/shoebot | shoebot/core/cairo_sink.py | https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/shoebot/core/cairo_sink.py#L64-L78 | def _output_file(self, frame):
"""
If filename was used output a filename, along with multifile
numbered filenames will be used.
If buff was specified it is returned.
:return: Output buff or filename.
"""
if self.buff:
return self.buff
elif self.multifile:
return self.file_root + "_%03d" % frame + self.file_ext
else:
return self.filename | [
"def",
"_output_file",
"(",
"self",
",",
"frame",
")",
":",
"if",
"self",
".",
"buff",
":",
"return",
"self",
".",
"buff",
"elif",
"self",
".",
"multifile",
":",
"return",
"self",
".",
"file_root",
"+",
"\"_%03d\"",
"%",
"frame",
"+",
"self",
".",
"f... | If filename was used output a filename, along with multifile
numbered filenames will be used.
If buff was specified it is returned.
:return: Output buff or filename. | [
"If",
"filename",
"was",
"used",
"output",
"a",
"filename",
"along",
"with",
"multifile",
"numbered",
"filenames",
"will",
"be",
"used",
"."
] | python | valid |
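The multifile naming is root/extension splitting plus a zero-padded frame counter; a standalone sketch:

```python
import os

def output_filename(filename, frame, multifile=True):
    """Build per-frame names like render_007.png from render.png."""
    root, ext = os.path.splitext(filename)
    return root + "_%03d" % frame + ext if multifile else filename

print(output_filename("render.png", 7))                   # render_007.png
print(output_filename("render.png", 7, multifile=False))  # render.png
```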
armet/python-armet | armet/query/parser.py | https://github.com/armet/python-armet/blob/d61eca9082256cb1e7f7f3c7f2fbc4b697157de7/armet/query/parser.py#L284-L292 | def parse_directive(key):
"""
Takes a key of type (foo:bar) and returns either the key and the
directive, or the key and None (for no directive.)
"""
if constants.DIRECTIVE in key:
return key.split(constants.DIRECTIVE, 1)
else:
return key, None | [
"def",
"parse_directive",
"(",
"key",
")",
":",
"if",
"constants",
".",
"DIRECTIVE",
"in",
"key",
":",
"return",
"key",
".",
"split",
"(",
"constants",
".",
"DIRECTIVE",
",",
"1",
")",
"else",
":",
"return",
"key",
",",
"None"
] | Takes a key of type (foo:bar) and returns either the key and the
directive, or the key and None (for no directive.) | [
"Takes",
"a",
"key",
"of",
"type",
"(",
"foo",
":",
"bar",
")",
"and",
"returns",
"either",
"the",
"key",
"and",
"the",
"directive",
"or",
"the",
"key",
"and",
"None",
"(",
"for",
"no",
"directive",
".",
")"
] | python | valid |
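With `constants.DIRECTIVE` assumed to be `':'`, the split behaves like this sketch:

```python
DIRECTIVE = ":"  # assumed value of constants.DIRECTIVE

def parse_directive(key):
    """Split 'foo:bar' into ('foo', 'bar'); plain keys get a None directive."""
    if DIRECTIVE in key:
        return tuple(key.split(DIRECTIVE, 1))
    return key, None

print(parse_directive("created:year"))  # ('created', 'year')
print(parse_directive("created"))       # ('created', None)
```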
GNS3/gns3-server | gns3server/compute/base_node.py | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/base_node.py#L303-L327 | def close(self):
"""
Close the node process.
"""
if self._closed:
return False
log.info("{module}: '{name}' [{id}]: is closing".format(module=self.manager.module_name,
name=self.name,
id=self.id))
if self._console:
self._manager.port_manager.release_tcp_port(self._console, self._project)
self._console = None
if self._wrap_console:
self._manager.port_manager.release_tcp_port(self._internal_console_port, self._project)
self._internal_console_port = None
if self._aux:
self._manager.port_manager.release_tcp_port(self._aux, self._project)
self._aux = None
self._closed = True
return True | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"_closed",
":",
"return",
"False",
"log",
".",
"info",
"(",
"\"{module}: '{name}' [{id}]: is closing\"",
".",
"format",
"(",
"module",
"=",
"self",
".",
"manager",
".",
"module_name",
",",
"name",
"... | Close the node process. | [
"Close",
"the",
"node",
"process",
"."
] | python | train |
nicholasbishop/shaderdef | tools/version_util.py | https://github.com/nicholasbishop/shaderdef/blob/b68a9faf4c7cfa61e32a2e49eb2cae2f2e2b1f78/tools/version_util.py#L19-L28 | def load_version_as_string():
"""Get the current version from version.py as a string."""
with open(VERSION_PATH, 'r') as rfile:
contents = rfile.read().strip()
_, version = contents.split('=')
version = version.strip()
# Remove quotes
return version.strip('"\'') | [
"def",
"load_version_as_string",
"(",
")",
":",
"with",
"open",
"(",
"VERSION_PATH",
",",
"'r'",
")",
"as",
"rfile",
":",
"contents",
"=",
"rfile",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
"_",
",",
"version",
"=",
"contents",
".",
"split",
"("... | Get the current version from version.py as a string. | [
"Get",
"the",
"current",
"version",
"from",
"version",
".",
"py",
"as",
"a",
"string",
"."
] | python | train |
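The parsing is plain string splitting; this sketch substitutes an in-memory file for `VERSION_PATH`:

```python
import io

# Stand-in for open(VERSION_PATH) in the original helper.
version_file = io.StringIO('__version__ = "0.3.1"\n')

contents = version_file.read().strip()
_, version = contents.split("=")
print(version.strip().strip("\"'"))  # 0.3.1
```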
Kronuz/pyScss | scss/types.py | https://github.com/Kronuz/pyScss/blob/fb32b317f6e2b4b4aad2b86a74844658ac4aa11e/scss/types.py#L442-L483 | def _add_sub(self, other, op):
"""Implements both addition and subtraction."""
if not isinstance(other, Number):
return NotImplemented
# If either side is unitless, inherit the other side's units. Skip all
# the rest of the conversion math, too.
if self.is_unitless or other.is_unitless:
return Number(
op(self.value, other.value),
unit_numer=self.unit_numer or other.unit_numer,
unit_denom=self.unit_denom or other.unit_denom,
)
# Likewise, if either side is zero, it can auto-cast to any units
if self.value == 0:
return Number(
op(self.value, other.value),
unit_numer=other.unit_numer,
unit_denom=other.unit_denom,
)
elif other.value == 0:
return Number(
op(self.value, other.value),
unit_numer=self.unit_numer,
unit_denom=self.unit_denom,
)
# Reduce both operands to the same units
left = self.to_base_units()
right = other.to_base_units()
if left.unit_numer != right.unit_numer or left.unit_denom != right.unit_denom:
raise ValueError("Can't reconcile units: %r and %r" % (self, other))
new_amount = op(left.value, right.value)
# Convert back to the left side's units
if left.value != 0:
new_amount = new_amount * self.value / left.value
return Number(new_amount, unit_numer=self.unit_numer, unit_denom=self.unit_denom) | [
"def",
"_add_sub",
"(",
"self",
",",
"other",
",",
"op",
")",
":",
"if",
"not",
"isinstance",
"(",
"other",
",",
"Number",
")",
":",
"return",
"NotImplemented",
"# If either side is unitless, inherit the other side's units. Skip all",
"# the rest of the conversion math, ... | Implements both addition and subtraction. | [
"Implements",
"both",
"addition",
"and",
"subtraction",
"."
] | python | train |
sci-bots/mpm | mpm/api.py | https://github.com/sci-bots/mpm/blob/a69651cda4b37ee6b17df4fe0809249e7f4dc536/mpm/api.py#L578-L643 | def installed_plugins(only_conda=False):
'''
.. versionadded:: 0.20
Parameters
----------
only_conda : bool, optional
Only consider plugins that are installed **as Conda packages**.
.. versionadded:: 0.22
Returns
-------
list
List of properties corresponding to each available plugin that is
**installed**.
.. versionchanged:: 0.22
If :data:`only_conda` is ``False``, a plugin is assumed to be
*installed* if it is present in the
``share/microdrop/plugins/available`` directory **and** is a
**real** directory (i.e., not a link).
If :data:`only_conda` is ``True``, only properties for plugins that
are installed **as Conda packages** are returned.
'''
available_path = MICRODROP_CONDA_SHARE.joinpath('plugins', 'available')
if not available_path.isdir():
return []
installed_plugins_ = []
for plugin_path_i in available_path.dirs():
# Only process plugin directory if it is *not a link*.
if not _islinklike(plugin_path_i):
# Read plugin package info from `properties.yml` file.
try:
with plugin_path_i.joinpath('properties.yml').open('r') as input_:
properties_i = yaml.load(input_.read())
except:
logger.info('[warning] Could not read package info: `%s`',
plugin_path_i.joinpath('properties.yml'),
exc_info=True)
else:
properties_i['path'] = plugin_path_i.realpath()
installed_plugins_.append(properties_i)
if only_conda:
# Only consider plugins that are installed **as Conda packages**.
try:
package_names = [plugin_i['package_name']
for plugin_i in installed_plugins_]
conda_package_infos = ch.package_version(package_names,
verbose=False)
except ch.PackageNotFound as exception:
# At least one specified plugin package name did not correspond to an
# installed Conda package.
logger.warning(str(exception))
conda_package_infos = exception.available
# Extract name from each Conda plugin package.
installed_package_names = set([package_i['name']
for package_i in conda_package_infos])
return [plugin_i for plugin_i in installed_plugins_
if plugin_i['package_name'] in installed_package_names]
else:
# Return all available plugins.
return installed_plugins_ | [
"def",
"installed_plugins",
"(",
"only_conda",
"=",
"False",
")",
":",
"available_path",
"=",
"MICRODROP_CONDA_SHARE",
".",
"joinpath",
"(",
"'plugins'",
",",
"'available'",
")",
"if",
"not",
"available_path",
".",
"isdir",
"(",
")",
":",
"return",
"[",
"]",
... | .. versionadded:: 0.20
Parameters
----------
only_conda : bool, optional
Only consider plugins that are installed **as Conda packages**.
.. versionadded:: 0.22
Returns
-------
list
List of properties corresponding to each available plugin that is
**installed**.
.. versionchanged:: 0.22
If :data:`only_conda` is ``False``, a plugin is assumed to be
*installed* if it is present in the
``share/microdrop/plugins/available`` directory **and** is a
**real** directory (i.e., not a link).
If :data:`only_conda` is ``True``, only properties for plugins that
are installed **as Conda packages** are returned. | [
"..",
"versionadded",
"::",
"0",
".",
"20"
] | python | train |
OCA/vertical-hotel | hotel_reservation/models/hotel_reservation.py | https://github.com/OCA/vertical-hotel/blob/a01442e92b5ea1fda7fb9e6180b3211e8749a35a/hotel_reservation/models/hotel_reservation.py#L314-L401 | def confirmed_reservation(self):
"""
This method creates a new record set for hotel room reservation line
-------------------------------------------------------------------
@param self: The object pointer
@return: new record set for hotel room reservation line.
"""
reservation_line_obj = self.env['hotel.room.reservation.line']
vals = {}
for reservation in self:
reserv_checkin = datetime.strptime(reservation.checkin, dt)
reserv_checkout = datetime.strptime(reservation.checkout, dt)
room_bool = False
for line_id in reservation.reservation_line:
for room_id in line_id.reserve:
if room_id.room_reservation_line_ids:
for reserv in room_id.room_reservation_line_ids.\
search([('status', 'in', ('confirm', 'done')),
('room_id', '=', room_id.id)]):
check_in = datetime.strptime(reserv.check_in, dt)
check_out = datetime.strptime(reserv.check_out, dt)
if check_in <= reserv_checkin <= check_out:
room_bool = True
if check_in <= reserv_checkout <= check_out:
room_bool = True
if reserv_checkin <= check_in and \
reserv_checkout >= check_out:
room_bool = True
mytime = "%Y-%m-%d"
r_checkin = datetime.strptime(reservation.checkin,
dt).date()
r_checkin = r_checkin.strftime(mytime)
r_checkout = datetime.\
strptime(reservation.checkout, dt).date()
r_checkout = r_checkout.strftime(mytime)
check_intm = datetime.strptime(reserv.check_in,
dt).date()
check_outtm = datetime.strptime(reserv.check_out,
dt).date()
check_intm = check_intm.strftime(mytime)
check_outtm = check_outtm.strftime(mytime)
range1 = [r_checkin, r_checkout]
range2 = [check_intm, check_outtm]
overlap_dates = self.check_overlap(*range1) \
& self.check_overlap(*range2)
overlap_dates = [datetime.strftime(dates,
'%d/%m/%Y') for
dates in overlap_dates]
if room_bool:
raise ValidationError(_('You tried to Confirm '
'Reservation with room'
' those already '
'reserved in this '
'Reservation Period. '
'Overlap Dates are '
'%s') % overlap_dates)
else:
self.state = 'confirm'
vals = {'room_id': room_id.id,
'check_in': reservation.checkin,
'check_out': reservation.checkout,
'state': 'assigned',
'reservation_id': reservation.id,
}
room_id.write({'isroom': False,
'status': 'occupied'})
else:
self.state = 'confirm'
vals = {'room_id': room_id.id,
'check_in': reservation.checkin,
'check_out': reservation.checkout,
'state': 'assigned',
'reservation_id': reservation.id,
}
room_id.write({'isroom': False,
'status': 'occupied'})
else:
self.state = 'confirm'
vals = {'room_id': room_id.id,
'check_in': reservation.checkin,
'check_out': reservation.checkout,
'state': 'assigned',
'reservation_id': reservation.id,
}
room_id.write({'isroom': False,
'status': 'occupied'})
reservation_line_obj.create(vals)
return True | [
"def",
"confirmed_reservation",
"(",
"self",
")",
":",
"reservation_line_obj",
"=",
"self",
".",
"env",
"[",
"'hotel.room.reservation.line'",
"]",
"vals",
"=",
"{",
"}",
"for",
"reservation",
"in",
"self",
":",
"reserv_checkin",
"=",
"datetime",
".",
"strptime",... | This method create a new record set for hotel room reservation line
-------------------------------------------------------------------
@param self: The object pointer
@return: new record set for hotel room reservation line. | [
"This",
"method",
"create",
"a",
"new",
"record",
"set",
"for",
"hotel",
"room",
"reservation",
"line",
"-------------------------------------------------------------------"
] | python | train |
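The unshown `check_overlap` helper appears to expand a check-in/check-out pair into the set of covered dates, so intersecting two such sets yields the clashing days; a sketch under that assumption:

```python
from datetime import date, timedelta

def dates_in_range(checkin, checkout):
    """Expand an inclusive check-in/check-out pair into a set of dates."""
    return {checkin + timedelta(n)
            for n in range((checkout - checkin).days + 1)}

r1 = dates_in_range(date(2024, 1, 10), date(2024, 1, 15))
r2 = dates_in_range(date(2024, 1, 13), date(2024, 1, 20))
overlap = sorted(r1 & r2)
print([d.strftime("%d/%m/%Y") for d in overlap])
# ['13/01/2024', '14/01/2024', '15/01/2024']
```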
greyli/flask-dropzone | flask_dropzone/__init__.py | https://github.com/greyli/flask-dropzone/blob/eb1d5ef16d8f83a12e6fed1bb9412a0c12c6d584/flask_dropzone/__init__.py#L325-L357 | def create(action='', csrf=False, action_view='', **kwargs):
"""Create a Dropzone form with given action.
.. versionchanged:: 1.4.2
Added ``csrf`` parameter to enable CSRF protect.
.. versionchanged:: 1.4.3
Added ``action`` parameter to replace ``action_view``; ``action_view`` is now deprecated.
.. versionchanged:: 1.5.0
If ``DROPZONE_IN_FORM`` set to ``True``, create ``<div>`` instead of ``<form>``.
:param action: The action attribute in ``<form>``, pass the url which handle uploads.
:param csrf: Enable CSRF protect or not, same with ``DROPZONE_ENABLE_CSRF``.
:param action_view: The view which handle the post data, deprecated since 1.4.2.
"""
if current_app.config['DROPZONE_IN_FORM']:
return Markup('<div class="dropzone" id="myDropzone"></div>')
if action:
action_url = get_url(action)
else:
action_url = url_for(action_view, **kwargs)
if csrf or current_app.config['DROPZONE_ENABLE_CSRF']:
if 'csrf' not in current_app.extensions:
raise RuntimeError("CSRFProtect is not initialized. It's required to enable CSRF protect, \
see docs for more details.")
csrf_field = render_template_string('<input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/>')
else:
csrf_field = ''
return Markup('''<form action="%s" method="post" class="dropzone" id="myDropzone"
enctype="multipart/form-data">%s</form>''' % (action_url, csrf_field)) | [
"def",
"create",
"(",
"action",
"=",
"''",
",",
"csrf",
"=",
"False",
",",
"action_view",
"=",
"''",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"current_app",
".",
"config",
"[",
"'DROPZONE_IN_FORM'",
"]",
":",
"return",
"Markup",
"(",
"'<div class=\"dropz... | Create a Dropzone form with given action.
.. versionchanged:: 1.4.2
Added ``csrf`` parameter to enable CSRF protect.
.. versionchanged:: 1.4.3
Added ``action`` parameter to replace ``action_view``; ``action_view`` is now deprecated.
.. versionchanged:: 1.5.0
If ``DROPZONE_IN_FORM`` set to ``True``, create ``<div>`` instead of ``<form>``.
:param action: The action attribute in ``<form>``, pass the url which handle uploads.
:param csrf: Enable CSRF protect or not, same with ``DROPZONE_ENABLE_CSRF``.
:param action_view: The view which handle the post data, deprecated since 1.4.2. | [
"Create",
"a",
"Dropzone",
"form",
"with",
"given",
"action",
"."
] | python | train |
tarbell-project/tarbell | tarbell/utils.py | https://github.com/tarbell-project/tarbell/blob/818b3d3623dcda5a08a5bf45550219719b0f0365/tarbell/utils.py#L34-L44 | def list_get(l, idx, default=None):
"""
Get from a list with an optional default value.
"""
try:
if l[idx]:
return l[idx]
else:
return default
except IndexError:
return default | [
"def",
"list_get",
"(",
"l",
",",
"idx",
",",
"default",
"=",
"None",
")",
":",
"try",
":",
"if",
"l",
"[",
"idx",
"]",
":",
"return",
"l",
"[",
"idx",
"]",
"else",
":",
"return",
"default",
"except",
"IndexError",
":",
"return",
"default"
] | Get from a list with an optional default value. | [
"Get",
"from",
"a",
"list",
"with",
"an",
"optional",
"default",
"value",
"."
] | python | train |
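Note that the truthiness test makes empty values fall back to the default as well, not just missing indexes:

```python
def list_get(l, idx, default=None):
    try:
        return l[idx] if l[idx] else default
    except IndexError:
        return default

row = ["alpha", "", "gamma"]
print(list_get(row, 0))           # alpha
print(list_get(row, 1, "beta"))   # beta  (empty string is falsy)
print(list_get(row, 9, "omega"))  # omega (index out of range)
```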
saltstack/salt | salt/cloud/clouds/vmware.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/vmware.py#L3524-L3610 | def list_hbas(kwargs=None, call=None):
'''
List all HBAs for each host system; or all HBAs for a specified host
system; or HBAs of specified type for each host system; or HBAs of
specified type for a specified host system in this VMware environment
.. note::
You can specify type as either ``parallel``, ``iscsi``, ``block``
or ``fibre``.
To list all HBAs for each host system:
CLI Example:
.. code-block:: bash
salt-cloud -f list_hbas my-vmware-config
To list all HBAs for a specified host system:
CLI Example:
.. code-block:: bash
salt-cloud -f list_hbas my-vmware-config host="hostSystemName"
To list HBAs of specified type for each host system:
CLI Example:
.. code-block:: bash
salt-cloud -f list_hbas my-vmware-config type="HBAType"
To list HBAs of specified type for a specified host system:
CLI Example:
.. code-block:: bash
salt-cloud -f list_hbas my-vmware-config host="hostSystemName" type="HBAtype"
'''
if call != 'function':
raise SaltCloudSystemExit(
'The list_hbas function must be called with '
'-f or --function.'
)
ret = {}
hba_type = kwargs.get('type').lower() if kwargs and 'type' in kwargs else None
host_name = kwargs.get('host') if kwargs and 'host' in kwargs else None
host_properties = [
"name",
"config.storageDevice.hostBusAdapter"
]
if hba_type and hba_type not in ["parallel", "block", "iscsi", "fibre"]:
raise SaltCloudSystemExit(
'Specified hba type {0} currently not supported.'.format(hba_type)
)
host_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.HostSystem, host_properties)
for host in host_list:
ret[host['name']] = {}
for hba in host['config.storageDevice.hostBusAdapter']:
hba_spec = {
'driver': hba.driver,
'status': hba.status,
'type': type(hba).__name__.rsplit(".", 1)[1]
}
if hba_type:
if isinstance(hba, _get_hba_type(hba_type)):
if hba.model in ret[host['name']]:
ret[host['name']][hba.model][hba.device] = hba_spec
else:
ret[host['name']][hba.model] = {hba.device: hba_spec}
else:
if hba.model in ret[host['name']]:
ret[host['name']][hba.model][hba.device] = hba_spec
else:
ret[host['name']][hba.model] = {hba.device: hba_spec}
if host['name'] == host_name:
return {'HBAs by Host': {host_name: ret[host_name]}}
return {'HBAs by Host': ret} | [
"def",
"list_hbas",
"(",
"kwargs",
"=",
"None",
",",
"call",
"=",
"None",
")",
":",
"if",
"call",
"!=",
"'function'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The list_hbas function must be called with '",
"'-f or --function.'",
")",
"ret",
"=",
"{",
"}",
"hb... | List all HBAs for each host system; or all HBAs for a specified host
system; or HBAs of specified type for each host system; or HBAs of
specified type for a specified host system in this VMware environment
.. note::
You can specify type as either ``parallel``, ``iscsi``, ``block``
or ``fibre``.
To list all HBAs for each host system:
CLI Example:
.. code-block:: bash
salt-cloud -f list_hbas my-vmware-config
To list all HBAs for a specified host system:
CLI Example:
.. code-block:: bash
salt-cloud -f list_hbas my-vmware-config host="hostSystemName"
To list HBAs of specified type for each host system:
CLI Example:
.. code-block:: bash
salt-cloud -f list_hbas my-vmware-config type="HBAType"
To list HBAs of specified type for a specified host system:
CLI Example:
.. code-block:: bash
salt-cloud -f list_hbas my-vmware-config host="hostSystemName" type="HBAtype" | [
"List",
"all",
"HBAs",
"for",
"each",
"host",
"system",
";",
"or",
"all",
"HBAs",
"for",
"a",
"specified",
"host",
"system",
";",
"or",
"HBAs",
"of",
"specified",
"type",
"for",
"each",
"host",
"system",
";",
"or",
"HBAs",
"of",
"specified",
"type",
"f... | python | train |
pygobject/pgi | pgi/codegen/utils.py | https://github.com/pygobject/pgi/blob/2090435df6241a15ec2a78379a36b738b728652c/pgi/codegen/utils.py#L68-L76 | def request_name(self, name):
"""Request a name, might return the name or a similar one if already
used or reserved
"""
while name in self._blacklist:
name += "_"
self._blacklist.add(name)
return name | [
"def",
"request_name",
"(",
"self",
",",
"name",
")",
":",
"while",
"name",
"in",
"self",
".",
"_blacklist",
":",
"name",
"+=",
"\"_\"",
"self",
".",
"_blacklist",
".",
"add",
"(",
"name",
")",
"return",
"name"
] | Request a name, might return the name or a similar one if already
used or reserved | [
"Request",
"a",
"name",
"might",
"return",
"the",
"name",
"or",
"a",
"similar",
"one",
"if",
"already",
"used",
"or",
"reserved"
] | python | train |
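The blacklist grows with every handed-out name, so repeated requests for the same identifier keep gaining underscores; a self-contained sketch:

```python
class NamePool:
    """Hand out identifiers, appending '_' until the name is unused."""

    def __init__(self, reserved=()):
        self._blacklist = set(reserved)

    def request_name(self, name):
        while name in self._blacklist:
            name += "_"
        self._blacklist.add(name)
        return name

pool = NamePool(reserved={"value"})
print(pool.request_name("value"))  # value_
print(pool.request_name("value"))  # value__
```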
PmagPy/PmagPy | programs/demag_gui.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/demag_gui.py#L1835-L1873 | def plot_high_levels_data(self):
"""
Complicated function that draws the high level mean plot on canvas4,
draws all specimen, sample, or site interpretations according to the
UPPER_LEVEL_SHOW variable, draws the fisher mean or fisher mean by
polarity of all interpretations displayed, draws sample orientation
check if on, and if interpretation editor is open it calls the
interpretation editor to have it draw the same things.
"""
# self.toolbar4.home()
high_level = self.level_box.GetValue()
self.UPPER_LEVEL_NAME = self.level_names.GetValue()
self.UPPER_LEVEL_MEAN = self.mean_type_box.GetValue()
draw_net(self.high_level_eqarea)
what_is_it = self.level_box.GetValue()+": "+self.level_names.GetValue()
self.high_level_eqarea.text(-1.2, 1.15, what_is_it, {
'family': self.font_type, 'fontsize': 10*self.GUI_RESOLUTION, 'style': 'normal', 'va': 'center', 'ha': 'left'})
if self.ie_open:
self.ie.draw_net()
self.ie.write(what_is_it)
# plot elements directions
self.plot_high_level_elements()
# plot elements means
self.plot_high_level_means()
# update high level stats after plotting in case of change
self.update_high_level_stats()
# check sample orietation
if self.check_orient_on:
self.calc_and_plot_sample_orient_check()
self.canvas4.draw()
if self.ie_open:
self.ie.draw() | [
"def",
"plot_high_levels_data",
"(",
"self",
")",
":",
"# self.toolbar4.home()",
"high_level",
"=",
"self",
".",
"level_box",
".",
"GetValue",
"(",
")",
"self",
".",
"UPPER_LEVEL_NAME",
"=",
"self",
".",
"level_names",
".",
"GetValue",
"(",
")",
"self",
... | Complicated function that draws the high level mean plot on canvas4,
draws all specimen, sample, or site interpretations according to the
UPPER_LEVEL_SHOW variable, draws the fisher mean or fisher mean by
polarity of all interpretations displayed, draws sample orientation
check if on, and if interpretation editor is open it calls the
interpretation editor to have it draw the same things. | [
"Complicated",
"function",
"that",
"draws",
"the",
"high",
"level",
"mean",
"plot",
"on",
"canvas4",
"draws",
"all",
"specimen",
"sample",
"or",
"site",
"interpretations",
"according",
"to",
"the",
"UPPER_LEVEL_SHOW",
"variable",
"draws",
"the",
"fisher",
"mean",
... | python | train |
dslackw/slpkg | slpkg/main.py | https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/main.py#L166-L172 | def command_repo_remove(self):
"""Remove custom repositories
"""
if len(self.args) == 2 and self.args[0] == "repo-remove":
Repo().remove(self.args[1])
else:
usage("") | [
"def",
"command_repo_remove",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"args",
")",
"==",
"2",
"and",
"self",
".",
"args",
"[",
"0",
"]",
"==",
"\"repo-remove\"",
":",
"Repo",
"(",
")",
".",
"remove",
"(",
"self",
".",
"args",
"[",
"... | Remove custom repositories | [
"Remove",
"custom",
"repositories"
] | python | train |
ska-sa/montblanc | montblanc/examples/standalone.py | https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/examples/standalone.py#L52-L57 | def point_lm(self, context):
""" Supply point source lm coordinates to montblanc """
# Shape (npsrc, 2)
(ls, us), _ = context.array_extents(context.name)
return np.asarray(lm_coords[ls:us], dtype=context.dtype) | [
"def",
"point_lm",
"(",
"self",
",",
"context",
")",
":",
"# Shape (npsrc, 2)",
"(",
"ls",
",",
"us",
")",
",",
"_",
"=",
"context",
".",
"array_extents",
"(",
"context",
".",
"name",
")",
"return",
"np",
".",
"asarray",
"(",
"lm_coords",
"[",
"ls",
... | Supply point source lm coordinates to montblanc | [
"Supply",
"point",
"source",
"lm",
"coordinates",
"to",
"montblanc"
] | python | train |
Microsoft/knack | knack/commands.py | https://github.com/Microsoft/knack/blob/5f1a480a33f103e2688c46eef59fb2d9eaf2baad/knack/commands.py#L229-L255 | def create_command(self, name, operation, **kwargs):
""" Constructs the command object that can then be added to the command table """
if not isinstance(operation, six.string_types):
raise ValueError("Operation must be a string. Got '{}'".format(operation))
name = ' '.join(name.split())
client_factory = kwargs.get('client_factory', None)
def _command_handler(command_args):
op = CLICommandsLoader._get_op_handler(operation)
client = client_factory(command_args) if client_factory else None
result = op(client, **command_args) if client else op(**command_args)
return result
def arguments_loader():
return list(extract_args_from_signature(CLICommandsLoader._get_op_handler(operation),
excluded_params=self.excluded_command_handler_args))
def description_loader():
return extract_full_summary_from_signature(CLICommandsLoader._get_op_handler(operation))
kwargs['arguments_loader'] = arguments_loader
kwargs['description_loader'] = description_loader
cmd = self.command_cls(self.cli_ctx, name, _command_handler, **kwargs)
return cmd | [
"def",
"create_command",
"(",
"self",
",",
"name",
",",
"operation",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"isinstance",
"(",
"operation",
",",
"six",
".",
"string_types",
")",
":",
"raise",
"ValueError",
"(",
"\"Operation must be a string. Got '{}'\... | Constructs the command object that can then be added to the command table | [
"Constructs",
"the",
"command",
"object",
"that",
"can",
"then",
"be",
"added",
"to",
"the",
"command",
"table"
] | python | train |
smira/py-numa | numa.py | https://github.com/smira/py-numa/blob/eb38979c61028eb9422a4ad1eda0387cd93ea390/numa.py#L320-L331 | def get_membind():
"""
Returns the mask of nodes from which memory can currently be allocated.
@return: node mask
@rtype: C{set}
"""
bitmask = libnuma.numa_get_membind()
nodemask = nodemask_t()
libnuma.copy_bitmask_to_nodemask(bitmask, byref(nodemask))
libnuma.numa_bitmask_free(bitmask)
return numa_nodemask_to_set(nodemask) | [
"def",
"get_membind",
"(",
")",
":",
"bitmask",
"=",
"libnuma",
".",
"numa_get_membind",
"(",
")",
"nodemask",
"=",
"nodemask_t",
"(",
")",
"libnuma",
".",
"copy_bitmask_to_nodemask",
"(",
"bitmask",
",",
"byref",
"(",
"nodemask",
")",
")",
"libnuma",
".",
... | Returns the mask of nodes from which memory can currently be allocated.
@return: node mask
@rtype: C{set} | [
"Returns",
"the",
"mask",
"of",
"nodes",
"from",
"which",
"memory",
"can",
"currently",
"be",
"allocated",
"."
] | python | train |
brocade/pynos | pynos/versions/ver_6/ver_6_0_1/bgp.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/bgp.py#L154-L190 | def remove_bgp(self, **kwargs):
"""Remove BGP process completely.
Args:
vrf (str): The VRF for this BGP process.
rbridge_id (str): The rbridge ID of the device on which BGP will be
configured in a VCS fabric.
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
None
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.203', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp.local_asn(local_as='65535',
... rbridge_id='225')
... output = dev.bgp.remove_bgp(rbridge_id='225')
"""
rbridge_id = kwargs.pop('rbridge_id', '1')
callback = kwargs.pop('callback', self._callback)
disable_args = dict(rbridge_id=rbridge_id, local_as='65000')
config = getattr(self._rbridge,
'rbridge_id_router_router_bgp_router_bgp_'
'attributes_local_as')(**disable_args)
bgp = config.find('.//*.//*.//*')
bgp.remove(bgp.find('.//*'))
bgp.set('operation', 'delete')
return callback(config) | [
"def",
"remove_bgp",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"rbridge_id",
"=",
"kwargs",
".",
"pop",
"(",
"'rbridge_id'",
",",
"'1'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"disable_a... | Remove BGP process completely.
Args:
vrf (str): The VRF for this BGP process.
rbridge_id (str): The rbridge ID of the device on which BGP will be
configured in a VCS fabric.
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
None
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.203', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp.local_asn(local_as='65535',
... rbridge_id='225')
... output = dev.bgp.remove_bgp(rbridge_id='225') | [
"Remove",
"BGP",
"process",
"completely",
"."
] | python | train |
google-research/batch-ppo | agents/parts/normalize.py | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/normalize.py#L81-L108 | def update(self, value):
"""Update the mean and variance estimates.
Args:
value: Batch or single value tensor.
Returns:
Summary tensor.
"""
with tf.name_scope(self._name + '/update'):
if value.shape.ndims == self._mean.shape.ndims:
# Add a batch dimension if necessary.
value = value[None, ...]
count = tf.shape(value)[0]
with tf.control_dependencies([self._count.assign_add(count)]):
step = tf.cast(self._count, tf.float32)
mean_delta = tf.reduce_sum(value - self._mean[None, ...], 0)
new_mean = self._mean + mean_delta / step
new_mean = tf.cond(self._count > 1, lambda: new_mean, lambda: value[0])
var_delta = (
value - self._mean[None, ...]) * (value - new_mean[None, ...])
new_var_sum = self._var_sum + tf.reduce_sum(var_delta, 0)
with tf.control_dependencies([new_mean, new_var_sum]):
update = self._mean.assign(new_mean), self._var_sum.assign(new_var_sum)
with tf.control_dependencies(update):
if value.shape.ndims == 1:
value = tf.reduce_mean(value)
return self._summary('value', tf.reduce_mean(value)) | [
"def",
"update",
"(",
"self",
",",
"value",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"self",
".",
"_name",
"+",
"'/update'",
")",
":",
"if",
"value",
".",
"shape",
".",
"ndims",
"==",
"self",
".",
"_mean",
".",
"shape",
".",
"ndims",
":",
... | Update the mean and variance estimates.
Args:
value: Batch or single value tensor.
Returns:
Summary tensor. | [
"Update",
"the",
"mean",
"and",
"variance",
"estimates",
"."
] | python | train |
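The update is the batched Welford recurrence: the batch is compared against the old mean, the mean moves by the summed delta over the running count, and the variance sum accumulates `(x - old_mean) * (x - new_mean)`, which is algebraically Chan's parallel update. A NumPy sketch of the same arithmetic:

```python
import numpy as np

class StreamingMoments:
    """Batched running mean/variance mirroring the graph-mode update."""

    def __init__(self, shape):
        self.count = 0
        self.mean = np.zeros(shape)
        self.var_sum = np.zeros(shape)

    def update(self, batch):
        batch = np.atleast_2d(batch)
        self.count += len(batch)
        new_mean = self.mean + (batch - self.mean).sum(0) / self.count
        self.var_sum += ((batch - self.mean) * (batch - new_mean)).sum(0)
        self.mean = new_mean

    @property
    def variance(self):
        return self.var_sum / max(self.count - 1, 1)

moments = StreamingMoments(3)
data = np.random.randn(1000, 3) * 2.0 + 5.0
for chunk in np.split(data, 10):
    moments.update(chunk)
print(moments.mean, moments.variance)  # ~[5 5 5] and ~[4 4 4]
```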
GNS3/gns3-server | gns3server/compute/iou/iou_vm.py | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/iou/iou_vm.py#L447-L477 | def _push_configs_to_nvram(self):
"""
Push the startup-config and private-config content to the NVRAM.
"""
startup_config_content = self.startup_config_content
if startup_config_content:
nvram_file = self._nvram_file()
try:
if not os.path.exists(nvram_file):
open(nvram_file, "a").close()
nvram_content = None
else:
with open(nvram_file, "rb") as file:
nvram_content = file.read()
except OSError as e:
raise IOUError("Cannot read nvram file {}: {}".format(nvram_file, e))
startup_config_content = startup_config_content.encode("utf-8")
private_config_content = self.private_config_content
if private_config_content is not None:
private_config_content = private_config_content.encode("utf-8")
try:
nvram_content = nvram_import(nvram_content, startup_config_content, private_config_content, self.nvram)
except ValueError as e:
raise IOUError("Cannot push configs to nvram {}: {}".format(nvram_file, e))
try:
with open(nvram_file, "wb") as file:
file.write(nvram_content)
except OSError as e:
raise IOUError("Cannot write nvram file {}: {}".format(nvram_file, e)) | [
"def",
"_push_configs_to_nvram",
"(",
"self",
")",
":",
"startup_config_content",
"=",
"self",
".",
"startup_config_content",
"if",
"startup_config_content",
":",
"nvram_file",
"=",
"self",
".",
"_nvram_file",
"(",
")",
"try",
":",
"if",
"not",
"os",
".",
"path"... | Push the startup-config and private-config content to the NVRAM. | [
"Push",
"the",
"startup",
"-",
"config",
"and",
"private",
"-",
"config",
"content",
"to",
"the",
"NVRAM",
"."
] | python | train |
RyanBalfanz/django-smsish | smsish/sms/__init__.py | https://github.com/RyanBalfanz/django-smsish/blob/4d450e3d217cea9e373f16c5e4f0beb3218bd5c9/smsish/sms/__init__.py#L34-L50 | def send_sms(message, from_number, recipient_list, fail_silently=False, auth_user=None, auth_password=None, connection=None):
"""
Easy wrapper for sending a single message to a recipient list. All members
of the recipient list will see the other recipients in the 'To' field.
If auth_user is None, the EMAIL_HOST_USER setting is used.
If auth_password is None, the EMAIL_HOST_PASSWORD setting is used.
Note: The API for this method is frozen. New code wanting to extend the
functionality should use the EmailMessage class directly.
https://github.com/django/django/blob/master/django/core/mail/__init__.py#L40
"""
connection = connection or get_sms_connection(username=auth_user, password=auth_password, fail_silently=fail_silently)
mail = SMSMessage(message, from_number, recipient_list, connection=connection)
return mail.send() | [
"def",
"send_sms",
"(",
"message",
",",
"from_number",
",",
"recipient_list",
",",
"fail_silently",
"=",
"False",
",",
"auth_user",
"=",
"None",
",",
"auth_password",
"=",
"None",
",",
"connection",
"=",
"None",
")",
":",
"connection",
"=",
"connection",
"or... | Easy wrapper for sending a single message to a recipient list. All members
of the recipient list will see the other recipients in the 'To' field.
If auth_user is None, the EMAIL_HOST_USER setting is used.
If auth_password is None, the EMAIL_HOST_PASSWORD setting is used.
Note: The API for this method is frozen. New code wanting to extend the
functionality should use the EmailMessage class directly.
https://github.com/django/django/blob/master/django/core/mail/__init__.py#L40 | [
"Easy",
"wrapper",
"for",
"sending",
"a",
"single",
"message",
"to",
"a",
"recipient",
"list",
".",
"All",
"members",
"of",
"the",
"recipient",
"list",
"will",
"see",
"the",
"other",
"recipients",
"in",
"the",
"To",
"field",
"."
] | python | train |
knipknap/exscript | Exscript/servers/httpd.py | https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/servers/httpd.py#L90-L148 | def _require_authenticate(func):
'''A decorator to add digest authorization checks to HTTP Request Handlers'''
def wrapped(self):
if not hasattr(self, 'authenticated'):
self.authenticated = None
if self.authenticated:
return func(self)
auth = self.headers.get(u'Authorization')
if auth is None:
msg = u"You are not allowed to access this page. Please login first!"
return _error_401(self, msg)
token, fields = auth.split(' ', 1)
if token != 'Digest':
return _error_401(self, 'Unsupported authentication type')
# Check the header fields of the request.
cred = parse_http_list(fields)
cred = parse_keqv_list(cred)
keys = u'realm', u'username', u'nonce', u'uri', u'response'
if not all(cred.get(key) for key in keys):
return _error_401(self, 'Incomplete authentication header')
if cred['realm'] != self.server.realm:
return _error_401(self, 'Incorrect realm')
if 'qop' in cred and ('nc' not in cred or 'cnonce' not in cred):
return _error_401(self, 'qop with missing nc or cnonce')
# Check the username.
username = cred['username']
password = self.server.get_password(username)
if not username or password is None:
return _error_401(self, 'Invalid username or password')
# Check the digest string.
location = u'%s:%s' % (self.command, self.path)
location = md5hex(location.encode('utf8'))
pwhash = md5hex('%s:%s:%s' % (username, self.server.realm, password))
if 'qop' in cred:
info = (cred['nonce'],
cred['nc'],
cred['cnonce'],
cred['qop'],
location)
else:
info = cred['nonce'], location
expect = u'%s:%s' % (pwhash, ':'.join(info))
expect = md5hex(expect.encode('utf8'))
if expect != cred['response']:
return _error_401(self, 'Invalid username or password')
# Success!
self.authenticated = True
return func(self)
return wrapped | [
"def",
"_require_authenticate",
"(",
"func",
")",
":",
"def",
"wrapped",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'authenticated'",
")",
":",
"self",
".",
"authenticated",
"=",
"None",
"if",
"self",
".",
"authenticated",
":",
"re... | A decorator to add digest authorization checks to HTTP Request Handlers | [
"A",
"decorator",
"to",
"add",
"digest",
"authorization",
"checks",
"to",
"HTTP",
"Request",
"Handlers"
] | python | train |
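The `expect` digest is the RFC 2617 response with `qop`: `MD5(HA1:nonce:nc:cnonce:qop:HA2)`, where HA1 hashes `user:realm:password` and HA2 hashes `method:uri`. A standalone check with made-up credentials:

```python
import hashlib

def md5hex(data):
    return hashlib.md5(data).hexdigest()

# All values below are invented for illustration (qop="auth", MD5).
username, realm, password = "alice", "testrealm", "secret"
method, uri = "GET", "/index.html"
nonce, nc, cnonce, qop = "abc123", "00000001", "xyz789", "auth"

ha1 = md5hex(("%s:%s:%s" % (username, realm, password)).encode("utf8"))
ha2 = md5hex(("%s:%s" % (method, uri)).encode("utf8"))
expect = md5hex(":".join((ha1, nonce, nc, cnonce, qop, ha2)).encode("utf8"))
print(expect)  # must equal the client's "response" field
```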
wummel/linkchecker | linkcheck/configuration/__init__.py | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/configuration/__init__.py#L563-L588 | def read_kioslaverc (kde_config_dir):
"""Read kioslaverc into data dictionary."""
data = {}
filename = os.path.join(kde_config_dir, "kioslaverc")
with open(filename) as fd:
# First read all lines into dictionary since they can occur
# in any order.
for line in fd:
line = line.rstrip()
if line.startswith('['):
in_proxy_settings = line.startswith("[Proxy Settings]")
elif in_proxy_settings:
if '=' not in line:
continue
key, value = line.split('=', 1)
key = key.strip()
value = value.strip()
if not key:
continue
# trim optional localization
key = loc_ro.sub("", key).strip()
if not key:
continue
add_kde_setting(key, value, data)
resolve_kde_settings(data)
return data | [
"def",
"read_kioslaverc",
"(",
"kde_config_dir",
")",
":",
"data",
"=",
"{",
"}",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"kde_config_dir",
",",
"\"kioslaverc\"",
")",
"with",
"open",
"(",
"filename",
")",
"as",
"fd",
":",
"# First read all l... | Read kioslaverc into data dictionary. | [
"Read",
"kioslaverc",
"into",
"data",
"dictionary",
"."
] | python | train |
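The section scan is a simple state machine over lines; a sketch with inline sample data standing in for a real kioslaverc:

```python
sample = """[General]
foo=1
[Proxy Settings]
httpProxy=http://proxy.example:3128
NoProxyFor=localhost
"""

in_proxy_settings = False
settings = {}
for line in sample.splitlines():
    line = line.rstrip()
    if line.startswith("["):
        in_proxy_settings = line.startswith("[Proxy Settings]")
    elif in_proxy_settings and "=" in line:
        key, value = line.split("=", 1)
        if key.strip():
            settings[key.strip()] = value.strip()
print(settings)
# {'httpProxy': 'http://proxy.example:3128', 'NoProxyFor': 'localhost'}
```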
SKA-ScienceDataProcessor/integration-prototype | sip/examples/flask_processing_controller/app/old.db/init.py | https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/examples/flask_processing_controller/app/old.db/init.py#L41-L53 | def _scheduling_block_config(num_blocks=5, start_sbi_id=0, start_pb_id=0,
project='sip'):
"""Return a Scheduling Block Configuration dictionary"""
pb_id = start_pb_id
for sb_id, sbi_id in _scheduling_block_ids(num_blocks, start_sbi_id,
project):
sub_array_id = 'subarray-{:02d}'.format(random.choice(range(5)))
config = dict(id=sbi_id,
sched_block_id=sb_id,
sub_array_id=sub_array_id,
processing_blocks=_generate_processing_blocks(pb_id))
pb_id += len(config['processing_blocks'])
yield config | [
"def",
"_scheduling_block_config",
"(",
"num_blocks",
"=",
"5",
",",
"start_sbi_id",
"=",
"0",
",",
"start_pb_id",
"=",
"0",
",",
"project",
"=",
"'sip'",
")",
":",
"pb_id",
"=",
"start_pb_id",
"for",
"sb_id",
",",
"sbi_id",
"in",
"_scheduling_block_ids",
"(... | Return a Scheduling Block Configuration dictionary | [
"Return",
"a",
"Scheduling",
"Block",
"Configuration",
"dictionary"
] | python | train |
liampauling/betfair | betfairlightweight/endpoints/historic.py | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/endpoints/historic.py#L63-L89 | def get_data_size(self, sport, plan, from_day, from_month, from_year, to_day, to_month, to_year, event_id=None,
event_name=None, market_types_collection=None, countries_collection=None,
file_type_collection=None, session=None):
"""
Returns a dictionary of file count and combined size of files.
:param sport: sport to filter data for.
:param plan: plan type to filter for, Basic Plan, Advanced Plan or Pro Plan.
:param from_day: day of month to start data from.
:param from_month: month to start data from.
:param from_year: year to start data from.
:param to_day: day of month to end data at.
:param to_month: month to end data at.
:param to_year: year to end data at.
:param event_id: id of a specific event to get data for.
:param event_name: name of a specific event to get data for.
:param market_types_collection: list of specific marketTypes to filter for.
:param countries_collection: list of countries to filter for.
:param file_type_collection: list of file types.
:param requests.session session: Requests session object
:rtype: dict
"""
params = clean_locals(locals())
method = 'GetAdvBasketDataSize'
(response, elapsed_time) = self.request(method, params, session)
return response | [
"def",
"get_data_size",
"(",
"self",
",",
"sport",
",",
"plan",
",",
"from_day",
",",
"from_month",
",",
"from_year",
",",
"to_day",
",",
"to_month",
",",
"to_year",
",",
"event_id",
"=",
"None",
",",
"event_name",
"=",
"None",
",",
"market_types_collection"... | Returns a dictionary of file count and combines size files.
:param sport: sport to filter data for.
:param plan: plan type to filter for, Basic Plan, Advanced Plan or Pro Plan.
:param from_day: day of month to start data from.
:param from_month: month to start data from.
:param from_year: year to start data from.
:param to_day: day of month to end data at.
:param to_month: month to end data at.
:param to_year: year to end data at.
:param event_id: id of a specific event to get data for.
:param event_name: name of a specific event to get data for.
:param market_types_collection: list of specific marketTypes to filter for.
:param countries_collection: list of countries to filter for.
:param file_type_collection: list of file types.
:param requests.session session: Requests session object
:rtype: dict | [
"Returns",
"a",
"dictionary",
"of",
"file",
"count",
"and",
"combines",
"size",
"files",
"."
] | python | train |
aio-libs/aiodocker | aiodocker/tasks.py | https://github.com/aio-libs/aiodocker/blob/88d0285ddba8e606ff684278e0a831347209189c/aiodocker/tasks.py#L31-L43 | async def inspect(self, task_id: str) -> Mapping[str, Any]:
"""
Return info about a task
Args:
task_id: is ID of the task
"""
response = await self.docker._query_json(
"tasks/{task_id}".format(task_id=task_id), method="GET"
)
return response | [
"async",
"def",
"inspect",
"(",
"self",
",",
"task_id",
":",
"str",
")",
"->",
"Mapping",
"[",
"str",
",",
"Any",
"]",
":",
"response",
"=",
"await",
"self",
".",
"docker",
".",
"_query_json",
"(",
"\"tasks/{task_id}\"",
".",
"format",
"(",
"task_id",
... | Return info about a task
Args:
task_id: is ID of the task | [
"Return",
"info",
"about",
"a",
"task"
] | python | train |
pyvisa/pyvisa | pyvisa/ctwrapper/functions.py | https://github.com/pyvisa/pyvisa/blob/b8b2d4371e1f00782856aa9176ff1ced6bcb3798/pyvisa/ctwrapper/functions.py#L1190-L1214 | def open(library, session, resource_name,
access_mode=constants.AccessModes.no_lock, open_timeout=constants.VI_TMO_IMMEDIATE):
"""Opens a session to the specified resource.
Corresponds to viOpen function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Resource Manager session (should always be a session returned from open_default_resource_manager()).
:param resource_name: Unique symbolic name of a resource.
:param access_mode: Specifies the mode by which the resource is to be accessed. (constants.AccessModes)
:param open_timeout: Specifies the maximum time period (in milliseconds) that this operation waits
before returning an error.
:return: Unique logical identifier reference to a session, return value of the library call.
:rtype: session, :class:`pyvisa.constants.StatusCode`
"""
try:
open_timeout = int(open_timeout)
except ValueError:
raise ValueError('open_timeout (%r) must be an integer (or compatible type)' % open_timeout)
out_session = ViSession()
# [ViSession, ViRsrc, ViAccessMode, ViUInt32, ViPSession]
# ViRsrc converts from (str, unicode, bytes) to bytes
ret = library.viOpen(session, resource_name, access_mode, open_timeout, byref(out_session))
return out_session.value, ret | [
"def",
"open",
"(",
"library",
",",
"session",
",",
"resource_name",
",",
"access_mode",
"=",
"constants",
".",
"AccessModes",
".",
"no_lock",
",",
"open_timeout",
"=",
"constants",
".",
"VI_TMO_IMMEDIATE",
")",
":",
"try",
":",
"open_timeout",
"=",
"int",
"... | Opens a session to the specified resource.
Corresponds to viOpen function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Resource Manager session (should always be a session returned from open_default_resource_manager()).
:param resource_name: Unique symbolic name of a resource.
:param access_mode: Specifies the mode by which the resource is to be accessed. (constants.AccessModes)
:param open_timeout: Specifies the maximum time period (in milliseconds) that this operation waits
before returning an error.
:return: Unique logical identifier reference to a session, return value of the library call.
:rtype: session, :class:`pyvisa.constants.StatusCode` | [
"Opens",
"a",
"session",
"to",
"the",
"specified",
"resource",
"."
] | python | train |
liampauling/betfair | betfairlightweight/endpoints/racecard.py | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/endpoints/racecard.py#L41-L57 | def get_race_card(self, market_ids, data_entries=None, session=None, lightweight=None):
"""
Returns a list of race cards based on market ids provided.
:param list market_ids: The filter to select desired markets
:param str data_entries: Data to be returned
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.RaceCard]
"""
if not self.app_key:
raise RaceCardError("You need to login before requesting a race_card\n"
"APIClient.race_card.login()")
params = self.create_race_card_req(market_ids, data_entries)
(response, elapsed_time) = self.request(params=params, session=session)
return self.process_response(response, resources.RaceCard, elapsed_time, lightweight) | [
"def",
"get_race_card",
"(",
"self",
",",
"market_ids",
",",
"data_entries",
"=",
"None",
",",
"session",
"=",
"None",
",",
"lightweight",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"app_key",
":",
"raise",
"RaceCardError",
"(",
"\"You need to login be... | Returns a list of race cards based on market ids provided.
:param list market_ids: The filter to select desired markets
:param str data_entries: Data to be returned
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.RaceCard] | [
"Returns",
"a",
"list",
"of",
"race",
"cards",
"based",
"on",
"market",
"ids",
"provided",
"."
] | python | train |
aliyun/aliyun-odps-python-sdk | odps/df/backends/formatter.py | https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/df/backends/formatter.py#L103-L177 | def pprint_thing(thing, _nest_lvl=0, escape_chars=None, default_escapes=False,
quote_strings=False, max_seq_items=None):
"""
This function is the sanctioned way of converting objects
to a unicode representation.
It properly handles nested sequences containing unicode strings
(unicode(object) does not)
Parameters
----------
thing : anything to be formatted
_nest_lvl : internal use only. pprint_thing() is mutually-recursive
with pprint_sequence, this argument is used to keep track of the
current nesting level, and limit it.
escape_chars : list or dict, optional
Characters to escape. If a dict is passed the values are the
replacements
default_escapes : bool, default False
Whether the input escape characters replaces or adds to the defaults
max_seq_items : False, int, default None
Pass thru to other pretty printers to limit sequence printing
Returns
-------
result - unicode object on py2, str on py3. Always Unicode.
"""
def as_escaped_unicode(thing, escape_chars=escape_chars):
# Unicode is fine, else we try to decode using utf-8 and 'replace'
# if that's not it either, we have no way of knowing and the user
# should deal with it himself.
try:
result = six.text_type(thing) # we should try this first
except UnicodeDecodeError:
# either utf-8 or we replace errors
result = str(thing).decode('utf-8', "replace")
translate = {'\t': r'\t',
'\n': r'\n',
'\r': r'\r',
}
if isinstance(escape_chars, dict):
if default_escapes:
translate.update(escape_chars)
else:
translate = escape_chars
escape_chars = list(escape_chars.keys())
else:
escape_chars = escape_chars or tuple()
for c in escape_chars:
result = result.replace(c, translate[c])
return six.text_type(result)
if (six.PY3 and hasattr(thing, '__next__')) or hasattr(thing, 'next'):
return six.text_type(thing)
elif (isinstance(thing, dict) and
_nest_lvl < options.display.pprint_nest_depth):
result = _pprint_dict(thing, _nest_lvl, quote_strings=True, max_seq_items=max_seq_items)
elif is_sequence(thing) and _nest_lvl < \
options.display.pprint_nest_depth:
result = _pprint_seq(thing, _nest_lvl, escape_chars=escape_chars,
quote_strings=quote_strings, max_seq_items=max_seq_items)
elif isinstance(thing, six.string_types) and quote_strings:
if six.PY3:
fmt = "'%s'"
else:
fmt = "u'%s'"
result = fmt % as_escaped_unicode(thing)
else:
result = as_escaped_unicode(thing)
return six.text_type(result) | [
"def",
"pprint_thing",
"(",
"thing",
",",
"_nest_lvl",
"=",
"0",
",",
"escape_chars",
"=",
"None",
",",
"default_escapes",
"=",
"False",
",",
"quote_strings",
"=",
"False",
",",
"max_seq_items",
"=",
"None",
")",
":",
"def",
"as_escaped_unicode",
"(",
"thing... | This function is the sanctioned way of converting objects
to a unicode representation.
It properly handles nested sequences containing unicode strings
(unicode(object) does not)
Parameters
----------
thing : anything to be formatted
_nest_lvl : internal use only. pprint_thing() is mutually-recursive
with pprint_sequence, this argument is used to keep track of the
current nesting level, and limit it.
escape_chars : list or dict, optional
Characters to escape. If a dict is passed the values are the
replacements
default_escapes : bool, default False
Whether the input escape characters replaces or adds to the defaults
max_seq_items : False, int, default None
Pass thru to other pretty printers to limit sequence printing
Returns
-------
result - unicode object on py2, str on py3. Always Unicode. | [
"This",
"function",
"is",
"the",
"sanctioned",
"way",
"of",
"converting",
"objects",
"to",
"a",
"unicode",
"representation",
"."
] | python | train |
polyaxon/hestia | hestia/date_formatter.py | https://github.com/polyaxon/hestia/blob/382ed139cff8bf35c987cfc30a31b72c0d6b808e/hestia/date_formatter.py#L52-L65 | def extract_date(cls, date_str):
"""
Tries to extract a `datetime` object from the given string, expecting
date information only.
Raises `DateTimeFormatterException` if the extraction fails.
"""
if not date_str:
raise DateTimeFormatterException('date_str must be a valid string {}.'.format(date_str))
try:
return cls._extract_timestamp(date_str, cls.DATE_FORMAT)
except (TypeError, ValueError):
raise DateTimeFormatterException('Invalid date string {}.'.format(date_str)) | [
"def",
"extract_date",
"(",
"cls",
",",
"date_str",
")",
":",
"if",
"not",
"date_str",
":",
"raise",
"DateTimeFormatterException",
"(",
"'date_str must a valid string {}.'",
".",
"format",
"(",
"date_str",
")",
")",
"try",
":",
"return",
"cls",
".",
"_extract_ti... | Tries to extract a `datetime` object from the given string, expecting
date information only.
Raises `DateTimeFormatterException` if the extraction fails. | [
"Tries",
"to",
"extract",
"a",
"datetime",
"object",
"from",
"the",
"given",
"string",
"expecting",
"date",
"information",
"only",
"."
] | python | train |
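A trimmed sketch of the same wrapper, assuming an ISO `%Y-%m-%d` format (the real class defines its own `DATE_FORMAT`):

```python
from datetime import datetime

DATE_FORMAT = "%Y-%m-%d"  # assumed for illustration

def extract_date(date_str):
    if not date_str:
        raise ValueError("date_str must be a valid string: %r" % date_str)
    try:
        return datetime.strptime(date_str, DATE_FORMAT)
    except (TypeError, ValueError):
        raise ValueError("Invalid date string: %r" % date_str)

print(extract_date("2024-03-01"))  # 2024-03-01 00:00:00
# extract_date("01/03/2024") raises ValueError
```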
Stranger6667/pyoffers | pyoffers/models/raw_log.py | https://github.com/Stranger6667/pyoffers/blob/9575d6cdc878096242268311a22cc5fdd4f64b37/pyoffers/models/raw_log.py#L44-L52 | def content(self):
"""
Returns raw CSV content of the log file.
"""
raw_content = self._manager.api.session.get(self.download_link).content
data = BytesIO(raw_content)
archive = ZipFile(data)
filename = archive.filelist[0] # Always 1 file in the archive
return archive.read(filename) | [
"def",
"content",
"(",
"self",
")",
":",
"raw_content",
"=",
"self",
".",
"_manager",
".",
"api",
".",
"session",
".",
"get",
"(",
"self",
".",
"download_link",
")",
".",
"content",
"data",
"=",
"BytesIO",
"(",
"raw_content",
")",
"archive",
"=",
"ZipF... | Returns raw CSV content of the log file. | [
"Returns",
"raw",
"CSV",
"content",
"of",
"the",
"log",
"file",
"."
] | python | train |
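The decompression path is plain `zipfile` over an in-memory buffer; this sketch builds its own one-file archive instead of downloading one:

```python
from io import BytesIO
from zipfile import ZipFile

buf = BytesIO()
with ZipFile(buf, "w") as zf:
    zf.writestr("clicks.csv", "id,offer\n1,42\n")

archive = ZipFile(BytesIO(buf.getvalue()))
member = archive.filelist[0]          # always one file in the archive
print(archive.read(member).decode())  # id,offer / 1,42
```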
DreamLab/VmShepherd | src/vmshepherd/iaas/vm.py | https://github.com/DreamLab/VmShepherd/blob/709a412c372b897d53808039c5c64a8b69c12c8d/src/vmshepherd/iaas/vm.py#L86-L91 | async def terminate(self):
""" Terminate vm.
"""
logging.debug('Terminate: %s', self)
self.state = VmState.TERMINATED
return await self.manager.terminate_vm(self.id) | [
"async",
"def",
"terminate",
"(",
"self",
")",
":",
"logging",
".",
"debug",
"(",
"'Terminate: %s'",
",",
"self",
")",
"self",
".",
"state",
"=",
"VmState",
".",
"TERMINATED",
"return",
"await",
"self",
".",
"manager",
".",
"terminate_vm",
"(",
"self",
"... | Terminate vm. | [
"Terminate",
"vm",
"."
] | python | train |
inspirehep/inspire-dojson | inspire_dojson/hep/rules/bd5xx.py | https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/hep/rules/bd5xx.py#L199-L205 | def funding_info(self, key, value):
"""Populate the ``funding_info`` key."""
return {
'agency': value.get('a'),
'grant_number': value.get('c'),
'project_number': value.get('f'),
} | [
"def",
"funding_info",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"return",
"{",
"'agency'",
":",
"value",
".",
"get",
"(",
"'a'",
")",
",",
"'grant_number'",
":",
"value",
".",
"get",
"(",
"'c'",
")",
",",
"'project_number'",
":",
"value",
"."... | Populate the ``funding_info`` key. | [
"Populate",
"the",
"funding_info",
"key",
"."
] | python | train |
tanghaibao/jcvi | jcvi/utils/orderedcollections.py | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/orderedcollections.py#L342-L347 | def insert(self, item):
'Insert a new item. If equal keys are found, add to the left'
k = self._key(item)
i = bisect_left(self._keys, k)
self._keys.insert(i, k)
self._items.insert(i, item) | [
"def",
"insert",
"(",
"self",
",",
"item",
")",
":",
"k",
"=",
"self",
".",
"_key",
"(",
"item",
")",
"i",
"=",
"bisect_left",
"(",
"self",
".",
"_keys",
",",
"k",
")",
"self",
".",
"_keys",
".",
"insert",
"(",
"i",
",",
"k",
")",
"self",
"."... | Insert a new item. If equal keys are found, add to the left | [
"Insert",
"a",
"new",
"item",
".",
"If",
"equal",
"keys",
"are",
"found",
"add",
"to",
"the",
"left"
] | python | train |
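Keeping a parallel `_keys` list lets `bisect_left` place equal keys to the left with O(log n) lookups; a self-contained sketch:

```python
from bisect import bisect_left

class SortedByKey:
    """Keep items ordered by key(item); equal keys insert to the left."""

    def __init__(self, key=lambda x: x):
        self._key = key
        self._keys = []
        self._items = []

    def insert(self, item):
        k = self._key(item)
        i = bisect_left(self._keys, k)
        self._keys.insert(i, k)
        self._items.insert(i, item)

s = SortedByKey(key=len)
for word in ("pear", "fig", "banana", "kiwi"):
    s.insert(word)
print(s._items)  # ['fig', 'kiwi', 'pear', 'banana']
```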
chaoss/grimoirelab-sigils | src/migration/to_kibana5.py | https://github.com/chaoss/grimoirelab-sigils/blob/33d395195acb316287143a535a2c6e4009bf0528/src/migration/to_kibana5.py#L86-L104 | def parse_args():
"""Parse arguments from the command line"""
parser = argparse.ArgumentParser(description=TO_KIBANA5_DESC_MSG)
parser.add_argument('-s', '--source', dest='src_path', \
required=True, help='source directory')
parser.add_argument('-d', '--dest', dest='dest_path', \
required=True, help='destination directory')
parser.add_argument('-o', '--old-size', dest='old_size', \
default='0', help='aggregation old size')
parser.add_argument('-n', '--new-size', dest='new_size', \
default='1000', help='aggregation new size')
parser.add_argument('-g', '--debug', dest='debug',
action='store_true')
return parser.parse_args() | [
"def",
"parse_args",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"TO_KIBANA5_DESC_MSG",
")",
"parser",
".",
"add_argument",
"(",
"'-s'",
",",
"'--source'",
",",
"dest",
"=",
"'src_path'",
",",
"required",
"=",
"T... | Parse arguments from the command line | [
"Parse",
"arguments",
"from",
"the",
"command",
"line"
] | python | train |
ArchiveTeam/wpull | wpull/string.py | https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/string.py#L9-L24 | def to_bytes(instance, encoding='utf-8', error='strict'):
'''Convert an instance recursively to bytes.'''
if isinstance(instance, bytes):
return instance
elif hasattr(instance, 'encode'):
return instance.encode(encoding, error)
elif isinstance(instance, list):
return list([to_bytes(item, encoding, error) for item in instance])
elif isinstance(instance, tuple):
return tuple([to_bytes(item, encoding, error) for item in instance])
elif isinstance(instance, dict):
return dict(
[(to_bytes(key, encoding, error), to_bytes(value, encoding, error))
for key, value in instance.items()])
else:
return instance | [
"def",
"to_bytes",
"(",
"instance",
",",
"encoding",
"=",
"'utf-8'",
",",
"error",
"=",
"'strict'",
")",
":",
"if",
"isinstance",
"(",
"instance",
",",
"bytes",
")",
":",
"return",
"instance",
"elif",
"hasattr",
"(",
"instance",
",",
"'encode'",
")",
":"... | Convert an instance recursively to bytes. | [
"Convert",
"an",
"instance",
"recursively",
"to",
"bytes",
"."
] | python | train |
greenbender/pynntp | nntp/date.py | https://github.com/greenbender/pynntp/blob/991a76331cdf5d8f9dbf5b18f6e29adc80749a2f/nntp/date.py#L316-L374 | def timestamp(value, fmt=None):
"""Parse a datetime to a unix timestamp.
Uses fast custom parsing for common datetime formats or the slow dateutil
parser for other formats. This is a trade off between ease of use and speed
and is very useful for fast parsing of timestamp strings whose format may
be standard but varied or unknown prior to parsing.
Common formats include:
1 Feb 2010 12:00:00 GMT
Mon, 1 Feb 2010 22:00:00 +1000
20100201120000
1383470155 (seconds since epoch)
See the other timestamp_*() functions for more details.
Args:
value: A string representing a datetime.
fmt: A timestamp format string like for time.strptime().
Returns:
The time in seconds since epoch as an integer for the value specified.
"""
if fmt:
return _timestamp_formats.get(fmt,
lambda v: timestamp_fmt(v, fmt)
)(value)
l = len(value)
if 19 <= l <= 24 and value[3] == " ":
# '%d %b %Y %H:%M:%Sxxxx'
try:
return timestamp_d_b_Y_H_M_S(value)
except (KeyError, ValueError, OverflowError):
pass
if 30 <= l <= 31:
# '%a, %d %b %Y %H:%M:%S %z'
try:
return timestamp_a__d_b_Y_H_M_S_z(value)
except (KeyError, ValueError, OverflowError):
pass
if l == 14:
# '%Y%m%d%H%M%S'
try:
return timestamp_YmdHMS(value)
except (ValueError, OverflowError):
pass
# epoch timestamp
try:
return timestamp_epoch(value)
except ValueError:
pass
# slow version
return timestamp_any(value) | [
"def",
"timestamp",
"(",
"value",
",",
"fmt",
"=",
"None",
")",
":",
"if",
"fmt",
":",
"return",
"_timestamp_formats",
".",
"get",
"(",
"fmt",
",",
"lambda",
"v",
":",
"timestamp_fmt",
"(",
"v",
",",
"fmt",
")",
")",
"(",
"value",
")",
"l",
"=",
... | Parse a datetime to a unix timestamp.
Uses fast custom parsing for common datetime formats or the slow dateutil
parser for other formats. This is a trade off between ease of use and speed
and is very useful for fast parsing of timestamp strings whose format may
be standard but varied or unknown prior to parsing.
Common formats include:
1 Feb 2010 12:00:00 GMT
Mon, 1 Feb 2010 22:00:00 +1000
20100201120000
1383470155 (seconds since epoch)
See the other timestamp_*() functions for more details.
Args:
value: A string representing a datetime.
fmt: A timestamp format string like for time.strptime().
Returns:
The time in seconds since epoch as an integer for the value specified. | [
"Parse",
"a",
"datetime",
"to",
"a",
"unix",
"timestamp",
"."
] | python | test |
MagicStack/asyncpg | asyncpg/connection.py | https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/connection.py#L381-L393 | def cursor(self, query, *args, prefetch=None, timeout=None):
"""Return a *cursor factory* for the specified query.
:param args: Query arguments.
:param int prefetch: The number of rows the *cursor iterator*
will prefetch (defaults to ``50``.)
:param float timeout: Optional timeout in seconds.
:return: A :class:`~cursor.CursorFactory` object.
"""
self._check_open()
return cursor.CursorFactory(self, query, None, args,
prefetch, timeout) | [
"def",
"cursor",
"(",
"self",
",",
"query",
",",
"*",
"args",
",",
"prefetch",
"=",
"None",
",",
"timeout",
"=",
"None",
")",
":",
"self",
".",
"_check_open",
"(",
")",
"return",
"cursor",
".",
"CursorFactory",
"(",
"self",
",",
"query",
",",
"None",... | Return a *cursor factory* for the specified query.
:param args: Query arguments.
:param int prefetch: The number of rows the *cursor iterator*
will prefetch (defaults to ``50``.)
:param float timeout: Optional timeout in seconds.
:return: A :class:`~cursor.CursorFactory` object. | [
"Return",
"a",
"*",
"cursor",
"factory",
"*",
"for",
"the",
"specified",
"query",
"."
] | python | train |
freeman-lab/regional | regional/regional.py | https://github.com/freeman-lab/regional/blob/e3a29c58982e5cd3d5700131ac96e5e0b84fb981/regional/regional.py#L459-L477 | def getcolors(spec, n, cmap=None, value=None):
"""
Turn list of color specs into list of arrays.
"""
if cmap is not None and spec is not None:
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.cm import get_cmap
if isinstance(cmap, LinearSegmentedColormap):
return cmap(value)[:, 0:3]
if isinstance(cmap, str):
return get_cmap(cmap, n)(value)[:, 0:3]
if isinstance(spec, str):
return [getcolor(spec) for i in range(n)]
elif isinstance(spec, list) and isinstance(spec[0], str):
return [getcolor(s) for s in spec]
elif (isinstance(spec, list) or isinstance(spec, ndarray)) and asarray(spec).shape == (3,):
return [spec for i in range(n)]
else:
return spec | [
"def",
"getcolors",
"(",
"spec",
",",
"n",
",",
"cmap",
"=",
"None",
",",
"value",
"=",
"None",
")",
":",
"if",
"cmap",
"is",
"not",
"None",
"and",
"spec",
"is",
"not",
"None",
":",
"from",
"matplotlib",
".",
"colors",
"import",
"LinearSegmentedColorma... | Turn list of color specs into list of arrays. | [
"Turn",
"list",
"of",
"color",
"specs",
"into",
"list",
"of",
"arrays",
"."
] | python | train |
Chilipp/psyplot | psyplot/data.py | https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/data.py#L4355-L4374 | def append(self, value, new_name=False):
"""
Append a new array to the list
Parameters
----------
value: InteractiveBase
The data object to append to this list
%(ArrayList.rename.parameters.new_name)s
Raises
------
%(ArrayList.rename.raises)s
See Also
--------
list.append, extend, rename"""
arr, renamed = self.rename(value, new_name)
if renamed is not None:
super(ArrayList, self).append(value) | [
"def",
"append",
"(",
"self",
",",
"value",
",",
"new_name",
"=",
"False",
")",
":",
"arr",
",",
"renamed",
"=",
"self",
".",
"rename",
"(",
"value",
",",
"new_name",
")",
"if",
"renamed",
"is",
"not",
"None",
":",
"super",
"(",
"ArrayList",
",",
"... | Append a new array to the list
Parameters
----------
value: InteractiveBase
The data object to append to this list
%(ArrayList.rename.parameters.new_name)s
Raises
------
%(ArrayList.rename.raises)s
See Also
--------
list.append, extend, rename | [
"Append",
"a",
"new",
"array",
"to",
"the",
"list"
] | python | train |
toumorokoshi/sprinter | sprinter/external/pippuppet.py | https://github.com/toumorokoshi/sprinter/blob/846697a7a087e69c61d075232e754d6975a64152/sprinter/external/pippuppet.py#L50-L54 | def delete_all_eggs(self):
""" delete all the eggs in the directory specified """
path_to_delete = os.path.join(self.egg_directory, "lib", "python")
if os.path.exists(path_to_delete):
shutil.rmtree(path_to_delete) | [
"def",
"delete_all_eggs",
"(",
"self",
")",
":",
"path_to_delete",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"egg_directory",
",",
"\"lib\"",
",",
"\"python\"",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"path_to_delete",
")",
":",
... | delete all the eggs in the directory specified | [
"delete",
"all",
"the",
"eggs",
"in",
"the",
"directory",
"specified"
] | python | train |
neurosynth/neurosynth | neurosynth/base/mask.py | https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/base/mask.py#L92-L136 | def get_image(self, image, output='vector'):
""" A flexible method for transforming between different
representations of image data.
Args:
image: The input image. Can be a string (filename of image),
NiBabel image, N-dimensional array (must have same shape as
self.volume), or vectorized image data (must have same length
as current conjunction mask).
output: The format of the returned image representation. Must be
one of:
'vector': A 1D vectorized array
'array': An N-dimensional array, with
shape = self.volume.shape
'image': A NiBabel image
Returns: An object containing image data; see output options above.
"""
if isinstance(image, string_types):
image = nb.load(image)
if type(image).__module__.startswith('nibabel'):
if output == 'image':
return image
image = image.get_data()
if not type(image).__module__.startswith('numpy'):
raise ValueError("Input image must be a string, a NiBabel image, "
"or a numpy array.")
if image.shape[:3] == self.volume.shape:
if output == 'image':
return nb.nifti1.Nifti1Image(image, None, self.get_header())
elif output == 'array':
return image
else:
image = image.ravel()
if output == 'vector':
return image.ravel()
image = np.reshape(image, self.volume.shape)
if output == 'array':
return image
return nb.nifti1.Nifti1Image(image, None, self.get_header()) | [
"def",
"get_image",
"(",
"self",
",",
"image",
",",
"output",
"=",
"'vector'",
")",
":",
"if",
"isinstance",
"(",
"image",
",",
"string_types",
")",
":",
"image",
"=",
"nb",
".",
"load",
"(",
"image",
")",
"if",
"type",
"(",
"image",
")",
".",
"__m... | A flexible method for transforming between different
representations of image data.
Args:
image: The input image. Can be a string (filename of image),
NiBabel image, N-dimensional array (must have same shape as
self.volume), or vectorized image data (must have same length
as current conjunction mask).
output: The format of the returned image representation. Must be
one of:
'vector': A 1D vectorized array
'array': An N-dimensional array, with
shape = self.volume.shape
'image': A NiBabel image
Returns: An object containing image data; see output options above. | [
"A",
"flexible",
"method",
"for",
"transforming",
"between",
"different",
"representations",
"of",
"image",
"data",
".",
"Args",
":",
"image",
":",
"The",
"input",
"image",
".",
"Can",
"be",
"a",
"string",
"(",
"filename",
"of",
"image",
")",
"NiBabel",
"i... | python | test |
ContextLab/quail | quail/egg.py | https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/egg.py#L257-L298 | def save(self, fname, compression='blosc'):
"""
Save method for the Egg object
The data will be saved as an 'egg' file, which is a dictionary containing
the elements of a Egg saved in the hd5 format using
`deepdish`.
Parameters
----------
fname : str
A name for the file. If the file extension (.egg) is not specified,
it will be appended.
compression : str
The kind of compression to use. See the deepdish documentation for
options: http://deepdish.readthedocs.io/en/latest/api_io.html#deepdish.io.save
"""
# put egg vars into a dict
egg = {
'pres' : df2list(self.pres),
'rec' : df2list(self.rec),
'dist_funcs' : self.dist_funcs,
'subjgroup' : self.subjgroup,
'subjname' : self.subjname,
'listgroup' : self.listgroup,
'listname' : self.listname,
'date_created' : self.date_created,
'meta' : self.meta
}
# if extension wasn't included, add it
if fname[-4:]!='.egg':
fname+='.egg'
# save
with warnings.catch_warnings():
warnings.simplefilter("ignore")
dd.io.save(fname, egg, compression=compression) | [
"def",
"save",
"(",
"self",
",",
"fname",
",",
"compression",
"=",
"'blosc'",
")",
":",
"# put egg vars into a dict",
"egg",
"=",
"{",
"'pres'",
":",
"df2list",
"(",
"self",
".",
"pres",
")",
",",
"'rec'",
":",
"df2list",
"(",
"self",
".",
"rec",
")",
... | Save method for the Egg object
The data will be saved as an 'egg' file, which is a dictionary containing
the elements of a Egg saved in the hd5 format using
`deepdish`.
Parameters
----------
fname : str
A name for the file. If the file extension (.egg) is not specified,
it will be appended.
compression : str
The kind of compression to use. See the deepdish documentation for
options: http://deepdish.readthedocs.io/en/latest/api_io.html#deepdish.io.save | [
"Save",
"method",
"for",
"the",
"Egg",
"object"
] | python | train |
jplusplus/statscraper | statscraper/scrapers/uka_scraper.py | https://github.com/jplusplus/statscraper/blob/932ec048b23d15b3dbdaf829facc55fd78ec0109/statscraper/scrapers/uka_scraper.py#L24-L35 | def _fetch_dimensions(self, dataset):
""" Iterate through semesters, counties and municipalities.
"""
yield Dimension(u"school")
yield Dimension(u"year",
datatype="year")
yield Dimension(u"semester",
datatype="academic_term",
dialect="swedish") # HT/VT
yield Dimension(u"municipality",
datatype="year",
domain="sweden/municipalities") | [
"def",
"_fetch_dimensions",
"(",
"self",
",",
"dataset",
")",
":",
"yield",
"Dimension",
"(",
"u\"school\"",
")",
"yield",
"Dimension",
"(",
"u\"year\"",
",",
"datatype",
"=",
"\"year\"",
")",
"yield",
"Dimension",
"(",
"u\"semester\"",
",",
"datatype",
"=",
... | Iterate through semesters, counties and municipalities. | [
"Iterate",
"through",
"semesters",
"counties",
"and",
"municipalities",
"."
] | python | train |
yamcs/yamcs-python | yamcs-client/yamcs/archive/client.py | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/archive/client.py#L96-L123 | def list_processed_parameter_group_histogram(self, group=None, start=None, stop=None, merge_time=20):
"""
Reads index records related to processed parameter groups between the
specified start and stop time.
Each iteration returns a chunk of chronologically-sorted records.
:param float merge_time: Maximum gap in seconds before two consecutive index records are merged together.
:rtype: ~collections.Iterable[.IndexGroup]
"""
params = {}
if group is not None:
params['group'] = group
if start is not None:
params['start'] = to_isostring(start)
if stop is not None:
params['stop'] = to_isostring(stop)
if merge_time is not None:
params['mergeTime'] = int(merge_time * 1000)
return pagination.Iterator(
client=self._client,
path='/archive/{}/parameter-index'.format(self._instance),
params=params,
response_class=archive_pb2.IndexResponse,
items_key='group',
item_mapper=IndexGroup,
) | [
"def",
"list_processed_parameter_group_histogram",
"(",
"self",
",",
"group",
"=",
"None",
",",
"start",
"=",
"None",
",",
"stop",
"=",
"None",
",",
"merge_time",
"=",
"20",
")",
":",
"params",
"=",
"{",
"}",
"if",
"group",
"is",
"not",
"None",
":",
"p... | Reads index records related to processed parameter groups between the
specified start and stop time.
Each iteration returns a chunk of chronologically-sorted records.
:param float merge_time: Maximum gap in seconds before two consecutive index records are merged together.
:rtype: ~collections.Iterable[.IndexGroup] | [
"Reads",
"index",
"records",
"related",
"to",
"processed",
"parameter",
"groups",
"between",
"the",
"specified",
"start",
"and",
"stop",
"time",
"."
] | python | train |
ronaldguillen/wave | wave/renderers.py | https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/renderers.py#L503-L563 | def get_raw_data_form(self, data, view, method, request):
"""
Returns a form that allows for arbitrary content types to be tunneled
via standard HTML forms.
(Which are typically application/x-www-form-urlencoded)
"""
# See issue #2089 for refactoring this.
serializer = getattr(data, 'serializer', None)
if serializer and not getattr(serializer, 'many', False):
instance = getattr(serializer, 'instance', None)
if isinstance(instance, Page):
instance = None
else:
instance = None
with override_method(view, request, method) as request:
# Check permissions
if not self.show_form_for_method(view, method, request, instance):
return
# If possible, serialize the initial content for the generic form
default_parser = view.parser_classes[0]
renderer_class = getattr(default_parser, 'renderer_class', None)
if (hasattr(view, 'get_serializer') and renderer_class):
# View has a serializer defined and parser class has a
# corresponding renderer that can be used to render the data.
if method in ('PUT', 'PATCH'):
serializer = view.get_serializer(instance=instance)
else:
serializer = view.get_serializer()
# Render the raw data content
renderer = renderer_class()
accepted = self.accepted_media_type
context = self.renderer_context.copy()
context['indent'] = 4
content = renderer.render(serializer.data, accepted, context)
else:
content = None
# Generate a generic form that includes a content type field,
# and a content field.
media_types = [parser.media_type for parser in view.parser_classes]
choices = [(media_type, media_type) for media_type in media_types]
initial = media_types[0]
class GenericContentForm(forms.Form):
_content_type = forms.ChoiceField(
label='Media type',
choices=choices,
initial=initial,
widget=forms.Select(attrs={'data-override': 'content-type'})
)
_content = forms.CharField(
label='Content',
widget=forms.Textarea(attrs={'data-override': 'content'}),
initial=content
)
return GenericContentForm() | [
"def",
"get_raw_data_form",
"(",
"self",
",",
"data",
",",
"view",
",",
"method",
",",
"request",
")",
":",
"# See issue #2089 for refactoring this.",
"serializer",
"=",
"getattr",
"(",
"data",
",",
"'serializer'",
",",
"None",
")",
"if",
"serializer",
"and",
... | Returns a form that allows for arbitrary content types to be tunneled
via standard HTML forms.
(Which are typically application/x-www-form-urlencoded) | [
"Returns",
"a",
"form",
"that",
"allows",
"for",
"arbitrary",
"content",
"types",
"to",
"be",
"tunneled",
"via",
"standard",
"HTML",
"forms",
".",
"(",
"Which",
"are",
"typically",
"application",
"/",
"x",
"-",
"www",
"-",
"form",
"-",
"urlencoded",
")"
] | python | train |
wrobstory/vincent | vincent/visualization.py | https://github.com/wrobstory/vincent/blob/c5a06e50179015fbb788a7a42e4570ff4467a9e9/vincent/visualization.py#L153-L161 | def marks(value):
"""list or KeyedList of ``Mark`` : Mark definitions
Marks are the visual objects (such as lines, bars, etc.) that
represent the data in the visualization space. See the :class:`Mark`
class for details.
"""
for i, entry in enumerate(value):
_assert_is_type('marks[{0}]'.format(i), entry, Mark) | [
"def",
"marks",
"(",
"value",
")",
":",
"for",
"i",
",",
"entry",
"in",
"enumerate",
"(",
"value",
")",
":",
"_assert_is_type",
"(",
"'marks[{0}]'",
".",
"format",
"(",
"i",
")",
",",
"entry",
",",
"Mark",
")"
] | list or KeyedList of ``Mark`` : Mark definitions
Marks are the visual objects (such as lines, bars, etc.) that
represent the data in the visualization space. See the :class:`Mark`
class for details. | [
"list",
"or",
"KeyedList",
"of",
"Mark",
":",
"Mark",
"definitions"
] | python | train |
v1k45/python-qBittorrent | qbittorrent/client.py | https://github.com/v1k45/python-qBittorrent/blob/04f9482a022dcc78c56b0b9acb9ca455f855ae24/qbittorrent/client.py#L93-L113 | def login(self, username='admin', password='admin'):
"""
Method to authenticate the qBittorrent Client.
Declares a class attribute named ``session`` which
stores the authenticated session if the login is correct.
Else, shows the login error.
:param username: Username.
:param password: Password.
:return: Response to login request to the API.
"""
self.session = requests.Session()
login = self.session.post(self.url+'login',
data={'username': username,
'password': password})
if login.text == 'Ok.':
self._is_authenticated = True
else:
return login.text | [
"def",
"login",
"(",
"self",
",",
"username",
"=",
"'admin'",
",",
"password",
"=",
"'admin'",
")",
":",
"self",
".",
"session",
"=",
"requests",
".",
"Session",
"(",
")",
"login",
"=",
"self",
".",
"session",
".",
"post",
"(",
"self",
".",
"url",
... | Method to authenticate the qBittorrent Client.
Declares a class attribute named ``session`` which
stores the authenticated session if the login is correct.
Else, shows the login error.
:param username: Username.
:param password: Password.
:return: Response to login request to the API. | [
"Method",
"to",
"authenticate",
"the",
"qBittorrent",
"Client",
"."
] | python | train |
Qiskit/qiskit-terra | qiskit/transpiler/passes/mapping/dense_layout.py | https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/transpiler/passes/mapping/dense_layout.py#L68-L121 | def _best_subset(self, n_qubits):
"""Computes the qubit mapping with the best connectivity.
Args:
n_qubits (int): Number of subset qubits to consider.
Returns:
ndarray: Array of qubits to use for best connectivity mapping.
"""
if n_qubits == 1:
return np.array([0])
device_qubits = self.coupling_map.size()
cmap = np.asarray(self.coupling_map.get_edges())
data = np.ones_like(cmap[:, 0])
sp_cmap = sp.coo_matrix((data, (cmap[:, 0], cmap[:, 1])),
shape=(device_qubits, device_qubits)).tocsr()
best = 0
best_map = None
# do bfs with each node as starting point
for k in range(sp_cmap.shape[0]):
bfs = cs.breadth_first_order(sp_cmap, i_start=k, directed=False,
return_predecessors=False)
connection_count = 0
sub_graph = []
for i in range(n_qubits):
node_idx = bfs[i]
for j in range(sp_cmap.indptr[node_idx],
sp_cmap.indptr[node_idx + 1]):
node = sp_cmap.indices[j]
for counter in range(n_qubits):
if node == bfs[counter]:
connection_count += 1
sub_graph.append([node_idx, node])
break
if connection_count > best:
best = connection_count
best_map = bfs[0:n_qubits]
# Return a best mapping that has reduced bandwidth
mapping = {}
for edge in range(best_map.shape[0]):
mapping[best_map[edge]] = edge
new_cmap = [[mapping[c[0]], mapping[c[1]]] for c in sub_graph]
rows = [edge[0] for edge in new_cmap]
cols = [edge[1] for edge in new_cmap]
data = [1]*len(rows)
sp_sub_graph = sp.coo_matrix((data, (rows, cols)),
shape=(n_qubits, n_qubits)).tocsr()
perm = cs.reverse_cuthill_mckee(sp_sub_graph)
best_map = best_map[perm]
return best_map | [
"def",
"_best_subset",
"(",
"self",
",",
"n_qubits",
")",
":",
"if",
"n_qubits",
"==",
"1",
":",
"return",
"np",
".",
"array",
"(",
"[",
"0",
"]",
")",
"device_qubits",
"=",
"self",
".",
"coupling_map",
".",
"size",
"(",
")",
"cmap",
"=",
"np",
"."... | Computes the qubit mapping with the best connectivity.
Args:
n_qubits (int): Number of subset qubits to consider.
Returns:
ndarray: Array of qubits to use for best connectivity mapping. | [
"Computes",
"the",
"qubit",
"mapping",
"with",
"the",
"best",
"connectivity",
"."
] | python | test |
broadinstitute/fiss | firecloud/supervisor.py | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/supervisor.py#L148-L289 | def supervise_until_complete(monitor_data, dependencies, args, recovery_file):
""" Supervisor loop. Loop forever until all tasks are evaluated or completed """
project = args['project']
workspace = args['workspace']
namespace = args['namespace']
sample_sets = args['sample_sets']
recovery_data = {'args': args}
if not validate_monitor_tasks(dependencies, args):
logging.error("Errors found, aborting...")
return
while True:
# There are 4 possible states for each node:
# 1. Not Started -- In this state, check all the dependencies for the
# node (possibly 0). If all of them have been evaluated, and the
# satisfiedMode is met, start the task, change to "Running". if
# satisfiedMode is not met, change to "Evaluated"
#
# 2. Running -- Submitted in FC. Check the submission endpoint, and
# if it has completed, change to "Completed", set evaluated=True,
# and whether the task succeeded
# Otherwise, do nothing
#
# 3. Completed -- Job ran in FC and either succeeded or failed. Do nothing
# 4. Evaluated -- All dependencies evaluated, but this task did not run
# do nothing
# Keep a tab of the number of jobs in each category
running = 0
waiting = 0
completed = 0
# Get the submissions
r = fapi.list_submissions(project, workspace)
sub_list = r.json()
#TODO: filter this list by submission time first?
sub_lookup = {s["submissionId"]: s for s in sub_list}
# Keys of dependencies is the list of tasks to run
for n in dependencies:
for sset in sample_sets:
task_data = monitor_data[n][sset]
if task_data['state'] == "Not Started":
# See if all of the dependencies have been evaluated
upstream_evaluated = True
for dep in dependencies[n]:
# Look up the status of the task
upstream_task_data = monitor_data[dep['upstream_task']][sset]
if not upstream_task_data.get('evaluated'):
upstream_evaluated = False
# if all of the dependencies have been evaluated, we can evaluate
# this node
if upstream_evaluated:
# Now check the satisfied Mode of all the dependencies
should_run = True
for dep in dependencies[n]:
upstream_task_data = monitor_data[dep['upstream_task']][sset]
mode = dep['satisfiedMode']
# Task must have succeeded for OnComplete
if mode == '"OnComplete"' and not upstream_task_data['succeeded']:
should_run = False
# 'Always' and 'Optional' run once the deps have been
# evaluated
if should_run:
# Submit the workflow to FC
fc_config = n
logging.info("Starting workflow " + fc_config + " on " + sset)
# How to handle errors at this step?
for retry in range(3):
r = fapi.create_submission(
project, workspace, namespace, fc_config,
sset, etype="sample_set", expression=None
)
if r.status_code == 201:
task_data['submissionId'] = r.json()['submissionId']
task_data['state'] = "Running"
running += 1
break
else:
# There was an error, under certain circumstances retry
logging.debug("Create_submission for " + fc_config
+ "failed on " + sset + " with the following response:"
+ r.content + "\nRetrying...")
else:
# None of the attempts above succeeded, log an error, mark as failed
logging.error("Maximum retries exceeded")
task_data['state'] = 'Completed'
task_data['evaluated'] = True
task_data['succeeded'] = False
else:
# This task will never be able to run, mark evaluated
task_data['state'] = "Evaluated"
task_data['evaluated'] = True
completed += 1
else:
waiting += 1
elif task_data['state'] == "Running":
submission = sub_lookup[task_data['submissionId']]
status = submission['status']
if status == "Done":
# Look at the individual workflows to see if there were
# failures
logging.info("Workflow " + n + " completed for " + sset)
success = 'Failed' not in submission['workflowStatuses']
task_data['evaluated'] = True
task_data['succeeded'] = success
task_data['state'] = "Completed"
completed += 1
else:
# Submission isn't done, don't do anything
running += 1
else:
# Either Completed or evaluated
completed += 1
# Save the state of the monitor for recovery purposes
# Have to do this for every workflow + sample_set so we don't lose track of any
recovery_data['monitor_data'] = monitor_data
recovery_data['dependencies'] = dependencies
with open(recovery_file, 'w') as rf:
json.dump(recovery_data, rf)
logging.info("{0} Waiting, {1} Running, {2} Completed".format(waiting, running, completed))
# If all tasks have been evaluated, we are done
if all(monitor_data[n][sset]['evaluated']
for n in monitor_data for sset in monitor_data[n]):
logging.info("DONE.")
break
time.sleep(30) | [
"def",
"supervise_until_complete",
"(",
"monitor_data",
",",
"dependencies",
",",
"args",
",",
"recovery_file",
")",
":",
"project",
"=",
"args",
"[",
"'project'",
"]",
"workspace",
"=",
"args",
"[",
"'workspace'",
"]",
"namespace",
"=",
"args",
"[",
"'namespa... | Supervisor loop. Loop forever until all tasks are evaluated or completed | [
"Supervisor",
"loop",
".",
"Loop",
"forever",
"until",
"all",
"tasks",
"are",
"evaluated",
"or",
"completed"
] | python | train |
google/dotty | efilter/protocols/repeated.py | https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/protocols/repeated.py#L55-L78 | def meld(*values):
"""Return the repeated value, or the first value if there's only one.
This is a convenience function, equivalent to calling
getvalue(repeated(x)) to get x.
This function skips over instances of None in values (None is not allowed
in repeated variables).
Examples:
meld("foo", "bar") # => ListRepetition("foo", "bar")
meld("foo", "foo") # => ListRepetition("foo", "foo")
meld("foo", None) # => "foo"
meld(None) # => None
"""
values = [x for x in values if x is not None]
if not values:
return None
result = repeated(*values)
if isrepeating(result):
return result
return getvalue(result) | [
"def",
"meld",
"(",
"*",
"values",
")",
":",
"values",
"=",
"[",
"x",
"for",
"x",
"in",
"values",
"if",
"x",
"is",
"not",
"None",
"]",
"if",
"not",
"values",
":",
"return",
"None",
"result",
"=",
"repeated",
"(",
"*",
"values",
")",
"if",
"isrepe... | Return the repeated value, or the first value if there's only one.
This is a convenience function, equivalent to calling
getvalue(repeated(x)) to get x.
This function skips over instances of None in values (None is not allowed
in repeated variables).
Examples:
meld("foo", "bar") # => ListRepetition("foo", "bar")
meld("foo", "foo") # => ListRepetition("foo", "foo")
meld("foo", None) # => "foo"
meld(None) # => None | [
"Return",
"the",
"repeated",
"value",
"or",
"the",
"first",
"value",
"if",
"there",
"s",
"only",
"one",
"."
] | python | train |
apache/airflow | airflow/www/api/experimental/endpoints.py | https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/api/experimental/endpoints.py#L142-L150 | def get_dag_code(dag_id):
"""Return python code of a given dag_id."""
try:
return get_code(dag_id)
except AirflowException as err:
_log.info(err)
response = jsonify(error="{}".format(err))
response.status_code = err.status_code
return response | [
"def",
"get_dag_code",
"(",
"dag_id",
")",
":",
"try",
":",
"return",
"get_code",
"(",
"dag_id",
")",
"except",
"AirflowException",
"as",
"err",
":",
"_log",
".",
"info",
"(",
"err",
")",
"response",
"=",
"jsonify",
"(",
"error",
"=",
"\"{}\"",
".",
"f... | Return python code of a given dag_id. | [
"Return",
"python",
"code",
"of",
"a",
"given",
"dag_id",
"."
] | python | test |
google/grr | grr/server/grr_response_server/gui/api_plugins/vfs.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/gui/api_plugins/vfs.py#L1279-L1288 | def _GetTimelineStatEntries(client_id, file_path, with_history=True):
"""Gets timeline entries from the appropriate data source (AFF4 or REL_DB)."""
if data_store.RelationalDBEnabled():
fn = _GetTimelineStatEntriesRelDB
else:
fn = _GetTimelineStatEntriesLegacy
for v in fn(client_id, file_path, with_history=with_history):
yield v | [
"def",
"_GetTimelineStatEntries",
"(",
"client_id",
",",
"file_path",
",",
"with_history",
"=",
"True",
")",
":",
"if",
"data_store",
".",
"RelationalDBEnabled",
"(",
")",
":",
"fn",
"=",
"_GetTimelineStatEntriesRelDB",
"else",
":",
"fn",
"=",
"_GetTimelineStatEnt... | Gets timeline entries from the appropriate data source (AFF4 or REL_DB). | [
"Gets",
"timeline",
"entries",
"from",
"the",
"appropriate",
"data",
"source",
"(",
"AFF4",
"or",
"REL_DB",
")",
"."
] | python | train |
OpenKMIP/PyKMIP | kmip/services/server/engine.py | https://github.com/OpenKMIP/PyKMIP/blob/b51c5b044bd05f8c85a1d65d13a583a4d8fc1b0e/kmip/services/server/engine.py#L865-L917 | def is_allowed(
self,
policy_name,
session_user,
session_group,
object_owner,
object_type,
operation
):
"""
Determine if object access is allowed for the provided policy and
session settings.
"""
policy_section = self.get_relevant_policy_section(
policy_name,
session_group
)
if policy_section is None:
return False
object_policy = policy_section.get(object_type)
if not object_policy:
self._logger.warning(
"The '{0}' policy does not apply to {1} objects.".format(
policy_name,
self._get_enum_string(object_type)
)
)
return False
operation_object_policy = object_policy.get(operation)
if not operation_object_policy:
self._logger.warning(
"The '{0}' policy does not apply to {1} operations on {2} "
"objects.".format(
policy_name,
self._get_enum_string(operation),
self._get_enum_string(object_type)
)
)
return False
if operation_object_policy == enums.Policy.ALLOW_ALL:
return True
elif operation_object_policy == enums.Policy.ALLOW_OWNER:
if session_user == object_owner:
return True
else:
return False
elif operation_object_policy == enums.Policy.DISALLOW_ALL:
return False
else:
return False | [
"def",
"is_allowed",
"(",
"self",
",",
"policy_name",
",",
"session_user",
",",
"session_group",
",",
"object_owner",
",",
"object_type",
",",
"operation",
")",
":",
"policy_section",
"=",
"self",
".",
"get_relevant_policy_section",
"(",
"policy_name",
",",
"sessi... | Determine if object access is allowed for the provided policy and
session settings. | [
"Determine",
"if",
"object",
"access",
"is",
"allowed",
"for",
"the",
"provided",
"policy",
"and",
"session",
"settings",
"."
] | python | test |
Yelp/threat_intel | threat_intel/opendns.py | https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/opendns.py#L324-L334 | def risk_score(self, domains):
"""Performs Umbrella risk score analysis on the input domains
Args:
domains: an enumerable of domains
Returns:
An enumerable of associated domain risk scores
"""
api_name = 'opendns-risk_score'
fmt_url_path = u'domains/risk-score/{0}'
return self._multi_get(api_name, fmt_url_path, domains) | [
"def",
"risk_score",
"(",
"self",
",",
"domains",
")",
":",
"api_name",
"=",
"'opendns-risk_score'",
"fmt_url_path",
"=",
"u'domains/risk-score/{0}'",
"return",
"self",
".",
"_multi_get",
"(",
"api_name",
",",
"fmt_url_path",
",",
"domains",
")"
] | Performs Umbrella risk score analysis on the input domains
Args:
domains: an enumerable of domains
Returns:
An enumerable of associated domain risk scores | [
"Performs",
"Umbrella",
"risk",
"score",
"analysis",
"on",
"the",
"input",
"domains"
] | python | train |
jobovy/galpy | galpy/orbit/OrbitTop.py | https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/orbit/OrbitTop.py#L543-L564 | def bb(self,*args,**kwargs):
"""
NAME:
bb
PURPOSE:
return Galactic latitude
INPUT:
t - (optional) time at which to get bb
obs=[X,Y,Z] - (optional) position of observer (in kpc)
(default=Object-wide default)
OR Orbit object that corresponds to the orbit
of the observer
Y is ignored and always assumed to be zero
ro= distance in kpc corresponding to R=1. (default=Object-wide default)
OUTPUT:
b(t)
HISTORY:
2011-02-23 - Written - Bovy (NYU)
"""
_check_roSet(self,kwargs,'bb')
lbd= self._lbd(*args,**kwargs)
return lbd[:,1] | [
"def",
"bb",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"_check_roSet",
"(",
"self",
",",
"kwargs",
",",
"'bb'",
")",
"lbd",
"=",
"self",
".",
"_lbd",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"lbd",
"[",
... | NAME:
bb
PURPOSE:
return Galactic latitude
INPUT:
t - (optional) time at which to get bb
obs=[X,Y,Z] - (optional) position of observer (in kpc)
(default=Object-wide default)
OR Orbit object that corresponds to the orbit
of the observer
Y is ignored and always assumed to be zero
ro= distance in kpc corresponding to R=1. (default=Object-wide default)
OUTPUT:
b(t)
HISTORY:
2011-02-23 - Written - Bovy (NYU) | [
"NAME",
":",
"bb",
"PURPOSE",
":",
"return",
"Galactic",
"latitude",
"INPUT",
":",
"t",
"-",
"(",
"optional",
")",
"time",
"at",
"which",
"to",
"get",
"bb",
"obs",
"=",
"[",
"X",
"Y",
"Z",
"]",
"-",
"(",
"optional",
")",
"position",
"of",
"observer... | python | train |
cggh/scikit-allel | allel/model/ndarray.py | https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L2971-L3027 | def map_alleles(self, mapping, max_allele=None):
"""Transform alleles via a mapping.
Parameters
----------
mapping : ndarray, int8, shape (n_variants, max_allele)
An array defining the allele mapping for each variant.
max_allele : int, optional
Highest allele index expected in the output. If not provided
will be determined from maximum value in `mapping`.
Returns
-------
ac : AlleleCountsArray
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
... [[0, 0], [0, 1]],
... [[0, 2], [1, 1]],
... [[2, 2], [-1, -1]]])
>>> ac = g.count_alleles()
>>> ac
<AlleleCountsArray shape=(4, 3) dtype=int32>
4 0 0
3 1 0
1 2 1
0 0 2
>>> mapping = [[1, 0, 2],
... [1, 0, 2],
... [2, 1, 0],
... [1, 2, 0]]
>>> ac.map_alleles(mapping)
<AlleleCountsArray shape=(4, 3) dtype=int32>
0 4 0
1 3 0
1 2 1
2 0 0
See Also
--------
create_allele_mapping
"""
# ensure correct dimensionality and matching dtype
mapping = asarray_ndim(mapping, 2, dtype=self.dtype)
check_dim0_aligned(self, mapping)
check_dim1_aligned(self, mapping)
# use optimisation
out = allele_counts_array_map_alleles(self.values, mapping, max_allele)
# wrap and return
return type(self)(out) | [
"def",
"map_alleles",
"(",
"self",
",",
"mapping",
",",
"max_allele",
"=",
"None",
")",
":",
"# ensure correct dimensionality and matching dtype",
"mapping",
"=",
"asarray_ndim",
"(",
"mapping",
",",
"2",
",",
"dtype",
"=",
"self",
".",
"dtype",
")",
"check_dim0... | Transform alleles via a mapping.
Parameters
----------
mapping : ndarray, int8, shape (n_variants, max_allele)
An array defining the allele mapping for each variant.
max_allele : int, optional
Highest allele index expected in the output. If not provided
will be determined from maximum value in `mapping`.
Returns
-------
ac : AlleleCountsArray
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
... [[0, 0], [0, 1]],
... [[0, 2], [1, 1]],
... [[2, 2], [-1, -1]]])
>>> ac = g.count_alleles()
>>> ac
<AlleleCountsArray shape=(4, 3) dtype=int32>
4 0 0
3 1 0
1 2 1
0 0 2
>>> mapping = [[1, 0, 2],
... [1, 0, 2],
... [2, 1, 0],
... [1, 2, 0]]
>>> ac.map_alleles(mapping)
<AlleleCountsArray shape=(4, 3) dtype=int32>
0 4 0
1 3 0
1 2 1
2 0 0
See Also
--------
create_allele_mapping | [
"Transform",
"alleles",
"via",
"a",
"mapping",
"."
] | python | train |
koenedaele/skosprovider | skosprovider/skos.py | https://github.com/koenedaele/skosprovider/blob/7304a37953978ca8227febc2d3cc2b2be178f215/skosprovider/skos.py#L555-L571 | def filter_labels_by_language(labels, language, broader=False):
'''
Filter a list of labels, leaving only labels of a certain language.
:param list labels: A list of :class:`Label`.
:param str language: An IANA language string, eg. `nl` or `nl-BE`.
:param boolean broader: When true, will also match `nl-BE` when filtering
on `nl`. When false, only exact matches are considered.
'''
if language == 'any':
return labels
if broader:
language = tags.tag(language).language.format
return [l for l in labels if tags.tag(l.language).language.format == language]
else:
language = tags.tag(language).format
return [l for l in labels if tags.tag(l.language).format == language] | [
"def",
"filter_labels_by_language",
"(",
"labels",
",",
"language",
",",
"broader",
"=",
"False",
")",
":",
"if",
"language",
"==",
"'any'",
":",
"return",
"labels",
"if",
"broader",
":",
"language",
"=",
"tags",
".",
"tag",
"(",
"language",
")",
".",
"l... | Filter a list of labels, leaving only labels of a certain language.
:param list labels: A list of :class:`Label`.
:param str language: An IANA language string, eg. `nl` or `nl-BE`.
:param boolean broader: When true, will also match `nl-BE` when filtering
on `nl`. When false, only exact matches are considered. | [
"Filter",
"a",
"list",
"of",
"labels",
"leaving",
"only",
"labels",
"of",
"a",
"certain",
"language",
"."
] | python | valid |
saltstack/salt | salt/utils/thin.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/thin.py#L155-L166 | def _add_dependency(container, obj):
'''
Add a dependency to the top list.
:param container: list that collects dependency paths
:param obj: imported module whose source path is appended
:return: None
'''
if os.path.basename(obj.__file__).split('.')[0] == '__init__':
container.append(os.path.dirname(obj.__file__))
else:
container.append(obj.__file__.replace('.pyc', '.py')) | [
"def",
"_add_dependency",
"(",
"container",
",",
"obj",
")",
":",
"if",
"os",
".",
"path",
".",
"basename",
"(",
"obj",
".",
"__file__",
")",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"==",
"'__init__'",
":",
"container",
".",
"append",
"(",
"o... | Add a dependency to the top list.
:param container: list that collects dependency paths
:param obj: imported module whose source path is appended
:return: None | [
"Add",
"a",
"dependency",
"to",
"the",
"top",
"list",
"."
] | python | train |
mpg-age-bioinformatics/AGEpy | AGEpy/fasta.py | https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/fasta.py#L54-L92 | def rewriteFasta(sequence, sequence_name, fasta_in, fasta_out):
"""
Rewrites a specific sequence in a multifasta file while keeping the sequence header.
:param sequence: a string with the sequence to be written
:param sequence_name: the name of the sequence to be retrieved eg. for '>2 dna:chromosome chromosome:GRCm38:2:1:182113224:1 REF' use: sequence_name=str(2)
:param fasta_in: /path/to/original.fa
:param fasta_out: /path/to/destination.fa
:returns: nothing
"""
f=open(fasta_in, 'r+')
f2=open(fasta_out,'w')
lines = f.readlines()
i=0
while i < len(lines):
line = lines[i]
if line[0] == ">":
f2.write(line)
fChr=line.split(" ")[0]
fChr=fChr[1:]
if fChr == sequence_name:
code=['N','A','C','T','G']
firstbase=lines[i+1][0]
while firstbase in code:
i=i+1
firstbase=lines[i][0]
s=0
while s <= len(sequence):
f2.write(sequence[s:s+60]+"\n")
s=s+60
else:
i=i+1
else:
f2.write(line)
i=i+1
f2.close()
f.close() | [
"def",
"rewriteFasta",
"(",
"sequence",
",",
"sequence_name",
",",
"fasta_in",
",",
"fasta_out",
")",
":",
"f",
"=",
"open",
"(",
"fasta_in",
",",
"'r+'",
")",
"f2",
"=",
"open",
"(",
"fasta_out",
",",
"'w'",
")",
"lines",
"=",
"f",
".",
"readlines",
... | Rewrites a specific sequence in a multifasta file while keeping the sequence header.
:param sequence: a string with the sequence to be written
:param sequence_name: the name of the sequence to be retrieved eg. for '>2 dna:chromosome chromosome:GRCm38:2:1:182113224:1 REF' use: sequence_name=str(2)
:param fasta_in: /path/to/original.fa
:param fasta_out: /path/to/destination.fa
:returns: nothing | [
"Rewrites",
"a",
"specific",
"sequence",
"in",
"a",
"multifasta",
"file",
"while",
"keeping",
"the",
"sequence",
"header",
"."
] | python | train |
xaptum/xtt-python | xtt/_utils.py | https://github.com/xaptum/xtt-python/blob/23ee469488d710d730314bec1136c4dd7ac2cd5c/xtt/_utils.py#L21-L31 | def to_bytes(s, encoding="utf-8"):
"""
Converts the string to a bytes type, if not already.
:s: the string to convert to bytes
:returns: `str` on Python2 and `bytes` on Python3.
"""
if isinstance(s, six.binary_type):
return s
else:
return six.text_type(s).encode(encoding) | [
"def",
"to_bytes",
"(",
"s",
",",
"encoding",
"=",
"\"utf-8\"",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"six",
".",
"binary_type",
")",
":",
"return",
"s",
"else",
":",
"return",
"six",
".",
"text_type",
"(",
"s",
")",
".",
"encode",
"(",
"enc... | Converts the string to a bytes type, if not already.
:s: the string to convert to bytes
:returns: `str` on Python2 and `bytes` on Python3. | [
"Converts",
"the",
"string",
"to",
"a",
"bytes",
"type",
"if",
"not",
"already",
"."
] | python | train |
Titan-C/slaveparticles | slaveparticles/utils/plotter.py | https://github.com/Titan-C/slaveparticles/blob/e4c2f5afb1a7b195517ef2f1b5cc758965036aab/slaveparticles/utils/plotter.py#L11-L27 | def solve_loop(slsp, u_span, j_coup):
"""Calculates the quasiparticle for the input loop of:
@param slsp: Slave spin Object
@param Uspan: local Couloumb interation
@param J_coup: Fraction of Uspan of Hund coupling strength"""
zet, lam, eps, hlog, mean_f = [], [], [], [], [None]
for u in u_span:
print(u, j_coup)
hlog.append(slsp.selfconsistency(u, j_coup, mean_f[-1]))
mean_f.append(slsp.mean_field())
zet.append(slsp.quasiparticle_weight())
lam.append(slsp.param['lambda'])
eps.append(orbital_energies(slsp.param, zet[-1]))
return np.asarray([zet, lam, eps]), hlog, mean_f | [
"def",
"solve_loop",
"(",
"slsp",
",",
"u_span",
",",
"j_coup",
")",
":",
"zet",
",",
"lam",
",",
"eps",
",",
"hlog",
",",
"mean_f",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"[",
"None",
"]",
"for",
"u",
"in",
"u_spa... | Calculates the quasiparticle for the input loop of:
@param slsp: Slave spin Object
@param Uspan: local Couloumb interation
@param J_coup: Fraction of Uspan of Hund coupling strength | [
"Calculates",
"the",
"quasiparticle",
"for",
"the",
"input",
"loop",
"of",
":"
] | python | train |
persephone-tools/persephone | persephone/corpus.py | https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L368-L392 | def prepare_feats(self) -> None:
""" Prepares input features"""
logger.debug("Preparing input features")
self.feat_dir.mkdir(parents=True, exist_ok=True)
should_extract_feats = False
for path in self.wav_dir.iterdir():
if not path.suffix == ".wav":
logger.info("Non wav file found in wav directory: %s", path)
continue
prefix = os.path.basename(os.path.splitext(str(path))[0])
mono16k_wav_path = self.feat_dir / "{}.wav".format(prefix)
feat_path = self.feat_dir / "{}.{}.npy".format(prefix, self.feat_type)
if not feat_path.is_file():
# Then we should extract feats
should_extract_feats = True
if not mono16k_wav_path.is_file():
feat_extract.convert_wav(path, mono16k_wav_path)
# TODO Should be extracting feats on a per-file basis. Right now we
# check if any feats files don't exist and then do all the feature
# extraction.
if should_extract_feats:
feat_extract.from_dir(self.feat_dir, self.feat_type) | [
"def",
"prepare_feats",
"(",
"self",
")",
"->",
"None",
":",
"logger",
".",
"debug",
"(",
"\"Preparing input features\"",
")",
"self",
".",
"feat_dir",
".",
"mkdir",
"(",
"parents",
"=",
"True",
",",
"exist_ok",
"=",
"True",
")",
"should_extract_feats",
"=",... | Prepares input features | [
"Prepares",
"input",
"features"
] | python | train |
BoGoEngine/bogo-python | bogo/utils.py | https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/utils.py#L51-L75 | def append_comps(comps, char):
"""
Append a character to `comps` following this rule: a vowel is added to the
vowel part if there is no last consonant, else to the last consonant part;
a consonant is added to the first consonant part if there is no vowel, and
to the last consonant part if the vowel part is not empty.
>>> append_comps(['', '', ''], 'c')
['c', '', '']
>>> append_comps(['c', '', ''], 'o')
['c', 'o', '']
>>> append_comps(['c', 'o', ''], 'n')
['c', 'o', 'n']
>>> append_comps(['c', 'o', 'n'], 'o')
['c', 'o', 'no']
"""
c = list(comps)
if is_vowel(char):
if not c[2]: pos = 1
else: pos = 2
else:
if not c[2] and not c[1]: pos = 0
else: pos = 2
c[pos] += char
return c | [
"def",
"append_comps",
"(",
"comps",
",",
"char",
")",
":",
"c",
"=",
"list",
"(",
"comps",
")",
"if",
"is_vowel",
"(",
"char",
")",
":",
"if",
"not",
"c",
"[",
"2",
"]",
":",
"pos",
"=",
"1",
"else",
":",
"pos",
"=",
"2",
"else",
":",
"if",
... | Append a character to `comps` following this rule: a vowel is added to the
vowel part if there is no last consonant, else to the last consonant part;
a consonant is added to the first consonant part if there is no vowel, and
to the last consonant part if the vowel part is not empty.
>>> append_comps(['', '', ''], 'c')
['c', '', '']
>>> append_comps(['c', '', ''], 'o')
['c', 'o', '']
>>> append_comps(['c', 'o', ''], 'n')
['c', 'o', 'n']
>>> append_comps(['c', 'o', 'n'], 'o')
['c', 'o', 'no'] | [
"Append",
"a",
"character",
"to",
"comps",
"following",
"this",
"rule",
":",
"a",
"vowel",
"is",
"added",
"to",
"the",
"vowel",
"part",
"if",
"there",
"is",
"no",
"last",
"consonant",
"else",
"to",
"the",
"last",
"consonant",
"part",
";",
"a",
"consonant... | python | train |