repo stringlengths 7 54 | path stringlengths 4 192 | url stringlengths 87 284 | code stringlengths 78 104k | code_tokens list | docstring stringlengths 1 46.9k | docstring_tokens list | language stringclasses 1
value | partition stringclasses 3
values |
|---|---|---|---|---|---|---|---|---|
materialsproject/pymatgen | pymatgen/analysis/reaction_calculator.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/reaction_calculator.py#L440-L451 | def all_entries(self):
"""
Equivalent of all_comp but returns entries, in the same order as the
coefficients.
"""
entries = []
for c in self._all_comp:
for e in self._all_entries:
if e.composition.reduced_formula == c.reduced_formula:
entries.append(e)
break
return entries | [
"def",
"all_entries",
"(",
"self",
")",
":",
"entries",
"=",
"[",
"]",
"for",
"c",
"in",
"self",
".",
"_all_comp",
":",
"for",
"e",
"in",
"self",
".",
"_all_entries",
":",
"if",
"e",
".",
"composition",
".",
"reduced_formula",
"==",
"c",
".",
"reduce... | Equivalent of all_comp but returns entries, in the same order as the
coefficients. | [
"Equivalent",
"of",
"all_comp",
"but",
"returns",
"entries",
"in",
"the",
"same",
"order",
"as",
"the",
"coefficients",
"."
] | python | train |
kstaniek/condoor | condoor/device.py | https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/device.py#L461-L466 | def update_console(self):
"""Update is_console whether connected via console."""
self.chain.connection.log("Detecting console connection")
is_console = self.driver.is_console(self.users_text)
if is_console is not None:
self.is_console = is_console | [
"def",
"update_console",
"(",
"self",
")",
":",
"self",
".",
"chain",
".",
"connection",
".",
"log",
"(",
"\"Detecting console connection\"",
")",
"is_console",
"=",
"self",
".",
"driver",
".",
"is_console",
"(",
"self",
".",
"users_text",
")",
"if",
"is_con... | Update is_console whether connected via console. | [
"Update",
"is_console",
"whether",
"connected",
"via",
"console",
"."
] | python | train |
deepmind/sonnet | sonnet/python/modules/relational_memory.py | https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/relational_memory.py#L212-L234 | def _attend_over_memory(self, memory):
"""Perform multiheaded attention over `memory`.
Args:
memory: Current relational memory.
Returns:
The attended-over memory.
"""
attention_mlp = basic.BatchApply(
mlp.MLP([self._mem_size] * self._attention_mlp_layers))
for _ in range(self._num_blocks):
attended_memory = self._multihead_attention(memory)
# Add a skip connection to the multiheaded attention's input.
memory = basic.BatchApply(layer_norm.LayerNorm())(
memory + attended_memory)
# Add a skip connection to the attention_mlp's input.
memory = basic.BatchApply(layer_norm.LayerNorm())(
attention_mlp(memory) + memory)
return memory | [
"def",
"_attend_over_memory",
"(",
"self",
",",
"memory",
")",
":",
"attention_mlp",
"=",
"basic",
".",
"BatchApply",
"(",
"mlp",
".",
"MLP",
"(",
"[",
"self",
".",
"_mem_size",
"]",
"*",
"self",
".",
"_attention_mlp_layers",
")",
")",
"for",
"_",
"in",
... | Perform multiheaded attention over `memory`.
Args:
memory: Current relational memory.
Returns:
The attended-over memory. | [
"Perform",
"multiheaded",
"attention",
"over",
"memory",
"."
] | python | train |
rdussurget/py-altimetry | altimetry/externals/esutils_stat.py | https://github.com/rdussurget/py-altimetry/blob/57ce7f2d63c6bbc4993821af0bbe46929e3a2d98/altimetry/externals/esutils_stat.py#L89-L144 | def _weave_dohist(data, s, binsize, hist, rev, dorev=False, verbose=0):
if dorev:
dorev=1
else:
dorev=0
"""
Weave version of histogram with reverse_indices
s is an index into data, sorted and possibly a subset
"""
code = """
int64_t nbin = hist.size();
int64_t binnum_old = -1;
// index of minimum value
int64_t imin = s(0);
for (int64_t i=0; i<s.size(); i++) {
int64_t offset = i+nbin+1;
int64_t data_index = s(i);
if (dorev) {
rev(offset) = data_index;
}
int64_t binnum = (int64_t) ( (data(data_index)-data(imin))/binsize);
if (binnum >= 0 && binnum < nbin) {
if (dorev && (binnum > binnum_old) ) {
int64_t tbin = binnum_old + 1;
while (tbin <= binnum) {
rev(tbin) = offset;
tbin++;
}
}
hist(binnum) = hist(binnum) + 1;
binnum_old = binnum;
}
}
int64_t tbin = binnum_old + 1;
while (tbin <= nbin) {
if (dorev) {
rev(tbin) = rev.size();
}
tbin++;
}
"""
scipy.weave.inline(code, ['data','s','binsize','hist','rev','dorev'],
type_converters = scipy.weave.converters.blitz, verbose=verbose)
return | [
"def",
"_weave_dohist",
"(",
"data",
",",
"s",
",",
"binsize",
",",
"hist",
",",
"rev",
",",
"dorev",
"=",
"False",
",",
"verbose",
"=",
"0",
")",
":",
"if",
"dorev",
":",
"dorev",
"=",
"1",
"else",
":",
"dorev",
"=",
"0",
"code",
"=",
"\"\"\"\r\... | Weave version of histogram with reverse_indices
s is an index into data, sorted and possibly a subset | [
"Weave",
"version",
"of",
"histogram",
"with",
"reverse_indices",
"s",
"is",
"an",
"index",
"into",
"data",
"sorted",
"and",
"possibly",
"a",
"subset"
] | python | train |
praekeltfoundation/marathon-acme | marathon_acme/acme_util.py | https://github.com/praekeltfoundation/marathon-acme/blob/b1b71e3dde0ba30e575089280658bd32890e3325/marathon_acme/acme_util.py#L58-L85 | def maybe_key_vault(client, mount_path):
"""
Set up a client key in Vault if one does not exist already.
:param client:
The Vault API client to use.
:param mount_path:
The Vault key/value mount path to use.
:rtype: twisted.internet.defer.Deferred
"""
d = client.read_kv2('client_key', mount_path=mount_path)
def get_or_create_key(client_key):
if client_key is not None:
key_data = client_key['data']['data']
key = _load_pem_private_key_bytes(key_data['key'].encode('utf-8'))
return JWKRSA(key=key)
else:
key = generate_private_key(u'rsa')
key_data = {
'key': _dump_pem_private_key_bytes(key).decode('utf-8')
}
d = client.create_or_update_kv2(
'client_key', key_data, mount_path=mount_path)
return d.addCallback(lambda _result: JWKRSA(key=key))
return d.addCallback(get_or_create_key) | [
"def",
"maybe_key_vault",
"(",
"client",
",",
"mount_path",
")",
":",
"d",
"=",
"client",
".",
"read_kv2",
"(",
"'client_key'",
",",
"mount_path",
"=",
"mount_path",
")",
"def",
"get_or_create_key",
"(",
"client_key",
")",
":",
"if",
"client_key",
"is",
"not... | Set up a client key in Vault if one does not exist already.
:param client:
The Vault API client to use.
:param mount_path:
The Vault key/value mount path to use.
:rtype: twisted.internet.defer.Deferred | [
"Set",
"up",
"a",
"client",
"key",
"in",
"Vault",
"if",
"one",
"does",
"not",
"exist",
"already",
"."
] | python | valid |
vfilimonov/pydatastream | pydatastream/pydatastream.py | https://github.com/vfilimonov/pydatastream/blob/15d2adac1c83501715db1542373fa8428542816e/pydatastream/pydatastream.py#L465-L525 | def fetch(self, tickers, fields=None, date=None, date_from=None, date_to=None,
freq='D', only_data=True, static=False):
"""Fetch data from TR DWE.
tickers - ticker or list of tickers
fields - list of fields.
date - date for a single-date query
date_from, date_to - date range (used only if "date" is not specified)
freq - frequency of data: daily('D'), weekly('W') or monthly('M')
only_data - if True then metadata will not be returned
static - if True "static" request is created (i.e. not a series).
In this case 'date_from', 'date_to' and 'freq' are ignored
In case list of tickers is requested, a MultiIndex-dataframe is returned.
Some of available fields:
P - adjusted closing price
PO - opening price
PH - high price
PL - low price
VO - volume, which is expressed in 1000's of shares.
UP - unadjusted price
OI - open interest
MV - market value
EPS - earnings per share
DI - dividend index
MTVB - market to book value
PTVB - price to book value
...
The full list of data fields is available at http://dtg.tfn.com/.
"""
if static:
query = self.construct_request(tickers, fields, date, freq='REP')
else:
query = self.construct_request(tickers, fields, date, date_from, date_to, freq)
raw = self.request(query)
if static:
data, metadata = self.parse_record_static(raw)
elif isinstance(tickers, basestring) or len(tickers) == 1:
data, metadata = self.parse_record(raw)
elif hasattr(tickers, '__len__'):
metadata = pd.DataFrame()
data = {}
for indx in range(len(tickers)):
dat, meta = self.parse_record(raw, indx)
data[tickers[indx]] = dat
metadata = metadata.append(meta, ignore_index=False)
data = pd.concat(data)
else:
raise DatastreamException(('First argument should be either ticker or '
'list of tickers'))
if only_data:
return data
else:
return data, metadata | [
"def",
"fetch",
"(",
"self",
",",
"tickers",
",",
"fields",
"=",
"None",
",",
"date",
"=",
"None",
",",
"date_from",
"=",
"None",
",",
"date_to",
"=",
"None",
",",
"freq",
"=",
"'D'",
",",
"only_data",
"=",
"True",
",",
"static",
"=",
"False",
")",... | Fetch data from TR DWE.
tickers - ticker or list of tickers
fields - list of fields.
date - date for a single-date query
date_from, date_to - date range (used only if "date" is not specified)
freq - frequency of data: daily('D'), weekly('W') or monthly('M')
only_data - if True then metadata will not be returned
static - if True "static" request is created (i.e. not a series).
In this case 'date_from', 'date_to' and 'freq' are ignored
In case list of tickers is requested, a MultiIndex-dataframe is returned.
Some of available fields:
P - adjusted closing price
PO - opening price
PH - high price
PL - low price
VO - volume, which is expressed in 1000's of shares.
UP - unadjusted price
OI - open interest
MV - market value
EPS - earnings per share
DI - dividend index
MTVB - market to book value
PTVB - price to book value
...
The full list of data fields is available at http://dtg.tfn.com/. | [
"Fetch",
"data",
"from",
"TR",
"DWE",
"."
] | python | train |
wandb/client | wandb/run_manager.py | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/run_manager.py#L792-L851 | def init_run(self, env=None):
"""Ensure we create a Run (Bucket) object
We either create it now or, if the API call fails for some reason (eg.
the network is down), we do it from a thread that we start. We hold
off file syncing and streaming until it succeeds.
Returns the initial step of the run, or None if we didn't create a run
"""
io_wrap.init_sigwinch_handler()
self._check_update_available(__version__)
if self._output:
wandb.termlog("Local directory: %s" % os.path.relpath(self._run.dir))
self._system_stats.start()
self._meta.start()
logger.info("system metrics and metadata threads started")
new_step = None
if self._cloud:
storage_id = None
if self._run.resume != 'never':
# DNS can hang for 60 seconds, we check for resume status in a thread
# TODO: Ideally this thread would continue retrying in case of failure.
# Currently we assume we're not resuming in the case of resume = auto,
# and we throw an error in the case of resume = must.
logger.info("checking resume status, waiting at most %d seconds" % InternalApi.HTTP_TIMEOUT)
async_resume_status = util.async_call(self._api.run_resume_status, InternalApi.HTTP_TIMEOUT)
resume_status, thread = async_resume_status(self._api.settings("entity"), self._project, self._run.id)
if resume_status == None and self._run.resume == 'must':
if thread.isAlive():
raise LaunchError(
"resume='must' but we were unable to connect to the W&B service after %i seconds" % InternalApi.HTTP_TIMEOUT)
else:
raise LaunchError(
"resume='must' but run (%s) doesn't exist" % self._run.id)
if resume_status:
storage_id = resume_status['id']
logger.info("resuming run from id: %s" % storage_id)
self._project = self._resolve_project_name(self._project)
self._setup_resume(resume_status)
try:
history = json.loads(json.loads(resume_status['historyTail'])[-1])
except (IndexError,ValueError):
history = {}
new_step = history.get("_step", 0)
else:
new_step = 0
# DNS lookups can hang for upto 60 seconds, we wait for HTTP_TIMEOUT (10s)
logger.info("upserting run before process can begin, waiting at most %d seconds" % InternalApi.HTTP_TIMEOUT)
async_upsert = util.async_call(self._upsert_run, timeout=InternalApi.HTTP_TIMEOUT)
_, self._upsert_run_thread = async_upsert(True, storage_id, env)
if self._upsert_run_thread.isAlive():
logger.error("Failed to connect to W&B servers after %i seconds.\
Letting user process proceed while attempting to reconnect." % InternalApi.HTTP_TIMEOUT)
return new_step | [
"def",
"init_run",
"(",
"self",
",",
"env",
"=",
"None",
")",
":",
"io_wrap",
".",
"init_sigwinch_handler",
"(",
")",
"self",
".",
"_check_update_available",
"(",
"__version__",
")",
"if",
"self",
".",
"_output",
":",
"wandb",
".",
"termlog",
"(",
"\"Local... | Ensure we create a Run (Bucket) object
We either create it now or, if the API call fails for some reason (eg.
the network is down), we do it from a thread that we start. We hold
off file syncing and streaming until it succeeds.
Returns the initial step of the run, or None if we didn't create a run | [
"Ensure",
"we",
"create",
"a",
"Run",
"(",
"Bucket",
")",
"object"
] | python | train |
lthibault/expmpp | expmpp/client.py | https://github.com/lthibault/expmpp/blob/635fb3187fe4021410e0f06ca6896098b5e1d3b4/expmpp/client.py#L96-L135 | def monitor(self, msg, transformer=lambda _: _, unpack=False):
"""Decorator that sends a notification to all listeners when the
wrapped function returns, optionally reporting said function's return
value(s).
msg : str
Message to send to all listeners. If the message is a
Python-formatted string, the wrapped function's return value will
be inserted into the first positional placeholder (i.e. `{0}`).
transformer : function
Function to format the wrapped function's return value for
insertion into the `msg` parameter. NOTE: this does *not* modify
the function's ultimate return value. Instead, it changes what is
inserted into the message.
[Default: lambda _: _]
unpack : bool
If true, multiple return values will be unpacked and each element
will be inserted in the corresponding placeholder of `msg`. For
ordered collections, insertion is done via positional placeholders.
If the return-value is a `dict`, insertion is performed via keyword
placeholders.
[Default: False]
"""
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kw):
results = fn(*args, **kw)
transformed = transformer(results)
if unpack:
if isinstance(results, dict):
self.notify(msg.format(**transformed))
else:
self.notify(msg.format(*transformed))
else:
self.notify(msg.format(transformed))
return results
return wrapper
return decorator | [
"def",
"monitor",
"(",
"self",
",",
"msg",
",",
"transformer",
"=",
"lambda",
"_",
":",
"_",
",",
"unpack",
"=",
"False",
")",
":",
"def",
"decorator",
"(",
"fn",
")",
":",
"@",
"wraps",
"(",
"fn",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
... | Decorator that sends a notification to all listeners when the
wrapped function returns, optionally reporting said function's return
value(s).
msg : str
Message to send to all listeners. If the message is a
Python-formatted string, the wrapped function's return value will
be inserted into the first positional placeholder (i.e. `{0}`).
transformer : function
Function to format the wrapped function's return value for
insertion into the `msg` parameter. NOTE: this does *not* modify
the function's ultimate return value. Instead, it changes what is
inserted into the message.
[Default: lambda _: _]
unpack : bool
If true, multiple return values will be unpacked and each element
will be inserted in the corresponding placeholder of `msg`. For
ordered collections, insertion is done via positional placeholders.
If the return-value is a `dict`, insertion is performed via keyword
placeholders.
[Default: False] | [
"Decorator",
"that",
"sends",
"a",
"notification",
"to",
"all",
"listeners",
"when",
"the",
"wrapped",
"function",
"returns",
"optionally",
"reporting",
"said",
"function",
"s",
"return",
"value",
"(",
"s",
")",
"."
] | python | train |
iotile/coretools | iotilesensorgraph/iotile/sg/processors.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilesensorgraph/iotile/sg/processors.py#L13-L31 | def copy_all_a(input_a, *other_inputs, **kwargs):
"""Copy all readings in input a into the output.
All other inputs are skipped so that after this function runs there are no
readings left in any of the input walkers when the function finishes, even
if it generated no output readings.
Returns:
list(IOTileReading)
"""
output = []
while input_a.count() > 0:
output.append(input_a.pop())
for input_x in other_inputs:
input_x.skip_all()
return output | [
"def",
"copy_all_a",
"(",
"input_a",
",",
"*",
"other_inputs",
",",
"*",
"*",
"kwargs",
")",
":",
"output",
"=",
"[",
"]",
"while",
"input_a",
".",
"count",
"(",
")",
">",
"0",
":",
"output",
".",
"append",
"(",
"input_a",
".",
"pop",
"(",
")",
"... | Copy all readings in input a into the output.
All other inputs are skipped so that after this function runs there are no
readings left in any of the input walkers when the function finishes, even
if it generated no output readings.
Returns:
list(IOTileReading) | [
"Copy",
"all",
"readings",
"in",
"input",
"a",
"into",
"the",
"output",
"."
] | python | train |
ThreatConnect-Inc/tcex | tcex/tcex_data_filter.py | https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_data_filter.py#L60-L72 | def _in(field, filter_value):
"""Validate field **IN** string or list.
Args:
filter_value (string | list): A string or list of values.
Returns:
(boolean): Results of check
"""
valid = False
if field in filter_value:
valid = True
return valid | [
"def",
"_in",
"(",
"field",
",",
"filter_value",
")",
":",
"valid",
"=",
"False",
"if",
"field",
"in",
"filter_value",
":",
"valid",
"=",
"True",
"return",
"valid"
] | Validate field **IN** string or list.
Args:
filter_value (string | list): A string or list of values.
Returns:
(boolean): Results of check | [
"Validate",
"field",
"**",
"IN",
"**",
"string",
"or",
"list",
"."
] | python | train |
aws/aws-xray-sdk-python | aws_xray_sdk/core/models/segment.py | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/models/segment.py#L126-L131 | def remove_subsegment(self, subsegment):
"""
Remove the reference of input subsegment.
"""
super(Segment, self).remove_subsegment(subsegment)
self.decrement_subsegments_size() | [
"def",
"remove_subsegment",
"(",
"self",
",",
"subsegment",
")",
":",
"super",
"(",
"Segment",
",",
"self",
")",
".",
"remove_subsegment",
"(",
"subsegment",
")",
"self",
".",
"decrement_subsegments_size",
"(",
")"
] | Remove the reference of input subsegment. | [
"Remove",
"the",
"reference",
"of",
"input",
"subsegment",
"."
] | python | train |
PierreRust/apigpio | apigpio/apigpio.py | https://github.com/PierreRust/apigpio/blob/2b969f40e06219b43a43498d8baf87f5935ceab2/apigpio/apigpio.py#L559-L573 | def connect(self, address):
"""
Connect to a remote or local gpiod daemon.
:param address: a pair (address, port), the address must be already
resolved (for example an ip address)
:return:
"""
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.setblocking(False)
# Disable the Nagle algorithm.
self.s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
yield from self._loop.sock_connect(self.s, address)
yield from self._notify._connect(address) | [
"def",
"connect",
"(",
"self",
",",
"address",
")",
":",
"self",
".",
"s",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"self",
".",
"s",
".",
"setblocking",
"(",
"False",
")",
"# Disable the Nagl... | Connect to a remote or local gpiod daemon.
:param address: a pair (address, port), the address must be already
resolved (for example an ip address)
:return: | [
"Connect",
"to",
"a",
"remote",
"or",
"local",
"gpiod",
"daemon",
".",
":",
"param",
"address",
":",
"a",
"pair",
"(",
"address",
"port",
")",
"the",
"address",
"must",
"be",
"already",
"resolved",
"(",
"for",
"example",
"an",
"ip",
"address",
")",
":"... | python | train |
twilio/twilio-python | twilio/rest/video/v1/recording_settings.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/video/v1/recording_settings.py#L216-L226 | def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: RecordingSettingsContext for this RecordingSettingsInstance
:rtype: twilio.rest.video.v1.recording_settings.RecordingSettingsContext
"""
if self._context is None:
self._context = RecordingSettingsContext(self._version, )
return self._context | [
"def",
"_proxy",
"(",
"self",
")",
":",
"if",
"self",
".",
"_context",
"is",
"None",
":",
"self",
".",
"_context",
"=",
"RecordingSettingsContext",
"(",
"self",
".",
"_version",
",",
")",
"return",
"self",
".",
"_context"
] | Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: RecordingSettingsContext for this RecordingSettingsInstance
:rtype: twilio.rest.video.v1.recording_settings.RecordingSettingsContext | [
"Generate",
"an",
"instance",
"context",
"for",
"the",
"instance",
"the",
"context",
"is",
"capable",
"of",
"performing",
"various",
"actions",
".",
"All",
"instance",
"actions",
"are",
"proxied",
"to",
"the",
"context"
] | python | train |
JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v20/ardupilotmega.py | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v20/ardupilotmega.py#L10733-L10748 | def mag_cal_progress_send(self, compass_id, cal_mask, cal_status, attempt, completion_pct, completion_mask, direction_x, direction_y, direction_z, force_mavlink1=False):
'''
Reports progress of compass calibration.
compass_id : Compass being calibrated (uint8_t)
cal_mask : Bitmask of compasses being calibrated (uint8_t)
cal_status : Status (see MAG_CAL_STATUS enum) (uint8_t)
attempt : Attempt number (uint8_t)
completion_pct : Completion percentage (uint8_t)
completion_mask : Bitmask of sphere sections (see http://en.wikipedia.org/wiki/Geodesic_grid) (uint8_t)
direction_x : Body frame direction vector for display (float)
direction_y : Body frame direction vector for display (float)
direction_z : Body frame direction vector for display (float)
'''
return self.send(self.mag_cal_progress_encode(compass_id, cal_mask, cal_status, attempt, completion_pct, completion_mask, direction_x, direction_y, direction_z), force_mavlink1=force_mavlink1) | [
"def",
"mag_cal_progress_send",
"(",
"self",
",",
"compass_id",
",",
"cal_mask",
",",
"cal_status",
",",
"attempt",
",",
"completion_pct",
",",
"completion_mask",
",",
"direction_x",
",",
"direction_y",
",",
"direction_z",
",",
"force_mavlink1",
"=",
"False",
")",... | Reports progress of compass calibration.
compass_id : Compass being calibrated (uint8_t)
cal_mask : Bitmask of compasses being calibrated (uint8_t)
cal_status : Status (see MAG_CAL_STATUS enum) (uint8_t)
attempt : Attempt number (uint8_t)
completion_pct : Completion percentage (uint8_t)
completion_mask : Bitmask of sphere sections (see http://en.wikipedia.org/wiki/Geodesic_grid) (uint8_t)
direction_x : Body frame direction vector for display (float)
direction_y : Body frame direction vector for display (float)
direction_z : Body frame direction vector for display (float) | [
"Reports",
"progress",
"of",
"compass",
"calibration",
"."
] | python | train |
juju/charm-helpers | charmhelpers/fetch/ubuntu.py | https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/fetch/ubuntu.py#L690-L708 | def _run_apt_command(cmd, fatal=False):
"""Run an apt command with optional retries.
:param: cmd: str: The apt command to run.
:param: fatal: bool: Whether the command's output should be checked and
retried.
"""
# Provide DEBIAN_FRONTEND=noninteractive if not present in the environment.
cmd_env = {
'DEBIAN_FRONTEND': os.environ.get('DEBIAN_FRONTEND', 'noninteractive')}
if fatal:
_run_with_retries(
cmd, cmd_env=cmd_env, retry_exitcodes=(1, APT_NO_LOCK,),
retry_message="Couldn't acquire DPKG lock")
else:
env = os.environ.copy()
env.update(cmd_env)
subprocess.call(cmd, env=env) | [
"def",
"_run_apt_command",
"(",
"cmd",
",",
"fatal",
"=",
"False",
")",
":",
"# Provide DEBIAN_FRONTEND=noninteractive if not present in the environment.",
"cmd_env",
"=",
"{",
"'DEBIAN_FRONTEND'",
":",
"os",
".",
"environ",
".",
"get",
"(",
"'DEBIAN_FRONTEND'",
",",
... | Run an apt command with optional retries.
:param: cmd: str: The apt command to run.
:param: fatal: bool: Whether the command's output should be checked and
retried. | [
"Run",
"an",
"apt",
"command",
"with",
"optional",
"retries",
"."
] | python | train |
CellProfiler/centrosome | centrosome/haralick.py | https://github.com/CellProfiler/centrosome/blob/7bd9350a2d4ae1b215b81eabcecfe560bbb1f32a/centrosome/haralick.py#L196-L199 | def H7(self):
"Sum variance (error in Haralick's original paper here)."
h6 = np.tile(self.H6(), (self.rlevels2.shape[1], 1)).transpose()
return (((self.rlevels2 + 2) - h6) ** 2 * self.p_xplusy).sum(1) | [
"def",
"H7",
"(",
"self",
")",
":",
"h6",
"=",
"np",
".",
"tile",
"(",
"self",
".",
"H6",
"(",
")",
",",
"(",
"self",
".",
"rlevels2",
".",
"shape",
"[",
"1",
"]",
",",
"1",
")",
")",
".",
"transpose",
"(",
")",
"return",
"(",
"(",
"(",
"... | Sum variance (error in Haralick's original paper here). | [
"Sum",
"variance",
"(",
"error",
"in",
"Haralick",
"s",
"original",
"paper",
"here",
")",
"."
] | python | train |
casebeer/audiogen | audiogen/sampler.py | https://github.com/casebeer/audiogen/blob/184dee2ca32c2bb4315a0f18e62288728fcd7881/audiogen/sampler.py#L211-L245 | def play(channels, blocking=True, raw_samples=False):
'''
Play the contents of the generator using PyAudio
Play to the system soundcard using PyAudio. PyAudio, an otherwise optional
depenency, must be installed for this feature to work.
'''
if not pyaudio_loaded:
raise Exception("Soundcard playback requires PyAudio. Install with `pip install pyaudio`.")
channel_count = 1 if hasattr(channels, "next") else len(channels)
wavgen = wav_samples(channels, raw_samples=raw_samples)
p = pyaudio.PyAudio()
stream = p.open(
format=p.get_format_from_width(SAMPLE_WIDTH),
channels=channel_count,
rate=FRAME_RATE,
output=True,
stream_callback=_pyaudio_callback(wavgen) if not blocking else None
)
if blocking:
try:
for chunk in buffer(wavgen, 1024):
stream.write(chunk)
except Exception:
raise
finally:
if not stream.is_stopped():
stream.stop_stream()
try:
stream.close()
except Exception:
pass
else:
return stream | [
"def",
"play",
"(",
"channels",
",",
"blocking",
"=",
"True",
",",
"raw_samples",
"=",
"False",
")",
":",
"if",
"not",
"pyaudio_loaded",
":",
"raise",
"Exception",
"(",
"\"Soundcard playback requires PyAudio. Install with `pip install pyaudio`.\"",
")",
"channel_count",... | Play the contents of the generator using PyAudio
Play to the system soundcard using PyAudio. PyAudio, an otherwise optional
depenency, must be installed for this feature to work. | [
"Play",
"the",
"contents",
"of",
"the",
"generator",
"using",
"PyAudio"
] | python | train |
vtemian/buffpy | buffpy/managers/updates.py | https://github.com/vtemian/buffpy/blob/6c9236fd3b6a8f9e2d70dbf1bc01529242b73075/buffpy/managers/updates.py#L102-L135 | def new(self, text, shorten=None, now=None, top=None, media=None, when=None):
'''
Create one or more new status updates.
'''
url = PATHS['CREATE']
post_data = "text=%s&" % text
post_data += "profile_ids[]=%s&" % self.profile_id
if shorten:
post_data += "shorten=%s&" % shorten
if now:
post_data += "now=%s&" % now
if top:
post_data += "top=%s&" % top
if when:
post_data += "scheduled_at=%s&" % str(when)
if media:
media_format = "media[%s]=%s&"
for media_type, media_item in media.iteritems():
post_data += media_format % (media_type, media_item)
response = self.api.post(url=url, data=post_data)
new_update = Update(api=self.api, raw_response=response['updates'][0])
self.append(new_update)
return new_update | [
"def",
"new",
"(",
"self",
",",
"text",
",",
"shorten",
"=",
"None",
",",
"now",
"=",
"None",
",",
"top",
"=",
"None",
",",
"media",
"=",
"None",
",",
"when",
"=",
"None",
")",
":",
"url",
"=",
"PATHS",
"[",
"'CREATE'",
"]",
"post_data",
"=",
"... | Create one or more new status updates. | [
"Create",
"one",
"or",
"more",
"new",
"status",
"updates",
"."
] | python | valid |
thefab/tornadis | tornadis/connection.py | https://github.com/thefab/tornadis/blob/f9dc883e46eb5971b62eab38346319757e5f900f/tornadis/connection.py#L208-L230 | def disconnect(self):
"""Disconnects the object.
Safe method (no exception, even if it's already disconnected or if
there are some connection errors).
"""
if not self.is_connected() and not self.is_connecting():
return
LOG.debug("disconnecting from %s...", self._redis_server())
self.__periodic_callback.stop()
try:
self._ioloop.remove_handler(self.__socket_fileno)
self._listened_events = 0
except Exception:
pass
self.__socket_fileno = -1
try:
self.__socket.close()
except Exception:
pass
self._state.set_disconnected()
self._close_callback()
LOG.debug("disconnected from %s", self._redis_server()) | [
"def",
"disconnect",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_connected",
"(",
")",
"and",
"not",
"self",
".",
"is_connecting",
"(",
")",
":",
"return",
"LOG",
".",
"debug",
"(",
"\"disconnecting from %s...\"",
",",
"self",
".",
"_redis_server"... | Disconnects the object.
Safe method (no exception, even if it's already disconnected or if
there are some connection errors). | [
"Disconnects",
"the",
"object",
"."
] | python | train |
pypa/pipenv | pipenv/vendor/attr/_make.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/attr/_make.py#L1934-L1946 | def validator(self, meth):
"""
Decorator that adds *meth* to the list of validators.
Returns *meth* unchanged.
.. versionadded:: 17.1.0
"""
if self._validator is None:
self._validator = meth
else:
self._validator = and_(self._validator, meth)
return meth | [
"def",
"validator",
"(",
"self",
",",
"meth",
")",
":",
"if",
"self",
".",
"_validator",
"is",
"None",
":",
"self",
".",
"_validator",
"=",
"meth",
"else",
":",
"self",
".",
"_validator",
"=",
"and_",
"(",
"self",
".",
"_validator",
",",
"meth",
")",... | Decorator that adds *meth* to the list of validators.
Returns *meth* unchanged.
.. versionadded:: 17.1.0 | [
"Decorator",
"that",
"adds",
"*",
"meth",
"*",
"to",
"the",
"list",
"of",
"validators",
"."
] | python | train |
KrishnaswamyLab/graphtools | graphtools/base.py | https://github.com/KrishnaswamyLab/graphtools/blob/44685352be7df2005d44722903092207967457f2/graphtools/base.py#L825-L852 | def extend_to_data(self, Y):
"""Build transition matrix from new data to the graph
Creates a transition matrix such that `Y` can be approximated by
a linear combination of samples in `self.data`. Any
transformation of `self.data` can be trivially applied to `Y` by
performing
`transform_Y = self.interpolate(transform, transitions)`
Parameters
----------
Y: array-like, [n_samples_y, n_dimensions]
new data for which an affinity matrix is calculated
to the existing data. `n_features` must match
either the ambient or PCA dimensions
Returns
-------
transitions : array-like, shape=[n_samples_y, self.data.shape[0]]
Transition matrix from `Y` to `self.data`
"""
Y = self._check_extension_shape(Y)
kernel = self.build_kernel_to_data(Y)
transitions = normalize(kernel, norm='l1', axis=1)
return transitions | [
"def",
"extend_to_data",
"(",
"self",
",",
"Y",
")",
":",
"Y",
"=",
"self",
".",
"_check_extension_shape",
"(",
"Y",
")",
"kernel",
"=",
"self",
".",
"build_kernel_to_data",
"(",
"Y",
")",
"transitions",
"=",
"normalize",
"(",
"kernel",
",",
"norm",
"=",... | Build transition matrix from new data to the graph
Creates a transition matrix such that `Y` can be approximated by
a linear combination of samples in `self.data`. Any
transformation of `self.data` can be trivially applied to `Y` by
performing
`transform_Y = self.interpolate(transform, transitions)`
Parameters
----------
Y: array-like, [n_samples_y, n_dimensions]
new data for which an affinity matrix is calculated
to the existing data. `n_features` must match
either the ambient or PCA dimensions
Returns
-------
transitions : array-like, shape=[n_samples_y, self.data.shape[0]]
Transition matrix from `Y` to `self.data` | [
"Build",
"transition",
"matrix",
"from",
"new",
"data",
"to",
"the",
"graph"
] | python | train |
rosshamish/catan-py | catan/game.py | https://github.com/rosshamish/catan-py/blob/120438a8f16e39c13322c5d5930e1064e1d3f4be/catan/game.py#L171-L209 | def start(self, players):
"""
Start the game.
The value of option 'pregame' determines whether the pregame will occur or not.
- Resets the board
- Sets the players
- Sets the game state to the appropriate first turn of the game
- Finds the robber on the board, sets the robber_tile appropriately
- Logs the catanlog header
:param players: players to start the game with
"""
from .boardbuilder import Opt
self.reset()
if self.board.opts.get('players') == Opt.debug:
players = Game.get_debug_players()
self.set_players(players)
if self.options.get('pregame') is None or self.options.get('pregame') == 'on':
logging.debug('Entering pregame, game options={}'.format(self.options))
self.set_state(catan.states.GameStatePreGamePlacingPiece(self, catan.pieces.PieceType.settlement))
elif self.options.get('pregame') == 'off':
logging.debug('Skipping pregame, game options={}'.format(self.options))
self.set_state(catan.states.GameStateBeginTurn(self))
terrain = list()
numbers = list()
for tile in self.board.tiles:
terrain.append(tile.terrain)
numbers.append(tile.number)
for (_, coord), piece in self.board.pieces.items():
if piece.type == catan.pieces.PieceType.robber:
self.robber_tile = hexgrid.tile_id_from_coord(coord)
logging.debug('Found robber at coord={}, set robber_tile={}'.format(coord, self.robber_tile))
self.catanlog.log_game_start(self.players, terrain, numbers, self.board.ports)
self.notify_observers() | [
"def",
"start",
"(",
"self",
",",
"players",
")",
":",
"from",
".",
"boardbuilder",
"import",
"Opt",
"self",
".",
"reset",
"(",
")",
"if",
"self",
".",
"board",
".",
"opts",
".",
"get",
"(",
"'players'",
")",
"==",
"Opt",
".",
"debug",
":",
"player... | Start the game.
The value of option 'pregame' determines whether the pregame will occur or not.
- Resets the board
- Sets the players
- Sets the game state to the appropriate first turn of the game
- Finds the robber on the board, sets the robber_tile appropriately
- Logs the catanlog header
:param players: players to start the game with | [
"Start",
"the",
"game",
"."
] | python | train |
wadda/gps3 | gps3/agps3.py | https://github.com/wadda/gps3/blob/91adcd7073b891b135b2a46d039ce2125cf09a09/gps3/agps3.py#L87-L97 | def send(self, commands):
"""Ship commands to the daemon
Arguments:
commands: e.g., '?WATCH={{'enable':true,'json':true}}'|'?VERSION;'|'?DEVICES;'|'?DEVICE;'|'?POLL;'
"""
try:
self.streamSock.send(bytes(commands, encoding='utf-8'))
except TypeError:
self.streamSock.send(commands) # 2.7 chokes on 'bytes' and 'encoding='
except (OSError, IOError) as error: # HEY MOE, LEAVE THIS ALONE FOR NOW!
sys.stderr.write(f'\nAGPS3 send command fail with {error}\n') | [
"def",
"send",
"(",
"self",
",",
"commands",
")",
":",
"try",
":",
"self",
".",
"streamSock",
".",
"send",
"(",
"bytes",
"(",
"commands",
",",
"encoding",
"=",
"'utf-8'",
")",
")",
"except",
"TypeError",
":",
"self",
".",
"streamSock",
".",
"send",
"... | Ship commands to the daemon
Arguments:
commands: e.g., '?WATCH={{'enable':true,'json':true}}'|'?VERSION;'|'?DEVICES;'|'?DEVICE;'|'?POLL;' | [
"Ship",
"commands",
"to",
"the",
"daemon",
"Arguments",
":",
"commands",
":",
"e",
".",
"g",
".",
"?WATCH",
"=",
"{{",
"enable",
":",
"true",
"json",
":",
"true",
"}}",
"|",
"?VERSION",
";",
"|",
"?DEVICES",
";",
"|",
"?DEVICE",
";",
"|",
"?POLL",
... | python | train |
zomux/deepy | deepy/dataset/bunch_seq.py | https://github.com/zomux/deepy/blob/090fbad22a08a809b12951cd0d4984f5bd432698/deepy/dataset/bunch_seq.py#L58-L64 | def _cut_to_pieces(self, bunch_stack):
"""
:type bunch_stack: list of list of int
"""
stack_len = len(bunch_stack[0])
for i in xrange(0, stack_len, self.fragment_length):
yield np.array(map(lambda stack: stack[i: i + self.fragment_length], bunch_stack)) | [
"def",
"_cut_to_pieces",
"(",
"self",
",",
"bunch_stack",
")",
":",
"stack_len",
"=",
"len",
"(",
"bunch_stack",
"[",
"0",
"]",
")",
"for",
"i",
"in",
"xrange",
"(",
"0",
",",
"stack_len",
",",
"self",
".",
"fragment_length",
")",
":",
"yield",
"np",
... | :type bunch_stack: list of list of int | [
":",
"type",
"bunch_stack",
":",
"list",
"of",
"list",
"of",
"int"
] | python | test |
user-cont/colin | colin/core/ruleset/loader.py | https://github.com/user-cont/colin/blob/00bb80e6e91522e15361935f813e8cf13d7e76dc/colin/core/ruleset/loader.py#L118-L121 | def other_attributes(self):
""" return dict with all other data except for the described above"""
return {k: v for k, v in self.c.items() if
k not in ["name", "names", "tags", "additional_tags", "usable_targets"]} | [
"def",
"other_attributes",
"(",
"self",
")",
":",
"return",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"self",
".",
"c",
".",
"items",
"(",
")",
"if",
"k",
"not",
"in",
"[",
"\"name\"",
",",
"\"names\"",
",",
"\"tags\"",
",",
"\"additional_tag... | return dict with all other data except for the described above | [
"return",
"dict",
"with",
"all",
"other",
"data",
"except",
"for",
"the",
"described",
"above"
] | python | train |
google/grr | grr/server/grr_response_server/rdfvalues/objects.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/rdfvalues/objects.py#L460-L485 | def UpdateFrom(self, src):
"""Merge path info records.
Merges src into self.
Args:
src: An rdfvalues.objects.PathInfo record, will be merged into self.
Raises:
ValueError: If src does not represent the same path.
"""
if not isinstance(src, PathInfo):
raise TypeError("expected `%s` but got `%s`" % (PathInfo, type(src)))
if self.path_type != src.path_type:
raise ValueError(
"src [%s] does not represent the same path type as self [%s]" %
(src.path_type, self.path_type))
if self.components != src.components:
raise ValueError("src [%s] does not represent the same path as self [%s]"
% (src.components, self.components))
if src.HasField("stat_entry"):
self.stat_entry = src.stat_entry
self.last_stat_entry_timestamp = max(self.last_stat_entry_timestamp,
src.last_stat_entry_timestamp)
self.directory = self.directory or src.directory | [
"def",
"UpdateFrom",
"(",
"self",
",",
"src",
")",
":",
"if",
"not",
"isinstance",
"(",
"src",
",",
"PathInfo",
")",
":",
"raise",
"TypeError",
"(",
"\"expected `%s` but got `%s`\"",
"%",
"(",
"PathInfo",
",",
"type",
"(",
"src",
")",
")",
")",
"if",
"... | Merge path info records.
Merges src into self.
Args:
src: An rdfvalues.objects.PathInfo record, will be merged into self.
Raises:
ValueError: If src does not represent the same path. | [
"Merge",
"path",
"info",
"records",
"."
] | python | train |
intake/intake | intake/source/cache.py | https://github.com/intake/intake/blob/277b96bfdee39d8a3048ea5408c6d6716d568336/intake/source/cache.py#L238-L256 | def clear_all(self):
"""
Clears all cache and metadata.
"""
for urlpath in self._metadata.keys():
self.clear_cache(urlpath)
# Safely clean up anything else.
if not os.path.isdir(self._cache_dir):
return
for subdir in os.listdir(self._cache_dir):
try:
fn = posixpath.join(self._cache_dir, subdir)
if os.path.isdir(fn):
shutil.rmtree(fn)
if os.path.isfile(fn):
os.remove(fn)
except (OSError, IOError) as e:
logger.warning(str(e)) | [
"def",
"clear_all",
"(",
"self",
")",
":",
"for",
"urlpath",
"in",
"self",
".",
"_metadata",
".",
"keys",
"(",
")",
":",
"self",
".",
"clear_cache",
"(",
"urlpath",
")",
"# Safely clean up anything else.",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(... | Clears all cache and metadata. | [
"Clears",
"all",
"cache",
"and",
"metadata",
"."
] | python | train |
wummel/dosage | dosagelib/events.py | https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/dosagelib/events.py#L273-L276 | def comicDownloaded(self, comic, filename, text=None):
"""Add URL-to-filename mapping into JSON."""
pageInfo = self.getPageInfo(comic.name, comic.referrer)
pageInfo['images'][comic.url] = os.path.basename(filename) | [
"def",
"comicDownloaded",
"(",
"self",
",",
"comic",
",",
"filename",
",",
"text",
"=",
"None",
")",
":",
"pageInfo",
"=",
"self",
".",
"getPageInfo",
"(",
"comic",
".",
"name",
",",
"comic",
".",
"referrer",
")",
"pageInfo",
"[",
"'images'",
"]",
"[",... | Add URL-to-filename mapping into JSON. | [
"Add",
"URL",
"-",
"to",
"-",
"filename",
"mapping",
"into",
"JSON",
"."
] | python | train |
inspirehep/inspire-dojson | inspire_dojson/hep/rules/bd9xx.py | https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/hep/rules/bd9xx.py#L140-L186 | def document_type(self, key, value):
"""Populate the ``document_type`` key.
Also populates the ``_collections``, ``citeable``, ``core``, ``deleted``,
``refereed``, ``publication_type``, and ``withdrawn`` keys through side
effects.
"""
schema = load_schema('hep')
publication_type_schema = schema['properties']['publication_type']
valid_publication_types = publication_type_schema['items']['enum']
document_type = self.get('document_type', [])
publication_type = self.get('publication_type', [])
a_values = force_list(value.get('a'))
for a_value in a_values:
normalized_a_value = a_value.strip().lower()
if normalized_a_value == 'arxiv':
continue # XXX: ignored.
elif normalized_a_value == 'citeable':
self['citeable'] = True
elif normalized_a_value == 'core':
self['core'] = True
elif normalized_a_value == 'noncore':
self['core'] = False
elif normalized_a_value == 'published':
self['refereed'] = True
elif normalized_a_value == 'withdrawn':
self['withdrawn'] = True
elif normalized_a_value == 'deleted':
self['deleted'] = True
elif normalized_a_value in COLLECTIONS_MAP:
self.setdefault('_collections', []).append(COLLECTIONS_MAP[normalized_a_value])
elif normalized_a_value in DOCUMENT_TYPE_MAP:
document_type.append(DOCUMENT_TYPE_MAP[normalized_a_value])
elif normalized_a_value in valid_publication_types:
publication_type.append(normalized_a_value)
c_value = force_single_element(value.get('c', ''))
normalized_c_value = c_value.strip().lower()
if normalized_c_value == 'deleted':
self['deleted'] = True
self['publication_type'] = publication_type
return document_type | [
"def",
"document_type",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"schema",
"=",
"load_schema",
"(",
"'hep'",
")",
"publication_type_schema",
"=",
"schema",
"[",
"'properties'",
"]",
"[",
"'publication_type'",
"]",
"valid_publication_types",
"=",
"publica... | Populate the ``document_type`` key.
Also populates the ``_collections``, ``citeable``, ``core``, ``deleted``,
``refereed``, ``publication_type``, and ``withdrawn`` keys through side
effects. | [
"Populate",
"the",
"document_type",
"key",
"."
] | python | train |
raphaelvallat/pingouin | pingouin/pairwise.py | https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/pairwise.py#L725-L1032 | def pairwise_corr(data, columns=None, covar=None, tail='two-sided',
method='pearson', padjust='none', export_filename=None):
'''Pairwise (partial) correlations between columns of a pandas dataframe.
Parameters
----------
data : pandas DataFrame
DataFrame. Note that this function can also directly be used as a
Pandas method, in which case this argument is no longer needed.
columns : list or str
Column names in data ::
'["a", "b", "c"]' : combination between columns a, b, and c
'["a"]' : product between a and all the other numeric columns
'[["a"], ["b", "c"]]' : product between ["a"] and ["b", "c"]
'[["a", "d"], ["b", "c"]]' : product between ["a", "d"] and ["b", "c"]
'[["a", "d"], None]' : product between ["a", "d"] and all other columns
Note that if column is not specified, then the function will return the
pairwise correlation between the combination of all the numeric columns
in data. See the examples section for more details on this.
covar : None, string or list
Covariate(s) for partial correlation. Must be one or more columns
in data. Use a list if there are more than one covariate. If
``covar`` is not None, a partial correlation will be computed using
:py:func:`pingouin.partial_corr` function.
tail : string
Indicates whether to return the 'two-sided' or 'one-sided' p-values
method : string
Specify which method to use for the computation of the correlation
coefficient. Available methods are ::
'pearson' : Pearson product-moment correlation
'spearman' : Spearman rank-order correlation
'kendall' : Kendall’s tau (ordinal data)
'percbend' : percentage bend correlation (robust)
'shepherd' : Shepherd's pi correlation (robust Spearman)
padjust : string
Method used for testing and adjustment of pvalues.
Available methods are ::
'none' : no correction
'bonferroni' : one-step Bonferroni correction
'holm' : step-down method using Bonferroni adjustments
'fdr_bh' : Benjamini/Hochberg FDR correction
'fdr_by' : Benjamini/Yekutieli FDR correction
export_filename : string
Filename (without extension) for the output file.
If None, do not export the table.
By default, the file will be created in the current python console
directory. To change that, specify the filename with full path.
Returns
-------
stats : DataFrame
Stats summary ::
'X' : Name(s) of first columns
'Y' : Name(s) of second columns
'method' : method used to compute the correlation
'covar' : List of specified covariate(s) (only for partial correlation)
'tail' : indicates whether the p-values are one-sided or two-sided
'n' : Sample size (after NaN removal)
'r' : Correlation coefficients
'CI95' : 95% parametric confidence intervals
'r2' : R-squared values
'adj_r2' : Adjusted R-squared values
'z' : Standardized correlation coefficients
'p-unc' : uncorrected one or two tailed p-values
'p-corr' : corrected one or two tailed p-values
'p-adjust' : Correction method
Notes
-----
Please refer to the :py:func:`pingouin.corr()` function for a description
of the different methods. NaN are automatically removed from the data.
This function is more flexible and gives a much more detailed
output than the :py:func:`pandas.DataFrame.corr()` method (i.e. p-values,
confidence interval, Bayes Factor..). This comes however at
an increased computational cost. While this should not be discernible for
dataframe with less than 10,000 rows and/or less than 20 columns, this
function can be slow for very large dataset. For speed purpose, the Bayes
Factor is only computed when the sample size is less than 1000
(and method='pearson').
This function also works with two-dimensional multi-index columns. In this
case, columns must be list(s) of tuple(s). See the Jupyter notebook
for more details:
https://github.com/raphaelvallat/pingouin/blob/master/notebooks/04_Correlations.ipynb
If ``covar`` is specified, this function will compute the pairwise partial
correlation between the variables. If you are only interested in computing
the partial correlation matrix (i.e. the raw pairwise partial correlation
coefficient matrix, without the p-values, sample sizes, etc), a better
alternative is to use the :py:func:`pingouin.pcorr` function (see
example 7).
Examples
--------
1. One-tailed spearman correlation corrected for multiple comparisons
>>> from pingouin import pairwise_corr, read_dataset
>>> data = read_dataset('pairwise_corr').iloc[:, 1:]
>>> pairwise_corr(data, method='spearman', tail='two-sided',
... padjust='bonf') # doctest: +SKIP
2. Robust two-sided correlation with uncorrected p-values
>>> pcor = pairwise_corr(data, columns=['Openness', 'Extraversion',
... 'Neuroticism'], method='percbend')
3. One-versus-all pairwise correlations
>>> pairwise_corr(data, columns=['Neuroticism']) # doctest: +SKIP
4. Pairwise correlations between two lists of columns (cartesian product)
>>> columns = [['Neuroticism', 'Extraversion'], ['Openness']]
>>> pairwise_corr(data, columns) # doctest: +SKIP
5. As a Pandas method
>>> pcor = data.pairwise_corr(covar='Neuroticism', method='spearman')
6. Pairwise partial correlation
>>> pcor = pairwise_corr(data, covar='Neuroticism') # One covariate
>>> pcor = pairwise_corr(data, covar=['Neuroticism', 'Openness']) # Two
7. Pairwise partial correlation matrix (only the r-values)
>>> data[['Neuroticism', 'Openness', 'Extraversion']].pcorr()
Neuroticism Openness Extraversion
Neuroticism 1.000000 0.092097 -0.360421
Openness 0.092097 1.000000 0.281312
Extraversion -0.360421 0.281312 1.000000
'''
from pingouin.correlation import corr, partial_corr
if tail not in ['one-sided', 'two-sided']:
raise ValueError('Tail not recognized')
# Keep only numeric columns
data = data._get_numeric_data()
# Remove columns with constant value and/or NaN
data = data.loc[:, data.nunique(dropna=True) >= 2]
# Extract columns names
keys = data.columns.tolist()
# First ensure that columns is a list
if isinstance(columns, (str, tuple)):
columns = [columns]
def traverse(o, tree_types=(list, tuple)):
"""Helper function to flatten nested lists.
From https://stackoverflow.com/a/6340578
"""
if isinstance(o, tree_types):
for value in o:
for subvalue in traverse(value, tree_types):
yield subvalue
else:
yield o
# Check if columns index has multiple levels
if isinstance(data.columns, pd.core.index.MultiIndex):
multi_index = True
if columns is not None:
# Simple List with one element: [('L0', 'L1')]
# Simple list with >= 2 elements: [('L0', 'L1'), ('L0', 'L2')]
# Nested lists: [[('L0', 'L1')], ...] or [..., [('L0', 'L1')]]
col_flatten = list(traverse(columns, tree_types=list))
assert all(isinstance(c, (tuple, type(None))) for c in col_flatten)
else:
multi_index = False
# Then define combinations / products between columns
if columns is None:
# Case A: column is not defined --> corr between all numeric columns
combs = list(combinations(keys, 2))
else:
# Case B: column is specified
if isinstance(columns[0], list):
group1 = [e for e in columns[0] if e in keys]
# Assert that column is two-dimensional
if len(columns) == 1:
columns.append(None)
if isinstance(columns[1], list) and len(columns[1]):
# B1: [['a', 'b'], ['c', 'd']]
group2 = [e for e in columns[1] if e in keys]
else:
# B2: [['a', 'b']], [['a', 'b'], None] or [['a', 'b'], 'all']
group2 = [e for e in keys if e not in group1]
combs = list(product(group1, group2))
else:
# Column is a simple list
if len(columns) == 1:
# Case B3: one-versus-all, e.g. ['a'] or 'a'
# Check that this column exist
if columns[0] not in keys:
msg = ('"%s" is not in data or is not numeric.'
% columns[0])
raise ValueError(msg)
others = [e for e in keys if e != columns[0]]
combs = list(product(columns, others))
else:
# Combinations between all specified columns ['a', 'b', 'c']
# Make sure that we keep numeric columns
columns = [c for c in columns if c in keys]
if len(columns) == 1:
# If only one-column is left, equivalent to ['a']
others = [e for e in keys if e != columns[0]]
combs = list(product(columns, others))
else:
# combinations between ['a', 'b', 'c']
combs = list(combinations(columns, 2))
combs = np.array(combs)
if len(combs) == 0:
raise ValueError("No column combination found. Please make sure that "
"the specified columns exist in the dataframe, are "
"numeric, and contains at least two unique values.")
# Initialize empty dataframe
if multi_index:
X = list(zip(combs[:, 0, 0], combs[:, 0, 1]))
Y = list(zip(combs[:, 1, 0], combs[:, 1, 1]))
else:
X = combs[:, 0]
Y = combs[:, 1]
stats = pd.DataFrame({'X': X, 'Y': Y, 'method': method, 'tail': tail},
index=range(len(combs)),
columns=['X', 'Y', 'method', 'tail', 'n', 'outliers',
'r', 'CI95%', 'r2', 'adj_r2', 'p-val',
'BF10', 'power'])
# Now we check if covariates are present
if covar is not None:
assert isinstance(covar, (str, list)), 'covar must be list or string.'
if isinstance(covar, str):
covar = [covar]
# Check that columns exist and are numeric
assert all([c in keys for c in covar]), 'covar not in data or not num.'
# And we make sure that X or Y does not contain covar
stats = stats[~stats[['X', 'Y']].isin(covar).any(1)]
stats = stats.reset_index(drop=True)
if stats.shape[0] == 0:
raise ValueError("No column combination found. Please make sure "
"that the specified columns and covar exist in "
"the dataframe, are numeric, and contains at "
"least two unique values.")
# Compute pairwise correlations and fill dataframe
dvs = ['n', 'r', 'CI95%', 'r2', 'adj_r2', 'p-val', 'power']
dvs_out = dvs + ['outliers']
dvs_bf10 = dvs + ['BF10']
for i in range(stats.shape[0]):
col1, col2 = stats.loc[i, 'X'], stats.loc[i, 'Y']
if covar is None:
cor_st = corr(data[col1].values, data[col2].values, tail=tail,
method=method)
else:
cor_st = partial_corr(data=data, x=col1, y=col2, covar=covar,
tail=tail, method=method)
cor_st_keys = cor_st.columns.tolist()
if 'BF10' in cor_st_keys:
stats.loc[i, dvs_bf10] = cor_st[dvs_bf10].values
elif 'outliers' in cor_st_keys:
stats.loc[i, dvs_out] = cor_st[dvs_out].values
else:
stats.loc[i, dvs] = cor_st[dvs].values
# Force conversion to numeric
stats = stats.astype({'r': float, 'r2': float, 'adj_r2': float,
'n': int, 'p-val': float, 'outliers': float,
'power': float})
# Multiple comparisons
stats = stats.rename(columns={'p-val': 'p-unc'})
padjust = None if stats['p-unc'].size <= 1 else padjust
if padjust is not None:
if padjust.lower() != 'none':
reject, stats['p-corr'] = multicomp(stats['p-unc'].values,
method=padjust)
stats['p-adjust'] = padjust
else:
stats['p-corr'] = None
stats['p-adjust'] = None
# Standardize correlation coefficients (Fisher z-transformation)
stats['z'] = np.round(np.arctanh(stats['r'].values), 3)
col_order = ['X', 'Y', 'method', 'tail', 'n', 'outliers', 'r', 'CI95%',
'r2', 'adj_r2', 'z', 'p-unc', 'p-corr', 'p-adjust',
'BF10', 'power']
# Reorder columns and remove empty ones
stats = stats.reindex(columns=col_order)
stats = stats.dropna(how='all', axis=1)
# Add covariates names if present
if covar is not None:
stats.insert(loc=3, column='covar', value=str(covar))
if export_filename is not None:
_export_table(stats, export_filename)
return stats | [
"def",
"pairwise_corr",
"(",
"data",
",",
"columns",
"=",
"None",
",",
"covar",
"=",
"None",
",",
"tail",
"=",
"'two-sided'",
",",
"method",
"=",
"'pearson'",
",",
"padjust",
"=",
"'none'",
",",
"export_filename",
"=",
"None",
")",
":",
"from",
"pingouin... | Pairwise (partial) correlations between columns of a pandas dataframe.
Parameters
----------
data : pandas DataFrame
DataFrame. Note that this function can also directly be used as a
Pandas method, in which case this argument is no longer needed.
columns : list or str
Column names in data ::
'["a", "b", "c"]' : combination between columns a, b, and c
'["a"]' : product between a and all the other numeric columns
'[["a"], ["b", "c"]]' : product between ["a"] and ["b", "c"]
'[["a", "d"], ["b", "c"]]' : product between ["a", "d"] and ["b", "c"]
'[["a", "d"], None]' : product between ["a", "d"] and all other columns
Note that if column is not specified, then the function will return the
pairwise correlation between the combination of all the numeric columns
in data. See the examples section for more details on this.
covar : None, string or list
Covariate(s) for partial correlation. Must be one or more columns
in data. Use a list if there are more than one covariate. If
``covar`` is not None, a partial correlation will be computed using
:py:func:`pingouin.partial_corr` function.
tail : string
Indicates whether to return the 'two-sided' or 'one-sided' p-values
method : string
Specify which method to use for the computation of the correlation
coefficient. Available methods are ::
'pearson' : Pearson product-moment correlation
'spearman' : Spearman rank-order correlation
'kendall' : Kendall’s tau (ordinal data)
'percbend' : percentage bend correlation (robust)
'shepherd' : Shepherd's pi correlation (robust Spearman)
padjust : string
Method used for testing and adjustment of pvalues.
Available methods are ::
'none' : no correction
'bonferroni' : one-step Bonferroni correction
'holm' : step-down method using Bonferroni adjustments
'fdr_bh' : Benjamini/Hochberg FDR correction
'fdr_by' : Benjamini/Yekutieli FDR correction
export_filename : string
Filename (without extension) for the output file.
If None, do not export the table.
By default, the file will be created in the current python console
directory. To change that, specify the filename with full path.
Returns
-------
stats : DataFrame
Stats summary ::
'X' : Name(s) of first columns
'Y' : Name(s) of second columns
'method' : method used to compute the correlation
'covar' : List of specified covariate(s) (only for partial correlation)
'tail' : indicates whether the p-values are one-sided or two-sided
'n' : Sample size (after NaN removal)
'r' : Correlation coefficients
'CI95' : 95% parametric confidence intervals
'r2' : R-squared values
'adj_r2' : Adjusted R-squared values
'z' : Standardized correlation coefficients
'p-unc' : uncorrected one or two tailed p-values
'p-corr' : corrected one or two tailed p-values
'p-adjust' : Correction method
Notes
-----
Please refer to the :py:func:`pingouin.corr()` function for a description
of the different methods. NaN are automatically removed from the data.
This function is more flexible and gives a much more detailed
output than the :py:func:`pandas.DataFrame.corr()` method (i.e. p-values,
confidence interval, Bayes Factor..). This comes however at
an increased computational cost. While this should not be discernible for
dataframe with less than 10,000 rows and/or less than 20 columns, this
function can be slow for very large dataset. For speed purpose, the Bayes
Factor is only computed when the sample size is less than 1000
(and method='pearson').
This function also works with two-dimensional multi-index columns. In this
case, columns must be list(s) of tuple(s). See the Jupyter notebook
for more details:
https://github.com/raphaelvallat/pingouin/blob/master/notebooks/04_Correlations.ipynb
If ``covar`` is specified, this function will compute the pairwise partial
correlation between the variables. If you are only interested in computing
the partial correlation matrix (i.e. the raw pairwise partial correlation
coefficient matrix, without the p-values, sample sizes, etc), a better
alternative is to use the :py:func:`pingouin.pcorr` function (see
example 7).
Examples
--------
1. One-tailed spearman correlation corrected for multiple comparisons
>>> from pingouin import pairwise_corr, read_dataset
>>> data = read_dataset('pairwise_corr').iloc[:, 1:]
>>> pairwise_corr(data, method='spearman', tail='two-sided',
... padjust='bonf') # doctest: +SKIP
2. Robust two-sided correlation with uncorrected p-values
>>> pcor = pairwise_corr(data, columns=['Openness', 'Extraversion',
... 'Neuroticism'], method='percbend')
3. One-versus-all pairwise correlations
>>> pairwise_corr(data, columns=['Neuroticism']) # doctest: +SKIP
4. Pairwise correlations between two lists of columns (cartesian product)
>>> columns = [['Neuroticism', 'Extraversion'], ['Openness']]
>>> pairwise_corr(data, columns) # doctest: +SKIP
5. As a Pandas method
>>> pcor = data.pairwise_corr(covar='Neuroticism', method='spearman')
6. Pairwise partial correlation
>>> pcor = pairwise_corr(data, covar='Neuroticism') # One covariate
>>> pcor = pairwise_corr(data, covar=['Neuroticism', 'Openness']) # Two
7. Pairwise partial correlation matrix (only the r-values)
>>> data[['Neuroticism', 'Openness', 'Extraversion']].pcorr()
Neuroticism Openness Extraversion
Neuroticism 1.000000 0.092097 -0.360421
Openness 0.092097 1.000000 0.281312
Extraversion -0.360421 0.281312 1.000000 | [
"Pairwise",
"(",
"partial",
")",
"correlations",
"between",
"columns",
"of",
"a",
"pandas",
"dataframe",
"."
] | python | train |
jrxFive/python-nomad | nomad/api/client.py | https://github.com/jrxFive/python-nomad/blob/37df37e4de21e6f8ac41c6154e7f1f44f1800020/nomad/api/client.py#L43-L59 | def list_files(self, id=None, path="/"):
""" List files in an allocation directory.
https://www.nomadproject.io/docs/http/client-fs-ls.html
arguments:
- id
- path
returns: list
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
if id:
return self.request(id, params={"path": path}, method="get").json()
else:
return self.request(params={"path": path}, method="get").json() | [
"def",
"list_files",
"(",
"self",
",",
"id",
"=",
"None",
",",
"path",
"=",
"\"/\"",
")",
":",
"if",
"id",
":",
"return",
"self",
".",
"request",
"(",
"id",
",",
"params",
"=",
"{",
"\"path\"",
":",
"path",
"}",
",",
"method",
"=",
"\"get\"",
")"... | List files in an allocation directory.
https://www.nomadproject.io/docs/http/client-fs-ls.html
arguments:
- id
- path
returns: list
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException | [
"List",
"files",
"in",
"an",
"allocation",
"directory",
"."
] | python | test |
LonamiWebs/Telethon | telethon/sessions/sqlite.py | https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/sessions/sqlite.py#L221-L230 | def _execute(self, stmt, *values):
"""
Gets a cursor, executes `stmt` and closes the cursor,
fetching one row afterwards and returning its result.
"""
c = self._cursor()
try:
return c.execute(stmt, values).fetchone()
finally:
c.close() | [
"def",
"_execute",
"(",
"self",
",",
"stmt",
",",
"*",
"values",
")",
":",
"c",
"=",
"self",
".",
"_cursor",
"(",
")",
"try",
":",
"return",
"c",
".",
"execute",
"(",
"stmt",
",",
"values",
")",
".",
"fetchone",
"(",
")",
"finally",
":",
"c",
"... | Gets a cursor, executes `stmt` and closes the cursor,
fetching one row afterwards and returning its result. | [
"Gets",
"a",
"cursor",
"executes",
"stmt",
"and",
"closes",
"the",
"cursor",
"fetching",
"one",
"row",
"afterwards",
"and",
"returning",
"its",
"result",
"."
] | python | train |
SheffieldML/GPyOpt | GPyOpt/interface/driver.py | https://github.com/SheffieldML/GPyOpt/blob/255539dc5927819ca701e44fe3d76cd4864222fa/GPyOpt/interface/driver.py#L100-L119 | def run(self):
"""
Runs the optimization using the previously loaded elements.
"""
space = self._get_space()
obj_func = self._get_obj(space)
model = self._get_model()
acq = self._get_acquisition(model, space)
acq_eval = self._get_acq_evaluator(acq)
from ..experiment_design import initial_design
X_init = initial_design(self.config['initialization']['type'], space, self.config['initialization']['num-eval'])
from ..methods import ModularBayesianOptimization
bo = ModularBayesianOptimization(model, space, obj_func, acq, acq_eval, X_init)
bo.run_optimization(max_iter = self.config['resources']['maximum-iterations'], max_time = self.config['resources']['max-run-time'] if self.config['resources']['max-run-time']!="NA" else np.inf,
eps = self.config['resources']['tolerance'], verbosity=True)
return bo | [
"def",
"run",
"(",
"self",
")",
":",
"space",
"=",
"self",
".",
"_get_space",
"(",
")",
"obj_func",
"=",
"self",
".",
"_get_obj",
"(",
"space",
")",
"model",
"=",
"self",
".",
"_get_model",
"(",
")",
"acq",
"=",
"self",
".",
"_get_acquisition",
"(",
... | Runs the optimization using the previously loaded elements. | [
"Runs",
"the",
"optimization",
"using",
"the",
"previously",
"loaded",
"elements",
"."
] | python | train |
tanghaibao/goatools | goatools/anno/init/reader_gaf.py | https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/anno/init/reader_gaf.py#L265-L278 | def prt_error_summary(self, fout_err):
"""Print a summary about the GAF file that was read."""
# Get summary of error types and their counts
errcnts = []
if self.ignored:
errcnts.append(" {N:9,} IGNORED associations\n".format(N=len(self.ignored)))
if self.illegal_lines:
for err_name, errors in self.illegal_lines.items():
errcnts.append(" {N:9,} {ERROR}\n".format(N=len(errors), ERROR=err_name))
# Save error details into a log file
fout_log = self._wrlog_details_illegal_gaf(fout_err, errcnts)
sys.stdout.write(" WROTE GAF ERROR LOG: {LOG}:\n".format(LOG=fout_log))
for err_cnt in errcnts:
sys.stdout.write(err_cnt) | [
"def",
"prt_error_summary",
"(",
"self",
",",
"fout_err",
")",
":",
"# Get summary of error types and their counts",
"errcnts",
"=",
"[",
"]",
"if",
"self",
".",
"ignored",
":",
"errcnts",
".",
"append",
"(",
"\" {N:9,} IGNORED associations\\n\"",
".",
"format",
"(... | Print a summary about the GAF file that was read. | [
"Print",
"a",
"summary",
"about",
"the",
"GAF",
"file",
"that",
"was",
"read",
"."
] | python | train |
mlperf/training | rnn_translator/pytorch/seq2seq/data/sampler.py | https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/rnn_translator/pytorch/seq2seq/data/sampler.py#L69-L81 | def reshuffle_batches(self, indices, rng):
"""
Permutes global batches
:param indices: torch.tensor with batch indices
:param rng: instance of torch.Generator
"""
indices = indices.view(-1, self.global_batch_size)
num_batches = indices.shape[0]
order = torch.randperm(num_batches, generator=rng)
indices = indices[order, :]
indices = indices.view(-1)
return indices | [
"def",
"reshuffle_batches",
"(",
"self",
",",
"indices",
",",
"rng",
")",
":",
"indices",
"=",
"indices",
".",
"view",
"(",
"-",
"1",
",",
"self",
".",
"global_batch_size",
")",
"num_batches",
"=",
"indices",
".",
"shape",
"[",
"0",
"]",
"order",
"=",
... | Permutes global batches
:param indices: torch.tensor with batch indices
:param rng: instance of torch.Generator | [
"Permutes",
"global",
"batches"
] | python | train |
facebook/watchman | getdeps.py | https://github.com/facebook/watchman/blob/d416c249dd8f463dc69fc2691d0f890598c045a9/getdeps.py#L285-L294 | def vcpkg_dir():
""" Figure out where vcpkg is installed.
vcpkg-exported is populated in some flavors of FB internal builds.
C:/tools/vcpkg is the appveyor location.
C:/open/vcpkg is my local location.
"""
for p in ["vcpkg-exported", "C:/tools/vcpkg", "C:/open/vcpkg"]:
if os.path.isdir(p):
return os.path.realpath(p)
raise Exception("cannot find vcpkg") | [
"def",
"vcpkg_dir",
"(",
")",
":",
"for",
"p",
"in",
"[",
"\"vcpkg-exported\"",
",",
"\"C:/tools/vcpkg\"",
",",
"\"C:/open/vcpkg\"",
"]",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"p",
")",
":",
"return",
"os",
".",
"path",
".",
"realpath",
"(",... | Figure out where vcpkg is installed.
vcpkg-exported is populated in some flavors of FB internal builds.
C:/tools/vcpkg is the appveyor location.
C:/open/vcpkg is my local location. | [
"Figure",
"out",
"where",
"vcpkg",
"is",
"installed",
".",
"vcpkg",
"-",
"exported",
"is",
"populated",
"in",
"some",
"flavors",
"of",
"FB",
"internal",
"builds",
".",
"C",
":",
"/",
"tools",
"/",
"vcpkg",
"is",
"the",
"appveyor",
"location",
".",
"C",
... | python | train |
hyperledger/sawtooth-core | cli/sawtooth_cli/network_command/compare.py | https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/cli/sawtooth_cli/network_command/compare.py#L187-L237 | def print_summary(graph, tails, node_id_map):
"""Print out summary and per-node comparison data."""
# Get comparison data
heads = get_heads(tails)
heights = get_heights(tails)
max_height = max(heights)
common_height, block_ids_at_common_height = get_common_height(tails)
lags = get_lags(heights, max_height)
common_ancestor = graph.root
divergences = get_divergences(heights, graph.root)
# Print summary info
col_1 = 8
col_n = 8
format_str = '{:<' + str(col_1) + '} ' + ('{:<' + str(col_n) + '} ') * 2
header = format_str.format("COMMON", "HEIGHT", "BLOCKS")
print(header)
print("-" * len(header))
print(format_str.format(
"ANCESTOR", common_ancestor.num, common_ancestor.ident[:col_n]))
print(format_str.format(
"HEIGHT", common_height, str(block_ids_at_common_height)))
print()
# Print per-node data
node_col_width = get_col_width_for_num(len(tails), len("NODE"))
num_col_width = get_col_width_for_num(max_height, len("HEIGHT"))
lag_col_width = get_col_width_for_num(max(lags), len("LAG"))
diverg_col_width = get_col_width_for_num(max(divergences), len("DIVERG"))
format_str = (
'{:<' + str(node_col_width) + '} '
'{:<8} '
'{:<' + str(num_col_width) + '} '
'{:<' + str(lag_col_width) + '} '
'{:<' + str(diverg_col_width) + '}'
)
header = format_str.format("NODE", "HEAD", "HEIGHT", "LAG", "DIVERG")
print(header)
print('-' * len(header))
for i, _ in enumerate(tails):
print(format_str.format(
node_id_map[i],
heads[i],
heights[i],
lags[i],
divergences[i],
))
print() | [
"def",
"print_summary",
"(",
"graph",
",",
"tails",
",",
"node_id_map",
")",
":",
"# Get comparison data",
"heads",
"=",
"get_heads",
"(",
"tails",
")",
"heights",
"=",
"get_heights",
"(",
"tails",
")",
"max_height",
"=",
"max",
"(",
"heights",
")",
"common_... | Print out summary and per-node comparison data. | [
"Print",
"out",
"summary",
"and",
"per",
"-",
"node",
"comparison",
"data",
"."
] | python | train |
raiden-network/raiden | raiden/blockchain/events.py | https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/blockchain/events.py#L50-L78 | def get_contract_events(
chain: BlockChainService,
abi: Dict,
contract_address: Address,
topics: Optional[List[str]],
from_block: BlockSpecification,
to_block: BlockSpecification,
) -> List[Dict]:
""" Query the blockchain for all events of the smart contract at
`contract_address` that match the filters `topics`, `from_block`, and
`to_block`.
"""
verify_block_number(from_block, 'from_block')
verify_block_number(to_block, 'to_block')
events = chain.client.get_filter_events(
contract_address,
topics=topics,
from_block=from_block,
to_block=to_block,
)
result = []
for event in events:
decoded_event = dict(decode_event(abi, event))
if event.get('blockNumber'):
decoded_event['block_number'] = event['blockNumber']
del decoded_event['blockNumber']
result.append(decoded_event)
return result | [
"def",
"get_contract_events",
"(",
"chain",
":",
"BlockChainService",
",",
"abi",
":",
"Dict",
",",
"contract_address",
":",
"Address",
",",
"topics",
":",
"Optional",
"[",
"List",
"[",
"str",
"]",
"]",
",",
"from_block",
":",
"BlockSpecification",
",",
"to_... | Query the blockchain for all events of the smart contract at
`contract_address` that match the filters `topics`, `from_block`, and
`to_block`. | [
"Query",
"the",
"blockchain",
"for",
"all",
"events",
"of",
"the",
"smart",
"contract",
"at",
"contract_address",
"that",
"match",
"the",
"filters",
"topics",
"from_block",
"and",
"to_block",
"."
] | python | train |
Scoppio/RagnarokEngine3 | RagnarokEngine3/RE3.py | https://github.com/Scoppio/RagnarokEngine3/blob/4395d419ccd64fe9327c41f200b72ee0176ad896/RagnarokEngine3/RE3.py#L1306-L1308 | def center_origin(self):
"""Sets the origin to the center of the image."""
self.set_origin(Vector2(self.image.get_width() / 2.0, self.image.get_height() / 2.0)) | [
"def",
"center_origin",
"(",
"self",
")",
":",
"self",
".",
"set_origin",
"(",
"Vector2",
"(",
"self",
".",
"image",
".",
"get_width",
"(",
")",
"/",
"2.0",
",",
"self",
".",
"image",
".",
"get_height",
"(",
")",
"/",
"2.0",
")",
")"
] | Sets the origin to the center of the image. | [
"Sets",
"the",
"origin",
"to",
"the",
"center",
"of",
"the",
"image",
"."
] | python | train |
jic-dtool/dtoolcore | dtoolcore/storagebroker.py | https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/storagebroker.py#L296-L304 | def _document_structure(self):
"""Document the structure of the dataset."""
logger.debug("Documenting dataset structure")
key = self.get_structure_key()
text = json.dumps(self._structure_parameters, indent=2, sort_keys=True)
self.put_text(key, text)
key = self.get_dtool_readme_key()
self.put_text(key, self._dtool_readme_txt) | [
"def",
"_document_structure",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"\"Documenting dataset structure\"",
")",
"key",
"=",
"self",
".",
"get_structure_key",
"(",
")",
"text",
"=",
"json",
".",
"dumps",
"(",
"self",
".",
"_structure_parameters",
",... | Document the structure of the dataset. | [
"Document",
"the",
"structure",
"of",
"the",
"dataset",
"."
] | python | train |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/distribute-0.6.31-py2.7.egg/pkg_resources.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/distribute-0.6.31-py2.7.egg/pkg_resources.py#L614-L690 | def find_plugins(self,
plugin_env, full_env=None, installer=None, fallback=True
):
"""Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
map(working_set.add, distributions) # add plugins+libs to sys.path
print 'Could not load', errors # display errors
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
directories. The `full_env`, if supplied, should be an ``Environment``
contains all currently-available distributions. If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance.
"""
plugin_projects = list(plugin_env)
plugin_projects.sort() # scan project names in alphabetic order
error_info = {}
distributions = {}
if full_env is None:
env = Environment(self.entries)
env += plugin_env
else:
env = full_env + plugin_env
shadow_set = self.__class__([])
map(shadow_set.add, self) # put all our entries in shadow_set
for project_name in plugin_projects:
for dist in plugin_env[project_name]:
req = [dist.as_requirement()]
try:
resolvees = shadow_set.resolve(req, env, installer)
except ResolutionError,v:
error_info[dist] = v # save error info
if fallback:
continue # try the next older version of project
else:
break # give up on this project, keep going
else:
map(shadow_set.add, resolvees)
distributions.update(dict.fromkeys(resolvees))
# success, no need to try any more versions of this project
break
distributions = list(distributions)
distributions.sort()
return distributions, error_info | [
"def",
"find_plugins",
"(",
"self",
",",
"plugin_env",
",",
"full_env",
"=",
"None",
",",
"installer",
"=",
"None",
",",
"fallback",
"=",
"True",
")",
":",
"plugin_projects",
"=",
"list",
"(",
"plugin_env",
")",
"plugin_projects",
".",
"sort",
"(",
")",
... | Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
map(working_set.add, distributions) # add plugins+libs to sys.path
print 'Could not load', errors # display errors
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
directories. The `full_env`, if supplied, should be an ``Environment``
contains all currently-available distributions. If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance. | [
"Find",
"all",
"activatable",
"distributions",
"in",
"plugin_env"
] | python | test |
user-cont/conu | conu/utils/__init__.py | https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/utils/__init__.py#L310-L332 | def graceful_get(d, *args):
"""
Obtain values from dicts and lists gracefully. Example:
::
print(graceful_get({"a": [{1: 2}, {"b": "c"}]}, "a", "b"))
c
:param d: collection (usually a dict or list)
:param args: list of keys which are used as a lookup
:return: the value from your collection
"""
if not d:
return d
value = d
for arg in args:
try:
value = value[arg]
except (IndexError, KeyError, AttributeError, TypeError) as ex:
logger.debug("exception while getting a value %r from %s", ex, str(value)[:32])
return None
return value | [
"def",
"graceful_get",
"(",
"d",
",",
"*",
"args",
")",
":",
"if",
"not",
"d",
":",
"return",
"d",
"value",
"=",
"d",
"for",
"arg",
"in",
"args",
":",
"try",
":",
"value",
"=",
"value",
"[",
"arg",
"]",
"except",
"(",
"IndexError",
",",
"KeyError... | Obtain values from dicts and lists gracefully. Example:
::
print(graceful_get({"a": [{1: 2}, {"b": "c"}]}, "a", "b"))
c
:param d: collection (usually a dict or list)
:param args: list of keys which are used as a lookup
:return: the value from your collection | [
"Obtain",
"values",
"from",
"dicts",
"and",
"lists",
"gracefully",
".",
"Example",
":"
] | python | train |
libtcod/python-tcod | tcod/bsp.py | https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/bsp.py#L260-L272 | def find_node(self, x: int, y: int) -> Optional["BSP"]:
"""Return the deepest node which contains these coordinates.
Returns:
Optional[BSP]: BSP object or None.
"""
if not self.contains(x, y):
return None
for child in self.children:
found = child.find_node(x, y) # type: Optional["BSP"]
if found:
return found
return self | [
"def",
"find_node",
"(",
"self",
",",
"x",
":",
"int",
",",
"y",
":",
"int",
")",
"->",
"Optional",
"[",
"\"BSP\"",
"]",
":",
"if",
"not",
"self",
".",
"contains",
"(",
"x",
",",
"y",
")",
":",
"return",
"None",
"for",
"child",
"in",
"self",
".... | Return the deepest node which contains these coordinates.
Returns:
Optional[BSP]: BSP object or None. | [
"Return",
"the",
"deepest",
"node",
"which",
"contains",
"these",
"coordinates",
"."
] | python | train |
PmagPy/PmagPy | dialogs/pmag_gui_menu3.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_gui_menu3.py#L171-L183 | def on_clear(self, event):
"""
initialize window to allow user to empty the working directory
"""
dia = pmag_menu_dialogs.ClearWD(self.parent, self.parent.WD)
clear = dia.do_clear()
if clear:
# clear directory, but use previously acquired data_model
if self.data_model_num == 2.5:
self.parent.er_magic = builder.ErMagicBuilder(self.parent.WD, self.parent.er_magic.data_model)
elif self.data_model_num == 3:
self.parent.contribution = cb.Contribution(self.parent.WD,
dmodel=self.parent.contribution.data_model) | [
"def",
"on_clear",
"(",
"self",
",",
"event",
")",
":",
"dia",
"=",
"pmag_menu_dialogs",
".",
"ClearWD",
"(",
"self",
".",
"parent",
",",
"self",
".",
"parent",
".",
"WD",
")",
"clear",
"=",
"dia",
".",
"do_clear",
"(",
")",
"if",
"clear",
":",
"# ... | initialize window to allow user to empty the working directory | [
"initialize",
"window",
"to",
"allow",
"user",
"to",
"empty",
"the",
"working",
"directory"
] | python | train |
dropseed/configyaml | configyaml/config/base.py | https://github.com/dropseed/configyaml/blob/d008f251530d054c2d1fb3e8ac1a9030436134c8/configyaml/config/base.py#L173-L179 | def _validate_type(self): # type: () -> None
"""Validation to ensure value is the correct type"""
if not isinstance(self._value, self._type):
title = '{} has an invalid type'.format(self._key_name())
description = '{} must be a {}'.format(self._key_name(), self._type.__name__)
self._add_error(title=title, description=description) | [
"def",
"_validate_type",
"(",
"self",
")",
":",
"# type: () -> None",
"if",
"not",
"isinstance",
"(",
"self",
".",
"_value",
",",
"self",
".",
"_type",
")",
":",
"title",
"=",
"'{} has an invalid type'",
".",
"format",
"(",
"self",
".",
"_key_name",
"(",
"... | Validation to ensure value is the correct type | [
"Validation",
"to",
"ensure",
"value",
"is",
"the",
"correct",
"type"
] | python | train |
pyQode/pyqode.core | pyqode/core/widgets/output_window.py | https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/widgets/output_window.py#L978-L985 | def _insert_command(self, command):
"""
Insert command by replacing the current input buffer and display it on the text edit.
"""
self._clear_user_buffer()
tc = self.edit.textCursor()
tc.insertText(command)
self.edit.setTextCursor(tc) | [
"def",
"_insert_command",
"(",
"self",
",",
"command",
")",
":",
"self",
".",
"_clear_user_buffer",
"(",
")",
"tc",
"=",
"self",
".",
"edit",
".",
"textCursor",
"(",
")",
"tc",
".",
"insertText",
"(",
"command",
")",
"self",
".",
"edit",
".",
"setTextC... | Insert command by replacing the current input buffer and display it on the text edit. | [
"Insert",
"command",
"by",
"replacing",
"the",
"current",
"input",
"buffer",
"and",
"display",
"it",
"on",
"the",
"text",
"edit",
"."
] | python | train |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/zmq/ipkernel.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/zmq/ipkernel.py#L647-L654 | def _topic(self, topic):
"""prefixed topic for IOPub messages"""
if self.int_id >= 0:
base = "engine.%i" % self.int_id
else:
base = "kernel.%s" % self.ident
return py3compat.cast_bytes("%s.%s" % (base, topic)) | [
"def",
"_topic",
"(",
"self",
",",
"topic",
")",
":",
"if",
"self",
".",
"int_id",
">=",
"0",
":",
"base",
"=",
"\"engine.%i\"",
"%",
"self",
".",
"int_id",
"else",
":",
"base",
"=",
"\"kernel.%s\"",
"%",
"self",
".",
"ident",
"return",
"py3compat",
... | prefixed topic for IOPub messages | [
"prefixed",
"topic",
"for",
"IOPub",
"messages"
] | python | test |
akatrevorjay/uninhibited | uninhibited/dispatch.py | https://github.com/akatrevorjay/uninhibited/blob/f23079fe61cf831fa274d3c60bda8076c571d3f1/uninhibited/dispatch.py#L189-L202 | def add_event(self, name, send_event=True, event_factory=None):
"""
Add event by name.
This is called for you as needed if you allow auto creation of events (see __init__).
Upon an event being added, all handlers are searched for if they have this event,
and if they do, they are added to the Event's list of callables.
This is only here to ensure my constant hatred for Python 2's horrid variable argument support.
:param str|unicode name: Name
"""
return self.add_events((name,),send_event=send_event,event_factory=event_factory) | [
"def",
"add_event",
"(",
"self",
",",
"name",
",",
"send_event",
"=",
"True",
",",
"event_factory",
"=",
"None",
")",
":",
"return",
"self",
".",
"add_events",
"(",
"(",
"name",
",",
")",
",",
"send_event",
"=",
"send_event",
",",
"event_factory",
"=",
... | Add event by name.
This is called for you as needed if you allow auto creation of events (see __init__).
Upon an event being added, all handlers are searched for if they have this event,
and if they do, they are added to the Event's list of callables.
This is only here to ensure my constant hatred for Python 2's horrid variable argument support.
:param str|unicode name: Name | [
"Add",
"event",
"by",
"name",
"."
] | python | train |
apache/incubator-mxnet | example/gluon/sn_gan/model.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/sn_gan/model.py#L120-L139 | def get_descriptor(ctx):
""" construct and return descriptor """
d_net = gluon.nn.Sequential()
with d_net.name_scope():
d_net.add(SNConv2D(num_filter=64, kernel_size=4, strides=2, padding=1, in_channels=3, ctx=ctx))
d_net.add(gluon.nn.LeakyReLU(0.2))
d_net.add(SNConv2D(num_filter=128, kernel_size=4, strides=2, padding=1, in_channels=64, ctx=ctx))
d_net.add(gluon.nn.LeakyReLU(0.2))
d_net.add(SNConv2D(num_filter=256, kernel_size=4, strides=2, padding=1, in_channels=128, ctx=ctx))
d_net.add(gluon.nn.LeakyReLU(0.2))
d_net.add(SNConv2D(num_filter=512, kernel_size=4, strides=2, padding=1, in_channels=256, ctx=ctx))
d_net.add(gluon.nn.LeakyReLU(0.2))
d_net.add(SNConv2D(num_filter=1, kernel_size=4, strides=1, padding=0, in_channels=512, ctx=ctx))
return d_net | [
"def",
"get_descriptor",
"(",
"ctx",
")",
":",
"d_net",
"=",
"gluon",
".",
"nn",
".",
"Sequential",
"(",
")",
"with",
"d_net",
".",
"name_scope",
"(",
")",
":",
"d_net",
".",
"add",
"(",
"SNConv2D",
"(",
"num_filter",
"=",
"64",
",",
"kernel_size",
"... | construct and return descriptor | [
"construct",
"and",
"return",
"descriptor"
] | python | train |
mar10/pyftpsync | ftpsync/ftp_target.py | https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/ftp_target.py#L658-L675 | def _ftp_pwd(self):
"""Variant of `self.ftp.pwd()` that supports encoding-fallback.
Returns:
Current working directory as native string.
"""
try:
return self.ftp.pwd()
except UnicodeEncodeError:
if compat.PY2 or self.ftp.encoding != "utf-8":
raise # should not happen, since Py2 does not try to encode
# TODO: this is NOT THREAD-SAFE!
prev_encoding = self.ftp.encoding
try:
write("ftp.pwd() failed with utf-8: trying Cp1252...", warning=True)
return self.ftp.pwd()
finally:
self.ftp.encoding = prev_encoding | [
"def",
"_ftp_pwd",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"ftp",
".",
"pwd",
"(",
")",
"except",
"UnicodeEncodeError",
":",
"if",
"compat",
".",
"PY2",
"or",
"self",
".",
"ftp",
".",
"encoding",
"!=",
"\"utf-8\"",
":",
"raise",
"# ... | Variant of `self.ftp.pwd()` that supports encoding-fallback.
Returns:
Current working directory as native string. | [
"Variant",
"of",
"self",
".",
"ftp",
".",
"pwd",
"()",
"that",
"supports",
"encoding",
"-",
"fallback",
".",
"Returns",
":",
"Current",
"working",
"directory",
"as",
"native",
"string",
"."
] | python | train |
PmagPy/PmagPy | SPD/lib/lib_arai_plot_statistics.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/SPD/lib/lib_arai_plot_statistics.py#L244-L255 | def get_R_det2(y_segment, y_avg, y_prime):
"""
takes in an array of y values, the mean of those values, and the array of y prime values.
returns R_det2
"""
numerator = sum((numpy.array(y_segment) - numpy.array(y_prime))**2)
denominator = sum((numpy.array(y_segment) - y_avg)**2)
if denominator: # prevent divide by zero error
R_det2 = 1 - (old_div(numerator, denominator))
return R_det2
else:
return float('nan') | [
"def",
"get_R_det2",
"(",
"y_segment",
",",
"y_avg",
",",
"y_prime",
")",
":",
"numerator",
"=",
"sum",
"(",
"(",
"numpy",
".",
"array",
"(",
"y_segment",
")",
"-",
"numpy",
".",
"array",
"(",
"y_prime",
")",
")",
"**",
"2",
")",
"denominator",
"=",
... | takes in an array of y values, the mean of those values, and the array of y prime values.
returns R_det2 | [
"takes",
"in",
"an",
"array",
"of",
"y",
"values",
"the",
"mean",
"of",
"those",
"values",
"and",
"the",
"array",
"of",
"y",
"prime",
"values",
".",
"returns",
"R_det2"
] | python | train |
openego/ding0 | ding0/core/network/__init__.py | https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/core/network/__init__.py#L833-L837 | def close(self):
""" Close a Circuit Breaker #TODO Check
"""
self.grid._graph.add_edge(self.branch_nodes[0], self.branch_nodes[1], branch=self.branch)
self.status = 'closed' | [
"def",
"close",
"(",
"self",
")",
":",
"self",
".",
"grid",
".",
"_graph",
".",
"add_edge",
"(",
"self",
".",
"branch_nodes",
"[",
"0",
"]",
",",
"self",
".",
"branch_nodes",
"[",
"1",
"]",
",",
"branch",
"=",
"self",
".",
"branch",
")",
"self",
... | Close a Circuit Breaker #TODO Check | [
"Close",
"a",
"Circuit",
"Breaker",
"#TODO",
"Check"
] | python | train |
trailofbits/manticore | manticore/native/cpu/x86.py | https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/cpu/x86.py#L5353-L5357 | def CDQ(cpu):
"""
EDX:EAX = sign-extend of EAX
"""
cpu.EDX = Operators.EXTRACT(Operators.SEXTEND(cpu.EAX, 32, 64), 32, 32) | [
"def",
"CDQ",
"(",
"cpu",
")",
":",
"cpu",
".",
"EDX",
"=",
"Operators",
".",
"EXTRACT",
"(",
"Operators",
".",
"SEXTEND",
"(",
"cpu",
".",
"EAX",
",",
"32",
",",
"64",
")",
",",
"32",
",",
"32",
")"
] | EDX:EAX = sign-extend of EAX | [
"EDX",
":",
"EAX",
"=",
"sign",
"-",
"extend",
"of",
"EAX"
] | python | valid |
dmlc/xgboost | python-package/xgboost/core.py | https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/core.py#L197-L205 | def ctypes2buffer(cptr, length):
"""Convert ctypes pointer to buffer type."""
if not isinstance(cptr, ctypes.POINTER(ctypes.c_char)):
raise RuntimeError('expected char pointer')
res = bytearray(length)
rptr = (ctypes.c_char * length).from_buffer(res)
if not ctypes.memmove(rptr, cptr, length):
raise RuntimeError('memmove failed')
return res | [
"def",
"ctypes2buffer",
"(",
"cptr",
",",
"length",
")",
":",
"if",
"not",
"isinstance",
"(",
"cptr",
",",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_char",
")",
")",
":",
"raise",
"RuntimeError",
"(",
"'expected char pointer'",
")",
"res",
"=",
"b... | Convert ctypes pointer to buffer type. | [
"Convert",
"ctypes",
"pointer",
"to",
"buffer",
"type",
"."
] | python | train |
delph-in/pydelphin | delphin/tdl.py | https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/tdl.py#L360-L393 | def terminate(self, end):
"""
Set the value of the tail of the list.
Adding values via :meth:`append` places them on the `FIRST`
feature of some level of the feature structure (e.g.,
`REST.FIRST`), while :meth:`terminate` places them on the
final `REST` feature (e.g., `REST.REST`). If *end* is a
:class:`Conjunction` or :class:`Term`, it is typically a
:class:`Coreference`, otherwise *end* is set to
`tdl.EMPTY_LIST_TYPE` or `tdl.LIST_TYPE`. This method does
not necessarily close the list; if *end* is `tdl.LIST_TYPE`,
the list is left open, otherwise it is closed.
Args:
end (str, :class:`Conjunction`, :class:`Term`): value to
use as the end of the list.
"""
if self.terminated:
raise TdlError('Cannot terminate a closed list.')
if end == LIST_TYPE:
self.terminated = False
elif end == EMPTY_LIST_TYPE:
if self._last_path:
self[self._last_path] = None
else:
self._avm = None
self.terminated = True
elif self._last_path:
self[self._last_path] = end
self.terminated = True
else:
raise TdlError('Empty list must be {} or {}'.format(
LIST_TYPE, EMPTY_LIST_TYPE)) | [
"def",
"terminate",
"(",
"self",
",",
"end",
")",
":",
"if",
"self",
".",
"terminated",
":",
"raise",
"TdlError",
"(",
"'Cannot terminate a closed list.'",
")",
"if",
"end",
"==",
"LIST_TYPE",
":",
"self",
".",
"terminated",
"=",
"False",
"elif",
"end",
"=... | Set the value of the tail of the list.
Adding values via :meth:`append` places them on the `FIRST`
feature of some level of the feature structure (e.g.,
`REST.FIRST`), while :meth:`terminate` places them on the
final `REST` feature (e.g., `REST.REST`). If *end* is a
:class:`Conjunction` or :class:`Term`, it is typically a
:class:`Coreference`, otherwise *end* is set to
`tdl.EMPTY_LIST_TYPE` or `tdl.LIST_TYPE`. This method does
not necessarily close the list; if *end* is `tdl.LIST_TYPE`,
the list is left open, otherwise it is closed.
Args:
end (str, :class:`Conjunction`, :class:`Term`): value to
use as the end of the list. | [
"Set",
"the",
"value",
"of",
"the",
"tail",
"of",
"the",
"list",
"."
] | python | train |
def store_meta_data(self, copy_path=None):
    """Save meta data of state model to the file system

    This method generates a dictionary of the meta data of the state together with the meta data of all state
    elements (data ports, outcomes, etc.) and stores it on the filesystem.

    Secure that the store meta data method is called after storing the core data otherwise the last_stored_path is
    maybe wrong or None.

    The copy path is considered to be a state machine file system path but not the current one but e.g.
    of a as copy saved state machine. The meta data will be stored in respective relative state folder in the state
    machine hierarchy. This folder has to exist.
    Dues the core elements of the state machine has to be stored first.

    :param str copy_path: Optional copy path if meta data is not stored to the file system path of state machine
    """
    if copy_path:
        # Store the meta data inside the (already existing) relative state
        # folder of the copied state machine.
        meta_file_path_json = os.path.join(copy_path, self.state.get_storage_path(), storage.FILE_NAME_META_DATA)
    else:
        # Without a copy path the state's own file system path is used; it
        # is only set after the core data has been stored.
        if self.state.file_system_path is None:
            logger.error("Meta data of {0} can be stored temporary arbitrary but by default first after the "
                         "respective state was stored and a file system path is set.".format(self))
            return
        meta_file_path_json = os.path.join(self.state.file_system_path, storage.FILE_NAME_META_DATA)

    # Work on a deep copy so that collecting the element meta data cannot
    # mutate the model's own meta dictionary.
    meta_data = deepcopy(self.meta)
    self._generate_element_meta_data(meta_data)
    storage_utils.write_dict_to_json(meta_data, meta_file_path_json)
"def",
"store_meta_data",
"(",
"self",
",",
"copy_path",
"=",
"None",
")",
":",
"if",
"copy_path",
":",
"meta_file_path_json",
"=",
"os",
".",
"path",
".",
"join",
"(",
"copy_path",
",",
"self",
".",
"state",
".",
"get_storage_path",
"(",
")",
",",
"stor... | Save meta data of state model to the file system
This method generates a dictionary of the meta data of the state together with the meta data of all state
elements (data ports, outcomes, etc.) and stores it on the filesystem.
Secure that the store meta data method is called after storing the core data otherwise the last_stored_path is
maybe wrong or None.
The copy path is considered to be a state machine file system path but not the current one but e.g.
of a as copy saved state machine. The meta data will be stored in respective relative state folder in the state
machine hierarchy. This folder has to exist.
Dues the core elements of the state machine has to be stored first.
:param str copy_path: Optional copy path if meta data is not stored to the file system path of state machine | [
"Save",
"meta",
"data",
"of",
"state",
"model",
"to",
"the",
"file",
"system"
] | python | train |
def add_all(cls, models, commit=True, check_type=False):
    """Batch method for adding a list of model instances
    to the db in one get_or_404.

    Args:
        models (list): A list of the instances to add.
        commit (bool, optional): Defaults to True. If False, the
            transaction won't get committed.
        check_type (bool, optional): If True, each instance
            is type checked and exception is thrown if it is
            not an instance of the model. By default, False.

    Returns:
        list: A list of `Model` instances

    Raises:
        ValueError: if ``check_type`` is True and an element of
            ``models`` is not an instance of ``cls``.
    """
    if check_type:
        for model in models:
            if not isinstance(model, cls):
                # BUGFIX: the original read `'%s is not of type %s' (model, cls)`
                # (missing `%`), which raised TypeError("'str' object is not
                # callable") instead of the intended ValueError.
                raise ValueError('%s is not of type %s' % (model, cls))
    # Skip explicit None placeholders so the session only receives real
    # instances; otherwise hand the whole list to the session in one call.
    if None in models:
        cls.session.add_all([m for m in models if m is not None])
    else:
        cls.session.add_all(models)
    try:
        if commit:
            cls.session.commit()
        return models
    except BaseException:  # roll back on any failure, then re-raise unchanged
        cls.session.rollback()
        raise
"def",
"add_all",
"(",
"cls",
",",
"models",
",",
"commit",
"=",
"True",
",",
"check_type",
"=",
"False",
")",
":",
"if",
"check_type",
":",
"for",
"model",
"in",
"models",
":",
"if",
"not",
"isinstance",
"(",
"model",
",",
"cls",
")",
":",
"raise",
... | Batch method for adding a list of model instances
to the db in one get_or_404.
Args:
models (list): A list of the instances to add.
commit (bool, optional): Defaults to True. If False, the
transaction won't get committed.
check_type (bool, optional) : If True, each instance
is type checked and exception is thrown if it is
not an instance of the model. By default, False.
Returns:
list: A list of `Model` instances | [
"Batch",
"method",
"for",
"adding",
"a",
"list",
"of",
"model",
"instances",
"to",
"the",
"db",
"in",
"one",
"get_or_404",
"."
] | python | train |
def get_argument_environ(self):
    """
    Return the request's ``environ`` argument.

    Raises an Exception (with the underlying tornado log message) if the
    argument is missing from the request.
    """
    try:
        environ = self.get_argument(constants.PARAM_ENVIRON)
    except tornado.web.MissingArgumentError as e:
        raise Exception(e.log_message)
    return environ
"def",
"get_argument_environ",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"get_argument",
"(",
"constants",
".",
"PARAM_ENVIRON",
")",
"except",
"tornado",
".",
"web",
".",
"MissingArgumentError",
"as",
"e",
":",
"raise",
"Exception",
"(",
"e"... | Helper function to get request argument.
Raises exception if argument is missing.
Returns the environ argument. | [
"Helper",
"function",
"to",
"get",
"request",
"argument",
".",
"Raises",
"exception",
"if",
"argument",
"is",
"missing",
".",
"Returns",
"the",
"environ",
"argument",
"."
] | python | valid |
def filter_styles(style, group, other_groups, blacklist=()):
    """
    Filters styles which are specific to a particular artist, e.g.
    for a GraphPlot this will filter options specific to the nodes and
    edges.

    Arguments
    ---------
    style: dict
        Dictionary of styles and values
    group: str
        Group within the styles to filter for
    other_groups: list
        Other groups to filter out
    blacklist: sequence (optional)
        Options to filter out

    Returns
    -------
    filtered: dict
        Filtered dictionary of styles
    """
    # NOTE: the default for `blacklist` was changed from a mutable list
    # literal to an immutable tuple; it is never mutated, so behavior is
    # unchanged, but the shared-mutable-default pitfall is avoided.
    prefix = group + '_'
    filtered = {}
    # First pass: keep generic options, i.e. those belonging neither to
    # this group nor to any of the other groups.
    for key, val in style.items():
        if (any(key.startswith(p) for p in other_groups)
                or key.startswith(prefix) or key in blacklist):
            continue
        filtered[key] = val
    # Second pass: strip this group's prefix (e.g. 'node_color' ->
    # 'color'), overriding any generic value of the same name.
    for key, val in style.items():
        if not key.startswith(prefix) or key in blacklist:
            continue
        filtered[key[len(prefix):]] = val
    return filtered
"def",
"filter_styles",
"(",
"style",
",",
"group",
",",
"other_groups",
",",
"blacklist",
"=",
"[",
"]",
")",
":",
"group",
"=",
"group",
"+",
"'_'",
"filtered",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"style",
".",
"items",
"(",
")",
":",
"i... | Filters styles which are specific to a particular artist, e.g.
for a GraphPlot this will filter options specific to the nodes and
edges.
Arguments
---------
style: dict
Dictionary of styles and values
group: str
Group within the styles to filter for
other_groups: list
Other groups to filter out
blacklist: list (optional)
List of options to filter out
Returns
-------
filtered: dict
Filtered dictionary of styles | [
"Filters",
"styles",
"which",
"are",
"specific",
"to",
"a",
"particular",
"artist",
"e",
".",
"g",
".",
"for",
"a",
"GraphPlot",
"this",
"will",
"filter",
"options",
"specific",
"to",
"the",
"nodes",
"and",
"edges",
"."
] | python | train |
def normalize_int(value: IntConvertible) -> int:
    """
    Robust to integer conversion, handling hex values, string representations,
    and special cases like `0x`.
    """
    if value is not None and is_integer(value):
        pass  # placeholder comment removed below
"def",
"normalize_int",
"(",
"value",
":",
"IntConvertible",
")",
"->",
"int",
":",
"if",
"is_integer",
"(",
"value",
")",
":",
"return",
"cast",
"(",
"int",
",",
"value",
")",
"elif",
"is_bytes",
"(",
"value",
")",
":",
"return",
"big_endian_to_int",
"(... | Robust to integer conversion, handling hex values, string representations,
and special cases like `0x`. | [
"Robust",
"to",
"integer",
"conversion",
"handling",
"hex",
"values",
"string",
"representations",
"and",
"special",
"cases",
"like",
"0x",
"."
] | python | train |
def _validate_value(self, proposal):
    "Replace all values with the actual objects in the options list"
    # Map each proposed value onto the matching object in the options
    # list (compared via ``self.equals``); a tuple preserves the order of
    # the proposal.
    try:
        return tuple(findvalue(self._options_values, i, self.equals) for i in proposal.value)
    except ValueError:
        # findvalue raises ValueError when no option matches a value.
        raise TraitError('Invalid selection: value not found')
"def",
"_validate_value",
"(",
"self",
",",
"proposal",
")",
":",
"try",
":",
"return",
"tuple",
"(",
"findvalue",
"(",
"self",
".",
"_options_values",
",",
"i",
",",
"self",
".",
"equals",
")",
"for",
"i",
"in",
"proposal",
".",
"value",
")",
"except"... | Replace all values with the actual objects in the options list | [
"Replace",
"all",
"values",
"with",
"the",
"actual",
"objects",
"in",
"the",
"options",
"list"
] | python | train |
def main():
    """
    NAME
        s_hext.py

    DESCRIPTION
        calculates Hext statistics for tensor data

    SYNTAX
        s_hext.py [-h][-i][-f file] [<filename]

    OPTIONS
        -h prints help message and quits
        -f file specifies filename on command line
        -l NMEAS do line by line instead of whole file, use number of measurements NMEAS for degrees of freedom
        < filename, reads from standard input (Unix like operating systems only)

    INPUT
        x11,x22,x33,x12,x23,x13,sigma [sigma only if line by line]

    OUTPUT
        F F12 F23 sigma
        and three sets of:
            tau dec inc Eij dec inc Eik dec inc

    DEFAULT
        average whole file
    """
    # ave == 1: average the whole file; ave == 0: process line by line.
    ave=1
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-l' in sys.argv:
        # Line-by-line mode: each row carries its own sigma, and the
        # degrees of freedom are derived from NMEAS (npts - 6).
        ind=sys.argv.index('-l')
        npts=int(sys.argv[ind+1])
        ave=0
    if '-f' in sys.argv:
        ind=sys.argv.index('-f')
        file=sys.argv[ind+1]
        f=open(file,'r')
        data=f.readlines()
        f.close()
    else:
        # Fall back to standard input when no file is given.
        data=sys.stdin.readlines()
    Ss=[]
    for line in data:
        # First six whitespace-separated columns are the tensor elements
        # x11, x22, x33, x12, x23, x13.
        s=[]
        rec=line.split()
        for i in range(6):
            s.append(float(rec[i]))
        if ave==0:
            # Seventh column is this measurement's sigma; compute and
            # print Hext statistics for this line alone.
            sig=float(rec[6])
            hpars=pmag.dohext(npts-6,sig,s)
            print('%s %4.2f %s %4.2f %s %4.2f'%('F = ',hpars['F'],'F12 = ',hpars['F12'],'F23 = ',hpars['F23']))
            print('%s %i %s %14.12f'%('Nmeas = ',npts,' sigma = ',sig))
            print('%7.5f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f'%(hpars["t1"],hpars["v1_dec"],hpars["v1_inc"],hpars["e12"],hpars["v2_dec"],hpars["v2_inc"],hpars["e13"],hpars["v3_dec"],hpars["v3_inc"] ))
            print('%7.5f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f'%(hpars["t2"],hpars["v2_dec"],hpars["v2_inc"],hpars["e23"],hpars["v3_dec"],hpars["v3_inc"],hpars["e12"],hpars["v1_dec"],hpars["v1_inc"] ))
            print('%7.5f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f'%(hpars["t3"],hpars["v3_dec"],hpars["v3_inc"],hpars["e13"],hpars["v1_dec"],hpars["v1_inc"],hpars["e23"],hpars["v2_dec"],hpars["v2_inc"] ))
        else:
            # Whole-file mode: accumulate all tensors for averaging below.
            Ss.append(s)
    if ave==1:
        # Average the tensors, then compute Hext statistics once.
        npts=len(Ss)
        nf,sigma,avs=pmag.sbar(Ss)
        hpars=pmag.dohext(nf,sigma,avs)
        print('%s %4.2f %s %4.2f %s %4.2f'%('F = ',hpars['F'],'F12 = ',hpars['F12'],'F23 = ',hpars['F23']))
        print('%s %i %s %14.12f'%('N = ',npts,' sigma = ',sigma))
        print('%7.5f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f'%(hpars["t1"],hpars["v1_dec"],hpars["v1_inc"],hpars["e12"],hpars["v2_dec"],hpars["v2_inc"],hpars["e13"],hpars["v3_dec"],hpars["v3_inc"] ))
        print('%7.5f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f'%(hpars["t2"],hpars["v2_dec"],hpars["v2_inc"],hpars["e23"],hpars["v3_dec"],hpars["v3_inc"],hpars["e12"],hpars["v1_dec"],hpars["v1_inc"] ))
        print('%7.5f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f'%(hpars["t3"],hpars["v3_dec"],hpars["v3_inc"],hpars["e13"],hpars["v1_dec"],hpars["v1_inc"],hpars["e23"],hpars["v2_dec"],hpars["v2_inc"] ))
"def",
"main",
"(",
")",
":",
"ave",
"=",
"1",
"if",
"'-h'",
"in",
"sys",
".",
"argv",
":",
"print",
"(",
"main",
".",
"__doc__",
")",
"sys",
".",
"exit",
"(",
")",
"if",
"'-l'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
... | NAME
s_hext.py
DESCRIPTION
calculates Hext statistics for tensor data
SYNTAX
s_hext.py [-h][-i][-f file] [<filename]
OPTIONS
-h prints help message and quits
-f file specifies filename on command line
-l NMEAS do line by line instead of whole file, use number of measurements NMEAS for degrees of freedom
< filename, reads from standard input (Unix like operating systems only)
INPUT
x11,x22,x33,x12,x23,x13,sigma [sigma only if line by line]
OUTPUT
F F12 F23 sigma
and three sets of:
tau dec inc Eij dec inc Eik dec inc
DEFAULT
average whole file | [
"NAME",
"s_hext",
".",
"py"
] | python | train |
def solo(whyrun=False,
         logfile=None,
         **kwargs):
    '''
    Execute a chef solo run and return a dict with the stderr, stdout,
    return code, and pid.

    CLI Example:

    .. code-block:: bash

        salt '*' chef.solo override-runlist=test

    config
        The configuration file to use

    environment
        Set the Chef Environment on the node

    group
        Group to set privilege to

    json-attributes
        Load attributes from a JSON file or URL

    log_level
        Set the log level (debug, info, warn, error, fatal)

    logfile
        Set the log file location

    node-name
        The node name for this client

    override-runlist
        Replace current run list with specified items for a single run

    recipe-url
        Pull down a remote gzipped tarball of recipes and untar it to
        the cookbook cache

    run-lock-timeout
        Set maximum duration to wait for another client run to finish,
        default is indefinitely.

    user
        User to set privilege to

    whyrun
        Enable whyrun mode when set to True
    '''
    if logfile is None:
        logfile = _default_logfile('chef-solo')
    # Base command line; any extra chef-solo options arrive via **kwargs
    # and are rendered by _exec_cmd.
    base_args = [
        'chef-solo',
        '--no-color',
        '--logfile "{0}"'.format(logfile),
        '--format doc',
    ]
    if whyrun:
        base_args.append('--why-run')
    return _exec_cmd(*base_args, **kwargs)
"def",
"solo",
"(",
"whyrun",
"=",
"False",
",",
"logfile",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"logfile",
"is",
"None",
":",
"logfile",
"=",
"_default_logfile",
"(",
"'chef-solo'",
")",
"args",
"=",
"[",
"'chef-solo'",
",",
"'--no-co... | Execute a chef solo run and return a dict with the stderr, stdout,
return code, and pid.
CLI Example:
.. code-block:: bash
salt '*' chef.solo override-runlist=test
config
The configuration file to use
environment
Set the Chef Environment on the node
group
Group to set privilege to
json-attributes
Load attributes from a JSON file or URL
log_level
Set the log level (debug, info, warn, error, fatal)
logfile
Set the log file location
node-name
The node name for this client
override-runlist
Replace current run list with specified items for a single run
recipe-url
Pull down a remote gzipped tarball of recipes and untar it to
the cookbook cache
run-lock-timeout
Set maximum duration to wait for another client run to finish,
default is indefinitely.
user
User to set privilege to
whyrun
Enable whyrun mode when set to True | [
"Execute",
"a",
"chef",
"solo",
"run",
"and",
"return",
"a",
"dict",
"with",
"the",
"stderr",
"stdout",
"return",
"code",
"and",
"pid",
"."
] | python | train |
def get_object(self, resource, object_type, content_ids, object_ids='*', location=0):
    """
    Get a list of Objects from a resource

    :param resource: The resource to get objects from
    :param object_type: The type of object to fetch
    :param content_ids: The unique id of the item to get objects for
    :param object_ids: ids of the objects to download
    :param location: The path to get Objects from
    :return: list
    """
    # Expand content/object ids into the comma-separated ID list the RETS
    # GetObject transaction expects.
    object_helper = GetObject()
    request_ids = object_helper.ids(content_ids=content_ids, object_ids=object_ids)

    response = self._request(
        capability='GetObject',
        options={
            'query':
                {
                    "Resource": resource,
                    "Type": object_type,
                    "ID": ','.join(request_ids),
                    "Location": location
                }
        }
    )
    # Multipart responses carry several objects; otherwise wrap the single
    # parsed object in a list so the return type is uniform.
    if 'multipart' in response.headers.get('Content-Type'):
        parser = MultipleObjectParser()
        collection = parser.parse_image_response(response)
    else:
        parser = SingleObjectParser()
        collection = [parser.parse_image_response(response)]
    return collection
"def",
"get_object",
"(",
"self",
",",
"resource",
",",
"object_type",
",",
"content_ids",
",",
"object_ids",
"=",
"'*'",
",",
"location",
"=",
"0",
")",
":",
"object_helper",
"=",
"GetObject",
"(",
")",
"request_ids",
"=",
"object_helper",
".",
"ids",
"("... | Get a list of Objects from a resource
:param resource: The resource to get objects from
:param object_type: The type of object to fetch
:param content_ids: The unique id of the item to get objects for
:param object_ids: ids of the objects to download
:param location: The path to get Objects from
:return: list | [
"Get",
"a",
"list",
"of",
"Objects",
"from",
"a",
"resource",
":",
"param",
"resource",
":",
"The",
"resource",
"to",
"get",
"objects",
"from",
":",
"param",
"object_type",
":",
"The",
"type",
"of",
"object",
"to",
"fetch",
":",
"param",
"content_ids",
"... | python | train |
def _safe_match_string(value):
    """Sanitize and represent a string argument in MATCH."""
    # JSON encoding replaces unicode literals and special chars (e.g.
    # newlines and backslashes) with escape sequences; JSON shares MATCH /
    # SQL escaping rules, so no further escaping is necessary.
    if isinstance(value, six.string_types):
        return json.dumps(value)
    if isinstance(value, bytes):  # should only happen in py3
        return json.dumps(value.decode('utf-8'))
    raise GraphQLInvalidArgumentError(u'Attempting to convert a non-string into a string: '
                                      u'{}'.format(value))
"def",
"_safe_match_string",
"(",
"value",
")",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"six",
".",
"string_types",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"bytes",
")",
":",
"# should only happen in py3",
"value",
"=",
"value",
".",
"d... | Sanitize and represent a string argument in MATCH. | [
"Sanitize",
"and",
"represent",
"a",
"string",
"argument",
"in",
"MATCH",
"."
] | python | train |
def wrap_object(obj, decorator):
    """
    Decorates the given object with the decorator function.

    If obj is a method, the method is decorated with the decorator function
    and returned. If obj is a class (i.e., a class based view), the methods
    in the class corresponding to HTTP methods will be decorated and the
    resultant class object will be returned.

    Raises TypeError for anything that is neither a function nor a class.
    """
    actual_decorator = method_decorator(decorator)
    if inspect.isfunction(obj):
        # Plain view function: decorate it and preserve its metadata.
        wrapped_obj = actual_decorator(obj)
        update_wrapper(wrapped_obj, obj, assigned=available_attrs(obj))
    elif inspect.isclass(obj):
        # Class-based view: decorate each defined HTTP handler
        # (get/post/put/... as listed in http_method_names) in place.
        for method_name in obj.http_method_names:
            if hasattr(obj, method_name):
                method = getattr(obj, method_name)
                wrapped_method = actual_decorator(method)
                update_wrapper(wrapped_method, method, assigned=available_attrs(method))
                setattr(obj, method_name, wrapped_method)
        wrapped_obj = obj
    else:
        raise TypeError("received an object of type '{0}' expected 'function' or 'classobj'.".format(type(obj)))
    return wrapped_obj
"def",
"wrap_object",
"(",
"obj",
",",
"decorator",
")",
":",
"actual_decorator",
"=",
"method_decorator",
"(",
"decorator",
")",
"if",
"inspect",
".",
"isfunction",
"(",
"obj",
")",
":",
"wrapped_obj",
"=",
"actual_decorator",
"(",
"obj",
")",
"update_wrapper... | Decorates the given object with the decorator function.
If obj is a method, the method is decorated with the decorator function
and returned. If obj is a class (i.e., a class based view), the methods
in the class corresponding to HTTP methods will be decorated and the
resultant class object will be returned. | [
"Decorates",
"the",
"given",
"object",
"with",
"the",
"decorator",
"function",
"."
] | python | train |
def add_nodes(self, lb, nodes):
    """Adds the list of nodes to the specified load balancer."""
    # Accept either a single node or a sequence of nodes.
    if isinstance(nodes, (list, tuple)):
        node_seq = nodes
    else:
        node_seq = [nodes]
    payload = {"nodes": [node.to_dict() for node in node_seq]}
    uri = "/loadbalancers/%s/nodes" % lb.id
    resp, body = self.api.method_post(uri, body=payload)
    return resp, body
"def",
"add_nodes",
"(",
"self",
",",
"lb",
",",
"nodes",
")",
":",
"if",
"not",
"isinstance",
"(",
"nodes",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"nodes",
"=",
"[",
"nodes",
"]",
"node_dicts",
"=",
"[",
"nd",
".",
"to_dict",
"(",
")",
... | Adds the list of nodes to the specified load balancer. | [
"Adds",
"the",
"list",
"of",
"nodes",
"to",
"the",
"specified",
"load",
"balancer",
"."
] | python | train |
def round(self, ndigits=0):
    """
    Rounds the amount using the current ``Decimal`` rounding algorithm.
    """
    # None is treated the same as the default of zero decimal places.
    digits = 0 if ndigits is None else ndigits
    quantum = Decimal('1e{0}'.format(-digits))
    return self.__class__(amount=self.amount.quantize(quantum),
                          currency=self.currency)
"def",
"round",
"(",
"self",
",",
"ndigits",
"=",
"0",
")",
":",
"if",
"ndigits",
"is",
"None",
":",
"ndigits",
"=",
"0",
"return",
"self",
".",
"__class__",
"(",
"amount",
"=",
"self",
".",
"amount",
".",
"quantize",
"(",
"Decimal",
"(",
"'1e'",
"... | Rounds the amount using the current ``Decimal`` rounding algorithm. | [
"Rounds",
"the",
"amount",
"using",
"the",
"current",
"Decimal",
"rounding",
"algorithm",
"."
] | python | train |
def _parse_bro_header(self, bro_log):
    """Parse the Bro log header section.

    Scans past the leading '#'-prefixed header lines up to '#fields',
    reads the field names and their types, and builds one converter per
    field from ``self.type_mapper`` (falling back to the 'unknown'
    converter for unrecognized types).

    Returns:
        tuple: (offset, field_names, field_types, type_converters) where
        ``offset`` is the file position just past the header, so callers
        can seek there and read data rows directly.

    Format example:
        #separator \x09
        #set_separator ,
        #empty_field (empty)
        #unset_field -
        #path httpheader_recon
        #fields ts origin useragent header_events_json
        #types time string string string
    """
    # Open the Bro logfile
    with open(bro_log, 'r') as bro_file:

        # Skip until you find the #fields line
        _line = bro_file.readline()
        while not _line.startswith('#fields'):
            _line = bro_file.readline()

        # Read in the field names (first token is the '#fields' tag itself)
        field_names = _line.strip().split(self._delimiter)[1:]

        # Read in the types from the following '#types' line
        _line = bro_file.readline()
        field_types = _line.strip().split(self._delimiter)[1:]

        # Setup the type converters
        type_converters = []
        for field_type in field_types:
            type_converters.append(self.type_mapper.get(field_type, self.type_mapper['unknown']))

        # Keep the header offset
        offset = bro_file.tell()

        # Return the header info
        return offset, field_names, field_types, type_converters
"def",
"_parse_bro_header",
"(",
"self",
",",
"bro_log",
")",
":",
"# Open the Bro logfile",
"with",
"open",
"(",
"bro_log",
",",
"'r'",
")",
"as",
"bro_file",
":",
"# Skip until you find the #fields line",
"_line",
"=",
"bro_file",
".",
"readline",
"(",
")",
"w... | Parse the Bro log header section.
Format example:
#separator \x09
#set_separator ,
#empty_field (empty)
#unset_field -
#path httpheader_recon
#fields ts origin useragent header_events_json
#types time string string string | [
"Parse",
"the",
"Bro",
"log",
"header",
"section",
"."
] | python | train |
def _ReadParserPresetsFromFile(self):
    """Reads the parser presets from the presets.yaml file.

    Side effect: stores the resolved presets file path on
    ``self._presets_file``.

    Raises:
      BadConfigOption: if the parser presets file cannot be read.
    """
    self._presets_file = os.path.join(
        self._data_location, self._PRESETS_FILE_NAME)
    if not os.path.isfile(self._presets_file):
        raise errors.BadConfigOption(
            'No such parser presets file: {0:s}.'.format(self._presets_file))

    # Re-raise parse failures as a configuration error so callers only
    # have to handle BadConfigOption.
    try:
        parsers_manager.ParsersManager.ReadPresetsFromFile(self._presets_file)
    except errors.MalformedPresetError as exception:
        raise errors.BadConfigOption(
            'Unable to read presets from file with error: {0!s}'.format(
                exception))
"def",
"_ReadParserPresetsFromFile",
"(",
"self",
")",
":",
"self",
".",
"_presets_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_data_location",
",",
"self",
".",
"_PRESETS_FILE_NAME",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
... | Reads the parser presets from the presets.yaml file.
Raises:
BadConfigOption: if the parser presets file cannot be read. | [
"Reads",
"the",
"parser",
"presets",
"from",
"the",
"presets",
".",
"yaml",
"file",
"."
] | python | train |
MatiasSM/fcb | fcb/processing/transformations/ToImage.py | https://github.com/MatiasSM/fcb/blob/92a6c535287ea1c1ef986954a5d66e7905fb6221/fcb/processing/transformations/ToImage.py#L15-L23 | def _determine_dimensions(num_of_pixels):
"""
Given a number of pixels, determines the largest width and height that define a
rectangle with such an area
"""
for x in xrange(int(math.sqrt(num_of_pixels)) + 1, 1, -1):
if num_of_pixels % x == 0:
return num_of_pixels // x, x
return 1, num_of_pixels | [
"def",
"_determine_dimensions",
"(",
"num_of_pixels",
")",
":",
"for",
"x",
"in",
"xrange",
"(",
"int",
"(",
"math",
".",
"sqrt",
"(",
"num_of_pixels",
")",
")",
"+",
"1",
",",
"1",
",",
"-",
"1",
")",
":",
"if",
"num_of_pixels",
"%",
"x",
"==",
"0... | Given a number of pixels, determines the largest width and height that define a
rectangle with such an area | [
"Given",
"a",
"number",
"of",
"pixels",
"determines",
"the",
"largest",
"width",
"and",
"height",
"that",
"define",
"a",
"rectangle",
"with",
"such",
"an",
"area"
] | python | train |
def download(name, options):
    """
    download a file or all files in a directory

    :param name: path to a single media file or to a directory
    :param options: parsed options; the ``timeout``, ``verbose`` and
        ``recursive`` attributes are used here
    :return: number of subtitle files successfully downloaded
    """
    dire = os.path.dirname(name) # returns the directory name
    fName = os.path.basename(name) # returns the filename
    fNameOnly, fExt = os.path.splitext(fName)
    dwn = 0
    if fileExists(fName, dire) and not fileExists((fNameOnly + '.srt'), dire): # skip if already downloaded
        if file_downloaded(download_file(fName, options.timeout, dire), fName, options.verbose):
            dwn += 1
    elif dirExists(name):
        # Directory: handle each entry, recursing into subdirectories only
        # when the recursive option is set.
        for filename in os.listdir(name):
            if options.recursive:
                dwn += download(os.path.join(name, filename), options)
            else:
                if file_downloaded(download_file(filename, options.timeout, name), filename, options.verbose):
                    dwn += 1
    return dwn
"def",
"download",
"(",
"name",
",",
"options",
")",
":",
"dire",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"name",
")",
"# returns the directory name",
"fName",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"name",
")",
"# returns the filename",
"fName... | download a file or all files in a directory | [
"download",
"a",
"file",
"or",
"all",
"files",
"in",
"a",
"directory"
] | python | train |
def _resolveCtypesImports(cbinaries):
    """Completes ctypes BINARY entries for modules with their full path.

    ``cbinaries`` is a list of library names referenced via ctypes. Each
    library that can be located yields a ``(name, fullpath, "BINARY")``
    tuple in the returned list; libraries that cannot be found only
    produce a warning. The platform library search path environment
    variable is temporarily extended with PyInstaller's extra paths for
    the duration of the lookup.
    """
    if is_unix:
        envvar = "LD_LIBRARY_PATH"
    elif is_darwin:
        envvar = "DYLD_LIBRARY_PATH"
    else:
        envvar = "PATH"

    def _setPaths():
        # Prepend PyInstaller's extra search paths to the env var and
        # return its previous value so it can be restored afterwards.
        path = os.pathsep.join(PyInstaller.__pathex__)
        old = compat.getenv(envvar)
        if old is not None:
            path = os.pathsep.join((path, old))
        compat.setenv(envvar, path)
        return old

    def _restorePaths(old):
        # Undo _setPaths(): restore the old value or unset the variable.
        if old is None:
            compat.unsetenv(envvar)
        else:
            compat.setenv(envvar, old)

    ret = []

    # Try to locate the shared library on disk. This is done by
    # executing ctypes.utile.find_library prepending ImportTracker's
    # local paths to library search paths, then replaces original values.
    old = _setPaths()
    for cbin in cbinaries:
        # Ignore annoying warnings like:
        # 'W: library kernel32.dll required via ctypes not found'
        # 'W: library coredll.dll required via ctypes not found'
        if cbin in ['coredll.dll', 'kernel32.dll']:
            continue
        ext = os.path.splitext(cbin)[1]
        # On Windows, only .dll files can be loaded.
        if os.name == "nt" and ext.lower() in [".so", ".dylib"]:
            continue
        cpath = find_library(os.path.splitext(cbin)[0])
        if is_unix:
            # CAVEAT: find_library() is not the correct function. Ctype's
            # documentation says that it is meant to resolve only the filename
            # (as a *compiler* does) not the full path. Anyway, it works well
            # enough on Windows and Mac. On Linux, we need to implement
            # more code to find out the full path.
            if cpath is None:
                cpath = cbin
            # "man ld.so" says that we should first search LD_LIBRARY_PATH
            # and then the ldcache
            for d in compat.getenv(envvar, '').split(os.pathsep):
                if os.path.isfile(os.path.join(d, cpath)):
                    cpath = os.path.join(d, cpath)
                    break
            else:
                # Not on the search path: fall back to the ldconfig cache,
                # parsing 'name => /full/path' lines.
                text = compat.exec_command("/sbin/ldconfig", "-p")
                for L in text.strip().splitlines():
                    if cpath in L:
                        cpath = L.split("=>", 1)[1].strip()
                        assert os.path.isfile(cpath)
                        break
                else:
                    cpath = None
        if cpath is None:
            logger.warn("library %s required via ctypes not found", cbin)
        else:
            ret.append((cbin, cpath, "BINARY"))
    _restorePaths(old)
    return ret
"def",
"_resolveCtypesImports",
"(",
"cbinaries",
")",
":",
"if",
"is_unix",
":",
"envvar",
"=",
"\"LD_LIBRARY_PATH\"",
"elif",
"is_darwin",
":",
"envvar",
"=",
"\"DYLD_LIBRARY_PATH\"",
"else",
":",
"envvar",
"=",
"\"PATH\"",
"def",
"_setPaths",
"(",
")",
":",
... | Completes ctypes BINARY entries for modules with their full path. | [
"Completes",
"ctypes",
"BINARY",
"entries",
"for",
"modules",
"with",
"their",
"full",
"path",
"."
] | python | train |
klahnakoski/pyLibrary | mo_threads/threads.py | https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/mo_threads/threads.py#L68-L73 | def add(self, target, *args, **kwargs):
"""
target IS THE FUNCTION TO EXECUTE IN THE THREAD
"""
t = Thread.run(target.__name__, target, *args, **kwargs)
self.threads.append(t) | [
"def",
"add",
"(",
"self",
",",
"target",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"t",
"=",
"Thread",
".",
"run",
"(",
"target",
".",
"__name__",
",",
"target",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"threads"... | target IS THE FUNCTION TO EXECUTE IN THE THREAD | [
"target",
"IS",
"THE",
"FUNCTION",
"TO",
"EXECUTE",
"IN",
"THE",
"THREAD"
] | python | train |
sveetch/crispy-forms-foundation | crispy_forms_foundation/__init__.py | https://github.com/sveetch/crispy-forms-foundation/blob/835a4152ef9b2a096b9a27748341ef751823b9f0/crispy_forms_foundation/__init__.py#L13-L22 | def _extract_version(package_name):
"""
Get package version from installed distribution or configuration file if not
installed
"""
try:
return pkg_resources.get_distribution(package_name).version
except pkg_resources.DistributionNotFound:
_conf = read_configuration(os.path.join(PROJECT_DIR, "setup.cfg"))
return _conf["metadata"]["version"] | [
"def",
"_extract_version",
"(",
"package_name",
")",
":",
"try",
":",
"return",
"pkg_resources",
".",
"get_distribution",
"(",
"package_name",
")",
".",
"version",
"except",
"pkg_resources",
".",
"DistributionNotFound",
":",
"_conf",
"=",
"read_configuration",
"(",
... | Get package version from installed distribution or configuration file if not
installed | [
"Get",
"package",
"version",
"from",
"installed",
"distribution",
"or",
"configuration",
"file",
"if",
"not",
"installed"
] | python | test |
osrg/ryu | ryu/lib/lacplib.py | https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/lib/lacplib.py#L112-L135 | def flow_removed_handler(self, evt):
"""FlowRemoved event handler. when the removed flow entry was
for LACP, set the status of the slave i/f to disabled, and
send a event."""
msg = evt.msg
datapath = msg.datapath
ofproto = datapath.ofproto
dpid = datapath.id
match = msg.match
if ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
port = match.in_port
dl_type = match.dl_type
else:
port = match['in_port']
dl_type = match['eth_type']
if ether.ETH_TYPE_SLOW != dl_type:
return
self.logger.info(
"SW=%s PORT=%d LACP exchange timeout has occurred.",
dpid_to_str(dpid), port)
self._set_slave_enabled(dpid, port, False)
self._set_slave_timeout(dpid, port, 0)
self.send_event_to_observers(
EventSlaveStateChanged(datapath, port, False)) | [
"def",
"flow_removed_handler",
"(",
"self",
",",
"evt",
")",
":",
"msg",
"=",
"evt",
".",
"msg",
"datapath",
"=",
"msg",
".",
"datapath",
"ofproto",
"=",
"datapath",
".",
"ofproto",
"dpid",
"=",
"datapath",
".",
"id",
"match",
"=",
"msg",
".",
"match",... | FlowRemoved event handler. when the removed flow entry was
for LACP, set the status of the slave i/f to disabled, and
send a event. | [
"FlowRemoved",
"event",
"handler",
".",
"when",
"the",
"removed",
"flow",
"entry",
"was",
"for",
"LACP",
"set",
"the",
"status",
"of",
"the",
"slave",
"i",
"/",
"f",
"to",
"disabled",
"and",
"send",
"a",
"event",
"."
] | python | train |
StackStorm/pybind | pybind/nos/v6_0_2f/preprovision/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/preprovision/__init__.py#L94-L115 | def _set_rbridge_id(self, v, load=False):
"""
Setter method for rbridge_id, mapped from YANG variable /preprovision/rbridge_id (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_rbridge_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_rbridge_id() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("rbridge_id wwn",rbridge_id.rbridge_id, yang_name="rbridge-id", rest_name="rbridge-id", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='rbridge-id wwn', extensions={u'tailf-common': {u'info': u'Rbridge Id for Pre-provision configuration', u'callpoint': u'switch_attributes_callpoint', u'display-when': u'((/vcsmode/vcs-mode = "true") and (/vcsmode/vcs-cluster-mode = "true"))', u'cli-mode-name': u'config-preprovision-rbridge-id-$(rbridge-id)'}}), is_container='list', yang_name="rbridge-id", rest_name="rbridge-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Rbridge Id for Pre-provision configuration', u'callpoint': u'switch_attributes_callpoint', u'display-when': u'((/vcsmode/vcs-mode = "true") and (/vcsmode/vcs-cluster-mode = "true"))', u'cli-mode-name': u'config-preprovision-rbridge-id-$(rbridge-id)'}}, namespace='urn:brocade.com:mgmt:brocade-preprovision', defining_module='brocade-preprovision', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """rbridge_id must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("rbridge_id wwn",rbridge_id.rbridge_id, yang_name="rbridge-id", rest_name="rbridge-id", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='rbridge-id wwn', extensions={u'tailf-common': {u'info': u'Rbridge Id for Pre-provision configuration', u'callpoint': u'switch_attributes_callpoint', u'display-when': u'((/vcsmode/vcs-mode = "true") and (/vcsmode/vcs-cluster-mode = "true"))', u'cli-mode-name': u'config-preprovision-rbridge-id-$(rbridge-id)'}}), is_container='list', yang_name="rbridge-id", rest_name="rbridge-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Rbridge Id for Pre-provision configuration', u'callpoint': u'switch_attributes_callpoint', u'display-when': u'((/vcsmode/vcs-mode = "true") and (/vcsmode/vcs-cluster-mode = "true"))', u'cli-mode-name': u'config-preprovision-rbridge-id-$(rbridge-id)'}}, namespace='urn:brocade.com:mgmt:brocade-preprovision', defining_module='brocade-preprovision', yang_type='list', is_config=True)""",
})
self.__rbridge_id = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_rbridge_id",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"bas... | Setter method for rbridge_id, mapped from YANG variable /preprovision/rbridge_id (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_rbridge_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_rbridge_id() directly. | [
"Setter",
"method",
"for",
"rbridge_id",
"mapped",
"from",
"YANG",
"variable",
"/",
"preprovision",
"/",
"rbridge_id",
"(",
"list",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
... | python | train |
RJT1990/pyflux | pyflux/ssm/nllm.py | https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/ssm/nllm.py#L278-L298 | def _animate_bbvi(self,stored_latent_variables,stored_predictive_likelihood):
""" Produces animated plot of BBVI optimization
Returns
----------
None (changes model attributes)
"""
from matplotlib.animation import FuncAnimation, writers
import matplotlib.pyplot as plt
import seaborn as sns
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ud = BBVINLLMAnimate(ax,self.data,stored_latent_variables,self.index,self.z_no,self.link)
anim = FuncAnimation(fig, ud, frames=np.arange(stored_latent_variables.shape[0]), init_func=ud.init,
interval=10, blit=True)
plt.plot(self.data)
plt.xlabel("Time")
plt.ylabel(self.data_name)
plt.show() | [
"def",
"_animate_bbvi",
"(",
"self",
",",
"stored_latent_variables",
",",
"stored_predictive_likelihood",
")",
":",
"from",
"matplotlib",
".",
"animation",
"import",
"FuncAnimation",
",",
"writers",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"import",
"seab... | Produces animated plot of BBVI optimization
Returns
----------
None (changes model attributes) | [
"Produces",
"animated",
"plot",
"of",
"BBVI",
"optimization"
] | python | train |
pytest-dev/pytest-xdist | xdist/dsession.py | https://github.com/pytest-dev/pytest-xdist/blob/9fcf8fa636bc69ee6cac9348a6ec20c87f2bb5e4/xdist/dsession.py#L238-L240 | def worker_logstart(self, node, nodeid, location):
"""Emitted when a node calls the pytest_runtest_logstart hook."""
self.config.hook.pytest_runtest_logstart(nodeid=nodeid, location=location) | [
"def",
"worker_logstart",
"(",
"self",
",",
"node",
",",
"nodeid",
",",
"location",
")",
":",
"self",
".",
"config",
".",
"hook",
".",
"pytest_runtest_logstart",
"(",
"nodeid",
"=",
"nodeid",
",",
"location",
"=",
"location",
")"
] | Emitted when a node calls the pytest_runtest_logstart hook. | [
"Emitted",
"when",
"a",
"node",
"calls",
"the",
"pytest_runtest_logstart",
"hook",
"."
] | python | train |
pypa/pipenv | pipenv/vendor/orderedmultidict/orderedmultidict.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/orderedmultidict/orderedmultidict.py#L274-L282 | def getlist(self, key, default=[]):
"""
Returns: The list of values for <key> if <key> is in the dictionary,
else <default>. If <default> is not provided, an empty list is
returned.
"""
if key in self:
return [node.value for node in self._map[key]]
return default | [
"def",
"getlist",
"(",
"self",
",",
"key",
",",
"default",
"=",
"[",
"]",
")",
":",
"if",
"key",
"in",
"self",
":",
"return",
"[",
"node",
".",
"value",
"for",
"node",
"in",
"self",
".",
"_map",
"[",
"key",
"]",
"]",
"return",
"default"
] | Returns: The list of values for <key> if <key> is in the dictionary,
else <default>. If <default> is not provided, an empty list is
returned. | [
"Returns",
":",
"The",
"list",
"of",
"values",
"for",
"<key",
">",
"if",
"<key",
">",
"is",
"in",
"the",
"dictionary",
"else",
"<default",
">",
".",
"If",
"<default",
">",
"is",
"not",
"provided",
"an",
"empty",
"list",
"is",
"returned",
"."
] | python | train |
gwastro/pycbc-glue | pycbc_glue/segments.py | https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/segments.py#L1207-L1232 | def is_coincident(self, other, keys = None):
"""
Return True if any segment in any list in self intersects
any segment in any list in other. If the optional keys
argument is not None, then it should be an iterable of keys
and only segment lists for those keys will be considered in
the test (instead of raising KeyError, keys not present in
both segment list dictionaries will be ignored). If keys
is None (the default) then all segment lists are
considered.
This method is equivalent to the intersects() method, but
without requiring the keys of the intersecting segment
lists to match.
"""
if keys is not None:
keys = set(keys)
self = tuple(self[key] for key in set(self) & keys)
other = tuple(other[key] for key in set(other) & keys)
else:
self = tuple(self.values())
other = tuple(other.values())
# make sure inner loop is smallest
if len(self) < len(other):
self, other = other, self
return any(a.intersects(b) for a in self for b in other) | [
"def",
"is_coincident",
"(",
"self",
",",
"other",
",",
"keys",
"=",
"None",
")",
":",
"if",
"keys",
"is",
"not",
"None",
":",
"keys",
"=",
"set",
"(",
"keys",
")",
"self",
"=",
"tuple",
"(",
"self",
"[",
"key",
"]",
"for",
"key",
"in",
"set",
... | Return True if any segment in any list in self intersects
any segment in any list in other. If the optional keys
argument is not None, then it should be an iterable of keys
and only segment lists for those keys will be considered in
the test (instead of raising KeyError, keys not present in
both segment list dictionaries will be ignored). If keys
is None (the default) then all segment lists are
considered.
This method is equivalent to the intersects() method, but
without requiring the keys of the intersecting segment
lists to match. | [
"Return",
"True",
"if",
"any",
"segment",
"in",
"any",
"list",
"in",
"self",
"intersects",
"any",
"segment",
"in",
"any",
"list",
"in",
"other",
".",
"If",
"the",
"optional",
"keys",
"argument",
"is",
"not",
"None",
"then",
"it",
"should",
"be",
"an",
... | python | train |
materialsproject/pymatgen-db | matgendb/builders/util.py | https://github.com/materialsproject/pymatgen-db/blob/02e4351c2cea431407644f49193e8bf43ed39b9a/matgendb/builders/util.py#L36-L45 | def get_schema_dir(db_version=1):
"""Get path to directory with schemata.
:param db_version: Version of the database
:type db_version: int
:return: Path
:rtype: str
"""
v = str(db_version)
return os.path.join(_top_dir, '..', 'schemata', 'versions', v) | [
"def",
"get_schema_dir",
"(",
"db_version",
"=",
"1",
")",
":",
"v",
"=",
"str",
"(",
"db_version",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"_top_dir",
",",
"'..'",
",",
"'schemata'",
",",
"'versions'",
",",
"v",
")"
] | Get path to directory with schemata.
:param db_version: Version of the database
:type db_version: int
:return: Path
:rtype: str | [
"Get",
"path",
"to",
"directory",
"with",
"schemata",
"."
] | python | train |
svinota/mdns | mdns/zeroconf.py | https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L1712-L1719 | def get_service_info(self, type, name, timeout=3000):
"""Returns network's service information for a particular
name and type, or None if no service matches by the timeout,
which defaults to 3 seconds."""
info = ServiceInfo(type, name)
if info.request(self, timeout):
return info
return None | [
"def",
"get_service_info",
"(",
"self",
",",
"type",
",",
"name",
",",
"timeout",
"=",
"3000",
")",
":",
"info",
"=",
"ServiceInfo",
"(",
"type",
",",
"name",
")",
"if",
"info",
".",
"request",
"(",
"self",
",",
"timeout",
")",
":",
"return",
"info",... | Returns network's service information for a particular
name and type, or None if no service matches by the timeout,
which defaults to 3 seconds. | [
"Returns",
"network",
"s",
"service",
"information",
"for",
"a",
"particular",
"name",
"and",
"type",
"or",
"None",
"if",
"no",
"service",
"matches",
"by",
"the",
"timeout",
"which",
"defaults",
"to",
"3",
"seconds",
"."
] | python | train |
astrocatalogs/astrocats | astrocats/catalog/argshandler.py | https://github.com/astrocatalogs/astrocats/blob/11abc3131c6366ecd23964369e55ff264add7805/astrocats/catalog/argshandler.py#L56-L68 | def load_args(self, args, clargs):
"""Parse arguments and return configuration settings.
"""
# Parse All Arguments
args = self.parser.parse_args(args=clargs, namespace=args)
# Print the help information if no subcommand is given
# subcommand is required for operation
if args.subcommand is None:
self.parser.print_help()
args = None
return args | [
"def",
"load_args",
"(",
"self",
",",
"args",
",",
"clargs",
")",
":",
"# Parse All Arguments",
"args",
"=",
"self",
".",
"parser",
".",
"parse_args",
"(",
"args",
"=",
"clargs",
",",
"namespace",
"=",
"args",
")",
"# Print the help information if no subcommand ... | Parse arguments and return configuration settings. | [
"Parse",
"arguments",
"and",
"return",
"configuration",
"settings",
"."
] | python | train |
Nic30/ipCorePackager | ipCorePackager/packager.py | https://github.com/Nic30/ipCorePackager/blob/0af4e56ebfdc3749fffa40d50d9ccbf8b5445881/ipCorePackager/packager.py#L67-L84 | def mkAutoGui(self):
"""
:summary: automatically generate simple gui in TCL
"""
gui = GuiBuilder()
p0 = gui.page("Main")
handlers = []
for p in self.iterParams(self.top):
name = self.getParamPhysicalName(p)
p0.param(name)
for fn in paramManipulatorFns(name):
handlers.append(fn)
with open(self.guiFile, "w") as f:
f.write(gui.asTcl())
for h in handlers:
f.write('\n\n')
f.write(str(h)) | [
"def",
"mkAutoGui",
"(",
"self",
")",
":",
"gui",
"=",
"GuiBuilder",
"(",
")",
"p0",
"=",
"gui",
".",
"page",
"(",
"\"Main\"",
")",
"handlers",
"=",
"[",
"]",
"for",
"p",
"in",
"self",
".",
"iterParams",
"(",
"self",
".",
"top",
")",
":",
"name",... | :summary: automatically generate simple gui in TCL | [
":",
"summary",
":",
"automatically",
"generate",
"simple",
"gui",
"in",
"TCL"
] | python | train |
LordSputnik/mutagen | mutagen/apev2.py | https://github.com/LordSputnik/mutagen/blob/38e62c8dc35c72b16554f5dbe7c0fde91acc3411/mutagen/apev2.py#L386-L432 | def save(self, filename=None):
"""Save changes to a file.
If no filename is given, the one most recently loaded is used.
Tags are always written at the end of the file, and include
a header and a footer.
"""
filename = filename or self.filename
try:
fileobj = open(filename, "r+b")
except IOError:
fileobj = open(filename, "w+b")
data = _APEv2Data(fileobj)
if data.is_at_start:
delete_bytes(fileobj, data.end - data.start, data.start)
elif data.start is not None:
fileobj.seek(data.start)
# Delete an ID3v1 tag if present, too.
fileobj.truncate()
fileobj.seek(0, 2)
# "APE tags items should be sorted ascending by size... This is
# not a MUST, but STRONGLY recommended. Actually the items should
# be sorted by importance/byte, but this is not feasible."
tags = sorted((v._internal(k) for k, v in self.items()), key=len)
num_tags = len(tags)
tags = b"".join(tags)
header = bytearray(b"APETAGEX")
# version, tag size, item count, flags
header += struct.pack("<4I", 2000, len(tags) + 32, num_tags,
HAS_HEADER | IS_HEADER)
header += b"\0" * 8
fileobj.write(header)
fileobj.write(tags)
footer = bytearray(b"APETAGEX")
footer += struct.pack("<4I", 2000, len(tags) + 32, num_tags,
HAS_HEADER)
footer += b"\0" * 8
fileobj.write(footer)
fileobj.close() | [
"def",
"save",
"(",
"self",
",",
"filename",
"=",
"None",
")",
":",
"filename",
"=",
"filename",
"or",
"self",
".",
"filename",
"try",
":",
"fileobj",
"=",
"open",
"(",
"filename",
",",
"\"r+b\"",
")",
"except",
"IOError",
":",
"fileobj",
"=",
"open",
... | Save changes to a file.
If no filename is given, the one most recently loaded is used.
Tags are always written at the end of the file, and include
a header and a footer. | [
"Save",
"changes",
"to",
"a",
"file",
"."
] | python | test |
plivo/sharq | sharq/queue.py | https://github.com/plivo/sharq/blob/32bbfbdcbbaa8e154271ffd125ac4500382f3d19/sharq/queue.py#L76-L122 | def _load_lua_scripts(self):
"""Loads all lua scripts required by SharQ."""
# load lua scripts
lua_script_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'scripts/lua'
)
with open(os.path.join(
lua_script_path,
'enqueue.lua'), 'r') as enqueue_file:
self._lua_enqueue_script = enqueue_file.read()
self._lua_enqueue = self._r.register_script(
self._lua_enqueue_script)
with open(os.path.join(
lua_script_path,
'dequeue.lua'), 'r') as dequeue_file:
self._lua_dequeue_script = dequeue_file.read()
self._lua_dequeue = self._r.register_script(
self._lua_dequeue_script)
with open(os.path.join(
lua_script_path,
'finish.lua'), 'r') as finish_file:
self._lua_finish_script = finish_file.read()
self._lua_finish = self._r.register_script(self._lua_finish_script)
with open(os.path.join(
lua_script_path,
'interval.lua'), 'r') as interval_file:
self._lua_interval_script = interval_file.read()
self._lua_interval = self._r.register_script(
self._lua_interval_script)
with open(os.path.join(
lua_script_path,
'requeue.lua'), 'r') as requeue_file:
self._lua_requeue_script = requeue_file.read()
self._lua_requeue = self._r.register_script(
self._lua_requeue_script)
with open(os.path.join(
lua_script_path,
'metrics.lua'), 'r') as metrics_file:
self._lua_metrics_script = metrics_file.read()
self._lua_metrics = self._r.register_script(
self._lua_metrics_script) | [
"def",
"_load_lua_scripts",
"(",
"self",
")",
":",
"# load lua scripts",
"lua_script_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
")",
",",
"'scripts/lua... | Loads all lua scripts required by SharQ. | [
"Loads",
"all",
"lua",
"scripts",
"required",
"by",
"SharQ",
"."
] | python | train |
titusjan/argos | argos/config/qtctis.py | https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/config/qtctis.py#L576-L592 | def createPen(self, altStyle=None, altWidth=None):
""" Creates a pen from the config values with the style overridden by altStyle if the
None-option is selected in the combo box.
"""
pen = self.configValue
if pen is not None:
style = self.findByNodePath('style').configValue
if style is None and altStyle is not None:
pen.setStyle(altStyle)
width = self.findByNodePath('width').configValue
if width == 0.0 and altWidth is not None:
#logger.debug("Setting altWidth = {!r}".format(altWidth))
pen.setWidthF(altWidth)
return pen | [
"def",
"createPen",
"(",
"self",
",",
"altStyle",
"=",
"None",
",",
"altWidth",
"=",
"None",
")",
":",
"pen",
"=",
"self",
".",
"configValue",
"if",
"pen",
"is",
"not",
"None",
":",
"style",
"=",
"self",
".",
"findByNodePath",
"(",
"'style'",
")",
".... | Creates a pen from the config values with the style overridden by altStyle if the
None-option is selected in the combo box. | [
"Creates",
"a",
"pen",
"from",
"the",
"config",
"values",
"with",
"the",
"style",
"overridden",
"by",
"altStyle",
"if",
"the",
"None",
"-",
"option",
"is",
"selected",
"in",
"the",
"combo",
"box",
"."
] | python | train |
swistakm/graceful | src/graceful/resources/mixins.py | https://github.com/swistakm/graceful/blob/d4678cb6349a5c843a5e58002fc80140821609e4/src/graceful/resources/mixins.py#L168-L188 | def on_delete(self, req, resp, handler=None, **kwargs):
"""Respond on DELETE HTTP request assuming resource deletion flow.
This request handler assumes that DELETE requests are associated with
resource deletion. Thus default flow for such requests is:
* Delete existing resource instance.
* Set response status code to ``202 Accepted``.
Args:
req (falcon.Request): request object instance.
resp (falcon.Response): response object instance to be modified
handler (method): deletion method handler to be called. Defaults
to ``self.delete``.
**kwargs: additional keyword arguments retrieved from url template.
"""
self.handle(
handler or self.delete, req, resp, **kwargs
)
resp.status = falcon.HTTP_ACCEPTED | [
"def",
"on_delete",
"(",
"self",
",",
"req",
",",
"resp",
",",
"handler",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"handle",
"(",
"handler",
"or",
"self",
".",
"delete",
",",
"req",
",",
"resp",
",",
"*",
"*",
"kwargs",
")",
... | Respond on DELETE HTTP request assuming resource deletion flow.
This request handler assumes that DELETE requests are associated with
resource deletion. Thus default flow for such requests is:
* Delete existing resource instance.
* Set response status code to ``202 Accepted``.
Args:
req (falcon.Request): request object instance.
resp (falcon.Response): response object instance to be modified
handler (method): deletion method handler to be called. Defaults
to ``self.delete``.
**kwargs: additional keyword arguments retrieved from url template. | [
"Respond",
"on",
"DELETE",
"HTTP",
"request",
"assuming",
"resource",
"deletion",
"flow",
"."
] | python | train |
python-openxml/python-docx | docx/opc/pkgreader.py | https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/opc/pkgreader.py#L256-L270 | def target_partname(self):
"""
|PackURI| instance containing partname targeted by this relationship.
Raises ``ValueError`` on reference if target_mode is ``'External'``.
Use :attr:`target_mode` to check before referencing.
"""
if self.is_external:
msg = ('target_partname attribute on Relationship is undefined w'
'here TargetMode == "External"')
raise ValueError(msg)
# lazy-load _target_partname attribute
if not hasattr(self, '_target_partname'):
self._target_partname = PackURI.from_rel_ref(self._baseURI,
self.target_ref)
return self._target_partname | [
"def",
"target_partname",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_external",
":",
"msg",
"=",
"(",
"'target_partname attribute on Relationship is undefined w'",
"'here TargetMode == \"External\"'",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"# lazy-load _target_pa... | |PackURI| instance containing partname targeted by this relationship.
Raises ``ValueError`` on reference if target_mode is ``'External'``.
Use :attr:`target_mode` to check before referencing. | [
"|PackURI|",
"instance",
"containing",
"partname",
"targeted",
"by",
"this",
"relationship",
".",
"Raises",
"ValueError",
"on",
"reference",
"if",
"target_mode",
"is",
"External",
".",
"Use",
":",
"attr",
":",
"target_mode",
"to",
"check",
"before",
"referencing",... | python | train |
iterative/dvc | dvc/config.py | https://github.com/iterative/dvc/blob/8bb21261e34c9632453e09090de7ebe50e38d341/dvc/config.py#L455-L489 | def save(self, config=None):
"""Saves config to config files.
Args:
config (configobj.ConfigObj): optional config object to save.
Raises:
dvc.config.ConfigError: thrown if failed to write config file.
"""
if config is not None:
clist = [config]
else:
clist = [
self._system_config,
self._global_config,
self._repo_config,
self._local_config,
]
for conf in clist:
if conf.filename is None:
continue
try:
logger.debug("Writing '{}'.".format(conf.filename))
dname = os.path.dirname(os.path.abspath(conf.filename))
try:
os.makedirs(dname)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
conf.write()
except Exception as exc:
msg = "failed to write config '{}'".format(conf.filename)
raise ConfigError(msg, exc) | [
"def",
"save",
"(",
"self",
",",
"config",
"=",
"None",
")",
":",
"if",
"config",
"is",
"not",
"None",
":",
"clist",
"=",
"[",
"config",
"]",
"else",
":",
"clist",
"=",
"[",
"self",
".",
"_system_config",
",",
"self",
".",
"_global_config",
",",
"s... | Saves config to config files.
Args:
config (configobj.ConfigObj): optional config object to save.
Raises:
dvc.config.ConfigError: thrown if failed to write config file. | [
"Saves",
"config",
"to",
"config",
"files",
"."
] | python | train |
rstoneback/pysatCDF | pysatCDF/_cdf.py | https://github.com/rstoneback/pysatCDF/blob/479839f719dbece8e52d6bf6a466cb9506db6719/pysatCDF/_cdf.py#L538-L667 | def to_pysat(self, flatten_twod=True, units_label='UNITS', name_label='long_name',
fill_label='FILLVAL', plot_label='FieldNam',
min_label='ValidMin', max_label='ValidMax',
notes_label='Var_Notes', desc_label='CatDesc',
axis_label = 'LablAxis'):
"""
Exports loaded CDF data into data, meta for pysat module
Notes
-----
The *_labels should be set to the values in the file, if present.
Note that once the meta object returned from this function is attached
to a pysat.Instrument object then the *_labels on the Instrument
are assigned to the newly attached Meta object.
The pysat Meta object will use data with labels that match the patterns
in *_labels even if the case does not match.
Parameters
----------
flatten_twod : bool (True)
If True, then two dimensional data is flattened across
columns. Name mangling is used to group data, first column
is 'name', last column is 'name_end'. In between numbers are
appended 'name_1', 'name_2', etc. All data for a given 2D array
may be accessed via, data.ix[:,'item':'item_end']
If False, then 2D data is stored as a series of DataFrames,
indexed by Epoch. data.ix[0, 'item']
units_label : str
Identifier within metadata for units. Defults to CDAWab standard.
name_label : str
Identifier within metadata for variable name. Defults to 'long_name',
not normally present within CDAWeb files. If not, will use values
from the variable name in the file.
fill_label : str
Identifier within metadata for Fill Values. Defults to CDAWab standard.
plot_label : str
Identifier within metadata for variable name used when plotting.
Defults to CDAWab standard.
min_label : str
Identifier within metadata for minimim variable value.
Defults to CDAWab standard.
max_label : str
Identifier within metadata for maximum variable value.
Defults to CDAWab standard.
notes_label : str
Identifier within metadata for notes. Defults to CDAWab standard.
desc_label : str
Identifier within metadata for a variable description.
Defults to CDAWab standard.
axis_label : str
Identifier within metadata for axis name used when plotting.
Defults to CDAWab standard.
Returns
-------
pandas.DataFrame, pysat.Meta
Data and Metadata suitable for attachment to a pysat.Instrument
object.
"""
import string
import pysat
import pandas
# copy data
cdata = self.data.copy()
#
# create pysat.Meta object using data above
# and utilizing the attribute labels provided by the user
meta = pysat.Meta(pysat.DataFrame.from_dict(self.meta, orient='index'),
units_label=units_label, name_label=name_label,
fill_label=fill_label, plot_label=plot_label,
min_label=min_label, max_label=max_label,
notes_label=notes_label, desc_label=desc_label,
axis_label=axis_label)
# account for different possible cases for Epoch, epoch, EPOCH, epOch
lower_names = [name.lower() for name in meta.keys()]
for name, true_name in zip(lower_names, meta.keys()):
if name == 'epoch':
meta.data.rename(index={true_name: 'Epoch'}, inplace=True)
epoch = cdata.pop(true_name)
cdata['Epoch'] = epoch
# ready to format data, iterate over all of the data names
# and put into a pandas DataFrame
two_d_data = []
drop_list = []
for name in cdata.keys():
temp = np.shape(cdata[name])
# treat 2 dimensional data differently
if len(temp) == 2:
if not flatten_twod:
# put 2D data into a Frame at each time
# remove data from dict when adding to the DataFrame
frame = pysat.DataFrame(cdata[name].flatten(), columns=[name])
drop_list.append(name)
step = temp[0]
new_list = []
new_index = np.arange(step)
for i in np.arange(len(epoch)):
new_list.append(frame.iloc[i*step:(i+1)*step, :])
new_list[-1].index = new_index
#new_frame = pandas.DataFrame.from_records(new_list, index=epoch, columns=[name])
new_frame = pandas.Series(new_list, index=epoch, name=name)
two_d_data.append(new_frame)
else:
# flatten 2D into series of 1D columns
new_names = [name + '_{i}'.format(i=i) for i in np.arange(temp[0] - 2)]
new_names.append(name + '_end')
new_names.insert(0, name)
# remove data from dict when adding to the DataFrame
drop_list.append(name)
frame = pysat.DataFrame(cdata[name].T,
index=epoch,
columns=new_names)
two_d_data.append(frame)
for name in drop_list:
_ = cdata.pop(name)
# all of the data left over is 1D, add as Series
data = pysat.DataFrame(cdata, index=epoch)
two_d_data.append(data)
data = pandas.concat(two_d_data, axis=1)
data.drop('Epoch', axis=1, inplace=True)
return data, meta | [
"def",
"to_pysat",
"(",
"self",
",",
"flatten_twod",
"=",
"True",
",",
"units_label",
"=",
"'UNITS'",
",",
"name_label",
"=",
"'long_name'",
",",
"fill_label",
"=",
"'FILLVAL'",
",",
"plot_label",
"=",
"'FieldNam'",
",",
"min_label",
"=",
"'ValidMin'",
",",
... | Exports loaded CDF data into data, meta for pysat module
Notes
-----
The *_labels should be set to the values in the file, if present.
Note that once the meta object returned from this function is attached
to a pysat.Instrument object then the *_labels on the Instrument
are assigned to the newly attached Meta object.
The pysat Meta object will use data with labels that match the patterns
in *_labels even if the case does not match.
Parameters
----------
flatten_twod : bool (True)
If True, then two dimensional data is flattened across
columns. Name mangling is used to group data, first column
is 'name', last column is 'name_end'. In between numbers are
appended 'name_1', 'name_2', etc. All data for a given 2D array
may be accessed via, data.ix[:,'item':'item_end']
If False, then 2D data is stored as a series of DataFrames,
indexed by Epoch. data.ix[0, 'item']
units_label : str
Identifier within metadata for units. Defaults to CDAWeb standard.
name_label : str
Identifier within metadata for variable name. Defaults to 'long_name',
not normally present within CDAWeb files. If not, will use values
from the variable name in the file.
fill_label : str
Identifier within metadata for Fill Values. Defaults to CDAWeb standard.
plot_label : str
Identifier within metadata for variable name used when plotting.
Defaults to CDAWeb standard.
min_label : str
Identifier within metadata for minimum variable value.
Defaults to CDAWeb standard.
max_label : str
Identifier within metadata for maximum variable value.
Defaults to CDAWeb standard.
notes_label : str
Identifier within metadata for notes. Defaults to CDAWeb standard.
desc_label : str
Identifier within metadata for a variable description.
Defaults to CDAWeb standard.
axis_label : str
Identifier within metadata for axis name used when plotting.
Defaults to CDAWeb standard.
Returns
-------
pandas.DataFrame, pysat.Meta
Data and Metadata suitable for attachment to a pysat.Instrument
object. | [
"Exports",
"loaded",
"CDF",
"data",
"into",
"data",
"meta",
"for",
"pysat",
"module",
"Notes",
"-----",
"The",
"*",
"_labels",
"should",
"be",
"set",
"to",
"the",
"values",
"in",
"the",
"file",
"if",
"present",
".",
"Note",
"that",
"once",
"the",
"meta",... | python | valid |
cga-harvard/Hypermap-Registry | hypermap/aggregator/utils.py | https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/utils.py#L404-L412 | def inverse_mercator(xy):
"""
Given coordinates in spherical mercator, return a lon,lat tuple.
"""
lon = (xy[0] / 20037508.34) * 180
lat = (xy[1] / 20037508.34) * 180
lat = 180 / math.pi * \
(2 * math.atan(math.exp(lat * math.pi / 180)) - math.pi / 2)
return (lon, lat) | [
"def",
"inverse_mercator",
"(",
"xy",
")",
":",
"lon",
"=",
"(",
"xy",
"[",
"0",
"]",
"/",
"20037508.34",
")",
"*",
"180",
"lat",
"=",
"(",
"xy",
"[",
"1",
"]",
"/",
"20037508.34",
")",
"*",
"180",
"lat",
"=",
"180",
"/",
"math",
".",
"pi",
"... | Given coordinates in spherical mercator, return a lon,lat tuple. | [
"Given",
"coordinates",
"in",
"spherical",
"mercator",
"return",
"a",
"lon",
"lat",
"tuple",
"."
] | python | train |
quantopian/empyrical | empyrical/perf_attrib.py | https://github.com/quantopian/empyrical/blob/badbdca75f5b293f28b5e947974894de041d6868/empyrical/perf_attrib.py#L4-L97 | def perf_attrib(returns,
positions,
factor_returns,
factor_loadings):
"""
Attributes the performance of a returns stream to a set of risk factors.
Performance attribution determines how much each risk factor, e.g.,
momentum, the technology sector, etc., contributed to total returns, as
well as the daily exposure to each of the risk factors. The returns that
can be attributed to one of the given risk factors are the
`common_returns`, and the returns that _cannot_ be attributed to a risk
factor are the `specific_returns`. The `common_returns` and
`specific_returns` summed together will always equal the total returns.
Parameters
----------
returns : pd.Series
Returns for each day in the date range.
- Example:
2017-01-01 -0.017098
2017-01-02 0.002683
2017-01-03 -0.008669
positions: pd.Series
Daily holdings in percentages, indexed by date.
- Examples:
dt ticker
2017-01-01 AAPL 0.417582
TLT 0.010989
XOM 0.571429
2017-01-02 AAPL 0.202381
TLT 0.535714
XOM 0.261905
factor_returns : pd.DataFrame
Returns by factor, with date as index and factors as columns
- Example:
momentum reversal
2017-01-01 0.002779 -0.005453
2017-01-02 0.001096 0.010290
factor_loadings : pd.DataFrame
Factor loadings for all days in the date range, with date and ticker as
index, and factors as columns.
- Example:
momentum reversal
dt ticker
2017-01-01 AAPL -1.592914 0.852830
TLT 0.184864 0.895534
XOM 0.993160 1.149353
2017-01-02 AAPL -0.140009 -0.524952
TLT -1.066978 0.185435
XOM -1.798401 0.761549
Returns
-------
tuple of (risk_exposures_portfolio, perf_attribution)
risk_exposures_portfolio : pd.DataFrame
df indexed by datetime, with factors as columns
- Example:
momentum reversal
dt
2017-01-01 -0.238655 0.077123
2017-01-02 0.821872 1.520515
perf_attribution : pd.DataFrame
df with factors, common returns, and specific returns as columns,
and datetimes as index
- Example:
momentum reversal common_returns specific_returns
dt
2017-01-01 0.249087 0.935925 1.185012 1.185012
2017-01-02 -0.003194 -0.400786 -0.403980 -0.403980
Note
----
See https://en.wikipedia.org/wiki/Performance_attribution for more details.
"""
risk_exposures_portfolio = compute_exposures(positions,
factor_loadings)
perf_attrib_by_factor = risk_exposures_portfolio.multiply(factor_returns)
common_returns = perf_attrib_by_factor.sum(axis='columns')
specific_returns = returns - common_returns
returns_df = pd.DataFrame({'total_returns': returns,
'common_returns': common_returns,
'specific_returns': specific_returns})
return (risk_exposures_portfolio,
pd.concat([perf_attrib_by_factor, returns_df], axis='columns')) | [
"def",
"perf_attrib",
"(",
"returns",
",",
"positions",
",",
"factor_returns",
",",
"factor_loadings",
")",
":",
"risk_exposures_portfolio",
"=",
"compute_exposures",
"(",
"positions",
",",
"factor_loadings",
")",
"perf_attrib_by_factor",
"=",
"risk_exposures_portfolio",
... | Attributes the performance of a returns stream to a set of risk factors.
Performance attribution determines how much each risk factor, e.g.,
momentum, the technology sector, etc., contributed to total returns, as
well as the daily exposure to each of the risk factors. The returns that
can be attributed to one of the given risk factors are the
`common_returns`, and the returns that _cannot_ be attributed to a risk
factor are the `specific_returns`. The `common_returns` and
`specific_returns` summed together will always equal the total returns.
Parameters
----------
returns : pd.Series
Returns for each day in the date range.
- Example:
2017-01-01 -0.017098
2017-01-02 0.002683
2017-01-03 -0.008669
positions: pd.Series
Daily holdings in percentages, indexed by date.
- Examples:
dt ticker
2017-01-01 AAPL 0.417582
TLT 0.010989
XOM 0.571429
2017-01-02 AAPL 0.202381
TLT 0.535714
XOM 0.261905
factor_returns : pd.DataFrame
Returns by factor, with date as index and factors as columns
- Example:
momentum reversal
2017-01-01 0.002779 -0.005453
2017-01-02 0.001096 0.010290
factor_loadings : pd.DataFrame
Factor loadings for all days in the date range, with date and ticker as
index, and factors as columns.
- Example:
momentum reversal
dt ticker
2017-01-01 AAPL -1.592914 0.852830
TLT 0.184864 0.895534
XOM 0.993160 1.149353
2017-01-02 AAPL -0.140009 -0.524952
TLT -1.066978 0.185435
XOM -1.798401 0.761549
Returns
-------
tuple of (risk_exposures_portfolio, perf_attribution)
risk_exposures_portfolio : pd.DataFrame
df indexed by datetime, with factors as columns
- Example:
momentum reversal
dt
2017-01-01 -0.238655 0.077123
2017-01-02 0.821872 1.520515
perf_attribution : pd.DataFrame
df with factors, common returns, and specific returns as columns,
and datetimes as index
- Example:
momentum reversal common_returns specific_returns
dt
2017-01-01 0.249087 0.935925 1.185012 1.185012
2017-01-02 -0.003194 -0.400786 -0.403980 -0.403980
Note
----
See https://en.wikipedia.org/wiki/Performance_attribution for more details. | [
"Attributes",
"the",
"performance",
"of",
"a",
"returns",
"stream",
"to",
"a",
"set",
"of",
"risk",
"factors",
"."
] | python | train |
bitesofcode/projexui | projexui/widgets/xorbtreewidget/xorbtreewidget.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xorbtreewidget/xorbtreewidget.py#L1172-L1181 | def emitRecordMiddleClicked(self, item):
"""
Emits the record clicked signal for the given item, provided the
signals are not currently blocked.
:param item | <QTreeWidgetItem>
"""
# emit that the record has been double clicked
if isinstance(item, XOrbRecordItem) and not self.signalsBlocked():
self.recordMiddleClicked.emit(item.record()) | [
"def",
"emitRecordMiddleClicked",
"(",
"self",
",",
"item",
")",
":",
"# emit that the record has been double clicked\r",
"if",
"isinstance",
"(",
"item",
",",
"XOrbRecordItem",
")",
"and",
"not",
"self",
".",
"signalsBlocked",
"(",
")",
":",
"self",
".",
"recordM... | Emits the record clicked signal for the given item, provided the
signals are not currently blocked.
:param item | <QTreeWidgetItem> | [
"Emits",
"the",
"record",
"clicked",
"signal",
"for",
"the",
"given",
"item",
"provided",
"the",
"signals",
"are",
"not",
"currently",
"blocked",
".",
":",
"param",
"item",
"|",
"<QTreeWidgetItem",
">"
] | python | train |
abourget/gevent-socketio | socketio/packet.py | https://github.com/abourget/gevent-socketio/blob/1cdb1594a315326987a17ce0924ea448a82fab01/socketio/packet.py#L36-L103 | def encode(data, json_dumps=default_json_dumps):
"""
Encode an attribute dict into a byte string.
"""
payload = ''
msg = str(MSG_TYPES[data['type']])
if msg in ['0', '1']:
# '1::' [path] [query]
msg += '::' + data['endpoint']
if 'qs' in data and data['qs'] != '':
msg += ':' + data['qs']
elif msg == '2':
# heartbeat
msg += '::'
elif msg in ['3', '4', '5']:
# '3:' [id ('+')] ':' [endpoint] ':' [data]
# '4:' [id ('+')] ':' [endpoint] ':' [json]
# '5:' [id ('+')] ':' [endpoint] ':' [json encoded event]
# The message id is an incremental integer, required for ACKs.
# If the message id is followed by a +, the ACK is not handled by
# socket.io, but by the user instead.
if msg == '3':
payload = data['data']
if msg == '4':
payload = json_dumps(data['data'])
if msg == '5':
d = {}
d['name'] = data['name']
if 'args' in data and data['args'] != []:
d['args'] = data['args']
payload = json_dumps(d)
if 'id' in data:
msg += ':' + str(data['id'])
if data['ack'] == 'data':
msg += '+'
msg += ':'
else:
msg += '::'
if 'endpoint' not in data:
data['endpoint'] = ''
if payload != '':
msg += data['endpoint'] + ':' + payload
else:
msg += data['endpoint']
elif msg == '6':
# '6:::' [id] '+' [data]
msg += '::' + data.get('endpoint', '') + ':' + str(data['ackId'])
if 'args' in data and data['args'] != []:
msg += '+' + json_dumps(data['args'])
elif msg == '7':
# '7::' [endpoint] ':' [reason] '+' [advice]
msg += ':::'
if 'reason' in data and data['reason'] != '':
msg += str(ERROR_REASONS[data['reason']])
if 'advice' in data and data['advice'] != '':
msg += '+' + str(ERROR_ADVICES[data['advice']])
msg += data['endpoint']
# NoOp, used to close a poll after the polling duration time
elif msg == '8':
msg += '::'
return msg | [
"def",
"encode",
"(",
"data",
",",
"json_dumps",
"=",
"default_json_dumps",
")",
":",
"payload",
"=",
"''",
"msg",
"=",
"str",
"(",
"MSG_TYPES",
"[",
"data",
"[",
"'type'",
"]",
"]",
")",
"if",
"msg",
"in",
"[",
"'0'",
",",
"'1'",
"]",
":",
"# '1::... | Encode an attribute dict into a byte string. | [
"Encode",
"an",
"attribute",
"dict",
"into",
"a",
"byte",
"string",
"."
] | python | valid |
paramiko/paramiko | paramiko/sftp_client.py | https://github.com/paramiko/paramiko/blob/cf7d49d66f3b1fbc8b0853518a54050182b3b5eb/paramiko/sftp_client.py#L402-L423 | def rename(self, oldpath, newpath):
"""
Rename a file or folder from ``oldpath`` to ``newpath``.
.. note::
This method implements 'standard' SFTP ``RENAME`` behavior; those
seeking the OpenSSH "POSIX rename" extension behavior should use
`posix_rename`.
:param str oldpath:
existing name of the file or folder
:param str newpath:
new name for the file or folder, must not exist already
:raises:
``IOError`` -- if ``newpath`` is a folder, or something else goes
wrong
"""
oldpath = self._adjust_cwd(oldpath)
newpath = self._adjust_cwd(newpath)
self._log(DEBUG, "rename({!r}, {!r})".format(oldpath, newpath))
self._request(CMD_RENAME, oldpath, newpath) | [
"def",
"rename",
"(",
"self",
",",
"oldpath",
",",
"newpath",
")",
":",
"oldpath",
"=",
"self",
".",
"_adjust_cwd",
"(",
"oldpath",
")",
"newpath",
"=",
"self",
".",
"_adjust_cwd",
"(",
"newpath",
")",
"self",
".",
"_log",
"(",
"DEBUG",
",",
"\"rename(... | Rename a file or folder from ``oldpath`` to ``newpath``.
.. note::
This method implements 'standard' SFTP ``RENAME`` behavior; those
seeking the OpenSSH "POSIX rename" extension behavior should use
`posix_rename`.
:param str oldpath:
existing name of the file or folder
:param str newpath:
new name for the file or folder, must not exist already
:raises:
``IOError`` -- if ``newpath`` is a folder, or something else goes
wrong | [
"Rename",
"a",
"file",
"or",
"folder",
"from",
"oldpath",
"to",
"newpath",
"."
] | python | train |
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/utils/__init__.py | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/__init__.py#L133-L147 | def load_config_module():
"""
If the config.py file exists, import it as a module. If it does not exist,
call sys.exit() with a request to run oaepub configure.
"""
import imp
config_path = config_location()
try:
config = imp.load_source('config', config_path)
except IOError:
log.critical('Config file not found. oaepub exiting...')
sys.exit('Config file not found. Please run \'oaepub configure\'')
else:
log.debug('Config file loaded from {0}'.format(config_path))
return config | [
"def",
"load_config_module",
"(",
")",
":",
"import",
"imp",
"config_path",
"=",
"config_location",
"(",
")",
"try",
":",
"config",
"=",
"imp",
".",
"load_source",
"(",
"'config'",
",",
"config_path",
")",
"except",
"IOError",
":",
"log",
".",
"critical",
... | If the config.py file exists, import it as a module. If it does not exist,
call sys.exit() with a request to run oaepub configure. | [
"If",
"the",
"config",
".",
"py",
"file",
"exists",
"import",
"it",
"as",
"a",
"module",
".",
"If",
"it",
"does",
"not",
"exist",
"call",
"sys",
".",
"exit",
"()",
"with",
"a",
"request",
"to",
"run",
"oaepub",
"configure",
"."
] | python | train |
apache/incubator-heron | heron/tools/explorer/src/python/physicalplan.py | https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/explorer/src/python/physicalplan.py#L61-L72 | def parse_topo_loc(cl_args):
""" parse topology location """
try:
topo_loc = cl_args['cluster/[role]/[env]'].split('/')
topo_name = cl_args['topology-name']
topo_loc.append(topo_name)
if len(topo_loc) != 4:
raise
return topo_loc
except Exception:
Log.error('Invalid topology location')
raise | [
"def",
"parse_topo_loc",
"(",
"cl_args",
")",
":",
"try",
":",
"topo_loc",
"=",
"cl_args",
"[",
"'cluster/[role]/[env]'",
"]",
".",
"split",
"(",
"'/'",
")",
"topo_name",
"=",
"cl_args",
"[",
"'topology-name'",
"]",
"topo_loc",
".",
"append",
"(",
"topo_name... | parse topology location | [
"parse",
"topology",
"location"
] | python | valid |
tradenity/python-sdk | tradenity/resources/gateway.py | https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/gateway.py#L291-L313 | def list_all_gateways(cls, **kwargs):
"""List Gateways
Return a list of Gateways
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_gateways(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[Gateway]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_gateways_with_http_info(**kwargs)
else:
(data) = cls._list_all_gateways_with_http_info(**kwargs)
return data | [
"def",
"list_all_gateways",
"(",
"cls",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async'",
")",
":",
"return",
"cls",
".",
"_list_all_gateways_with_http_info",
"(",
"*",... | List Gateways
Return a list of Gateways
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_gateways(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[Gateway]
If the method is called asynchronously,
returns the request thread. | [
"List",
"Gateways"
] | python | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.