text
stringlengths 89
104k
| code_tokens
list | avg_line_len
float64 7.91
980
| score
float64 0
630
|
|---|---|---|---|
def close(self):
    """
    Close the stream.

    Flushes any buffered data, closes the underlying stream and then
    copies the finished file to its final destination via the client.
    Errors are reported but never propagated, so close() stays safe to
    call from cleanup paths.
    """
    self.flush()
    try:
        if self.stream is not None:
            self.stream.flush()
            # Remember the name before closing: some stream objects drop it.
            _name = self.stream.name
            self.stream.close()
            self.client.copy(_name, self.filename)
    except Exception as ex:
        # Bug fix: `print str(ex)` is Python 2-only statement syntax and a
        # SyntaxError on Python 3; the call form works on both versions.
        print(str(ex))
|
[
"def",
"close",
"(",
"self",
")",
":",
"self",
".",
"flush",
"(",
")",
"try",
":",
"if",
"self",
".",
"stream",
"is",
"not",
"None",
":",
"self",
".",
"stream",
".",
"flush",
"(",
")",
"_name",
"=",
"self",
".",
"stream",
".",
"name",
"self",
".",
"stream",
".",
"close",
"(",
")",
"self",
".",
"client",
".",
"copy",
"(",
"_name",
",",
"self",
".",
"filename",
")",
"except",
"Exception",
"as",
"ex",
":",
"print",
"str",
"(",
"ex",
")",
"pass"
] | 26.5
| 11.357143
|
def to_fmt(self) -> str:
    """
    Provide a useful representation of the register.
    """
    # NOTE(review): despite the `-> str` annotation this returns the
    # fmt.end block itself, exactly as before -- confirm callers stringify it.
    infos = fmt.end(";\n", [])
    id_block = fmt.sep(', ', [])
    for state_id in sorted(self.states.keys()):
        id_block.lsdata.append(str(state_id))
    infos.lsdata.append(fmt.block('(', ')', [id_block]))
    infos.lsdata.append("events:" + repr(self.events))
    infos.lsdata.append(
        "named_events:" + repr(list(self.named_events.keys()))
    )
    infos.lsdata.append("uid_events:" + repr(list(self.uid_events.keys())))
    return infos
|
[
"def",
"to_fmt",
"(",
"self",
")",
"->",
"str",
":",
"infos",
"=",
"fmt",
".",
"end",
"(",
"\";\\n\"",
",",
"[",
"]",
")",
"s",
"=",
"fmt",
".",
"sep",
"(",
"', '",
",",
"[",
"]",
")",
"for",
"ids",
"in",
"sorted",
"(",
"self",
".",
"states",
".",
"keys",
"(",
")",
")",
":",
"s",
".",
"lsdata",
".",
"append",
"(",
"str",
"(",
"ids",
")",
")",
"infos",
".",
"lsdata",
".",
"append",
"(",
"fmt",
".",
"block",
"(",
"'('",
",",
"')'",
",",
"[",
"s",
"]",
")",
")",
"infos",
".",
"lsdata",
".",
"append",
"(",
"\"events:\"",
"+",
"repr",
"(",
"self",
".",
"events",
")",
")",
"infos",
".",
"lsdata",
".",
"append",
"(",
"\"named_events:\"",
"+",
"repr",
"(",
"list",
"(",
"self",
".",
"named_events",
".",
"keys",
"(",
")",
")",
")",
")",
"infos",
".",
"lsdata",
".",
"append",
"(",
"\"uid_events:\"",
"+",
"repr",
"(",
"list",
"(",
"self",
".",
"uid_events",
".",
"keys",
"(",
")",
")",
")",
")",
"return",
"infos"
] | 37.4
| 14.466667
|
def inurl(needles, haystack, position='any'):
    """Convenience function to make substring search return bool.

    Args:
        needles: iterable of substrings to look for.
        haystack: string searched case-insensitively.
        position: 'any' (substring anywhere), 'begin' (prefix) or
            'end' (suffix); any other value never matches.

    Returns:
        True if at least one needle matches, False otherwise.
    """
    # Lowercase everything to do case-insensitive search.
    haystack2 = haystack.lower()
    for needle in needles:
        needle2 = needle.lower()
        # Short-circuit on the first hit: the original tallied every match
        # but only ever tested `count > 0`, so scanning on was wasted work.
        if position == 'any':
            if needle2 in haystack2:
                return True
        elif position == 'end':
            if haystack2.endswith(needle2):
                return True
        elif position == 'begin':
            if haystack2.startswith(needle2):
                return True
    return False
|
[
"def",
"inurl",
"(",
"needles",
",",
"haystack",
",",
"position",
"=",
"'any'",
")",
":",
"count",
"=",
"0",
"# lowercase everything to do case-insensitive search",
"haystack2",
"=",
"haystack",
".",
"lower",
"(",
")",
"for",
"needle",
"in",
"needles",
":",
"needle2",
"=",
"needle",
".",
"lower",
"(",
")",
"if",
"position",
"==",
"'any'",
":",
"if",
"haystack2",
".",
"find",
"(",
"needle2",
")",
">",
"-",
"1",
":",
"count",
"+=",
"1",
"elif",
"position",
"==",
"'end'",
":",
"if",
"haystack2",
".",
"endswith",
"(",
"needle2",
")",
":",
"count",
"+=",
"1",
"elif",
"position",
"==",
"'begin'",
":",
"if",
"haystack2",
".",
"startswith",
"(",
"needle2",
")",
":",
"count",
"+=",
"1",
"# assessment",
"if",
"count",
">",
"0",
":",
"return",
"True",
"return",
"False"
] | 26.541667
| 17.125
|
def _get_path():
    """Guarantee that /usr/local/bin and /usr/bin are in PATH.

    The computed value is cached in the module-level ``_path`` list so the
    environment is only inspected once; subsequent calls return the cache.
    """
    if _path:
        return _path[0]
    # Bug fix: the original collected entries in a set(), which randomizes
    # their order. PATH lookup order is significant, so keep the user's
    # ordering and only append the directories that are missing.
    environ_paths = os.environ['PATH'].split(':')
    present = set(environ_paths)
    for required in ('/usr/local/bin', '/usr/bin'):
        if required not in present:
            environ_paths.append(required)
    _path.append(':'.join(environ_paths))
    logger.debug('PATH = %s', _path[-1])
    return _path[0]
|
[
"def",
"_get_path",
"(",
")",
":",
"if",
"_path",
":",
"return",
"_path",
"[",
"0",
"]",
"environ_paths",
"=",
"set",
"(",
"os",
".",
"environ",
"[",
"'PATH'",
"]",
".",
"split",
"(",
"':'",
")",
")",
"environ_paths",
".",
"add",
"(",
"'/usr/local/bin'",
")",
"environ_paths",
".",
"add",
"(",
"'/usr/bin'",
")",
"_path",
".",
"append",
"(",
"':'",
".",
"join",
"(",
"environ_paths",
")",
")",
"logger",
".",
"debug",
"(",
"'PATH = %s'",
",",
"_path",
"[",
"-",
"1",
"]",
")",
"return",
"_path",
"[",
"0",
"]"
] | 34.2
| 11.2
|
def get_api_user_key(self, api_dev_key, username=None, password=None):
    '''
    Get api user key to enable posts from user accounts if username
    and password available.
    Not getting an api_user_key means that the posts will be "guest" posts

    :param api_dev_key: developer API key sent along with the login request
    :param username: pastebin user name; falls back to the
        ``pastebin/api_user_name`` config entry when None
    :param password: pastebin password; falls back to the
        ``pastebin/api_user_password`` config entry when None
    :return: the user key string returned by pastebin, or "" when no
        credentials are available (guest mode)
    '''
    # Explicit arguments win; the config file only supplies defaults.
    username = username or get_config('pastebin', 'api_user_name')
    password = password or get_config('pastebin', 'api_user_password')
    if username and password:
        data = {
            'api_user_name': username,
            'api_user_password': password,
            'api_dev_key': api_dev_key,
        }
        # NOTE(review): urllib.urlencode / urllib2 are Python 2 APIs; on
        # Python 3 these live in urllib.parse / urllib.request.
        urlencoded_data = urllib.urlencode(data)
        req = urllib2.Request('http://pastebin.com/api/api_login.php',
                              urlencoded_data)
        # Blocking network call; any HTTP/URL error propagates to the caller.
        response = urllib2.urlopen(req)
        user_key = response.read()
        logging.debug("User key: %s" % user_key)
        return user_key
    else:
        logging.info("Pastebin: not using any user key")
        return ""
|
[
"def",
"get_api_user_key",
"(",
"self",
",",
"api_dev_key",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
")",
":",
"username",
"=",
"username",
"or",
"get_config",
"(",
"'pastebin'",
",",
"'api_user_name'",
")",
"password",
"=",
"password",
"or",
"get_config",
"(",
"'pastebin'",
",",
"'api_user_password'",
")",
"if",
"username",
"and",
"password",
":",
"data",
"=",
"{",
"'api_user_name'",
":",
"username",
",",
"'api_user_password'",
":",
"password",
",",
"'api_dev_key'",
":",
"api_dev_key",
",",
"}",
"urlencoded_data",
"=",
"urllib",
".",
"urlencode",
"(",
"data",
")",
"req",
"=",
"urllib2",
".",
"Request",
"(",
"'http://pastebin.com/api/api_login.php'",
",",
"urlencoded_data",
")",
"response",
"=",
"urllib2",
".",
"urlopen",
"(",
"req",
")",
"user_key",
"=",
"response",
".",
"read",
"(",
")",
"logging",
".",
"debug",
"(",
"\"User key: %s\"",
"%",
"user_key",
")",
"return",
"user_key",
"else",
":",
"logging",
".",
"info",
"(",
"\"Pastebin: not using any user key\"",
")",
"return",
"\"\""
] | 43.458333
| 18.625
|
def contingency_table(dataframe, rownames, colnames, margins=True):
    """Contingency Table (also called Cross Tabulation)

    - Table in a matrix format that displays the (multivariate) frequency distribution of the variables
    - http://en.wikipedia.org/wiki/Contingency_table

    Args:
        rownames: the column name or list of columns names that make the keys of the rows
        colnames: the column name or list of columns names that make the keys of the columns
        margins: whether to add the "All" subtotal row/column (default True)
    """
    # Restrict the frame to the key columns before pivoting; counting is
    # done by aggfunc=len, missing combinations become 0.
    keys = [rownames, colnames]
    return dataframe[keys].pivot_table(
        index=rownames,
        columns=colnames,
        margins=margins,
        aggfunc=len,
        fill_value=0,
    )
|
[
"def",
"contingency_table",
"(",
"dataframe",
",",
"rownames",
",",
"colnames",
",",
"margins",
"=",
"True",
")",
":",
"# Taking just the rownames + colnames of the dataframe",
"sub_set",
"=",
"[",
"rownames",
",",
"colnames",
"]",
"_sub_df",
"=",
"dataframe",
"[",
"sub_set",
"]",
"return",
"_sub_df",
".",
"pivot_table",
"(",
"index",
"=",
"rownames",
",",
"columns",
"=",
"colnames",
",",
"margins",
"=",
"margins",
",",
"aggfunc",
"=",
"len",
",",
"fill_value",
"=",
"0",
")"
] | 60.083333
| 28.583333
|
def load_conf(self, instance_id, instance_name, conf):
    """Load configuration received from Arbiter and pushed by our Scheduler daemon

    :param instance_name: scheduler instance name
    :type instance_name: str
    :param instance_id: scheduler instance id
    :type instance_id: str
    :param conf: configuration to load
    :type conf: alignak.objects.config.Config
    :return: None
    """
    self.pushed_conf = conf
    logger.info("loading my configuration (%s / %s):",
                instance_id, self.pushed_conf.instance_id)
    # Dump the received configuration content at debug level.
    logger.debug("Properties:")
    for key in sorted(self.pushed_conf.properties):
        logger.debug("- %s: %s", key, getattr(self.pushed_conf, key, []))
    logger.debug("Macros:")
    for key in sorted(self.pushed_conf.macros):
        logger.debug("- %s: %s", key, getattr(self.pushed_conf.macros, key, []))
    logger.debug("Objects types:")
    # Copy each monitored-objects collection from the pushed configuration
    # onto self; the daemon-link types listed below are skipped because
    # they are not scheduled objects.
    for _, _, strclss, _, _ in list(self.pushed_conf.types_creations.values()):
        if strclss in ['arbiters', 'schedulers', 'brokers',
                       'pollers', 'reactionners', 'receivers']:
            continue
        setattr(self, strclss, getattr(self.pushed_conf, strclss, []))
        # Internal statistics
        logger.debug("- %d %s", len(getattr(self, strclss)), strclss)
        statsmgr.gauge('configuration.%s' % strclss, len(getattr(self, strclss)))
    # We need reversed list for searching in the retention file read
    # todo: check what it is about...
    self.services.optimize_service_search(self.hosts)
    # Just deprecated
    # # Compile the triggers
    # if getattr(self, 'triggers', None):
    #     logger.info("compiling the triggers...")
    #     self.triggers.compile()
    #     self.triggers.load_objects(self)
    # else:
    #     logger.info("No triggers")
    # From the Arbiter configuration. Used for satellites to differentiate the schedulers
    self.alignak_name = self.pushed_conf.alignak_name
    self.instance_id = instance_id
    self.instance_name = instance_name
    # Note: the default is the string 'None', not the None object.
    self.push_flavor = getattr(self.pushed_conf, 'push_flavor', 'None')
    logger.info("Set my scheduler instance: %s - %s - %s",
                self.instance_id, self.instance_name, self.push_flavor)
    # Tag our monitored hosts/services with our instance_id
    for item in self.all_my_hosts_and_services():
        item.instance_id = self.instance_id
|
[
"def",
"load_conf",
"(",
"self",
",",
"instance_id",
",",
"instance_name",
",",
"conf",
")",
":",
"self",
".",
"pushed_conf",
"=",
"conf",
"logger",
".",
"info",
"(",
"\"loading my configuration (%s / %s):\"",
",",
"instance_id",
",",
"self",
".",
"pushed_conf",
".",
"instance_id",
")",
"logger",
".",
"debug",
"(",
"\"Properties:\"",
")",
"for",
"key",
"in",
"sorted",
"(",
"self",
".",
"pushed_conf",
".",
"properties",
")",
":",
"logger",
".",
"debug",
"(",
"\"- %s: %s\"",
",",
"key",
",",
"getattr",
"(",
"self",
".",
"pushed_conf",
",",
"key",
",",
"[",
"]",
")",
")",
"logger",
".",
"debug",
"(",
"\"Macros:\"",
")",
"for",
"key",
"in",
"sorted",
"(",
"self",
".",
"pushed_conf",
".",
"macros",
")",
":",
"logger",
".",
"debug",
"(",
"\"- %s: %s\"",
",",
"key",
",",
"getattr",
"(",
"self",
".",
"pushed_conf",
".",
"macros",
",",
"key",
",",
"[",
"]",
")",
")",
"logger",
".",
"debug",
"(",
"\"Objects types:\"",
")",
"for",
"_",
",",
"_",
",",
"strclss",
",",
"_",
",",
"_",
"in",
"list",
"(",
"self",
".",
"pushed_conf",
".",
"types_creations",
".",
"values",
"(",
")",
")",
":",
"if",
"strclss",
"in",
"[",
"'arbiters'",
",",
"'schedulers'",
",",
"'brokers'",
",",
"'pollers'",
",",
"'reactionners'",
",",
"'receivers'",
"]",
":",
"continue",
"setattr",
"(",
"self",
",",
"strclss",
",",
"getattr",
"(",
"self",
".",
"pushed_conf",
",",
"strclss",
",",
"[",
"]",
")",
")",
"# Internal statistics",
"logger",
".",
"debug",
"(",
"\"- %d %s\"",
",",
"len",
"(",
"getattr",
"(",
"self",
",",
"strclss",
")",
")",
",",
"strclss",
")",
"statsmgr",
".",
"gauge",
"(",
"'configuration.%s'",
"%",
"strclss",
",",
"len",
"(",
"getattr",
"(",
"self",
",",
"strclss",
")",
")",
")",
"# We need reversed list for searching in the retention file read",
"# todo: check what it is about...",
"self",
".",
"services",
".",
"optimize_service_search",
"(",
"self",
".",
"hosts",
")",
"# Just deprecated",
"# # Compile the triggers",
"# if getattr(self, 'triggers', None):",
"# logger.info(\"compiling the triggers...\")",
"# self.triggers.compile()",
"# self.triggers.load_objects(self)",
"# else:",
"# logger.info(\"No triggers\")",
"# From the Arbiter configuration. Used for satellites to differentiate the schedulers",
"self",
".",
"alignak_name",
"=",
"self",
".",
"pushed_conf",
".",
"alignak_name",
"self",
".",
"instance_id",
"=",
"instance_id",
"self",
".",
"instance_name",
"=",
"instance_name",
"self",
".",
"push_flavor",
"=",
"getattr",
"(",
"self",
".",
"pushed_conf",
",",
"'push_flavor'",
",",
"'None'",
")",
"logger",
".",
"info",
"(",
"\"Set my scheduler instance: %s - %s - %s\"",
",",
"self",
".",
"instance_id",
",",
"self",
".",
"instance_name",
",",
"self",
".",
"push_flavor",
")",
"# Tag our monitored hosts/services with our instance_id",
"for",
"item",
"in",
"self",
".",
"all_my_hosts_and_services",
"(",
")",
":",
"item",
".",
"instance_id",
"=",
"self",
".",
"instance_id"
] | 45
| 19.625
|
def local_ip():
    """Get the local network IP of this machine.

    Falls back to probing common interface names (eth0..2, en0..2,
    wlan0..2) when the hostname only resolves to a loopback address.

    :return: IP address string; may still be a 127.* loopback address
        if no interface probe succeeds.
    """
    ip = socket.gethostbyname(socket.gethostname())
    if ip.startswith('127.'):
        # Check eth0, eth1, eth2, en0, ...
        # Bug fix: `xrange` is Python 2-only; `range` behaves identically
        # here and works on both major versions.
        interfaces = [
            i + str(n) for i in ("eth", "en", "wlan") for n in range(3)
        ]  # :(
        for interface in interfaces:
            try:
                ip = interface_ip(interface)
                break
            except IOError:
                # Interface does not exist / has no address: try the next one.
                pass
    return ip
|
[
"def",
"local_ip",
"(",
")",
":",
"ip",
"=",
"socket",
".",
"gethostbyname",
"(",
"socket",
".",
"gethostname",
"(",
")",
")",
"if",
"ip",
".",
"startswith",
"(",
"'127.'",
")",
":",
"# Check eth0, eth1, eth2, en0, ...",
"interfaces",
"=",
"[",
"i",
"+",
"str",
"(",
"n",
")",
"for",
"i",
"in",
"(",
"\"eth\"",
",",
"\"en\"",
",",
"\"wlan\"",
")",
"for",
"n",
"in",
"xrange",
"(",
"3",
")",
"]",
"# :(",
"for",
"interface",
"in",
"interfaces",
":",
"try",
":",
"ip",
"=",
"interface_ip",
"(",
"interface",
")",
"break",
"except",
"IOError",
":",
"pass",
"return",
"ip"
] | 31.533333
| 15.666667
|
def bestModelInSprint(self, sprintIdx):
    """Return the best model ID and its errScore from the given sprint,
    which may still be in progress. This returns the best score from all
    models in the sprint which have matured so far.

    Parameters:
    ---------------------------------------------------------------------
    retval:        (modelId, errScore)
    """
    resultsDB = self._hsObj._resultsDB
    # Scan every swarm in the sprint, keeping the lowest error seen so far.
    winnerId, winnerScore = None, numpy.inf
    for swarmId in self.getAllSwarms(sprintIdx):
        modelId, errScore = resultsDB.bestModelIdAndErrScore(swarmId)
        if errScore < winnerScore:
            winnerId, winnerScore = modelId, errScore
    return (winnerId, winnerScore)
|
[
"def",
"bestModelInSprint",
"(",
"self",
",",
"sprintIdx",
")",
":",
"# Get all the swarms in this sprint",
"swarms",
"=",
"self",
".",
"getAllSwarms",
"(",
"sprintIdx",
")",
"# Get the best model and score from each swarm",
"bestModelId",
"=",
"None",
"bestErrScore",
"=",
"numpy",
".",
"inf",
"for",
"swarmId",
"in",
"swarms",
":",
"(",
"modelId",
",",
"errScore",
")",
"=",
"self",
".",
"_hsObj",
".",
"_resultsDB",
".",
"bestModelIdAndErrScore",
"(",
"swarmId",
")",
"if",
"errScore",
"<",
"bestErrScore",
":",
"bestModelId",
"=",
"modelId",
"bestErrScore",
"=",
"errScore",
"return",
"(",
"bestModelId",
",",
"bestErrScore",
")"
] | 35.5
| 16.181818
|
def submit_ham(self, params):
    """For submitting a ham comment to Akismet.

    :param params: dict of request parameters; must contain 'blog',
        'user_ip' and 'user_agent'.
    :raises MissingParams: when a required parameter is absent.
    :return: True when the API acknowledged the submission (HTTP 200 with
        a "true" body), False otherwise.
    """
    # Check required params for submit-ham
    for required in ['blog', 'user_ip', 'user_agent']:
        if required not in params:
            raise MissingParams(required)
    response = self._request('submit-ham', params)
    # Bug fix: the original tested `response.status is 200`, an identity
    # check that only works because CPython caches small ints; numeric
    # equality must use `==`.
    if response.status == 200:
        return response.read() == "true"
    return False
|
[
"def",
"submit_ham",
"(",
"self",
",",
"params",
")",
":",
"# Check required params for submit-ham",
"for",
"required",
"in",
"[",
"'blog'",
",",
"'user_ip'",
",",
"'user_agent'",
"]",
":",
"if",
"required",
"not",
"in",
"params",
":",
"raise",
"MissingParams",
"(",
"required",
")",
"response",
"=",
"self",
".",
"_request",
"(",
"'submit-ham'",
",",
"params",
")",
"if",
"response",
".",
"status",
"is",
"200",
":",
"return",
"response",
".",
"read",
"(",
")",
"==",
"\"true\"",
"return",
"False"
] | 32.214286
| 15.642857
|
def step_note_that(context, remark):
    """
    Used as generic step that provides an additional remark/hint
    and enhance the readability/understanding without performing any check.

    .. code-block:: gherkin

        Given that today is "April 1st"
        But note that "April 1st is Fools day (and beware)"
    """
    # Silently do nothing when the context carries no logger.
    logger = getattr(context, "log", None)
    if not logger:
        return
    logger.info(u"NOTE: %s;" % remark)
|
[
"def",
"step_note_that",
"(",
"context",
",",
"remark",
")",
":",
"log",
"=",
"getattr",
"(",
"context",
",",
"\"log\"",
",",
"None",
")",
"if",
"log",
":",
"log",
".",
"info",
"(",
"u\"NOTE: %s;\"",
"%",
"remark",
")"
] | 31.153846
| 16.076923
|
def evaluate(self, num_eval_batches=None):
    """Run one round of evaluation, return loss and accuracy.

    :param num_eval_batches: number of batches to score; defaults to
        self.num_eval_batches when None/0.
    :return: the evaluated metric values (result of running
        self.tensors.metric_values).
    """
    num_eval_batches = num_eval_batches or self.num_eval_batches
    # Build a fresh evaluation graph each round so the latest checkpoint
    # can be restored into it.
    with tf.Graph().as_default() as graph:
        self.tensors = self.model.build_eval_graph(self.eval_data_paths,
                                                   self.batch_size)
        self.summary = tf.summary.merge_all()
        self.saver = tf.train.Saver()
        self.summary_writer = tf.summary.FileWriter(self.output_path)
        # Supervisor with summary/step services disabled: it is only used
        # here for session management and queue runners.
        self.sv = tf.train.Supervisor(
            graph=graph,
            logdir=self.output_path,
            summary_op=None,
            global_step=None,
            saver=self.saver)
        last_checkpoint = tf.train.latest_checkpoint(self.checkpoint_path)
        with self.sv.managed_session(master='', start_standard_services=False) as session:
            self.sv.saver.restore(session, last_checkpoint)
            # Cache a fixed set of example batches on the first call so every
            # later evaluation round scores exactly the same data.
            if not self.batch_of_examples:
                self.sv.start_queue_runners(session)
                for i in range(num_eval_batches):
                    self.batch_of_examples.append(session.run(self.tensors.examples))
            # Accumulate the streaming metrics over the cached batches.
            for i in range(num_eval_batches):
                session.run(self.tensors.metric_updates,
                            {self.tensors.examples: self.batch_of_examples[i]})
            metric_values = session.run(self.tensors.metric_values)
            global_step = tf.train.global_step(session, self.tensors.global_step)
            summary = session.run(self.summary)
            self.summary_writer.add_summary(summary, global_step)
            self.summary_writer.flush()
            return metric_values
|
[
"def",
"evaluate",
"(",
"self",
",",
"num_eval_batches",
"=",
"None",
")",
":",
"num_eval_batches",
"=",
"num_eval_batches",
"or",
"self",
".",
"num_eval_batches",
"with",
"tf",
".",
"Graph",
"(",
")",
".",
"as_default",
"(",
")",
"as",
"graph",
":",
"self",
".",
"tensors",
"=",
"self",
".",
"model",
".",
"build_eval_graph",
"(",
"self",
".",
"eval_data_paths",
",",
"self",
".",
"batch_size",
")",
"self",
".",
"summary",
"=",
"tf",
".",
"summary",
".",
"merge_all",
"(",
")",
"self",
".",
"saver",
"=",
"tf",
".",
"train",
".",
"Saver",
"(",
")",
"self",
".",
"summary_writer",
"=",
"tf",
".",
"summary",
".",
"FileWriter",
"(",
"self",
".",
"output_path",
")",
"self",
".",
"sv",
"=",
"tf",
".",
"train",
".",
"Supervisor",
"(",
"graph",
"=",
"graph",
",",
"logdir",
"=",
"self",
".",
"output_path",
",",
"summary_op",
"=",
"None",
",",
"global_step",
"=",
"None",
",",
"saver",
"=",
"self",
".",
"saver",
")",
"last_checkpoint",
"=",
"tf",
".",
"train",
".",
"latest_checkpoint",
"(",
"self",
".",
"checkpoint_path",
")",
"with",
"self",
".",
"sv",
".",
"managed_session",
"(",
"master",
"=",
"''",
",",
"start_standard_services",
"=",
"False",
")",
"as",
"session",
":",
"self",
".",
"sv",
".",
"saver",
".",
"restore",
"(",
"session",
",",
"last_checkpoint",
")",
"if",
"not",
"self",
".",
"batch_of_examples",
":",
"self",
".",
"sv",
".",
"start_queue_runners",
"(",
"session",
")",
"for",
"i",
"in",
"range",
"(",
"num_eval_batches",
")",
":",
"self",
".",
"batch_of_examples",
".",
"append",
"(",
"session",
".",
"run",
"(",
"self",
".",
"tensors",
".",
"examples",
")",
")",
"for",
"i",
"in",
"range",
"(",
"num_eval_batches",
")",
":",
"session",
".",
"run",
"(",
"self",
".",
"tensors",
".",
"metric_updates",
",",
"{",
"self",
".",
"tensors",
".",
"examples",
":",
"self",
".",
"batch_of_examples",
"[",
"i",
"]",
"}",
")",
"metric_values",
"=",
"session",
".",
"run",
"(",
"self",
".",
"tensors",
".",
"metric_values",
")",
"global_step",
"=",
"tf",
".",
"train",
".",
"global_step",
"(",
"session",
",",
"self",
".",
"tensors",
".",
"global_step",
")",
"summary",
"=",
"session",
".",
"run",
"(",
"self",
".",
"summary",
")",
"self",
".",
"summary_writer",
".",
"add_summary",
"(",
"summary",
",",
"global_step",
")",
"self",
".",
"summary_writer",
".",
"flush",
"(",
")",
"return",
"metric_values"
] | 40.756757
| 19.081081
|
def clear_matplotlib_ticks(ax=None, axis="both"):
    """
    Clears the default matplotlib axes, or the one specified by the axis
    argument.

    Parameters
    ----------
    ax: Matplotlib AxesSubplot, None
        The subplot to draw on.
    axis: string, "both"
        The axis to clear: "x" or "horizontal", "y" or "vertical", or "both"
    """
    # Nothing to do without an axes object.
    if not ax:
        return
    target = axis.lower()
    if target in ("both", "x", "horizontal"):
        ax.set_xticks([], [])
    if target in ("both", "y", "vertical"):
        ax.set_yticks([], [])
|
[
"def",
"clear_matplotlib_ticks",
"(",
"ax",
"=",
"None",
",",
"axis",
"=",
"\"both\"",
")",
":",
"if",
"not",
"ax",
":",
"return",
"if",
"axis",
".",
"lower",
"(",
")",
"in",
"[",
"\"both\"",
",",
"\"x\"",
",",
"\"horizontal\"",
"]",
":",
"ax",
".",
"set_xticks",
"(",
"[",
"]",
",",
"[",
"]",
")",
"if",
"axis",
".",
"lower",
"(",
")",
"in",
"[",
"\"both\"",
",",
"\"y\"",
",",
"\"vertical\"",
"]",
":",
"ax",
".",
"set_yticks",
"(",
"[",
"]",
",",
"[",
"]",
")"
] | 29.388889
| 17.722222
|
def execute(self, **minimize_options):
    """
    Execute the basin-hopping minimization.

    :param minimize_options: options to be passed on to
        :func:`scipy.optimize.basinhopping`.
    :return: :class:`symfit.core.fit_results.FitResults`
    """
    # Each block below only fills in a 'minimizer_kwargs' entry when the
    # caller has not supplied it, so user-provided options always win.
    if 'minimizer_kwargs' not in minimize_options:
        minimize_options['minimizer_kwargs'] = {}
    if 'method' not in minimize_options['minimizer_kwargs']:
        # If no minimizer was set by the user upon execute, use local_minimizer
        minimize_options['minimizer_kwargs']['method'] = self.local_minimizer.method_name()
    if 'jac' not in minimize_options['minimizer_kwargs'] and isinstance(self.local_minimizer, GradientMinimizer):
        # Assign the jacobian
        minimize_options['minimizer_kwargs']['jac'] = self.local_minimizer.wrapped_jacobian
    if 'constraints' not in minimize_options['minimizer_kwargs'] and isinstance(self.local_minimizer, ConstrainedMinimizer):
        # Assign constraints
        minimize_options['minimizer_kwargs']['constraints'] = self.local_minimizer.wrapped_constraints
    if 'bounds' not in minimize_options['minimizer_kwargs'] and isinstance(self.local_minimizer, BoundedMinimizer):
        # Assign bounds
        minimize_options['minimizer_kwargs']['bounds'] = self.local_minimizer.bounds
    ans = basinhopping(
        self.objective,
        self.initial_guesses,
        **minimize_options
    )
    # Convert the scipy OptimizeResult into the project's FitResults.
    return self._pack_output(ans)
|
[
"def",
"execute",
"(",
"self",
",",
"*",
"*",
"minimize_options",
")",
":",
"if",
"'minimizer_kwargs'",
"not",
"in",
"minimize_options",
":",
"minimize_options",
"[",
"'minimizer_kwargs'",
"]",
"=",
"{",
"}",
"if",
"'method'",
"not",
"in",
"minimize_options",
"[",
"'minimizer_kwargs'",
"]",
":",
"# If no minimizer was set by the user upon execute, use local_minimizer",
"minimize_options",
"[",
"'minimizer_kwargs'",
"]",
"[",
"'method'",
"]",
"=",
"self",
".",
"local_minimizer",
".",
"method_name",
"(",
")",
"if",
"'jac'",
"not",
"in",
"minimize_options",
"[",
"'minimizer_kwargs'",
"]",
"and",
"isinstance",
"(",
"self",
".",
"local_minimizer",
",",
"GradientMinimizer",
")",
":",
"# Assign the jacobian",
"minimize_options",
"[",
"'minimizer_kwargs'",
"]",
"[",
"'jac'",
"]",
"=",
"self",
".",
"local_minimizer",
".",
"wrapped_jacobian",
"if",
"'constraints'",
"not",
"in",
"minimize_options",
"[",
"'minimizer_kwargs'",
"]",
"and",
"isinstance",
"(",
"self",
".",
"local_minimizer",
",",
"ConstrainedMinimizer",
")",
":",
"# Assign constraints",
"minimize_options",
"[",
"'minimizer_kwargs'",
"]",
"[",
"'constraints'",
"]",
"=",
"self",
".",
"local_minimizer",
".",
"wrapped_constraints",
"if",
"'bounds'",
"not",
"in",
"minimize_options",
"[",
"'minimizer_kwargs'",
"]",
"and",
"isinstance",
"(",
"self",
".",
"local_minimizer",
",",
"BoundedMinimizer",
")",
":",
"# Assign bounds",
"minimize_options",
"[",
"'minimizer_kwargs'",
"]",
"[",
"'bounds'",
"]",
"=",
"self",
".",
"local_minimizer",
".",
"bounds",
"ans",
"=",
"basinhopping",
"(",
"self",
".",
"objective",
",",
"self",
".",
"initial_guesses",
",",
"*",
"*",
"minimize_options",
")",
"return",
"self",
".",
"_pack_output",
"(",
"ans",
")"
] | 51.033333
| 28.1
|
def convert_timestamps(obj):
    """
    Convert unix timestamps in the scraper output to python datetimes
    so that they will be saved properly as Mongo datetimes.
    """
    # Known timestamp-bearing keys; falsy values (missing, 0, None) are
    # left untouched.
    for key in ('date', 'when', 'end', 'start_date', 'end_date'):
        value = obj.get(key)
        if not value:
            continue
        try:
            obj[key] = _timestamp_to_dt(value)
        except TypeError:
            raise TypeError("expected float for %s, got %s" % (key, value))
    # Recurse into the known list-valued children.
    for key in ('sources', 'actions', 'votes', 'roles'):
        for child in obj.get(key, []):
            convert_timestamps(child)
    return obj
|
[
"def",
"convert_timestamps",
"(",
"obj",
")",
":",
"for",
"key",
"in",
"(",
"'date'",
",",
"'when'",
",",
"'end'",
",",
"'start_date'",
",",
"'end_date'",
")",
":",
"value",
"=",
"obj",
".",
"get",
"(",
"key",
")",
"if",
"value",
":",
"try",
":",
"obj",
"[",
"key",
"]",
"=",
"_timestamp_to_dt",
"(",
"value",
")",
"except",
"TypeError",
":",
"raise",
"TypeError",
"(",
"\"expected float for %s, got %s\"",
"%",
"(",
"key",
",",
"value",
")",
")",
"for",
"key",
"in",
"(",
"'sources'",
",",
"'actions'",
",",
"'votes'",
",",
"'roles'",
")",
":",
"for",
"child",
"in",
"obj",
".",
"get",
"(",
"key",
",",
"[",
"]",
")",
":",
"convert_timestamps",
"(",
"child",
")",
"return",
"obj"
] | 33.277778
| 18.388889
|
def order(self, did, service_definition_id, consumer_account, auto_consume=False):
    """
    Sign service agreement.

    Sign the service agreement defined in the service section identified
    by `service_definition_id` in the ddo and send the signed agreement to the purchase endpoint
    associated with this service.

    :param did: str starting with the prefix `did:op:` and followed by the asset id which is
        a hex str
    :param service_definition_id: str the service definition id identifying a specific
        service in the DDO (DID document)
    :param consumer_account: Account instance of the consumer
    :param auto_consume: boolean
    :return: tuple(agreement_id, signature) the service agreement id (can be used to query
        the keeper-contracts for the status of the service agreement) and signed agreement hash
    """
    # NOTE(review): these f-strings contain no placeholders, so the account
    # address is never interpolated into the assertion message.
    assert consumer_account.address in self._keeper.accounts, f'Unrecognized consumer ' \
                                                              f'address `consumer_account`'
    # Prepare returns the new agreement id plus the consumer's signature.
    agreement_id, signature = self._agreements.prepare(
        did, service_definition_id, consumer_account
    )
    logger.debug(f'about to request create agreement: {agreement_id}')
    self._agreements.send(
        did,
        agreement_id,
        service_definition_id,
        signature,
        consumer_account,
        auto_consume=auto_consume
    )
    # NOTE(review): despite the docstring, only the agreement id is
    # returned, not a (agreement_id, signature) tuple.
    return agreement_id
|
[
"def",
"order",
"(",
"self",
",",
"did",
",",
"service_definition_id",
",",
"consumer_account",
",",
"auto_consume",
"=",
"False",
")",
":",
"assert",
"consumer_account",
".",
"address",
"in",
"self",
".",
"_keeper",
".",
"accounts",
",",
"f'Unrecognized consumer '",
"f'address `consumer_account`'",
"agreement_id",
",",
"signature",
"=",
"self",
".",
"_agreements",
".",
"prepare",
"(",
"did",
",",
"service_definition_id",
",",
"consumer_account",
")",
"logger",
".",
"debug",
"(",
"f'about to request create agreement: {agreement_id}'",
")",
"self",
".",
"_agreements",
".",
"send",
"(",
"did",
",",
"agreement_id",
",",
"service_definition_id",
",",
"signature",
",",
"consumer_account",
",",
"auto_consume",
"=",
"auto_consume",
")",
"return",
"agreement_id"
] | 43.848485
| 25.060606
|
def print_report(label, user, system, real):
    """
    Prints the report of one step of a benchmark.

    The label is left-justified in a 12-character field, followed by the
    user and system values and the real value in parentheses.
    """
    row_template = "{:<12s} {:12f} {:12f} ( {:12f} )"
    print(row_template.format(label, user, system, real))
|
[
"def",
"print_report",
"(",
"label",
",",
"user",
",",
"system",
",",
"real",
")",
":",
"print",
"(",
"\"{:<12s} {:12f} {:12f} ( {:12f} )\"",
".",
"format",
"(",
"label",
",",
"user",
",",
"system",
",",
"real",
")",
")"
] | 42.375
| 10.625
|
def stream(self):
    """The workhorse of cinje: transform input lines and emit output lines.

    After constructing an instance with a set of input lines iterate this property to generate the template.

    Yields Line objects; each yielded line is given the current scope if
    it does not already carry one.
    """
    # 'init' in self.flag marks a nested (child) invocation; only the
    # root invocation runs prepare() and records the line mapping.
    if 'init' not in self.flag:
        root = True
        self.prepare()
    else:
        root = False
    # Track which lines were generated in response to which lines of source code.
    # The end result is that there is one entry here for every line emitted, each integer representing the source
    # line number that triggered it.  If any lines are returned with missing line numbers, they're inferred from
    # the last entry already in the list.
    # Fun fact: this list is backwards; we optimize by using a deque and appending to the left edge.  this updates
    # the head of a linked list; the whole thing needs to be reversed to make sense.
    mapping = self.mapping
    for line in self.input:
        # Find the transformer responsible for this kind of line.
        handler = self.classify(line)
        if line.kind == 'code' and line.stripped == 'end':  # Exit the current child scope.
            return
        assert handler, "Unable to identify handler for line; this should be impossible!"
        self.input.push(line)  # Put it back so it can be consumed by the handler.
        for line in handler(self):  # This re-indents the code to match, if missing explicit scope.
            if root: mapping.appendleft(line.number or mapping[0])  # Track source line number.
            if line.scope is None:
                line = line.clone(scope=self.scope)
            yield line
|
[
"def",
"stream",
"(",
"self",
")",
":",
"if",
"'init'",
"not",
"in",
"self",
".",
"flag",
":",
"root",
"=",
"True",
"self",
".",
"prepare",
"(",
")",
"else",
":",
"root",
"=",
"False",
"# Track which lines were generated in response to which lines of source code.",
"# The end result is that there is one entry here for every line emitted, each integer representing the source",
"# line number that triggered it. If any lines are returned with missing line numbers, they're inferred from",
"# the last entry already in the list.",
"# Fun fact: this list is backwards; we optimize by using a deque and appending to the left edge. this updates",
"# the head of a linked list; the whole thing needs to be reversed to make sense.",
"mapping",
"=",
"self",
".",
"mapping",
"for",
"line",
"in",
"self",
".",
"input",
":",
"handler",
"=",
"self",
".",
"classify",
"(",
"line",
")",
"if",
"line",
".",
"kind",
"==",
"'code'",
"and",
"line",
".",
"stripped",
"==",
"'end'",
":",
"# Exit the current child scope.",
"return",
"assert",
"handler",
",",
"\"Unable to identify handler for line; this should be impossible!\"",
"self",
".",
"input",
".",
"push",
"(",
"line",
")",
"# Put it back so it can be consumed by the handler.",
"for",
"line",
"in",
"handler",
"(",
"self",
")",
":",
"# This re-indents the code to match, if missing explicit scope.",
"if",
"root",
":",
"mapping",
".",
"appendleft",
"(",
"line",
".",
"number",
"or",
"mapping",
"[",
"0",
"]",
")",
"# Track source line number.",
"if",
"line",
".",
"scope",
"is",
"None",
":",
"line",
"=",
"line",
".",
"clone",
"(",
"scope",
"=",
"self",
".",
"scope",
")",
"yield",
"line"
] | 38.972973
| 32.702703
|
def norm(self) -> bk.BKTensor:
    """Return the norm of this vector."""
    # |<t, t>| -- the absolute value of the tensor's self inner product.
    overlap = bk.inner(self.tensor, self.tensor)
    return bk.absolute(overlap)
|
[
"def",
"norm",
"(",
"self",
")",
"->",
"bk",
".",
"BKTensor",
":",
"return",
"bk",
".",
"absolute",
"(",
"bk",
".",
"inner",
"(",
"self",
".",
"tensor",
",",
"self",
".",
"tensor",
")",
")"
] | 45.333333
| 10.666667
|
def update(self, sums):
    """
    Update self.et with the sums as returned by VersionsX.sums_get

    @param sums: {'version': {'file1':'hash1'}}
    """
    for version, hashes in sums.items():
        for filename, hsh in hashes.items():
            file_xpath = './files/*[@url="%s"]' % filename
            matches = self.root.findall(file_xpath)
            if not matches:
                raise ValueError("Attempted to update element '%s' which doesn't exist" % filename)
            file_add = matches[0]
            # Do not add duplicate, equal hashes.
            if self.version_exists(file_add, version, hsh):
                continue
            new_ver = ET.SubElement(file_add, 'version')
            new_ver.attrib = {
                'md5': hsh,
                'nb': version
            }
|
[
"def",
"update",
"(",
"self",
",",
"sums",
")",
":",
"for",
"version",
"in",
"sums",
":",
"hashes",
"=",
"sums",
"[",
"version",
"]",
"for",
"filename",
"in",
"hashes",
":",
"hsh",
"=",
"hashes",
"[",
"filename",
"]",
"file_xpath",
"=",
"'./files/*[@url=\"%s\"]'",
"%",
"filename",
"try",
":",
"file_add",
"=",
"self",
".",
"root",
".",
"findall",
"(",
"file_xpath",
")",
"[",
"0",
"]",
"except",
"IndexError",
":",
"raise",
"ValueError",
"(",
"\"Attempted to update element '%s' which doesn't exist\"",
"%",
"filename",
")",
"# Do not add duplicate, equal hashes.",
"if",
"not",
"self",
".",
"version_exists",
"(",
"file_add",
",",
"version",
",",
"hsh",
")",
":",
"new_ver",
"=",
"ET",
".",
"SubElement",
"(",
"file_add",
",",
"'version'",
")",
"new_ver",
".",
"attrib",
"=",
"{",
"'md5'",
":",
"hsh",
",",
"'nb'",
":",
"version",
"}"
] | 41.181818
| 15.636364
|
def subelement(element, xpath, tag, text, **kwargs):
    """Find the child of *element* matching *xpath* and overwrite its
    *tag*, *text* and the attributes given in *kwargs*.

    When no element matches *xpath*, a new child with the given *tag*
    is created and filled instead.

    Returns the found/created element.
    """
    found = element.find(xpath)
    if found is None:
        found = etree.SubElement(element, tag)
    else:
        found.tag = tag
    found.text = text
    for name, value in kwargs.items():
        found.set(name, value)
    return found
|
[
"def",
"subelement",
"(",
"element",
",",
"xpath",
",",
"tag",
",",
"text",
",",
"*",
"*",
"kwargs",
")",
":",
"subelm",
"=",
"element",
".",
"find",
"(",
"xpath",
")",
"if",
"subelm",
"is",
"None",
":",
"subelm",
"=",
"etree",
".",
"SubElement",
"(",
"element",
",",
"tag",
")",
"else",
":",
"subelm",
".",
"tag",
"=",
"tag",
"subelm",
".",
"text",
"=",
"text",
"for",
"attr",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"subelm",
".",
"set",
"(",
"attr",
",",
"value",
")",
"return",
"subelm"
] | 25.954545
| 19.227273
|
def split_docstring(self, block):
    """Split a code block into a docstring and a body.

    Returns ``(first_line, rest)`` when the block's first line is a bare
    string literal (a docstring), otherwise ``(None, block)``.
    """
    if "\n" in block:
        first_line, rest_of_lines = block.split("\n", 1)
        stripped = split_leading_trailing_indent(rem_comment(first_line))[1]
        if match_in(self.just_a_string, stripped):
            return first_line, rest_of_lines
    return None, block
|
[
"def",
"split_docstring",
"(",
"self",
",",
"block",
")",
":",
"try",
":",
"first_line",
",",
"rest_of_lines",
"=",
"block",
".",
"split",
"(",
"\"\\n\"",
",",
"1",
")",
"except",
"ValueError",
":",
"pass",
"else",
":",
"raw_first_line",
"=",
"split_leading_trailing_indent",
"(",
"rem_comment",
"(",
"first_line",
")",
")",
"[",
"1",
"]",
"if",
"match_in",
"(",
"self",
".",
"just_a_string",
",",
"raw_first_line",
")",
":",
"return",
"first_line",
",",
"rest_of_lines",
"return",
"None",
",",
"block"
] | 40.090909
| 18.909091
|
def slug_sub(match):
    """Assign an id to an id-less header, derived from its title.

    The slug is the lower-cased title with markup/punctuation stripped
    and whitespace runs collapsed to single hyphens (-).  When nothing
    usable remains, the header is returned unchanged.
    """
    level, title = match.group(1), match.group(2)
    cleaned = re.sub(r'<.+?>|[^\w-]', ' ', title.lower())
    slug = '-'.join(cleaned.split())
    if not slug:
        return match.group(0)
    return '<{0} id="{1}">{2}</{0}>'.format(level, slug, title)
|
[
"def",
"slug_sub",
"(",
"match",
")",
":",
"level",
"=",
"match",
".",
"group",
"(",
"1",
")",
"title",
"=",
"match",
".",
"group",
"(",
"2",
")",
"slug",
"=",
"title",
".",
"lower",
"(",
")",
"slug",
"=",
"re",
".",
"sub",
"(",
"r'<.+?>|[^\\w-]'",
",",
"' '",
",",
"slug",
")",
"slug",
"=",
"re",
".",
"sub",
"(",
"r'[ \\t]+'",
",",
"' '",
",",
"slug",
")",
".",
"strip",
"(",
")",
"slug",
"=",
"slug",
".",
"replace",
"(",
"' '",
",",
"'-'",
")",
"if",
"slug",
":",
"return",
"'<{0} id=\"{1}\">{2}</{0}>'",
".",
"format",
"(",
"level",
",",
"slug",
",",
"title",
")",
"return",
"match",
".",
"group",
"(",
"0",
")"
] | 37.357143
| 14.285714
|
def help_center_article_translations_missing(self, article_id, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/help_center/translations#list-missing-translations"
    endpoint = "/api/v2/help_center/articles/{article_id}/translations/missing.json".format(
        article_id=article_id)
    return self.call(endpoint, **kwargs)
|
[
"def",
"help_center_article_translations_missing",
"(",
"self",
",",
"article_id",
",",
"*",
"*",
"kwargs",
")",
":",
"api_path",
"=",
"\"/api/v2/help_center/articles/{article_id}/translations/missing.json\"",
"api_path",
"=",
"api_path",
".",
"format",
"(",
"article_id",
"=",
"article_id",
")",
"return",
"self",
".",
"call",
"(",
"api_path",
",",
"*",
"*",
"kwargs",
")"
] | 73.2
| 33.2
|
def get_version(self):
    """
    This gets the version of OpenALPR

    :return: Version information
    """
    raw_ptr = self._get_version_func(self.alpr_pointer)
    c_string = ctypes.cast(raw_ptr, ctypes.c_char_p).value
    version = _convert_from_charp(c_string)
    # Release the buffer allocated by the native library.
    self._free_json_mem_func(ctypes.c_void_p(raw_ptr))
    return version
|
[
"def",
"get_version",
"(",
"self",
")",
":",
"ptr",
"=",
"self",
".",
"_get_version_func",
"(",
"self",
".",
"alpr_pointer",
")",
"version_number",
"=",
"ctypes",
".",
"cast",
"(",
"ptr",
",",
"ctypes",
".",
"c_char_p",
")",
".",
"value",
"version_number",
"=",
"_convert_from_charp",
"(",
"version_number",
")",
"self",
".",
"_free_json_mem_func",
"(",
"ctypes",
".",
"c_void_p",
"(",
"ptr",
")",
")",
"return",
"version_number"
] | 31.916667
| 15.583333
|
def es_get_class_defs(cls_def, cls_name):
    """Extract the Elasticsearch-related entries from a class definition.

    Keys starting with ``kds_es`` are removed from *cls_def* in place
    and returned as a new dict.

    Args:
    -----
        cls_def: mapping of class definition entries (mutated in place)
        cls_name: class name (currently unused)
    """
    es_keys = [key for key in cls_def if key.startswith("kds_es")]
    extracted = {}
    for key in es_keys:
        extracted[key] = cls_def.pop(key)
    return extracted
|
[
"def",
"es_get_class_defs",
"(",
"cls_def",
",",
"cls_name",
")",
":",
"rtn_dict",
"=",
"{",
"key",
":",
"value",
"for",
"key",
",",
"value",
"in",
"cls_def",
".",
"items",
"(",
")",
"if",
"key",
".",
"startswith",
"(",
"\"kds_es\"",
")",
"}",
"for",
"key",
"in",
"rtn_dict",
":",
"del",
"cls_def",
"[",
"key",
"]",
"return",
"rtn_dict"
] | 26.642857
| 17.214286
|
def _draw_mark(mark_type, options=None, axes_options=None, **kwargs):
    """Draw the mark of specified mark type.

    Parameters
    ----------
    mark_type: type
        The type of mark to be drawn
    options: dict (default: None)
        Options for the scales to be created. If a scale labeled 'x' is
        required for that mark, options['x'] contains optional keyword
        arguments for the constructor of the corresponding scale type.
    axes_options: dict (default: None)
        Options for the axes to be created. If an axis labeled 'x' is required
        for that mark, axes_options['x'] contains optional keyword arguments
        for the constructor of the corresponding axis type.
    figure: Figure or None
        The figure to which the mark is to be added.
        If the value is None, the current figure is used.
    cmap: list or string
        List of css colors, or name of bqplot color scheme
    """
    # BUG FIX: the defaults used to be mutable ({}); since this function
    # writes into options['color'] below, the shared default dict leaked
    # cmap state between unrelated calls.
    if options is None:
        options = {}
    if axes_options is None:
        axes_options = {}
    fig = kwargs.pop('figure', current_figure())
    scales = kwargs.pop('scales', {})
    update_context = kwargs.pop('update_context', True)
    # Set the color map of the color scale
    cmap = kwargs.pop('cmap', None)
    if cmap is not None:
        # Add the colors or scheme to the color scale options
        options['color'] = dict(options.get('color', {}),
                                **_process_cmap(cmap))
    # Going through the list of data attributes
    for name in mark_type.class_trait_names(scaled=True):
        dimension = _get_attribute_dimension(name, mark_type)
        # TODO: the following should also happen if name in kwargs and
        # scales[name] is incompatible.
        if name not in kwargs:
            # The scaled attribute is not being passed to the mark. So no need
            # create a scale for this.
            continue
        elif name in scales:
            if update_context:
                _context['scales'][dimension] = scales[name]
        # Scale has to be fetched from the context or created as it has not
        # been passed.
        elif dimension not in _context['scales']:
            # Creating a scale for the dimension if a matching scale is not
            # present in _context['scales']
            traitlet = mark_type.class_traits()[name]
            rtype = traitlet.get_metadata('rtype')
            dtype = traitlet.validate(None, kwargs[name]).dtype
            # Fetching the first matching scale for the rtype and dtype of the
            # scaled attributes of the mark.
            compat_scale_types = [
                Scale.scale_types[key]
                for key in Scale.scale_types
                if Scale.scale_types[key].rtype == rtype and
                issubdtype(dtype, Scale.scale_types[key].dtype)
            ]
            # Highest-precedence compatible scale wins.
            sorted_scales = sorted(compat_scale_types,
                                   key=lambda x: x.precedence)
            scales[name] = sorted_scales[-1](**options.get(name, {}))
            # Adding the scale to the context scales
            if update_context:
                _context['scales'][dimension] = scales[name]
        else:
            scales[name] = _context['scales'][dimension]
    mark = mark_type(scales=scales, **kwargs)
    _context['last_mark'] = mark
    fig.marks = [m for m in fig.marks] + [mark]
    if kwargs.get('axes', True):
        axes(mark, options=axes_options)
    return mark
|
[
"def",
"_draw_mark",
"(",
"mark_type",
",",
"options",
"=",
"{",
"}",
",",
"axes_options",
"=",
"{",
"}",
",",
"*",
"*",
"kwargs",
")",
":",
"fig",
"=",
"kwargs",
".",
"pop",
"(",
"'figure'",
",",
"current_figure",
"(",
")",
")",
"scales",
"=",
"kwargs",
".",
"pop",
"(",
"'scales'",
",",
"{",
"}",
")",
"update_context",
"=",
"kwargs",
".",
"pop",
"(",
"'update_context'",
",",
"True",
")",
"# Set the color map of the color scale",
"cmap",
"=",
"kwargs",
".",
"pop",
"(",
"'cmap'",
",",
"None",
")",
"if",
"cmap",
"is",
"not",
"None",
":",
"# Add the colors or scheme to the color scale options",
"options",
"[",
"'color'",
"]",
"=",
"dict",
"(",
"options",
".",
"get",
"(",
"'color'",
",",
"{",
"}",
")",
",",
"*",
"*",
"_process_cmap",
"(",
"cmap",
")",
")",
"# Going through the list of data attributes",
"for",
"name",
"in",
"mark_type",
".",
"class_trait_names",
"(",
"scaled",
"=",
"True",
")",
":",
"dimension",
"=",
"_get_attribute_dimension",
"(",
"name",
",",
"mark_type",
")",
"# TODO: the following should also happen if name in kwargs and",
"# scales[name] is incompatible.",
"if",
"name",
"not",
"in",
"kwargs",
":",
"# The scaled attribute is not being passed to the mark. So no need",
"# create a scale for this.",
"continue",
"elif",
"name",
"in",
"scales",
":",
"if",
"update_context",
":",
"_context",
"[",
"'scales'",
"]",
"[",
"dimension",
"]",
"=",
"scales",
"[",
"name",
"]",
"# Scale has to be fetched from the context or created as it has not",
"# been passed.",
"elif",
"dimension",
"not",
"in",
"_context",
"[",
"'scales'",
"]",
":",
"# Creating a scale for the dimension if a matching scale is not",
"# present in _context['scales']",
"traitlet",
"=",
"mark_type",
".",
"class_traits",
"(",
")",
"[",
"name",
"]",
"rtype",
"=",
"traitlet",
".",
"get_metadata",
"(",
"'rtype'",
")",
"dtype",
"=",
"traitlet",
".",
"validate",
"(",
"None",
",",
"kwargs",
"[",
"name",
"]",
")",
".",
"dtype",
"# Fetching the first matching scale for the rtype and dtype of the",
"# scaled attributes of the mark.",
"compat_scale_types",
"=",
"[",
"Scale",
".",
"scale_types",
"[",
"key",
"]",
"for",
"key",
"in",
"Scale",
".",
"scale_types",
"if",
"Scale",
".",
"scale_types",
"[",
"key",
"]",
".",
"rtype",
"==",
"rtype",
"and",
"issubdtype",
"(",
"dtype",
",",
"Scale",
".",
"scale_types",
"[",
"key",
"]",
".",
"dtype",
")",
"]",
"sorted_scales",
"=",
"sorted",
"(",
"compat_scale_types",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"precedence",
")",
"scales",
"[",
"name",
"]",
"=",
"sorted_scales",
"[",
"-",
"1",
"]",
"(",
"*",
"*",
"options",
".",
"get",
"(",
"name",
",",
"{",
"}",
")",
")",
"# Adding the scale to the context scales",
"if",
"update_context",
":",
"_context",
"[",
"'scales'",
"]",
"[",
"dimension",
"]",
"=",
"scales",
"[",
"name",
"]",
"else",
":",
"scales",
"[",
"name",
"]",
"=",
"_context",
"[",
"'scales'",
"]",
"[",
"dimension",
"]",
"mark",
"=",
"mark_type",
"(",
"scales",
"=",
"scales",
",",
"*",
"*",
"kwargs",
")",
"_context",
"[",
"'last_mark'",
"]",
"=",
"mark",
"fig",
".",
"marks",
"=",
"[",
"m",
"for",
"m",
"in",
"fig",
".",
"marks",
"]",
"+",
"[",
"mark",
"]",
"if",
"kwargs",
".",
"get",
"(",
"'axes'",
",",
"True",
")",
":",
"axes",
"(",
"mark",
",",
"options",
"=",
"axes_options",
")",
"return",
"mark"
] | 44.12
| 17.413333
|
def copy_data(self, project, logstore, from_time, to_time=None,
              to_client=None, to_project=None, to_logstore=None,
              shard_list=None,
              batch_size=None, compress=None, new_topic=None, new_source=None):
    """
    copy data from one logstore to another one (could be the same or in different region), the time is log received time on server side.

    :type project: string
    :param project: project name

    :type logstore: string
    :param logstore: logstore name

    :type from_time: string/int
    :param from_time: cursor value, could be begin, timestamp or readable time in readable time like "%Y-%m-%d %H:%M:%S<time_zone>" e.g. "2018-01-02 12:12:10+8:00", also support human readable string, e.g. "1 hour ago", "now", "yesterday 0:0:0", refer to https://aliyun-log-cli.readthedocs.io/en/latest/tutorials/tutorial_human_readable_datetime.html

    :type to_time: string/int
    :param to_time: cursor value, default is "end", could be begin, timestamp or readable time in readable time like "%Y-%m-%d %H:%M:%S<time_zone>" e.g. "2018-01-02 12:12:10+8:00", also support human readable string, e.g. "1 hour ago", "now", "yesterday 0:0:0", refer to https://aliyun-log-cli.readthedocs.io/en/latest/tutorials/tutorial_human_readable_datetime.html

    :type to_client: LogClient
    :param to_client: logclient instance, if empty will use source client

    :type to_project: string
    :param to_project: project name, if empty will use source project

    :type to_logstore: string
    :param to_logstore: logstore name, if empty will use source logstore

    :type shard_list: string
    :param shard_list: shard number list. could be comma separated list or range: 1,20,31-40

    :type batch_size: int
    :param batch_size: batch size to fetch the data in each iteration. by default it's 500

    :type compress: bool
    :param compress: if use compression, by default it's True

    :type new_topic: string
    :param new_topic: overwrite the copied topic with the passed one

    :type new_source: string
    :param new_source: overwrite the copied source with the passed one

    :return: LogResponse {"total_count": 30, "shards": {0: 10, 1: 20} })
    """
    # Thin wrapper: delegates to the module-level copy_data helper,
    # binding this client as the source.
    return copy_data(self, project, logstore, from_time, to_time=to_time,
                     to_client=to_client, to_project=to_project, to_logstore=to_logstore,
                     shard_list=shard_list,
                     batch_size=batch_size, compress=compress, new_topic=new_topic, new_source=new_source)
|
[
"def",
"copy_data",
"(",
"self",
",",
"project",
",",
"logstore",
",",
"from_time",
",",
"to_time",
"=",
"None",
",",
"to_client",
"=",
"None",
",",
"to_project",
"=",
"None",
",",
"to_logstore",
"=",
"None",
",",
"shard_list",
"=",
"None",
",",
"batch_size",
"=",
"None",
",",
"compress",
"=",
"None",
",",
"new_topic",
"=",
"None",
",",
"new_source",
"=",
"None",
")",
":",
"return",
"copy_data",
"(",
"self",
",",
"project",
",",
"logstore",
",",
"from_time",
",",
"to_time",
"=",
"to_time",
",",
"to_client",
"=",
"to_client",
",",
"to_project",
"=",
"to_project",
",",
"to_logstore",
"=",
"to_logstore",
",",
"shard_list",
"=",
"shard_list",
",",
"batch_size",
"=",
"batch_size",
",",
"compress",
"=",
"compress",
",",
"new_topic",
"=",
"new_topic",
",",
"new_source",
"=",
"new_source",
")"
] | 53.3
| 40.22
|
def list_probes():
    """Return the list of built-in probes (``.prb`` file stems)."""
    probes_dir = op.join(op.realpath(op.dirname(__file__)), 'probes')
    return [op.splitext(name)[0]
            for name in os.listdir(probes_dir)
            if name.endswith('.prb')]
|
[
"def",
"list_probes",
"(",
")",
":",
"curdir",
"=",
"op",
".",
"realpath",
"(",
"op",
".",
"dirname",
"(",
"__file__",
")",
")",
"return",
"[",
"op",
".",
"splitext",
"(",
"fn",
")",
"[",
"0",
"]",
"for",
"fn",
"in",
"os",
".",
"listdir",
"(",
"op",
".",
"join",
"(",
"curdir",
",",
"'probes'",
")",
")",
"if",
"fn",
".",
"endswith",
"(",
"'.prb'",
")",
"]"
] | 44.4
| 14.2
|
def _get(self, api_call, params=None, method='GET', auth=False,
file_=None):
"""Function to preapre API call.
Parameters:
api_call (str): API function to be called.
params (str): API function parameters.
method (str): (Defauld: GET) HTTP method (GET, POST, PUT or
DELETE)
file_ (file): File to upload (only uploads).
Raise:
PybooruError: When 'username' or 'api_key' are not set.
"""
url = "{0}/{1}".format(self.site_url, api_call)
if method == 'GET':
request_args = {'params': params}
else:
request_args = {'data': params, 'files': file_}
# Adds auth. Also adds auth if username and api_key are specified
# Members+ have less restrictions
if auth or (self.username and self.api_key):
if self.username and self.api_key:
request_args['auth'] = (self.username, self.api_key)
else:
raise PybooruError("'username' and 'api_key' attribute of "
"Danbooru are required.")
# Do call
return self._request(url, api_call, request_args, method)
|
[
"def",
"_get",
"(",
"self",
",",
"api_call",
",",
"params",
"=",
"None",
",",
"method",
"=",
"'GET'",
",",
"auth",
"=",
"False",
",",
"file_",
"=",
"None",
")",
":",
"url",
"=",
"\"{0}/{1}\"",
".",
"format",
"(",
"self",
".",
"site_url",
",",
"api_call",
")",
"if",
"method",
"==",
"'GET'",
":",
"request_args",
"=",
"{",
"'params'",
":",
"params",
"}",
"else",
":",
"request_args",
"=",
"{",
"'data'",
":",
"params",
",",
"'files'",
":",
"file_",
"}",
"# Adds auth. Also adds auth if username and api_key are specified",
"# Members+ have less restrictions",
"if",
"auth",
"or",
"(",
"self",
".",
"username",
"and",
"self",
".",
"api_key",
")",
":",
"if",
"self",
".",
"username",
"and",
"self",
".",
"api_key",
":",
"request_args",
"[",
"'auth'",
"]",
"=",
"(",
"self",
".",
"username",
",",
"self",
".",
"api_key",
")",
"else",
":",
"raise",
"PybooruError",
"(",
"\"'username' and 'api_key' attribute of \"",
"\"Danbooru are required.\"",
")",
"# Do call",
"return",
"self",
".",
"_request",
"(",
"url",
",",
"api_call",
",",
"request_args",
",",
"method",
")"
] | 38.03125
| 21.0625
|
def getitem(self, obj, argument):
    """Get an item or attribute of an object but prefer the item."""
    try:
        return obj[argument]
    except (TypeError, LookupError):
        pass
    # Item lookup failed; fall back to attribute access for string keys.
    if isinstance(argument, string_types):
        try:
            attr = str(argument)
        except Exception:
            pass
        else:
            try:
                return getattr(obj, attr)
            except AttributeError:
                pass
    return self.undefined(obj=obj, name=argument)
|
[
"def",
"getitem",
"(",
"self",
",",
"obj",
",",
"argument",
")",
":",
"try",
":",
"return",
"obj",
"[",
"argument",
"]",
"except",
"(",
"TypeError",
",",
"LookupError",
")",
":",
"if",
"isinstance",
"(",
"argument",
",",
"string_types",
")",
":",
"try",
":",
"attr",
"=",
"str",
"(",
"argument",
")",
"except",
"Exception",
":",
"pass",
"else",
":",
"try",
":",
"return",
"getattr",
"(",
"obj",
",",
"attr",
")",
"except",
"AttributeError",
":",
"pass",
"return",
"self",
".",
"undefined",
"(",
"obj",
"=",
"obj",
",",
"name",
"=",
"argument",
")"
] | 36.0625
| 10.6875
|
def _log_exception(self, exception):
"""
Logs an exception.
:param Exception exception: The exception.
:rtype: None
"""
self._io.error(str(exception).strip().split(os.linesep))
|
[
"def",
"_log_exception",
"(",
"self",
",",
"exception",
")",
":",
"self",
".",
"_io",
".",
"error",
"(",
"str",
"(",
"exception",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
"os",
".",
"linesep",
")",
")"
] | 24.222222
| 16.888889
|
def fix_linkdate(self, entry):
    """
    Give a date for the entry, depending on feed.sync_by_date

    Save it as feed.linkdate
    """
    if not self.sync_by_date:
        entry.linkdate = list(time.localtime())
        return
    # Prefer the published date, then the updated date.
    for source in ('published_parsed', 'updated_parsed'):
        try:
            values = getattr(entry, source)
            entry.linkdate = list(values)
            self.linkdate = list(values)
        except (AttributeError, TypeError):
            continue
        return
    print(("This entry doesn't seem to have a parseable date. "
           "I will use your local time instead."),
          file=sys.stderr, flush=True)
    entry.linkdate = list(time.localtime())
    self.linkdate = list(time.localtime())
|
[
"def",
"fix_linkdate",
"(",
"self",
",",
"entry",
")",
":",
"if",
"self",
".",
"sync_by_date",
":",
"try",
":",
"entry",
".",
"linkdate",
"=",
"list",
"(",
"entry",
".",
"published_parsed",
")",
"self",
".",
"linkdate",
"=",
"list",
"(",
"entry",
".",
"published_parsed",
")",
"except",
"(",
"AttributeError",
",",
"TypeError",
")",
":",
"try",
":",
"entry",
".",
"linkdate",
"=",
"list",
"(",
"entry",
".",
"updated_parsed",
")",
"self",
".",
"linkdate",
"=",
"list",
"(",
"entry",
".",
"updated_parsed",
")",
"except",
"(",
"AttributeError",
",",
"TypeError",
")",
":",
"print",
"(",
"(",
"\"This entry doesn't seem to have a parseable date. \"",
"\"I will use your local time instead.\"",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
",",
"flush",
"=",
"True",
")",
"entry",
".",
"linkdate",
"=",
"list",
"(",
"time",
".",
"localtime",
"(",
")",
")",
"self",
".",
"linkdate",
"=",
"list",
"(",
"time",
".",
"localtime",
"(",
")",
")",
"else",
":",
"entry",
".",
"linkdate",
"=",
"list",
"(",
"time",
".",
"localtime",
"(",
")",
")"
] | 44.666667
| 16.952381
|
def includePoint(self, p):
    """Extend rectangle to include point p."""
    if len(p) != 2:
        raise ValueError("bad sequ. length")
    self.x0, self.y0, self.x1, self.y1 = TOOLS._include_point_in_rect(self, p)
    return self
|
[
"def",
"includePoint",
"(",
"self",
",",
"p",
")",
":",
"if",
"not",
"len",
"(",
"p",
")",
"==",
"2",
":",
"raise",
"ValueError",
"(",
"\"bad sequ. length\"",
")",
"self",
".",
"x0",
",",
"self",
".",
"y0",
",",
"self",
".",
"x1",
",",
"self",
".",
"y1",
"=",
"TOOLS",
".",
"_include_point_in_rect",
"(",
"self",
",",
"p",
")",
"return",
"self"
] | 42
| 16.333333
|
def get_stack(self, f, t):
    """Build the stack from frame and traceback.

    Returns ``(frames, focus)`` where ``frames`` is a list of
    ``(frame, lineno)`` pairs and ``focus`` is the index to highlight.
    """
    frames = []
    # Drop the traceback entry that duplicates the starting frame.
    if t and t.tb_frame == f:
        t = t.tb_next
    while f is not None:
        frames.append((f, f.f_lineno))
        f = f.f_back
    frames.reverse()
    focus = max(0, len(frames) - 1)
    while t is not None:
        frames.append((t.tb_frame, t.tb_lineno))
        t = t.tb_next
        if f is None:
            focus = max(0, len(frames) - 1)
    return frames, focus
|
[
"def",
"get_stack",
"(",
"self",
",",
"f",
",",
"t",
")",
":",
"stack",
"=",
"[",
"]",
"if",
"t",
"and",
"t",
".",
"tb_frame",
"==",
"f",
":",
"t",
"=",
"t",
".",
"tb_next",
"while",
"f",
"is",
"not",
"None",
":",
"stack",
".",
"append",
"(",
"(",
"f",
",",
"f",
".",
"f_lineno",
")",
")",
"f",
"=",
"f",
".",
"f_back",
"stack",
".",
"reverse",
"(",
")",
"i",
"=",
"max",
"(",
"0",
",",
"len",
"(",
"stack",
")",
"-",
"1",
")",
"while",
"t",
"is",
"not",
"None",
":",
"stack",
".",
"append",
"(",
"(",
"t",
".",
"tb_frame",
",",
"t",
".",
"tb_lineno",
")",
")",
"t",
"=",
"t",
".",
"tb_next",
"if",
"f",
"is",
"None",
":",
"i",
"=",
"max",
"(",
"0",
",",
"len",
"(",
"stack",
")",
"-",
"1",
")",
"return",
"stack",
",",
"i"
] | 30.75
| 11.625
|
async def close_async(self):
    """Close the client asynchronously. This includes closing the Session
    and CBS authentication layer as well as the Connection.
    If the client was opened using an external Connection,
    this will be left intact.
    """
    # Tear down the message handler first so no further sends/receives run.
    if self.message_handler:
        await self.message_handler.destroy_async()
        self.message_handler = None
    self._shutdown = True
    # Wait for the keep-alive loop to observe the shutdown flag and exit.
    if self._keep_alive_thread:
        await self._keep_alive_thread
        self._keep_alive_thread = None
    if not self._session:
        return  # already closed.
    if not self._connection.cbs:
        _logger.info("Closing non-CBS session.")
        # Shield so task cancellation cannot leave the session half-destroyed.
        await asyncio.shield(self._session.destroy_async())
    else:
        # CBS owns the session; leave its teardown to the CBS layer.
        _logger.info("CBS session pending %r.", self._connection.container_id)
    self._session = None
    if not self._ext_connection:
        # Connection was created by this client, so it is ours to destroy.
        _logger.info("Closing exclusive connection %r.", self._connection.container_id)
        await asyncio.shield(self._connection.destroy_async())
    else:
        # Externally supplied connection stays open for its owner.
        _logger.info("Shared connection remaining open.")
    self._connection = None
|
[
"async",
"def",
"close_async",
"(",
"self",
")",
":",
"if",
"self",
".",
"message_handler",
":",
"await",
"self",
".",
"message_handler",
".",
"destroy_async",
"(",
")",
"self",
".",
"message_handler",
"=",
"None",
"self",
".",
"_shutdown",
"=",
"True",
"if",
"self",
".",
"_keep_alive_thread",
":",
"await",
"self",
".",
"_keep_alive_thread",
"self",
".",
"_keep_alive_thread",
"=",
"None",
"if",
"not",
"self",
".",
"_session",
":",
"return",
"# already closed.",
"if",
"not",
"self",
".",
"_connection",
".",
"cbs",
":",
"_logger",
".",
"info",
"(",
"\"Closing non-CBS session.\"",
")",
"await",
"asyncio",
".",
"shield",
"(",
"self",
".",
"_session",
".",
"destroy_async",
"(",
")",
")",
"else",
":",
"_logger",
".",
"info",
"(",
"\"CBS session pending %r.\"",
",",
"self",
".",
"_connection",
".",
"container_id",
")",
"self",
".",
"_session",
"=",
"None",
"if",
"not",
"self",
".",
"_ext_connection",
":",
"_logger",
".",
"info",
"(",
"\"Closing exclusive connection %r.\"",
",",
"self",
".",
"_connection",
".",
"container_id",
")",
"await",
"asyncio",
".",
"shield",
"(",
"self",
".",
"_connection",
".",
"destroy_async",
"(",
")",
")",
"else",
":",
"_logger",
".",
"info",
"(",
"\"Shared connection remaining open.\"",
")",
"self",
".",
"_connection",
"=",
"None"
] | 43.851852
| 14
|
def executeTask(self,
                inputs,
                outSR=None,
                processSR=None,
                returnZ=False,
                returnM=False,
                f="json",
                method="POST"
                ):
    """Execute a synchronous geoprocessing task.

    Args:
        inputs: list of GP parameter objects (BaseGPObject); each one is
            serialized into the request under its paramName.
        outSR: optional output spatial reference (sent as env:outSR).
        processSR: optional processing spatial reference (sent as
            env:processSR).
        returnZ: whether Z values are returned.
        returnM: whether M values are returned.
        f: response format requested from the server (default "json").
        method: HTTP verb to use, "POST" (default) or "GET".

    Returns:
        The service response from self._post/self._get.
    """
    url = self._url + "/execute"
    # BUG FIX: params used to be rebuilt as {"f": "json"}, silently
    # discarding the caller's f argument; and processSR was sent under
    # the misspelled key 'end:processSR' (compare 'env:outSR' below).
    params = {"f": f}
    if outSR is not None:
        params['env:outSR'] = outSR
    if processSR is not None:
        params['env:processSR'] = processSR
    params['returnZ'] = returnZ
    params['returnM'] = returnM
    for p in inputs:
        if isinstance(p, BaseGPObject):
            params[p.paramName] = p.value
    if method.lower() == "post":
        return self._post(url=url,
                          param_dict=params,
                          securityHandler=self._securityHandler,
                          proxy_url=self._proxy_url,
                          proxy_port=self._proxy_port)
    return self._get(url=url,
                     param_dict=params,
                     securityHandler=self._securityHandler,
                     proxy_url=self._proxy_url,
                     proxy_port=self._proxy_port)
|
[
"def",
"executeTask",
"(",
"self",
",",
"inputs",
",",
"outSR",
"=",
"None",
",",
"processSR",
"=",
"None",
",",
"returnZ",
"=",
"False",
",",
"returnM",
"=",
"False",
",",
"f",
"=",
"\"json\"",
",",
"method",
"=",
"\"POST\"",
")",
":",
"params",
"=",
"{",
"\"f\"",
":",
"f",
"}",
"url",
"=",
"self",
".",
"_url",
"+",
"\"/execute\"",
"params",
"=",
"{",
"\"f\"",
":",
"\"json\"",
"}",
"if",
"not",
"outSR",
"is",
"None",
":",
"params",
"[",
"'env:outSR'",
"]",
"=",
"outSR",
"if",
"not",
"processSR",
"is",
"None",
":",
"params",
"[",
"'end:processSR'",
"]",
"=",
"processSR",
"params",
"[",
"'returnZ'",
"]",
"=",
"returnZ",
"params",
"[",
"'returnM'",
"]",
"=",
"returnM",
"for",
"p",
"in",
"inputs",
":",
"if",
"isinstance",
"(",
"p",
",",
"BaseGPObject",
")",
":",
"params",
"[",
"p",
".",
"paramName",
"]",
"=",
"p",
".",
"value",
"del",
"p",
"if",
"method",
".",
"lower",
"(",
")",
"==",
"\"post\"",
":",
"return",
"self",
".",
"_post",
"(",
"url",
"=",
"url",
",",
"param_dict",
"=",
"params",
",",
"securityHandler",
"=",
"self",
".",
"_securityHandler",
",",
"proxy_url",
"=",
"self",
".",
"_proxy_url",
",",
"proxy_port",
"=",
"self",
".",
"_proxy_port",
")",
"else",
":",
"return",
"self",
".",
"_get",
"(",
"url",
"=",
"url",
",",
"param_dict",
"=",
"params",
",",
"securityHandler",
"=",
"self",
".",
"_securityHandler",
",",
"proxy_url",
"=",
"self",
".",
"_proxy_url",
",",
"proxy_port",
"=",
"self",
".",
"_proxy_port",
")"
] | 35.846154
| 11.846154
|
def symmetric_error(self):
    """Return the symmetric error.

    Zero implies no error estimate; otherwise this is either the
    symmetric error itself, or the average of the (low, high)
    asymmetric errors.
    """
    errors = self.__errors__
    # ADW: Should this be `np.nan`?
    if errors is None:
        return 0.
    if np.isscalar(errors):
        return errors
    low, high = errors[0], errors[1]
    return 0.5 * (low + high)
|
[
"def",
"symmetric_error",
"(",
"self",
")",
":",
"# ADW: Should this be `np.nan`?",
"if",
"self",
".",
"__errors__",
"is",
"None",
":",
"return",
"0.",
"if",
"np",
".",
"isscalar",
"(",
"self",
".",
"__errors__",
")",
":",
"return",
"self",
".",
"__errors__",
"return",
"0.5",
"*",
"(",
"self",
".",
"__errors__",
"[",
"0",
"]",
"+",
"self",
".",
"__errors__",
"[",
"1",
"]",
")"
] | 37.307692
| 12.846154
|
def load_keysym_group(group):
    '''Load all the keysyms in group.

    Given a group name such as 'latin1' or 'katakana' load the keysyms
    defined in module 'Xlib.keysymdef.group-name' into this XK module.'''
    # Reject anything that could escape the Xlib.keysymdef package.
    if '.' in group:
        raise ValueError('invalid keysym group name: %s' % group)

    G = globals() #Get a reference to XK.__dict__ a.k.a. globals

    #Import just the keysyms module.
    mod = __import__('Xlib.keysymdef.%s' % group, G, locals(), [group])
    #Extract names of just the keysyms.
    keysyms = [n for n in dir(mod) if n.startswith('XK_')]
    #Copy the named keysyms into XK.__dict__ (i.e. this module's globals).
    for keysym in keysyms:
        ## k = mod.__dict__[keysym]; assert k == int(k) #probably too much.
        G[keysym] = mod.__dict__[keysym]
    #And get rid of the keysym module.
    del mod
|
[
"def",
"load_keysym_group",
"(",
"group",
")",
":",
"if",
"'.'",
"in",
"group",
":",
"raise",
"ValueError",
"(",
"'invalid keysym group name: %s'",
"%",
"group",
")",
"G",
"=",
"globals",
"(",
")",
"#Get a reference to XK.__dict__ a.k.a. globals",
"#Import just the keysyms module.",
"mod",
"=",
"__import__",
"(",
"'Xlib.keysymdef.%s'",
"%",
"group",
",",
"G",
",",
"locals",
"(",
")",
",",
"[",
"group",
"]",
")",
"#Extract names of just the keysyms.",
"keysyms",
"=",
"[",
"n",
"for",
"n",
"in",
"dir",
"(",
"mod",
")",
"if",
"n",
".",
"startswith",
"(",
"'XK_'",
")",
"]",
"#Copy the named keysyms into XK.__dict__",
"for",
"keysym",
"in",
"keysyms",
":",
"## k = mod.__dict__[keysym]; assert k == int(k) #probably too much.",
"G",
"[",
"keysym",
"]",
"=",
"mod",
".",
"__dict__",
"[",
"keysym",
"]",
"#And get rid of the keysym module.",
"del",
"mod"
] | 34.608696
| 22.782609
|
def quoted(text):
    """
    Args:
        text (str | unicode | None): Text to optionally quote

    Returns:
        (str): Quoted if 'text' contains spaces
    """
    if not text or " " not in text:
        return text
    # Use single quotes when the text itself contains a double quote.
    quote = "'" if '"' in text else '"'
    return "%s%s%s" % (quote, text, quote)
|
[
"def",
"quoted",
"(",
"text",
")",
":",
"if",
"text",
"and",
"\" \"",
"in",
"text",
":",
"sep",
"=",
"\"'\"",
"if",
"'\"'",
"in",
"text",
"else",
"'\"'",
"return",
"\"%s%s%s\"",
"%",
"(",
"sep",
",",
"text",
",",
"sep",
")",
"return",
"text"
] | 23.833333
| 15.833333
|
def metablock(self):
    """Process the data.

    Relevant variables of self:
    numberOfBlockTypes[kind]: number of block types
    currentBlockTypes[kind]: current block types (=0)
    literalContextModes: the context modes for the literal block types
    currentBlockCounts[kind]: counters for block types
    blockTypeCodes[kind]: code for block type
    blockCountCodes[kind]: code for block count
    cmaps[kind]: the context maps (not for I)
    prefixCodes[kind][#]: the prefix codes
    lastDistances: the last four distances
    lastChars: the last two chars
    output: the result
    """
    print('Meta block contents'.center(60, '='))
    self.currentBlockTypes = {L:0, I:0, D:0, pL:1, pI:1, pD:1}
    # Initial "last distances" ring as prescribed by the format.
    self.lastDistances = deque([17,16,11,4], maxlen=4)
    #the current context mode is for block type 0
    self.contextMode = ContextModeKeeper(self.literalContextModes[0])
    wordList = WordList()
    #setup distance callback function
    def distanceCallback(symbol, extra):
        "callback function for displaying decoded distance"
        # NOTE: closes over maxDistance/copyLen from the enclosing loop,
        # so it reads their values at call time, not at definition time.
        index, offset = symbol.value(extra)
        if index:
            #recent distance
            distance = self.lastDistances[-index]+offset
            return 'Distance: {}last{:+d}={}'.format(index, offset, distance)
        #absolute value
        if offset<=maxDistance:
            return 'Absolute value: {} (pos {})'.format(offset, maxDistance-offset)
        #word list value
        action, word = divmod(offset-maxDistance, 1<<wordList.NDBITS[copyLen])
        return '{}-{} gives word {},{} action {}'.format(
            offset, maxDistance, copyLen, word, action)
    for dpc in self.prefixCodes[D]: dpc.callback = distanceCallback
    blockLen = 0
    #there we go
    while blockLen<self.MLEN:
        #get insert&copy command
        litLen, copyLen, dist0Flag = self.verboseRead(
            self.prefixCodes[I][
                self.figureBlockType(I)])
        #literal data
        for i in range(litLen):
            bt = self.figureBlockType(L)
            cm = self.contextMode.getIndex()
            ct = self.cmaps[L][bt<<6|cm]
            char = self.verboseRead(
                self.prefixCodes[L][ct],
                context='{},{}='.format(bt,cm))
            self.contextMode.add(char)
            self.output.append(char)
        blockLen += litLen
        #check if we're done
        if blockLen>=self.MLEN: return
        #distance
        #distances are computed relative to output length, at most window size
        maxDistance = min(len(self.output), self.windowSize)
        if dist0Flag:
            distance = self.lastDistances[-1]
        else:
            bt = self.figureBlockType(D)
            # Distance context is derived from the copy length (2/3/4/5+).
            cm = {2:0, 3:1, 4:2}.get(copyLen, 3)
            ct = self.cmaps[D][bt<<2|cm]
            index, offset = self.verboseRead(
                self.prefixCodes[D][ct],
                context='{},{}='.format(bt,cm))
            distance = self.lastDistances[-index]+offset if index else offset
            if index==1 and offset==0:
                #to make sure distance is not put in last distance list
                dist0Flag = True
        if distance<=maxDistance:
            #copy from output
            for i in range(
                    maxDistance-distance,
                    maxDistance-distance+copyLen):
                self.output.append(self.output[i])
            if not dist0Flag: self.lastDistances.append(distance)
            comment = 'Seen before'
        else:
            #fetch from wordlist
            newWord = wordList.word(copyLen, distance-maxDistance-1)
            self.output.extend(newWord)
            #adjust copyLen to reflect actual new data
            copyLen = len(newWord)
            comment = 'From wordlist'
        blockLen += copyLen
        print(' '*40,
            comment,
            ': "',
            outputFormatter(self.output[-copyLen:]),
            '"',
            sep='')
        # Feed the last two output bytes back into the literal context.
        self.contextMode.add(self.output[-2])
        self.contextMode.add(self.output[-1])
|
[
"def",
"metablock",
"(",
"self",
")",
":",
"print",
"(",
"'Meta block contents'",
".",
"center",
"(",
"60",
",",
"'='",
")",
")",
"self",
".",
"currentBlockTypes",
"=",
"{",
"L",
":",
"0",
",",
"I",
":",
"0",
",",
"D",
":",
"0",
",",
"pL",
":",
"1",
",",
"pI",
":",
"1",
",",
"pD",
":",
"1",
"}",
"self",
".",
"lastDistances",
"=",
"deque",
"(",
"[",
"17",
",",
"16",
",",
"11",
",",
"4",
"]",
",",
"maxlen",
"=",
"4",
")",
"#the current context mode is for block type 0",
"self",
".",
"contextMode",
"=",
"ContextModeKeeper",
"(",
"self",
".",
"literalContextModes",
"[",
"0",
"]",
")",
"wordList",
"=",
"WordList",
"(",
")",
"#setup distance callback function",
"def",
"distanceCallback",
"(",
"symbol",
",",
"extra",
")",
":",
"\"callback function for displaying decoded distance\"",
"index",
",",
"offset",
"=",
"symbol",
".",
"value",
"(",
"extra",
")",
"if",
"index",
":",
"#recent distance",
"distance",
"=",
"self",
".",
"lastDistances",
"[",
"-",
"index",
"]",
"+",
"offset",
"return",
"'Distance: {}last{:+d}={}'",
".",
"format",
"(",
"index",
",",
"offset",
",",
"distance",
")",
"#absolute value",
"if",
"offset",
"<=",
"maxDistance",
":",
"return",
"'Absolute value: {} (pos {})'",
".",
"format",
"(",
"offset",
",",
"maxDistance",
"-",
"offset",
")",
"#word list value",
"action",
",",
"word",
"=",
"divmod",
"(",
"offset",
"-",
"maxDistance",
",",
"1",
"<<",
"wordList",
".",
"NDBITS",
"[",
"copyLen",
"]",
")",
"return",
"'{}-{} gives word {},{} action {}'",
".",
"format",
"(",
"offset",
",",
"maxDistance",
",",
"copyLen",
",",
"word",
",",
"action",
")",
"for",
"dpc",
"in",
"self",
".",
"prefixCodes",
"[",
"D",
"]",
":",
"dpc",
".",
"callback",
"=",
"distanceCallback",
"blockLen",
"=",
"0",
"#there we go",
"while",
"blockLen",
"<",
"self",
".",
"MLEN",
":",
"#get insert© command",
"litLen",
",",
"copyLen",
",",
"dist0Flag",
"=",
"self",
".",
"verboseRead",
"(",
"self",
".",
"prefixCodes",
"[",
"I",
"]",
"[",
"self",
".",
"figureBlockType",
"(",
"I",
")",
"]",
")",
"#literal data",
"for",
"i",
"in",
"range",
"(",
"litLen",
")",
":",
"bt",
"=",
"self",
".",
"figureBlockType",
"(",
"L",
")",
"cm",
"=",
"self",
".",
"contextMode",
".",
"getIndex",
"(",
")",
"ct",
"=",
"self",
".",
"cmaps",
"[",
"L",
"]",
"[",
"bt",
"<<",
"6",
"|",
"cm",
"]",
"char",
"=",
"self",
".",
"verboseRead",
"(",
"self",
".",
"prefixCodes",
"[",
"L",
"]",
"[",
"ct",
"]",
",",
"context",
"=",
"'{},{}='",
".",
"format",
"(",
"bt",
",",
"cm",
")",
")",
"self",
".",
"contextMode",
".",
"add",
"(",
"char",
")",
"self",
".",
"output",
".",
"append",
"(",
"char",
")",
"blockLen",
"+=",
"litLen",
"#check if we're done",
"if",
"blockLen",
">=",
"self",
".",
"MLEN",
":",
"return",
"#distance",
"#distances are computed relative to output length, at most window size",
"maxDistance",
"=",
"min",
"(",
"len",
"(",
"self",
".",
"output",
")",
",",
"self",
".",
"windowSize",
")",
"if",
"dist0Flag",
":",
"distance",
"=",
"self",
".",
"lastDistances",
"[",
"-",
"1",
"]",
"else",
":",
"bt",
"=",
"self",
".",
"figureBlockType",
"(",
"D",
")",
"cm",
"=",
"{",
"2",
":",
"0",
",",
"3",
":",
"1",
",",
"4",
":",
"2",
"}",
".",
"get",
"(",
"copyLen",
",",
"3",
")",
"ct",
"=",
"self",
".",
"cmaps",
"[",
"D",
"]",
"[",
"bt",
"<<",
"2",
"|",
"cm",
"]",
"index",
",",
"offset",
"=",
"self",
".",
"verboseRead",
"(",
"self",
".",
"prefixCodes",
"[",
"D",
"]",
"[",
"ct",
"]",
",",
"context",
"=",
"'{},{}='",
".",
"format",
"(",
"bt",
",",
"cm",
")",
")",
"distance",
"=",
"self",
".",
"lastDistances",
"[",
"-",
"index",
"]",
"+",
"offset",
"if",
"index",
"else",
"offset",
"if",
"index",
"==",
"1",
"and",
"offset",
"==",
"0",
":",
"#to make sure distance is not put in last distance list",
"dist0Flag",
"=",
"True",
"if",
"distance",
"<=",
"maxDistance",
":",
"#copy from output",
"for",
"i",
"in",
"range",
"(",
"maxDistance",
"-",
"distance",
",",
"maxDistance",
"-",
"distance",
"+",
"copyLen",
")",
":",
"self",
".",
"output",
".",
"append",
"(",
"self",
".",
"output",
"[",
"i",
"]",
")",
"if",
"not",
"dist0Flag",
":",
"self",
".",
"lastDistances",
".",
"append",
"(",
"distance",
")",
"comment",
"=",
"'Seen before'",
"else",
":",
"#fetch from wordlist",
"newWord",
"=",
"wordList",
".",
"word",
"(",
"copyLen",
",",
"distance",
"-",
"maxDistance",
"-",
"1",
")",
"self",
".",
"output",
".",
"extend",
"(",
"newWord",
")",
"#adjust copyLen to reflect actual new data",
"copyLen",
"=",
"len",
"(",
"newWord",
")",
"comment",
"=",
"'From wordlist'",
"blockLen",
"+=",
"copyLen",
"print",
"(",
"' '",
"*",
"40",
",",
"comment",
",",
"': \"'",
",",
"outputFormatter",
"(",
"self",
".",
"output",
"[",
"-",
"copyLen",
":",
"]",
")",
",",
"'\"'",
",",
"sep",
"=",
"''",
")",
"self",
".",
"contextMode",
".",
"add",
"(",
"self",
".",
"output",
"[",
"-",
"2",
"]",
")",
"self",
".",
"contextMode",
".",
"add",
"(",
"self",
".",
"output",
"[",
"-",
"1",
"]",
")"
] | 43.626263
| 13.89899
|
def write_json(self, path, contents, message):
    """Serialize ``contents`` to ``path`` as pretty-printed JSON.

    Args:
        path (str): destination file path; parent directories are created
        contents (dict): JSON-serializable payload to write
        message (str): log message template; ``{path}`` is substituted
    """
    formatted = message.format(path=path)
    log.debug(formatted)
    parent_dir = os.path.dirname(path)
    makedirs(parent_dir)
    # indent + sorted keys give deterministic, diff-friendly output
    with open(path, "w") as fh:
        json.dump(contents, fh, indent=2, sort_keys=True)
|
[
"def",
"write_json",
"(",
"self",
",",
"path",
",",
"contents",
",",
"message",
")",
":",
"log",
".",
"debug",
"(",
"message",
".",
"format",
"(",
"path",
"=",
"path",
")",
")",
"makedirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
")",
"with",
"open",
"(",
"path",
",",
"\"w\"",
")",
"as",
"fh",
":",
"json",
".",
"dump",
"(",
"contents",
",",
"fh",
",",
"indent",
"=",
"2",
",",
"sort_keys",
"=",
"True",
")"
] | 32.769231
| 13.153846
|
def _scobit_transform_deriv_v(systematic_utilities,
                              alt_IDs,
                              rows_to_alts,
                              shape_params,
                              output_array=None,
                              *args, **kwargs):
    """
    Calculate the derivative of the scobit-transformed utilities with
    respect to the vector of systematic utilities.

    Parameters
    ----------
    systematic_utilities : 1D ndarray.
        Ints, floats, or longs. The systematic utility of each observation
        per available alternative (the design matrix dotted with the vector
        of utility coefficients).
    alt_IDs : 1D ndarray.
        Ints. One row per observation per available alternative, denoting
        the alternative that corresponds to each row of the design matrix.
    rows_to_alts : 2D scipy sparse matrix.
        Zeros and ones. One row per observation per available alternative
        and one column per possible alternative; maps design-matrix rows to
        the dataset's alternatives.
    shape_params : None or 1D ndarray.
        Ints, floats, or longs; one value per shape parameter. These are
        the natural logs of the 'natural' shape parameters.
    output_array : 2D scipy sparse array.
        Square, with `systematic_utilities.shape[0]` rows. Its data is
        overwritten with the calculated derivatives. This argument is NOT
        optional.

    Returns
    -------
    output_array : 2D scipy sparse array.
        Shape `(systematic_utilities.shape[0], systematic_utilities.shape[0])`.
        The derivative of the transformed utilities with respect to the
        systematic utilities; all elements are ints, floats, or longs.
    """
    # The raw shape parameters are logs of the 'natural' (positive) shape
    # parameters, so exponentiate, capping any overflow to +inf.
    natural_shapes = np.exp(shape_params)
    natural_shapes[np.isposinf(natural_shapes)] = max_comp_value
    shapes_per_row = rows_to_alts.dot(natural_shapes)
    # Pieces of the analytic derivative; each may overflow or underflow.
    exp_neg_v = np.exp(-systematic_utilities)
    powered_term = np.power(1 + exp_neg_v, shapes_per_row)
    small_powered_term = np.power(1 + exp_neg_v, shapes_per_row - 1)
    dh_dv = (shapes_per_row *
             exp_neg_v *
             small_powered_term /
             (powered_term - 1))
    # As v --> -inf the terms above overflow; by L'Hopital's rule the
    # derivative tends to the shape parameter itself. From plots, this
    # assignment may also correctly handle overflow from moderate v (say
    # |v| <= 10) combined with large shape parameters.
    overflow_mask = (np.isposinf(dh_dv) +
                     np.isposinf(exp_neg_v) +
                     np.isposinf(powered_term) +
                     np.isposinf(small_powered_term)).astype(bool)
    dh_dv[overflow_mask] = shapes_per_row[overflow_mask]
    # As v --> inf the terms underflow; by L'Hopital's rule the limit is 1.
    underflow_idx = np.where((exp_neg_v == 0) | (powered_term - 1 == 0))
    dh_dv[underflow_idx] = 1.0
    # Store the derivatives; off-diagonal entries stay zero because each
    # transformation depends only on its own systematic utility.
    output_array.data = dh_dv
    assert output_array.shape == (systematic_utilities.shape[0],
                                  systematic_utilities.shape[0])
    return output_array
|
[
"def",
"_scobit_transform_deriv_v",
"(",
"systematic_utilities",
",",
"alt_IDs",
",",
"rows_to_alts",
",",
"shape_params",
",",
"output_array",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Note the np.exp is needed because the raw curvature params are the log",
"# of the 'natural' curvature params. This is to ensure the natural shape",
"# params are always positive",
"curve_shapes",
"=",
"np",
".",
"exp",
"(",
"shape_params",
")",
"curve_shapes",
"[",
"np",
".",
"isposinf",
"(",
"curve_shapes",
")",
"]",
"=",
"max_comp_value",
"long_curve_shapes",
"=",
"rows_to_alts",
".",
"dot",
"(",
"curve_shapes",
")",
"# Generate the needed terms for the derivative of the transformation with",
"# respect to the systematic utility and guard against underflow or overflow",
"exp_neg_v",
"=",
"np",
".",
"exp",
"(",
"-",
"1",
"*",
"systematic_utilities",
")",
"powered_term",
"=",
"np",
".",
"power",
"(",
"1",
"+",
"exp_neg_v",
",",
"long_curve_shapes",
")",
"small_powered_term",
"=",
"np",
".",
"power",
"(",
"1",
"+",
"exp_neg_v",
",",
"long_curve_shapes",
"-",
"1",
")",
"derivs",
"=",
"(",
"long_curve_shapes",
"*",
"exp_neg_v",
"*",
"small_powered_term",
"/",
"(",
"powered_term",
"-",
"1",
")",
")",
"# Use L'Hopitals rule to deal with overflow from v --> -inf",
"# From plots, the assignment below may also correctly handle cases where we",
"# have overflow from moderate v (say |v| <= 10) and large shape parameters.",
"too_big_idx",
"=",
"(",
"np",
".",
"isposinf",
"(",
"derivs",
")",
"+",
"np",
".",
"isposinf",
"(",
"exp_neg_v",
")",
"+",
"np",
".",
"isposinf",
"(",
"powered_term",
")",
"+",
"np",
".",
"isposinf",
"(",
"small_powered_term",
")",
")",
".",
"astype",
"(",
"bool",
")",
"derivs",
"[",
"too_big_idx",
"]",
"=",
"long_curve_shapes",
"[",
"too_big_idx",
"]",
"# Use L'Hopitals rule to deal with underflow from v --> inf",
"too_small_idx",
"=",
"np",
".",
"where",
"(",
"(",
"exp_neg_v",
"==",
"0",
")",
"|",
"(",
"powered_term",
"-",
"1",
"==",
"0",
")",
")",
"derivs",
"[",
"too_small_idx",
"]",
"=",
"1.0",
"# Assign the calculated derivatives to the output array",
"output_array",
".",
"data",
"=",
"derivs",
"assert",
"output_array",
".",
"shape",
"==",
"(",
"systematic_utilities",
".",
"shape",
"[",
"0",
"]",
",",
"systematic_utilities",
".",
"shape",
"[",
"0",
"]",
")",
"# Return the matrix of dh_dv. Note the off-diagonal entries are zero",
"# because each transformation only depends on its value of v and no others",
"return",
"output_array"
] | 48.936709
| 22.202532
|
def integridad_data(self, data_integr=None, key=None):
    """Check the integrity of the stored time-series data.

    Verifies that the index of each DataFrame in the database is made of
    dates, is unique (no duplicates) and is monotonically increasing.

    :param data_integr: DataFrame or dict of DataFrames to check
                        (defaults to ``self.data``)
    :param key: when ``data_integr`` is a dict, restrict the check to this
                key; by default every key is checked
    """
    def _assert_integridad(df):
        # Missing or empty frames are trivially valid.
        if df is not None and not df.empty:
            assert (df.index.is_unique
                    and df.index.is_monotonic_increasing
                    and df.index.is_all_dates)

    if data_integr is None:
        data_integr = self.data
    # isinstance (not `type(...) is dict`) also accepts dict subclasses.
    if isinstance(data_integr, dict):
        keys = data_integr.keys() if key is None else [key]
        # Plain loop instead of a throwaway list comprehension: the call is
        # made purely for its side effect (the assertions).
        for k in keys:
            _assert_integridad(data_integr[k])
    else:
        _assert_integridad(data_integr)
|
[
"def",
"integridad_data",
"(",
"self",
",",
"data_integr",
"=",
"None",
",",
"key",
"=",
"None",
")",
":",
"def",
"_assert_integridad",
"(",
"df",
")",
":",
"if",
"df",
"is",
"not",
"None",
"and",
"not",
"df",
".",
"empty",
":",
"assert",
"(",
"df",
".",
"index",
".",
"is_unique",
"and",
"df",
".",
"index",
".",
"is_monotonic_increasing",
"and",
"df",
".",
"index",
".",
"is_all_dates",
")",
"if",
"data_integr",
"is",
"None",
":",
"data_integr",
"=",
"self",
".",
"data",
"if",
"type",
"(",
"data_integr",
")",
"is",
"dict",
":",
"if",
"key",
"is",
"None",
":",
"keys",
"=",
"data_integr",
".",
"keys",
"(",
")",
"else",
":",
"keys",
"=",
"[",
"key",
"]",
"[",
"_assert_integridad",
"(",
"data_integr",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"keys",
"]",
"else",
":",
"_assert_integridad",
"(",
"data_integr",
")"
] | 38.05
| 18.05
|
def deep_update_dict(default, options):
    """
    Recursively merge ``options`` into ``default`` in place.

    Values from ``options`` overwrite those in ``default``, except that when
    both sides hold a dict for the same key the dicts are merged recursively,
    so unspecified nested values remain unchanged.

    Args:
        default (dict): dict updated in place
        options (dict): dict whose values take precedence
    """
    for key, new_setting in options.items():
        default_setting = default.get(key)
        # Both sides must be dicts to merge recursively. The original code
        # only checked `default_setting` and crashed with AttributeError when
        # options replaced a nested dict with a scalar or None.
        if isinstance(default_setting, dict) and isinstance(new_setting, dict):
            deep_update_dict(default_setting, new_setting)
        else:
            default[key] = new_setting
|
[
"def",
"deep_update_dict",
"(",
"default",
",",
"options",
")",
":",
"for",
"key",
"in",
"options",
".",
"keys",
"(",
")",
":",
"default_setting",
"=",
"default",
".",
"get",
"(",
"key",
")",
"new_setting",
"=",
"options",
".",
"get",
"(",
"key",
")",
"if",
"isinstance",
"(",
"default_setting",
",",
"dict",
")",
":",
"deep_update_dict",
"(",
"default_setting",
",",
"new_setting",
")",
"else",
":",
"default",
"[",
"key",
"]",
"=",
"new_setting"
] | 33.916667
| 10.916667
|
def Enumerate():
  """See base class. Builds a descriptor dict per device known to IOKit."""
  # Create a HID manager and match every attached device (None = match all).
  hid_mgr = iokit.IOHIDManagerCreate(None, None)
  if not hid_mgr:
    raise errors.OsHidError('Unable to obtain HID manager reference')
  iokit.IOHIDManagerSetDeviceMatching(hid_mgr, None)
  # Copy the matched device set out of the manager.
  device_set_ref = iokit.IOHIDManagerCopyDevices(hid_mgr)
  if not device_set_ref:
    raise errors.OsHidError('Failed to obtain devices from HID manager')
  device_count = iokit.CFSetGetCount(device_set_ref)
  device_refs = (IO_HID_DEVICE_REF * device_count)()
  iokit.CFSetGetValues(device_set_ref, device_refs)
  # Query each device's properties and collect the public descriptor dicts.
  descriptors = []
  for device in device_refs:
    descriptor = base.DeviceDescriptor()
    descriptor.vendor_id = GetDeviceIntProperty(
        device, HID_DEVICE_PROPERTY_VENDOR_ID)
    descriptor.product_id = GetDeviceIntProperty(
        device, HID_DEVICE_PROPERTY_PRODUCT_ID)
    descriptor.product_string = GetDeviceStringProperty(
        device, HID_DEVICE_PROPERTY_PRODUCT)
    descriptor.usage = GetDeviceIntProperty(
        device, HID_DEVICE_PROPERTY_PRIMARY_USAGE)
    descriptor.usage_page = GetDeviceIntProperty(
        device, HID_DEVICE_PROPERTY_PRIMARY_USAGE_PAGE)
    descriptor.report_id = GetDeviceIntProperty(
        device, HID_DEVICE_PROPERTY_REPORT_ID)
    descriptor.path = GetDevicePath(device)
    descriptors.append(descriptor.ToPublicDict())
  # Release the CoreFoundation objects we own before returning.
  cf.CFRelease(device_set_ref)
  cf.CFRelease(hid_mgr)
  return descriptors
|
[
"def",
"Enumerate",
"(",
")",
":",
"# Init a HID manager",
"hid_mgr",
"=",
"iokit",
".",
"IOHIDManagerCreate",
"(",
"None",
",",
"None",
")",
"if",
"not",
"hid_mgr",
":",
"raise",
"errors",
".",
"OsHidError",
"(",
"'Unable to obtain HID manager reference'",
")",
"iokit",
".",
"IOHIDManagerSetDeviceMatching",
"(",
"hid_mgr",
",",
"None",
")",
"# Get devices from HID manager",
"device_set_ref",
"=",
"iokit",
".",
"IOHIDManagerCopyDevices",
"(",
"hid_mgr",
")",
"if",
"not",
"device_set_ref",
":",
"raise",
"errors",
".",
"OsHidError",
"(",
"'Failed to obtain devices from HID manager'",
")",
"num",
"=",
"iokit",
".",
"CFSetGetCount",
"(",
"device_set_ref",
")",
"devices",
"=",
"(",
"IO_HID_DEVICE_REF",
"*",
"num",
")",
"(",
")",
"iokit",
".",
"CFSetGetValues",
"(",
"device_set_ref",
",",
"devices",
")",
"# Retrieve and build descriptor dictionaries for each device",
"descriptors",
"=",
"[",
"]",
"for",
"dev",
"in",
"devices",
":",
"d",
"=",
"base",
".",
"DeviceDescriptor",
"(",
")",
"d",
".",
"vendor_id",
"=",
"GetDeviceIntProperty",
"(",
"dev",
",",
"HID_DEVICE_PROPERTY_VENDOR_ID",
")",
"d",
".",
"product_id",
"=",
"GetDeviceIntProperty",
"(",
"dev",
",",
"HID_DEVICE_PROPERTY_PRODUCT_ID",
")",
"d",
".",
"product_string",
"=",
"GetDeviceStringProperty",
"(",
"dev",
",",
"HID_DEVICE_PROPERTY_PRODUCT",
")",
"d",
".",
"usage",
"=",
"GetDeviceIntProperty",
"(",
"dev",
",",
"HID_DEVICE_PROPERTY_PRIMARY_USAGE",
")",
"d",
".",
"usage_page",
"=",
"GetDeviceIntProperty",
"(",
"dev",
",",
"HID_DEVICE_PROPERTY_PRIMARY_USAGE_PAGE",
")",
"d",
".",
"report_id",
"=",
"GetDeviceIntProperty",
"(",
"dev",
",",
"HID_DEVICE_PROPERTY_REPORT_ID",
")",
"d",
".",
"path",
"=",
"GetDevicePath",
"(",
"dev",
")",
"descriptors",
".",
"append",
"(",
"d",
".",
"ToPublicDict",
"(",
")",
")",
"# Clean up CF objects",
"cf",
".",
"CFRelease",
"(",
"device_set_ref",
")",
"cf",
".",
"CFRelease",
"(",
"hid_mgr",
")",
"return",
"descriptors"
] | 38.864865
| 20.243243
|
def set_substitution(self, word, substitution):
    """
    Register a word substitution in three matching flavours (raw,
    case-preserving normalized, and normalized).

    :param word: The word to replace
    :type word: str
    :param substitution: The word's substitution
    :type substitution: str
    """
    def _compile_pair(pattern_word, replacement):
        # \b anchors restrict the match to whole words only.
        compiled = re.compile(r'\b{word}\b'.format(word=pattern_word),
                              re.IGNORECASE)
        return (compiled, replacement)

    # Raw flavour: match the word exactly as given.
    raw_sub = _compile_pair(re.escape(word), substitution)
    # Case-preserving flavour: normalized but with original casing kept.
    case_sub = _compile_pair(re.escape(normalize(word, preserve_case=True)),
                             normalize(substitution, preserve_case=True))
    # Fully normalized flavour; rebinding word/substitution here also feeds
    # the log message below, matching the original behaviour.
    word = re.escape(normalize(word))
    substitution = normalize(substitution)
    sub = _compile_pair(word, substitution)
    sub_group = (sub, case_sub, raw_sub)
    # Only record substitutions that have not been processed already.
    if sub_group not in self._substitutions:
        self._log.info('Appending new word substitution: "{word}" => "{sub}"'.format(word=word, sub=substitution))
        self._substitutions.append(sub_group)
|
[
"def",
"set_substitution",
"(",
"self",
",",
"word",
",",
"substitution",
")",
":",
"# Parse the word and its substitution",
"raw_word",
"=",
"re",
".",
"escape",
"(",
"word",
")",
"raw_substitution",
"=",
"substitution",
"case_word",
"=",
"re",
".",
"escape",
"(",
"normalize",
"(",
"word",
",",
"preserve_case",
"=",
"True",
")",
")",
"case_substitution",
"=",
"normalize",
"(",
"substitution",
",",
"preserve_case",
"=",
"True",
")",
"word",
"=",
"re",
".",
"escape",
"(",
"normalize",
"(",
"word",
")",
")",
"substitution",
"=",
"normalize",
"(",
"substitution",
")",
"# Compile and group the regular expressions",
"raw_sub",
"=",
"(",
"re",
".",
"compile",
"(",
"r'\\b{word}\\b'",
".",
"format",
"(",
"word",
"=",
"raw_word",
")",
",",
"re",
".",
"IGNORECASE",
")",
",",
"raw_substitution",
")",
"case_sub",
"=",
"(",
"re",
".",
"compile",
"(",
"r'\\b{word}\\b'",
".",
"format",
"(",
"word",
"=",
"case_word",
")",
",",
"re",
".",
"IGNORECASE",
")",
",",
"case_substitution",
")",
"sub",
"=",
"(",
"re",
".",
"compile",
"(",
"r'\\b{word}\\b'",
".",
"format",
"(",
"word",
"=",
"word",
")",
",",
"re",
".",
"IGNORECASE",
")",
",",
"substitution",
")",
"sub_group",
"=",
"(",
"sub",
",",
"case_sub",
",",
"raw_sub",
")",
"# Make sure this substitution hasn't already been processed and add it to the substitutions list",
"if",
"sub_group",
"not",
"in",
"self",
".",
"_substitutions",
":",
"self",
".",
"_log",
".",
"info",
"(",
"'Appending new word substitution: \"{word}\" => \"{sub}\"'",
".",
"format",
"(",
"word",
"=",
"word",
",",
"sub",
"=",
"substitution",
")",
")",
"self",
".",
"_substitutions",
".",
"append",
"(",
"sub_group",
")"
] | 43.166667
| 23.766667
|
def do_EOF(self, args):
    """Terminate the console when the system end-of-file character arrives."""
    if _debug:
        ConsoleCmd._debug("do_EOF %r", args)
    # EOF behaves exactly like an explicit 'exit' command.
    return self.do_exit(args)
|
[
"def",
"do_EOF",
"(",
"self",
",",
"args",
")",
":",
"if",
"_debug",
":",
"ConsoleCmd",
".",
"_debug",
"(",
"\"do_EOF %r\"",
",",
"args",
")",
"return",
"self",
".",
"do_exit",
"(",
"args",
")"
] | 40.25
| 9.75
|
def _make_non_blocking(file_obj):
    """make file object non-blocking
    Windows doesn't have the fcntl module, but someone on
    stack overflow supplied this code as an answer, and it works
    http://stackoverflow.com/a/34504971/2893090"""
    if USING_WINDOWS:
        # Declare the ctypes signature for kernel32!SetNamedPipeHandleState:
        # BOOL SetNamedPipeHandleState(HANDLE, LPDWORD lpMode, LPDWORD, LPDWORD)
        LPDWORD = POINTER(DWORD)
        # PIPE_NOWAIT mode: reads/writes on the pipe return immediately
        # instead of blocking when no data is available.
        PIPE_NOWAIT = wintypes.DWORD(0x00000001)
        SetNamedPipeHandleState = windll.kernel32.SetNamedPipeHandleState
        SetNamedPipeHandleState.argtypes = [HANDLE, LPDWORD, LPDWORD, LPDWORD]
        SetNamedPipeHandleState.restype = BOOL
        # Translate the Python-level file descriptor into a Win32 HANDLE.
        h = msvcrt.get_osfhandle(file_obj.fileno())
        # NOTE: this call goes through windll.kernel32 again; ctypes caches
        # the function object, so the argtypes/restype set above still apply.
        res = windll.kernel32.SetNamedPipeHandleState(h, byref(PIPE_NOWAIT), None, None)
        if res == 0:
            # Zero return means failure; WinError() carries GetLastError info.
            raise ValueError(WinError())
    else:
        # Set the file status flag (F_SETFL) on the pipes to be non-blocking
        # so we can attempt to read from a pipe with no new data without locking
        # the program up
        fcntl.fcntl(file_obj, fcntl.F_SETFL, os.O_NONBLOCK)
|
[
"def",
"_make_non_blocking",
"(",
"file_obj",
")",
":",
"if",
"USING_WINDOWS",
":",
"LPDWORD",
"=",
"POINTER",
"(",
"DWORD",
")",
"PIPE_NOWAIT",
"=",
"wintypes",
".",
"DWORD",
"(",
"0x00000001",
")",
"SetNamedPipeHandleState",
"=",
"windll",
".",
"kernel32",
".",
"SetNamedPipeHandleState",
"SetNamedPipeHandleState",
".",
"argtypes",
"=",
"[",
"HANDLE",
",",
"LPDWORD",
",",
"LPDWORD",
",",
"LPDWORD",
"]",
"SetNamedPipeHandleState",
".",
"restype",
"=",
"BOOL",
"h",
"=",
"msvcrt",
".",
"get_osfhandle",
"(",
"file_obj",
".",
"fileno",
"(",
")",
")",
"res",
"=",
"windll",
".",
"kernel32",
".",
"SetNamedPipeHandleState",
"(",
"h",
",",
"byref",
"(",
"PIPE_NOWAIT",
")",
",",
"None",
",",
"None",
")",
"if",
"res",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"WinError",
"(",
")",
")",
"else",
":",
"# Set the file status flag (F_SETFL) on the pipes to be non-blocking",
"# so we can attempt to read from a pipe with no new data without locking",
"# the program up",
"fcntl",
".",
"fcntl",
"(",
"file_obj",
",",
"fcntl",
".",
"F_SETFL",
",",
"os",
".",
"O_NONBLOCK",
")"
] | 39.4
| 23.24
|
def _match_regex(regex, obj):
    """
    Return ``True`` when the regex matches ``obj`` or any string found
    inside it (dicts are searched by value, other iterables element-wise).
    :param regex: A regex.
    :type regex: ``regex``
    :param obj: An arbitrary object.
    :type object: ``object``
    :rtype: ``bool``
    """
    # Strings are checked directly: at least one occurrence of the pattern.
    if isinstance(obj, six.string_types):
        return len(regex.findall(obj)) > 0
    # Dicts: only the values are searched, never the keys.
    if isinstance(obj, dict):
        return _match_regex(regex, obj.values())
    # Other iterables: match if any contained string matches.
    if hasattr(obj, '__iter__'):
        return any(_match_regex(regex, item)
                   for item in obj if isinstance(item, six.string_types))
    # Non-string, non-container objects never match.
    return False
|
[
"def",
"_match_regex",
"(",
"regex",
",",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"six",
".",
"string_types",
")",
":",
"return",
"len",
"(",
"regex",
".",
"findall",
"(",
"obj",
")",
")",
">",
"0",
"elif",
"isinstance",
"(",
"obj",
",",
"dict",
")",
":",
"return",
"_match_regex",
"(",
"regex",
",",
"obj",
".",
"values",
"(",
")",
")",
"elif",
"hasattr",
"(",
"obj",
",",
"'__iter__'",
")",
":",
"# Object is a list or some other iterable.",
"return",
"any",
"(",
"_match_regex",
"(",
"regex",
",",
"s",
")",
"for",
"s",
"in",
"obj",
"if",
"isinstance",
"(",
"s",
",",
"six",
".",
"string_types",
")",
")",
"else",
":",
"return",
"False"
] | 30.590909
| 14.045455
|
def placeholder_symbol_table(name, version, max_id):
    """Constructs a shared symbol table that consists of symbols that all have no known text.
    This is generally used for cases where a shared symbol table is not available by the
    application.
    Args:
        name (unicode): The name of the shared symbol table.
        version (int): The version of the shared symbol table.
        max_id (int): The maximum ID allocated by this symbol table, must be ``>= 0``
    Returns:
        SymbolTable: The synthesized table.
    Raises:
        ValueError: If ``version`` is less than 1 or ``max_id`` is negative.
    """
    if version <= 0:
        # Fixed typo in the user-facing message ("grater" -> "greater").
        raise ValueError('Version must be greater than or equal to 1: %s' % version)
    if max_id < 0:
        raise ValueError('Max ID must be zero or positive: %s' % max_id)
    # `repeat(None, max_id)` yields max_id placeholder symbols with no text;
    # is_substitute marks the table as synthesized rather than authoritative.
    return SymbolTable(
        table_type=SHARED_TABLE_TYPE,
        symbols=repeat(None, max_id),
        name=name,
        version=version,
        is_substitute=True
    )
|
[
"def",
"placeholder_symbol_table",
"(",
"name",
",",
"version",
",",
"max_id",
")",
":",
"if",
"version",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"'Version must be grater than or equal to 1: %s'",
"%",
"version",
")",
"if",
"max_id",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'Max ID must be zero or positive: %s'",
"%",
"max_id",
")",
"return",
"SymbolTable",
"(",
"table_type",
"=",
"SHARED_TABLE_TYPE",
",",
"symbols",
"=",
"repeat",
"(",
"None",
",",
"max_id",
")",
",",
"name",
"=",
"name",
",",
"version",
"=",
"version",
",",
"is_substitute",
"=",
"True",
")"
] | 34.115385
| 23.846154
|
def scalar_projection(v1, v2):
    '''compute the scalar projection of v1 upon v2
    Args:
        v1, v2: iterable
            indices 0, 1, 2 corresponding to cartesian coordinates
    Returns:
        scalar length of the projection of v1 onto the direction of v2
        (the original docstring wrongly claimed a 3-vector was returned)
    '''
    # dot(v1, v2) / |v2|  ==  |v1| * cos(angle between v1 and v2)
    return np.dot(v1, v2) / np.linalg.norm(v2)
|
[
"def",
"scalar_projection",
"(",
"v1",
",",
"v2",
")",
":",
"return",
"np",
".",
"dot",
"(",
"v1",
",",
"v2",
")",
"/",
"np",
".",
"linalg",
".",
"norm",
"(",
"v2",
")"
] | 28.090909
| 24.090909
|
def ensure_has_same_campaigns(self):
    """Ensure that the 2 campaigns to merge have been generated
    from the same campaign.yaml
    """
    yaml_files = [osp.join(side, 'campaign.yaml')
                  for side in (self.lhs, self.rhs)]
    # Both campaign descriptions must exist ...
    for yaml_file in yaml_files:
        assert osp.isfile(yaml_file)
    # ... and be identical.
    assert filecmp.cmp(*yaml_files)
|
[
"def",
"ensure_has_same_campaigns",
"(",
"self",
")",
":",
"lhs_yaml",
"=",
"osp",
".",
"join",
"(",
"self",
".",
"lhs",
",",
"'campaign.yaml'",
")",
"rhs_yaml",
"=",
"osp",
".",
"join",
"(",
"self",
".",
"rhs",
",",
"'campaign.yaml'",
")",
"assert",
"osp",
".",
"isfile",
"(",
"lhs_yaml",
")",
"assert",
"osp",
".",
"isfile",
"(",
"rhs_yaml",
")",
"assert",
"filecmp",
".",
"cmp",
"(",
"lhs_yaml",
",",
"rhs_yaml",
")"
] | 41.444444
| 5.888889
|
def encoded_query(self):
    """Return the URL's query encoded as ``key=value&key=value``.

    May differ from the raw query element, which keeps the query exactly as
    urllib parsed it. The result never contains a leading question mark.
    """
    query = self.query
    # Nothing to encode for a missing or empty query (De Morgan of the
    # original positive condition).
    if query is None or query == '' or query == {}:
        return ''
    try:
        return urlencode(query, doseq=True, quote_via=urlquote)
    except TypeError:
        # Fallback for query shapes urlencode rejects: take the first value
        # of each key and join the quoted pairs manually.
        pairs = ["{0}={1}".format(urlquote(k), urlquote(query[k][0]))
                 for k in query]
        return '&'.join(pairs)
|
[
"def",
"encoded_query",
"(",
"self",
")",
":",
"if",
"self",
".",
"query",
"is",
"not",
"None",
"and",
"self",
".",
"query",
"!=",
"''",
"and",
"self",
".",
"query",
"!=",
"{",
"}",
":",
"try",
":",
"return",
"urlencode",
"(",
"self",
".",
"query",
",",
"doseq",
"=",
"True",
",",
"quote_via",
"=",
"urlquote",
")",
"except",
"TypeError",
":",
"return",
"'&'",
".",
"join",
"(",
"[",
"\"{0}={1}\"",
".",
"format",
"(",
"urlquote",
"(",
"k",
")",
",",
"urlquote",
"(",
"self",
".",
"query",
"[",
"k",
"]",
"[",
"0",
"]",
")",
")",
"for",
"k",
"in",
"self",
".",
"query",
"]",
")",
"else",
":",
"return",
"''"
] | 54.916667
| 28.166667
|
def read_html(io, match='.+', flavor=None, header=None, index_col=None,
              skiprows=None, attrs=None, parse_dates=False,
              tupleize_cols=None, thousands=',', encoding=None,
              decimal='.', converters=None, na_values=None,
              keep_default_na=True, displayed_only=True):
    r"""Read HTML tables into a ``list`` of ``DataFrame`` objects.

    Parameters
    ----------
    io : str or file-like
        A URL, a file-like object, or a raw string containing HTML. lxml
        only accepts the http, ftp and file url protocols; for an
        ``'https'`` URL try dropping the ``'s'``.
    match : str or compiled regular expression, optional
        Only tables containing text matching this regex/string are
        returned. Defaults to ``'.+'`` (any non-empty string), i.e. all
        tables on the page. Converted to a regex so Beautiful Soup and
        lxml behave consistently.
    flavor : str or None, container of strings
        Parsing engine. ``'bs4'`` and ``'html5lib'`` are synonymous.
        ``None`` tries ``lxml`` first and falls back on
        ``bs4`` + ``html5lib``.
    header : int or list-like or None, optional
        Row (or rows, for a :class:`~pandas.MultiIndex`) to use as the
        column headers.
    index_col : int or list-like or None, optional
        Column (or columns) used to create the index.
    skiprows : int or list-like or slice or None, optional
        0-based rows to skip after parsing the column integer. A sequence
        or slice skips exactly those rows; a single integer skips that
        many rows from the top.
    attrs : dict or None, optional
        HTML attributes used to identify the table, e.g.
        ``{'id': 'table'}``. Not validated before being handed to the
        parser, so they must be valid HTML table attributes.
    parse_dates : bool, optional
        See :func:`~read_csv` for details.
    tupleize_cols : bool, optional
        If ``False``, parse multiple header rows into a
        :class:`~pandas.MultiIndex`; otherwise return raw tuples.

        .. deprecated:: 0.21.0
           Will be removed; multiple header rows always become a
           MultiIndex.
    thousands : str, optional
        Thousands separator. Defaults to ``','``.
    encoding : str or None, optional
        Encoding used to decode the web page. ``None`` keeps the
        underlying parser library's behavior (usually the document's own
        declared encoding).
    decimal : str, default '.'
        Character recognized as the decimal point (e.g. ``','`` for
        European data).
    converters : dict, default None
        Mapping of column label/index to a function applied to each cell's
        content.
    na_values : iterable, default None
        Custom NA values.
    keep_default_na : bool, default True
        If ``False`` and ``na_values`` is given, the default NaN values
        are replaced instead of extended.
    displayed_only : bool, default True
        Whether elements styled ``display: none`` should be parsed.

    Returns
    -------
    dfs : list of DataFrames

    See Also
    --------
    read_csv

    Notes
    -----
    Read the :ref:`gotchas about the HTML parsing libraries
    <io.html.gotchas>` first, and expect to do some cleanup afterwards.
    Only ``<table>``, ``<tr>``, ``<th>`` and ``<td>`` elements are
    searched; ``colspan``/``rowspan`` are handled where possible. Like
    :func:`~read_csv`, `header` is applied **after** `skiprows`. Always
    returns a non-empty list of :class:`DataFrame` or raises.
    """
    _importers()
    # Validate skiprows before any parsing work: rows are only ever skipped
    # from the top, so a negative count is meaningless.
    if isinstance(skiprows, numbers.Integral) and skiprows < 0:
        raise ValueError('cannot skip rows starting from the end of the '
                         'data (you passed a negative value)')
    _validate_header_arg(header)
    return _parse(flavor=flavor, io=io, match=match, header=header,
                  index_col=index_col, skiprows=skiprows,
                  parse_dates=parse_dates, tupleize_cols=tupleize_cols,
                  thousands=thousands, attrs=attrs, encoding=encoding,
                  decimal=decimal, converters=converters, na_values=na_values,
                  keep_default_na=keep_default_na,
                  displayed_only=displayed_only)
|
[
"def",
"read_html",
"(",
"io",
",",
"match",
"=",
"'.+'",
",",
"flavor",
"=",
"None",
",",
"header",
"=",
"None",
",",
"index_col",
"=",
"None",
",",
"skiprows",
"=",
"None",
",",
"attrs",
"=",
"None",
",",
"parse_dates",
"=",
"False",
",",
"tupleize_cols",
"=",
"None",
",",
"thousands",
"=",
"','",
",",
"encoding",
"=",
"None",
",",
"decimal",
"=",
"'.'",
",",
"converters",
"=",
"None",
",",
"na_values",
"=",
"None",
",",
"keep_default_na",
"=",
"True",
",",
"displayed_only",
"=",
"True",
")",
":",
"_importers",
"(",
")",
"# Type check here. We don't want to parse only to fail because of an",
"# invalid value of an integer skiprows.",
"if",
"isinstance",
"(",
"skiprows",
",",
"numbers",
".",
"Integral",
")",
"and",
"skiprows",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'cannot skip rows starting from the end of the '",
"'data (you passed a negative value)'",
")",
"_validate_header_arg",
"(",
"header",
")",
"return",
"_parse",
"(",
"flavor",
"=",
"flavor",
",",
"io",
"=",
"io",
",",
"match",
"=",
"match",
",",
"header",
"=",
"header",
",",
"index_col",
"=",
"index_col",
",",
"skiprows",
"=",
"skiprows",
",",
"parse_dates",
"=",
"parse_dates",
",",
"tupleize_cols",
"=",
"tupleize_cols",
",",
"thousands",
"=",
"thousands",
",",
"attrs",
"=",
"attrs",
",",
"encoding",
"=",
"encoding",
",",
"decimal",
"=",
"decimal",
",",
"converters",
"=",
"converters",
",",
"na_values",
"=",
"na_values",
",",
"keep_default_na",
"=",
"keep_default_na",
",",
"displayed_only",
"=",
"displayed_only",
")"
] | 42.857143
| 26.910714
|
def securityHandler(self, value):
    """ sets the security handler """
    # Only BaseSecurityHandler instances are ever considered.
    if not isinstance(value, BaseSecurityHandler):
        return
    # Of those, only the AGOL-token and OAuth flavors are actually stored;
    # any other subclass is silently ignored (matching historic behavior).
    accepted = (security.AGOLTokenSecurityHandler,
                security.OAuthSecurityHandler)
    if isinstance(value, accepted):
        self._securityHandler = value
|
[
"def",
"securityHandler",
"(",
"self",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"BaseSecurityHandler",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"security",
".",
"AGOLTokenSecurityHandler",
")",
":",
"self",
".",
"_securityHandler",
"=",
"value",
"elif",
"isinstance",
"(",
"value",
",",
"security",
".",
"OAuthSecurityHandler",
")",
":",
"self",
".",
"_securityHandler",
"=",
"value",
"else",
":",
"pass"
] | 42.777778
| 13.777778
|
def _remove_debug_only(self):
    """Strip the ``debug_only`` key from every configured handler.

    ``debug_only`` is this application's own extension and is not a valid
    ``logging.config.dictConfig`` key, so it must be removed before the
    config is handed to the logging machinery.
    """
    LOGGER.debug('Removing debug only from handlers')
    handlers = self.config[self.HANDLERS]
    for name in handlers:
        # pop with a default is a no-op when the key is absent.
        handlers[name].pop(self.DEBUG_ONLY, None)
|
[
"def",
"_remove_debug_only",
"(",
"self",
")",
":",
"LOGGER",
".",
"debug",
"(",
"'Removing debug only from handlers'",
")",
"for",
"handler",
"in",
"self",
".",
"config",
"[",
"self",
".",
"HANDLERS",
"]",
":",
"if",
"self",
".",
"DEBUG_ONLY",
"in",
"self",
".",
"config",
"[",
"self",
".",
"HANDLERS",
"]",
"[",
"handler",
"]",
":",
"del",
"self",
".",
"config",
"[",
"self",
".",
"HANDLERS",
"]",
"[",
"handler",
"]",
"[",
"self",
".",
"DEBUG_ONLY",
"]"
] | 42.888889
| 17.888889
|
def _OsmoticPressure(T, P, S):
    """Calculate the osmotic pressure of seawater.

    Parameters
    ----------
    T : float
        Temperature, [K]
    P : float
        Pressure, [MPa]
    S : float
        Salinity, [kg/kg]

    Returns
    -------
    Posm : float
        Osmotic pressure, [MPa]

    References
    ----------
    IAPWS, Advisory Note No. 5: Industrial Calculation of the Thermodynamic
    Properties of Seawater, http://www.iapws.org/relguide/Advise5.html, Eq 15
    """
    # Gibbs energy of pure water at (T, P).
    state_w = _Region1(T, P)
    g_w = state_w["h"] - T * state_w["s"]

    def residual(Posm):
        # Gibbs energy of pure water at the elevated pressure P + Posm ...
        state_w2 = _Region1(T, P + Posm)
        g_w2 = state_w2["h"] - T * state_w2["s"]
        # ... balanced against the saline contribution at the same pressure.
        saline = SeaWater._saline(T, P + Posm, S)
        return -saline["g"] + S * saline["gs"] - g_w + g_w2

    # Root of the residual gives the osmotic pressure (Eq 15).
    return fsolve(residual, 0)[0]
|
[
"def",
"_OsmoticPressure",
"(",
"T",
",",
"P",
",",
"S",
")",
":",
"pw",
"=",
"_Region1",
"(",
"T",
",",
"P",
")",
"gw",
"=",
"pw",
"[",
"\"h\"",
"]",
"-",
"T",
"*",
"pw",
"[",
"\"s\"",
"]",
"def",
"f",
"(",
"Posm",
")",
":",
"pw2",
"=",
"_Region1",
"(",
"T",
",",
"P",
"+",
"Posm",
")",
"gw2",
"=",
"pw2",
"[",
"\"h\"",
"]",
"-",
"T",
"*",
"pw2",
"[",
"\"s\"",
"]",
"ps",
"=",
"SeaWater",
".",
"_saline",
"(",
"T",
",",
"P",
"+",
"Posm",
",",
"S",
")",
"return",
"-",
"ps",
"[",
"\"g\"",
"]",
"+",
"S",
"*",
"ps",
"[",
"\"gs\"",
"]",
"-",
"gw",
"+",
"gw2",
"Posm",
"=",
"fsolve",
"(",
"f",
",",
"0",
")",
"[",
"0",
"]",
"return",
"Posm"
] | 22.515152
| 21.818182
|
def _make_package(binder):
    """Makes an ``.epub.Package`` from a Binder'ish instance.

    Builds the navigation document for *binder*, turns every resource,
    document pointer and document found while flattening the binder into a
    package ``Item``, and wraps them all in a ``Package`` carrying the
    binder's metadata.

    :param binder: a ``Binder``-like object (falls back to ``hash(binder)``
        when it has no id)
    :return: the assembled ``Package``
    """
    # Fix: removed the unused `template_env = jinja2.Environment(...)`
    # assignment — it was dead code and a needless jinja2 dependency.
    package_id = binder.id
    if package_id is None:
        package_id = hash(binder)
    package_name = "{}.opf".format(package_id)
    extensions = get_model_extensions(binder)
    # Build the package item list.
    items = []
    # Build the binder as an item, specifically a navigation item.
    navigation_document = bytes(HTMLFormatter(binder, extensions))
    navigation_document_name = "{}{}".format(
        package_id,
        mimetypes.guess_extension('application/xhtml+xml', strict=False))
    item = Item(str(navigation_document_name),
                io.BytesIO(navigation_document),
                'application/xhtml+xml',
                is_navigation=True, properties=['nav'])
    items.append(item)
    # Registry of resources seen so far, keyed by id, so internal
    # references can later be bound to them.
    resources = {}
    # Roll through the model list again, making each one an item.
    for model in flatten_model(binder):
        for resource in getattr(model, 'resources', []):
            resources[resource.id] = resource
            with resource.open() as data:
                item = Item(resource.id, data, resource.media_type)
            items.append(item)
        if isinstance(model, (Binder, TranslucentBinder,)):
            continue
        if isinstance(model, DocumentPointer):
            content = bytes(HTMLFormatter(model))
            item = Item(''.join([model.ident_hash, extensions[model.id]]),
                        io.BytesIO(content),
                        model.media_type)
            items.append(item)
            continue
        for reference in model.references:
            if reference.remote_type == INLINE_REFERENCE_TYPE:
                # has side effects - converts ref type to INTERNAL w/
                # appropriate uri, so need to replicate resource treatment from
                # above
                resource = _make_resource_from_inline(reference)
                model.resources.append(resource)
                resources[resource.id] = resource
                with resource.open() as data:
                    item = Item(resource.id, data, resource.media_type)
                items.append(item)
                reference.bind(resource, '../resources/{}')
            elif reference.remote_type == INTERNAL_REFERENCE_TYPE:
                filename = os.path.basename(reference.uri)
                resource = resources.get(filename)
                if resource:
                    reference.bind(resource, '../resources/{}')
        # Serialize the (possibly reference-rebound) document itself.
        complete_content = bytes(HTMLFormatter(model))
        item = Item(''.join([model.ident_hash, extensions[model.id]]),
                    io.BytesIO(complete_content),
                    model.media_type)
        items.append(item)
    # Build the package.
    package = Package(package_name, items, binder.metadata)
    return package
|
[
"def",
"_make_package",
"(",
"binder",
")",
":",
"package_id",
"=",
"binder",
".",
"id",
"if",
"package_id",
"is",
"None",
":",
"package_id",
"=",
"hash",
"(",
"binder",
")",
"package_name",
"=",
"\"{}.opf\"",
".",
"format",
"(",
"package_id",
")",
"extensions",
"=",
"get_model_extensions",
"(",
"binder",
")",
"template_env",
"=",
"jinja2",
".",
"Environment",
"(",
"trim_blocks",
"=",
"True",
",",
"lstrip_blocks",
"=",
"True",
")",
"# Build the package item list.",
"items",
"=",
"[",
"]",
"# Build the binder as an item, specifically a navigation item.",
"navigation_document",
"=",
"bytes",
"(",
"HTMLFormatter",
"(",
"binder",
",",
"extensions",
")",
")",
"navigation_document_name",
"=",
"\"{}{}\"",
".",
"format",
"(",
"package_id",
",",
"mimetypes",
".",
"guess_extension",
"(",
"'application/xhtml+xml'",
",",
"strict",
"=",
"False",
")",
")",
"item",
"=",
"Item",
"(",
"str",
"(",
"navigation_document_name",
")",
",",
"io",
".",
"BytesIO",
"(",
"navigation_document",
")",
",",
"'application/xhtml+xml'",
",",
"is_navigation",
"=",
"True",
",",
"properties",
"=",
"[",
"'nav'",
"]",
")",
"items",
".",
"append",
"(",
"item",
")",
"resources",
"=",
"{",
"}",
"# Roll through the model list again, making each one an item.",
"for",
"model",
"in",
"flatten_model",
"(",
"binder",
")",
":",
"for",
"resource",
"in",
"getattr",
"(",
"model",
",",
"'resources'",
",",
"[",
"]",
")",
":",
"resources",
"[",
"resource",
".",
"id",
"]",
"=",
"resource",
"with",
"resource",
".",
"open",
"(",
")",
"as",
"data",
":",
"item",
"=",
"Item",
"(",
"resource",
".",
"id",
",",
"data",
",",
"resource",
".",
"media_type",
")",
"items",
".",
"append",
"(",
"item",
")",
"if",
"isinstance",
"(",
"model",
",",
"(",
"Binder",
",",
"TranslucentBinder",
",",
")",
")",
":",
"continue",
"if",
"isinstance",
"(",
"model",
",",
"DocumentPointer",
")",
":",
"content",
"=",
"bytes",
"(",
"HTMLFormatter",
"(",
"model",
")",
")",
"item",
"=",
"Item",
"(",
"''",
".",
"join",
"(",
"[",
"model",
".",
"ident_hash",
",",
"extensions",
"[",
"model",
".",
"id",
"]",
"]",
")",
",",
"io",
".",
"BytesIO",
"(",
"content",
")",
",",
"model",
".",
"media_type",
")",
"items",
".",
"append",
"(",
"item",
")",
"continue",
"for",
"reference",
"in",
"model",
".",
"references",
":",
"if",
"reference",
".",
"remote_type",
"==",
"INLINE_REFERENCE_TYPE",
":",
"# has side effects - converts ref type to INTERNAL w/",
"# appropriate uri, so need to replicate resource treatment from",
"# above",
"resource",
"=",
"_make_resource_from_inline",
"(",
"reference",
")",
"model",
".",
"resources",
".",
"append",
"(",
"resource",
")",
"resources",
"[",
"resource",
".",
"id",
"]",
"=",
"resource",
"with",
"resource",
".",
"open",
"(",
")",
"as",
"data",
":",
"item",
"=",
"Item",
"(",
"resource",
".",
"id",
",",
"data",
",",
"resource",
".",
"media_type",
")",
"items",
".",
"append",
"(",
"item",
")",
"reference",
".",
"bind",
"(",
"resource",
",",
"'../resources/{}'",
")",
"elif",
"reference",
".",
"remote_type",
"==",
"INTERNAL_REFERENCE_TYPE",
":",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"reference",
".",
"uri",
")",
"resource",
"=",
"resources",
".",
"get",
"(",
"filename",
")",
"if",
"resource",
":",
"reference",
".",
"bind",
"(",
"resource",
",",
"'../resources/{}'",
")",
"complete_content",
"=",
"bytes",
"(",
"HTMLFormatter",
"(",
"model",
")",
")",
"item",
"=",
"Item",
"(",
"''",
".",
"join",
"(",
"[",
"model",
".",
"ident_hash",
",",
"extensions",
"[",
"model",
".",
"id",
"]",
"]",
")",
",",
"io",
".",
"BytesIO",
"(",
"complete_content",
")",
",",
"model",
".",
"media_type",
")",
"items",
".",
"append",
"(",
"item",
")",
"# Build the package.",
"package",
"=",
"Package",
"(",
"package_name",
",",
"items",
",",
"binder",
".",
"metadata",
")",
"return",
"package"
] | 40.871429
| 18.028571
|
def _write_json(filepath, data, kwargs):
    """See documentation of mpu.io.write."""
    # Fill in pretty-printing defaults without clobbering caller choices.
    defaults = {
        'indent': 4,
        'sort_keys': True,
        'separators': (',', ': '),
        'ensure_ascii': False,
    }
    for option, value in defaults.items():
        kwargs.setdefault(option, value)
    with io_stl.open(filepath, 'w', encoding='utf8') as outfile:
        outfile.write(to_unicode(json.dumps(data, **kwargs)))
    return data
|
[
"def",
"_write_json",
"(",
"filepath",
",",
"data",
",",
"kwargs",
")",
":",
"with",
"io_stl",
".",
"open",
"(",
"filepath",
",",
"'w'",
",",
"encoding",
"=",
"'utf8'",
")",
"as",
"outfile",
":",
"if",
"'indent'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'indent'",
"]",
"=",
"4",
"if",
"'sort_keys'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'sort_keys'",
"]",
"=",
"True",
"if",
"'separators'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'separators'",
"]",
"=",
"(",
"','",
",",
"': '",
")",
"if",
"'ensure_ascii'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'ensure_ascii'",
"]",
"=",
"False",
"str_",
"=",
"json",
".",
"dumps",
"(",
"data",
",",
"*",
"*",
"kwargs",
")",
"outfile",
".",
"write",
"(",
"to_unicode",
"(",
"str_",
")",
")",
"return",
"data"
] | 39.285714
| 5.714286
|
def get_config_parameter_loglevel(config: ConfigParser,
                                  section: str,
                                  param: str,
                                  default: int) -> int:
    """
    Get ``loglevel`` parameter from ``configparser`` ``.INI`` file, e.g.
    mapping ``'debug'`` to ``logging.DEBUG``.

    Args:
        config: :class:`ConfigParser` object
        section: section name within config file
        param: name of parameter within section
        default: default value

    Returns:
        parameter value, or default
    """
    # Accepted (case-insensitive) level names and their logging constants.
    name_to_level = {
        "debug": logging.DEBUG,  # 10
        "info": logging.INFO,
        "warn": logging.WARN,
        "warning": logging.WARN,
        "error": logging.ERROR,
        "critical": logging.CRITICAL,  # 50
        "fatal": logging.CRITICAL,
    }
    try:
        value = config.get(section, param).lower()
        if value not in name_to_level:
            raise ValueError
        return name_to_level[value]
    except (TypeError, ValueError, NoOptionError, AttributeError):
        log.warning(
            "Configuration variable {} not found or improper in section [{}]; "
            "using default of {!r}", param, section, default)
        return default
|
[
"def",
"get_config_parameter_loglevel",
"(",
"config",
":",
"ConfigParser",
",",
"section",
":",
"str",
",",
"param",
":",
"str",
",",
"default",
":",
"int",
")",
"->",
"int",
":",
"try",
":",
"value",
"=",
"config",
".",
"get",
"(",
"section",
",",
"param",
")",
".",
"lower",
"(",
")",
"if",
"value",
"==",
"\"debug\"",
":",
"return",
"logging",
".",
"DEBUG",
"# 10",
"elif",
"value",
"==",
"\"info\"",
":",
"return",
"logging",
".",
"INFO",
"elif",
"value",
"in",
"[",
"\"warn\"",
",",
"\"warning\"",
"]",
":",
"return",
"logging",
".",
"WARN",
"elif",
"value",
"==",
"\"error\"",
":",
"return",
"logging",
".",
"ERROR",
"elif",
"value",
"in",
"[",
"\"critical\"",
",",
"\"fatal\"",
"]",
":",
"return",
"logging",
".",
"CRITICAL",
"# 50",
"else",
":",
"raise",
"ValueError",
"except",
"(",
"TypeError",
",",
"ValueError",
",",
"NoOptionError",
",",
"AttributeError",
")",
":",
"log",
".",
"warning",
"(",
"\"Configuration variable {} not found or improper in section [{}]; \"",
"\"using default of {!r}\"",
",",
"param",
",",
"section",
",",
"default",
")",
"return",
"default"
] | 35.742857
| 13.857143
|
def markdown2html(markdown_text):
    '''
    Render markdown text to escaped HTML using the project's extensions.

    :param markdown_text: The markdown text.
    :return: The HTML text.
    '''
    enabled_extensions = [
        WikiLinkExtension(base_url='/wiki/', end_url=''),
        'markdown.extensions.extra',
        'markdown.extensions.toc',
        'markdown.extensions.codehilite',
        'markdown.extensions.meta',
    ]
    rendered = markdown.markdown(markdown_text, extensions=enabled_extensions)
    # Drop the newline that follows CJK punctuation so those lines rejoin.
    for punctuation in ('。', ',', ';', '、', '!', '?'):
        rendered = rendered.replace(punctuation + '\n', punctuation)
    return tornado.escape.xhtml_escape(rendered)
|
[
"def",
"markdown2html",
"(",
"markdown_text",
")",
":",
"html",
"=",
"markdown",
".",
"markdown",
"(",
"markdown_text",
",",
"extensions",
"=",
"[",
"WikiLinkExtension",
"(",
"base_url",
"=",
"'/wiki/'",
",",
"end_url",
"=",
"''",
")",
",",
"'markdown.extensions.extra'",
",",
"'markdown.extensions.toc'",
",",
"'markdown.extensions.codehilite'",
",",
"'markdown.extensions.meta'",
"]",
")",
"han_biaodians",
"=",
"[",
"'。', ",
"'",
"', ';",
"'",
" '、',",
" ",
"!', '",
"?",
"]",
"",
"",
"",
"for",
"han_biaodian",
"in",
"han_biaodians",
":",
"html",
"=",
"html",
".",
"replace",
"(",
"han_biaodian",
"+",
"'\\n'",
",",
"han_biaodian",
")",
"return",
"tornado",
".",
"escape",
".",
"xhtml_escape",
"(",
"html",
")"
] | 33.65
| 14.25
|
def _speak_none(self, element):
    """
    No speak any content of element only.

    :param element: The element.
    :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
    """
    # pylint: disable=no-self-use
    silencing_attributes = (
        ('role', 'presentation'),
        ('aria-hidden', 'true'),
        (AccessibleCSSImplementation.DATA_SPEAK, 'none'),
    )
    for attribute, value in silencing_attributes:
        element.set_attribute(attribute, value)
|
[
"def",
"_speak_none",
"(",
"self",
",",
"element",
")",
":",
"# pylint: disable=no-self-use",
"element",
".",
"set_attribute",
"(",
"'role'",
",",
"'presentation'",
")",
"element",
".",
"set_attribute",
"(",
"'aria-hidden'",
",",
"'true'",
")",
"element",
".",
"set_attribute",
"(",
"AccessibleCSSImplementation",
".",
"DATA_SPEAK",
",",
"'none'",
")"
] | 35.333333
| 16.166667
|
def fix_config(self, options):
    """
    Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.

    :param options: the options to fix
    :type options: dict
    :return: the (potentially) fixed options
    :rtype: dict
    """
    # Fix: the original repeated the same add-default/add-help boilerplate
    # six times; drive it from one table instead. Tuple order preserves the
    # original insertion order of both `options` and `self.help`.
    defaults = (
        ("db_url", "jdbc:mysql://somehost:3306/somedatabase",
         "The JDBC database URL to connect to (str)."),
        ("user", "user",
         "The database user to use for connecting (str)."),
        ("password", "secret",
         "The password for the database user (str)."),
        ("query", "SELECT * FROM table",
         "The SQL query for generating the dataset (str)."),
        ("sparse", False,
         "Whether to return the data in sparse format (bool)."),
        ("custom_props", "",
         "Custom properties filename (str)."),
    )
    for opt, default, help_text in defaults:
        if opt not in options:
            options[opt] = default
        if opt not in self.help:
            self.help[opt] = help_text
    return super(LoadDatabase, self).fix_config(options)
|
[
"def",
"fix_config",
"(",
"self",
",",
"options",
")",
":",
"opt",
"=",
"\"db_url\"",
"if",
"opt",
"not",
"in",
"options",
":",
"options",
"[",
"opt",
"]",
"=",
"\"jdbc:mysql://somehost:3306/somedatabase\"",
"if",
"opt",
"not",
"in",
"self",
".",
"help",
":",
"self",
".",
"help",
"[",
"opt",
"]",
"=",
"\"The JDBC database URL to connect to (str).\"",
"opt",
"=",
"\"user\"",
"if",
"opt",
"not",
"in",
"options",
":",
"options",
"[",
"opt",
"]",
"=",
"\"user\"",
"if",
"opt",
"not",
"in",
"self",
".",
"help",
":",
"self",
".",
"help",
"[",
"opt",
"]",
"=",
"\"The database user to use for connecting (str).\"",
"opt",
"=",
"\"password\"",
"if",
"opt",
"not",
"in",
"options",
":",
"options",
"[",
"opt",
"]",
"=",
"\"secret\"",
"if",
"opt",
"not",
"in",
"self",
".",
"help",
":",
"self",
".",
"help",
"[",
"opt",
"]",
"=",
"\"The password for the database user (str).\"",
"opt",
"=",
"\"query\"",
"if",
"opt",
"not",
"in",
"options",
":",
"options",
"[",
"opt",
"]",
"=",
"\"SELECT * FROM table\"",
"if",
"opt",
"not",
"in",
"self",
".",
"help",
":",
"self",
".",
"help",
"[",
"opt",
"]",
"=",
"\"The SQL query for generating the dataset (str).\"",
"opt",
"=",
"\"sparse\"",
"if",
"opt",
"not",
"in",
"options",
":",
"options",
"[",
"opt",
"]",
"=",
"False",
"if",
"opt",
"not",
"in",
"self",
".",
"help",
":",
"self",
".",
"help",
"[",
"opt",
"]",
"=",
"\"Whether to return the data in sparse format (bool).\"",
"opt",
"=",
"\"custom_props\"",
"if",
"opt",
"not",
"in",
"options",
":",
"options",
"[",
"opt",
"]",
"=",
"\"\"",
"if",
"opt",
"not",
"in",
"self",
".",
"help",
":",
"self",
".",
"help",
"[",
"opt",
"]",
"=",
"\"Custom properties filename (str).\"",
"return",
"super",
"(",
"LoadDatabase",
",",
"self",
")",
".",
"fix_config",
"(",
"options",
")"
] | 33.565217
| 19.391304
|
def do_get_dataset(data, key, create=False):
    """Get a dataset.

    Parameters
    ----------
    data : `None` or `dict` of `dict` of ([`int`, `str`] or [`list` of `int`, `list` of `str`])
        Data.
    key : `str`
        Dataset key.
    create : `bool`, optional
        Create a dataset if it does not exist.

    Returns
    -------
    `None` or `dict` of ([`int`, `str`] or [`list` of `int`, `list` of `str`])
    """
    if data is None:
        return None
    if key in data:
        return data[key]
    # Missing key: either materialize an empty dataset in place or signal
    # the absence exactly like a plain dict lookup would.
    if create:
        data[key] = {}
        return data[key]
    raise KeyError(key)
|
[
"def",
"do_get_dataset",
"(",
"data",
",",
"key",
",",
"create",
"=",
"False",
")",
":",
"if",
"data",
"is",
"None",
":",
"return",
"None",
"try",
":",
"return",
"data",
"[",
"key",
"]",
"except",
"KeyError",
":",
"if",
"create",
":",
"dataset",
"=",
"{",
"}",
"data",
"[",
"key",
"]",
"=",
"dataset",
"return",
"dataset",
"else",
":",
"raise"
] | 27.185185
| 19.703704
|
def handle_packet(self, packet):
    """Lets librtmp look at a packet and send a response
    if needed."""
    if isinstance(packet, RTMPPacket):
        return librtmp.RTMP_ClientPacket(self.rtmp, packet.packet)
    raise ValueError("A RTMPPacket argument is required")
|
[
"def",
"handle_packet",
"(",
"self",
",",
"packet",
")",
":",
"if",
"not",
"isinstance",
"(",
"packet",
",",
"RTMPPacket",
")",
":",
"raise",
"ValueError",
"(",
"\"A RTMPPacket argument is required\"",
")",
"return",
"librtmp",
".",
"RTMP_ClientPacket",
"(",
"self",
".",
"rtmp",
",",
"packet",
".",
"packet",
")"
] | 36.625
| 18.125
|
def check_block_spacing(
    self,
    first_block_type: LineType,
    second_block_type: LineType,
    error_message: str,
) -> typing.Generator[AAAError, None, None]:
    """
    Checks there is a clear single line between ``first_block_type`` and
    ``second_block_type``.

    Note:
        Is tested via ``check_arrange_act_spacing()`` and
        ``check_act_assert_spacing()``.
    """
    indexed = list(enumerate(self))
    first_linenos = [number for number, kind in indexed
                     if kind is first_block_type]
    if not first_linenos:
        # First block has no lines
        return
    last_of_first = first_linenos[-1]
    second_linenos = [number for number, kind in indexed
                      if kind is second_block_type]
    if not second_linenos:
        # Second block has no lines
        return
    first_of_second = second_linenos[0]
    blank_lines = [
        entry for entry in indexed[last_of_first + 1:first_of_second]
        if entry[1] is LineType.blank_line
    ]
    if not blank_lines:
        # Point at line above second block
        yield AAAError(
            line_number=self.fn_offset + first_of_second - 1,
            offset=0,
            text=error_message.format('none'),
        )
        return
    if len(blank_lines) > 1:
        # Too many blank lines - point at the first extra one, the 2nd
        yield AAAError(
            line_number=self.fn_offset + blank_lines[1][0],
            offset=0,
            text=error_message.format(len(blank_lines)),
        )
|
[
"def",
"check_block_spacing",
"(",
"self",
",",
"first_block_type",
":",
"LineType",
",",
"second_block_type",
":",
"LineType",
",",
"error_message",
":",
"str",
",",
")",
"->",
"typing",
".",
"Generator",
"[",
"AAAError",
",",
"None",
",",
"None",
"]",
":",
"numbered_lines",
"=",
"list",
"(",
"enumerate",
"(",
"self",
")",
")",
"first_block_lines",
"=",
"filter",
"(",
"lambda",
"l",
":",
"l",
"[",
"1",
"]",
"is",
"first_block_type",
",",
"numbered_lines",
")",
"try",
":",
"first_block_lineno",
"=",
"list",
"(",
"first_block_lines",
")",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
"except",
"IndexError",
":",
"# First block has no lines",
"return",
"second_block_lines",
"=",
"filter",
"(",
"lambda",
"l",
":",
"l",
"[",
"1",
"]",
"is",
"second_block_type",
",",
"numbered_lines",
")",
"try",
":",
"second_block_lineno",
"=",
"next",
"(",
"second_block_lines",
")",
"[",
"0",
"]",
"except",
"StopIteration",
":",
"# Second block has no lines",
"return",
"blank_lines",
"=",
"[",
"bl",
"for",
"bl",
"in",
"numbered_lines",
"[",
"first_block_lineno",
"+",
"1",
":",
"second_block_lineno",
"]",
"if",
"bl",
"[",
"1",
"]",
"is",
"LineType",
".",
"blank_line",
"]",
"if",
"not",
"blank_lines",
":",
"# Point at line above second block",
"yield",
"AAAError",
"(",
"line_number",
"=",
"self",
".",
"fn_offset",
"+",
"second_block_lineno",
"-",
"1",
",",
"offset",
"=",
"0",
",",
"text",
"=",
"error_message",
".",
"format",
"(",
"'none'",
")",
",",
")",
"return",
"if",
"len",
"(",
"blank_lines",
")",
">",
"1",
":",
"# Too many blank lines - point at the first extra one, the 2nd",
"yield",
"AAAError",
"(",
"line_number",
"=",
"self",
".",
"fn_offset",
"+",
"blank_lines",
"[",
"1",
"]",
"[",
"0",
"]",
",",
"offset",
"=",
"0",
",",
"text",
"=",
"error_message",
".",
"format",
"(",
"len",
"(",
"blank_lines",
")",
")",
",",
")"
] | 34.285714
| 21.22449
|
def _add_index_server(self):
"""Adds index-server to 'distutil's 'index-servers' param."""
index_servers = '\n\t'.join(self.servers.keys())
self.conf.set('distutils', 'index-servers', index_servers)
|
[
"def",
"_add_index_server",
"(",
"self",
")",
":",
"index_servers",
"=",
"'\\n\\t'",
".",
"join",
"(",
"self",
".",
"servers",
".",
"keys",
"(",
")",
")",
"self",
".",
"conf",
".",
"set",
"(",
"'distutils'",
",",
"'index-servers'",
",",
"index_servers",
")"
] | 54.75
| 13.5
|
def cli(config, server, api_key, all, credentials, project):
    """Create the cli command line.

    Resolves configuration in order of precedence:
    1. values read from ``~/.pybossa.cfg`` (section named by ``credentials``),
    2. explicit ``server`` / ``api_key`` / ``all`` command-line options.

    Then loads and schema-validates the project JSON file and wires the
    resulting endpoint/API key into the shared ``pbclient`` instance.

    Aborts (via ``click.Abort``) on malformed JSON or on a project file
    whose fields have the wrong types.
    """
    # Check first for the pybossa.rc file to configure server and api-key
    home = expanduser("~")
    if os.path.isfile(os.path.join(home, '.pybossa.cfg')):
        config.parser.read(os.path.join(home, '.pybossa.cfg'))
        config.server = config.parser.get(credentials,'server')
        config.api_key = config.parser.get(credentials, 'apikey')
        try:
            config.all = config.parser.get(credentials, 'all')
        except ConfigParser.NoOptionError:
            # 'all' is optional in the config file
            config.all = None
    # Explicit CLI options override anything read from the config file.
    if server:
        config.server = server
    if api_key:
        config.api_key = api_key
    if all:
        config.all = all
    try:
        config.project = json.loads(project.read())
    except JSONDecodeError as e:
        click.secho("Error: invalid JSON format in project.json:", fg='red')
        if e.msg == 'Expecting value':
            e.msg += " (if string enclose it with double quotes)"
        click.echo("%s\n%s: line %s column %s" % (e.doc, e.msg, e.lineno, e.colno))
        raise click.Abort()
    try:
        # Only the types of these three fields are validated; extra keys
        # and missing keys are accepted by this schema.
        project_schema = {
            "type": "object",
            "properties": {
                "name": {"type": "string"},
                "short_name": {"type": "string"},
                "description": {"type": "string"}
            }
        }
        jsonschema.validate(config.project, project_schema)
    except jsonschema.exceptions.ValidationError as e:
        click.secho("Error: invalid type in project.json", fg='red')
        click.secho("'%s': %s" % (e.path[0], e.message), fg='yellow')
        click.echo("'%s' must be a %s" % (e.path[0], e.validator_value))
        raise click.Abort()
    config.pbclient = pbclient
    config.pbclient.set('endpoint', config.server)
    config.pbclient.set('api_key', config.api_key)
|
[
"def",
"cli",
"(",
"config",
",",
"server",
",",
"api_key",
",",
"all",
",",
"credentials",
",",
"project",
")",
":",
"# Check first for the pybossa.rc file to configure server and api-key",
"home",
"=",
"expanduser",
"(",
"\"~\"",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"home",
",",
"'.pybossa.cfg'",
")",
")",
":",
"config",
".",
"parser",
".",
"read",
"(",
"os",
".",
"path",
".",
"join",
"(",
"home",
",",
"'.pybossa.cfg'",
")",
")",
"config",
".",
"server",
"=",
"config",
".",
"parser",
".",
"get",
"(",
"credentials",
",",
"'server'",
")",
"config",
".",
"api_key",
"=",
"config",
".",
"parser",
".",
"get",
"(",
"credentials",
",",
"'apikey'",
")",
"try",
":",
"config",
".",
"all",
"=",
"config",
".",
"parser",
".",
"get",
"(",
"credentials",
",",
"'all'",
")",
"except",
"ConfigParser",
".",
"NoOptionError",
":",
"config",
".",
"all",
"=",
"None",
"if",
"server",
":",
"config",
".",
"server",
"=",
"server",
"if",
"api_key",
":",
"config",
".",
"api_key",
"=",
"api_key",
"if",
"all",
":",
"config",
".",
"all",
"=",
"all",
"try",
":",
"config",
".",
"project",
"=",
"json",
".",
"loads",
"(",
"project",
".",
"read",
"(",
")",
")",
"except",
"JSONDecodeError",
"as",
"e",
":",
"click",
".",
"secho",
"(",
"\"Error: invalid JSON format in project.json:\"",
",",
"fg",
"=",
"'red'",
")",
"if",
"e",
".",
"msg",
"==",
"'Expecting value'",
":",
"e",
".",
"msg",
"+=",
"\" (if string enclose it with double quotes)\"",
"click",
".",
"echo",
"(",
"\"%s\\n%s: line %s column %s\"",
"%",
"(",
"e",
".",
"doc",
",",
"e",
".",
"msg",
",",
"e",
".",
"lineno",
",",
"e",
".",
"colno",
")",
")",
"raise",
"click",
".",
"Abort",
"(",
")",
"try",
":",
"project_schema",
"=",
"{",
"\"type\"",
":",
"\"object\"",
",",
"\"properties\"",
":",
"{",
"\"name\"",
":",
"{",
"\"type\"",
":",
"\"string\"",
"}",
",",
"\"short_name\"",
":",
"{",
"\"type\"",
":",
"\"string\"",
"}",
",",
"\"description\"",
":",
"{",
"\"type\"",
":",
"\"string\"",
"}",
"}",
"}",
"jsonschema",
".",
"validate",
"(",
"config",
".",
"project",
",",
"project_schema",
")",
"except",
"jsonschema",
".",
"exceptions",
".",
"ValidationError",
"as",
"e",
":",
"click",
".",
"secho",
"(",
"\"Error: invalid type in project.json\"",
",",
"fg",
"=",
"'red'",
")",
"click",
".",
"secho",
"(",
"\"'%s': %s\"",
"%",
"(",
"e",
".",
"path",
"[",
"0",
"]",
",",
"e",
".",
"message",
")",
",",
"fg",
"=",
"'yellow'",
")",
"click",
".",
"echo",
"(",
"\"'%s' must be a %s\"",
"%",
"(",
"e",
".",
"path",
"[",
"0",
"]",
",",
"e",
".",
"validator_value",
")",
")",
"raise",
"click",
".",
"Abort",
"(",
")",
"config",
".",
"pbclient",
"=",
"pbclient",
"config",
".",
"pbclient",
".",
"set",
"(",
"'endpoint'",
",",
"config",
".",
"server",
")",
"config",
".",
"pbclient",
".",
"set",
"(",
"'api_key'",
",",
"config",
".",
"api_key",
")"
] | 40.622222
| 19.022222
|
def sequence(self):
    """Return the volume group's sequence number.

    LVM bumps this number every time the volume group is modified,
    so it can be used to detect changes between reads.
    """
    self.open()
    seqno = lvm_vg_get_seqno(self.handle)
    self.close()
    return seqno
|
[
"def",
"sequence",
"(",
"self",
")",
":",
"self",
".",
"open",
"(",
")",
"seq",
"=",
"lvm_vg_get_seqno",
"(",
"self",
".",
"handle",
")",
"self",
".",
"close",
"(",
")",
"return",
"seq"
] | 28.777778
| 13.888889
|
def _raw_parse(self):
    """Parse the source to find the interesting facts about its lines.

    Updates several member fields as a side effect:

    - ``self.excluded``: lines matching the exclusion regex, plus every
      line of any suite whose header line was excluded.
    - ``self.classdefs``: lines containing the ``class`` keyword.
    - ``self.docstrings``: line ranges covered by docstrings.
    - ``self.multiline``: maps each line of a multi-line statement to its
      (first, last) line-number range.
    - ``self.statement_starts``: first lines of executable statements,
      computed by the byte parser (only when the source is non-empty).
    """
    # Find lines which match an exclusion pattern.
    if self.exclude:
        self.excluded = self.lines_matching(self.exclude)
    # Tokenize, to find excluded suites, to find docstrings, and to find
    # multi-line statements.
    indent = 0
    exclude_indent = 0
    excluding = False
    prev_toktype = token.INDENT
    first_line = None
    empty = True
    tokgen = generate_tokens(self.text)
    for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen:
        if self.show_tokens:                # pragma: not covered
            print("%10s %5s %-20r %r" % (
                tokenize.tok_name.get(toktype, toktype),
                nice_pair((slineno, elineno)), ttext, ltext
                ))
        if toktype == token.INDENT:
            indent += 1
        elif toktype == token.DEDENT:
            indent -= 1
        elif toktype == token.NAME and ttext == 'class':
            # Class definitions look like branches in the byte code, so
            # we need to exclude them.  The simplest way is to note the
            # lines with the 'class' keyword.
            self.classdefs.add(slineno)
        elif toktype == token.OP and ttext == ':':
            if not excluding and elineno in self.excluded:
                # Start excluding a suite.  We trigger off of the colon
                # token so that the #pragma comment will be recognized on
                # the same line as the colon.
                exclude_indent = indent
                excluding = True
        elif toktype == token.STRING and prev_toktype == token.INDENT:
            # Strings that are first on an indented line are docstrings.
            # (a trick from trace.py in the stdlib.) This works for
            # 99.9999% of cases.  For the rest (!) see:
            # http://stackoverflow.com/questions/1769332/x/1769794#1769794
            self.docstrings.update(range(slineno, elineno+1))
        elif toktype == token.NEWLINE:
            if first_line is not None and elineno != first_line:
                # We're at the end of a line, and we've ended on a
                # different line than the first line of the statement,
                # so record a multi-line range.
                rng = (first_line, elineno)
                for l in range(first_line, elineno+1):
                    self.multiline[l] = rng
            first_line = None
        if ttext.strip() and toktype != tokenize.COMMENT:
            # A non-whitespace token.
            empty = False
            if first_line is None:
                # The token is not whitespace, and is the first in a
                # statement.
                first_line = slineno
                # Check whether to end an excluded suite.
                if excluding and indent <= exclude_indent:
                    excluding = False
                if excluding:
                    self.excluded.add(elineno)
        prev_toktype = toktype
    # Find the starts of the executable statements.
    if not empty:
        self.statement_starts.update(self.byte_parser._find_statements())
|
[
"def",
"_raw_parse",
"(",
"self",
")",
":",
"# Find lines which match an exclusion pattern.",
"if",
"self",
".",
"exclude",
":",
"self",
".",
"excluded",
"=",
"self",
".",
"lines_matching",
"(",
"self",
".",
"exclude",
")",
"# Tokenize, to find excluded suites, to find docstrings, and to find",
"# multi-line statements.",
"indent",
"=",
"0",
"exclude_indent",
"=",
"0",
"excluding",
"=",
"False",
"prev_toktype",
"=",
"token",
".",
"INDENT",
"first_line",
"=",
"None",
"empty",
"=",
"True",
"tokgen",
"=",
"generate_tokens",
"(",
"self",
".",
"text",
")",
"for",
"toktype",
",",
"ttext",
",",
"(",
"slineno",
",",
"_",
")",
",",
"(",
"elineno",
",",
"_",
")",
",",
"ltext",
"in",
"tokgen",
":",
"if",
"self",
".",
"show_tokens",
":",
"# pragma: not covered",
"print",
"(",
"\"%10s %5s %-20r %r\"",
"%",
"(",
"tokenize",
".",
"tok_name",
".",
"get",
"(",
"toktype",
",",
"toktype",
")",
",",
"nice_pair",
"(",
"(",
"slineno",
",",
"elineno",
")",
")",
",",
"ttext",
",",
"ltext",
")",
")",
"if",
"toktype",
"==",
"token",
".",
"INDENT",
":",
"indent",
"+=",
"1",
"elif",
"toktype",
"==",
"token",
".",
"DEDENT",
":",
"indent",
"-=",
"1",
"elif",
"toktype",
"==",
"token",
".",
"NAME",
"and",
"ttext",
"==",
"'class'",
":",
"# Class definitions look like branches in the byte code, so",
"# we need to exclude them. The simplest way is to note the",
"# lines with the 'class' keyword.",
"self",
".",
"classdefs",
".",
"add",
"(",
"slineno",
")",
"elif",
"toktype",
"==",
"token",
".",
"OP",
"and",
"ttext",
"==",
"':'",
":",
"if",
"not",
"excluding",
"and",
"elineno",
"in",
"self",
".",
"excluded",
":",
"# Start excluding a suite. We trigger off of the colon",
"# token so that the #pragma comment will be recognized on",
"# the same line as the colon.",
"exclude_indent",
"=",
"indent",
"excluding",
"=",
"True",
"elif",
"toktype",
"==",
"token",
".",
"STRING",
"and",
"prev_toktype",
"==",
"token",
".",
"INDENT",
":",
"# Strings that are first on an indented line are docstrings.",
"# (a trick from trace.py in the stdlib.) This works for",
"# 99.9999% of cases. For the rest (!) see:",
"# http://stackoverflow.com/questions/1769332/x/1769794#1769794",
"self",
".",
"docstrings",
".",
"update",
"(",
"range",
"(",
"slineno",
",",
"elineno",
"+",
"1",
")",
")",
"elif",
"toktype",
"==",
"token",
".",
"NEWLINE",
":",
"if",
"first_line",
"is",
"not",
"None",
"and",
"elineno",
"!=",
"first_line",
":",
"# We're at the end of a line, and we've ended on a",
"# different line than the first line of the statement,",
"# so record a multi-line range.",
"rng",
"=",
"(",
"first_line",
",",
"elineno",
")",
"for",
"l",
"in",
"range",
"(",
"first_line",
",",
"elineno",
"+",
"1",
")",
":",
"self",
".",
"multiline",
"[",
"l",
"]",
"=",
"rng",
"first_line",
"=",
"None",
"if",
"ttext",
".",
"strip",
"(",
")",
"and",
"toktype",
"!=",
"tokenize",
".",
"COMMENT",
":",
"# A non-whitespace token.",
"empty",
"=",
"False",
"if",
"first_line",
"is",
"None",
":",
"# The token is not whitespace, and is the first in a",
"# statement.",
"first_line",
"=",
"slineno",
"# Check whether to end an excluded suite.",
"if",
"excluding",
"and",
"indent",
"<=",
"exclude_indent",
":",
"excluding",
"=",
"False",
"if",
"excluding",
":",
"self",
".",
"excluded",
".",
"add",
"(",
"elineno",
")",
"prev_toktype",
"=",
"toktype",
"# Find the starts of the executable statements.",
"if",
"not",
"empty",
":",
"self",
".",
"statement_starts",
".",
"update",
"(",
"self",
".",
"byte_parser",
".",
"_find_statements",
"(",
")",
")"
] | 44.934211
| 18.631579
|
def fft_frequencies(sr=22050, n_fft=2048):
    '''Alternative implementation of `np.fft.fftfreq`

    Parameters
    ----------
    sr : number > 0 [scalar]
        Audio sampling rate

    n_fft : int > 0 [scalar]
        FFT window size

    Returns
    -------
    freqs : np.ndarray [shape=(1 + n_fft/2,)]
        Frequencies `(0, sr/n_fft, 2*sr/n_fft, ..., sr/2)`

    Examples
    --------
    >>> librosa.fft_frequencies(sr=22050, n_fft=16)
    array([     0.   ,   1378.125,   2756.25 ,   4134.375,
             5512.5  ,   6890.625,   8268.75 ,   9646.875,  11025.   ])
    '''
    # One bin per non-negative FFT frequency, evenly spaced up to Nyquist.
    n_bins = int(1 + n_fft // 2)
    nyquist = float(sr) / 2
    return np.linspace(0, nyquist, n_bins, endpoint=True)
|
[
"def",
"fft_frequencies",
"(",
"sr",
"=",
"22050",
",",
"n_fft",
"=",
"2048",
")",
":",
"return",
"np",
".",
"linspace",
"(",
"0",
",",
"float",
"(",
"sr",
")",
"/",
"2",
",",
"int",
"(",
"1",
"+",
"n_fft",
"//",
"2",
")",
",",
"endpoint",
"=",
"True",
")"
] | 23.5
| 23.1
|
def getattr(self, name, context=None, class_context=True):
    """Get an attribute from this class, using Python's attribute semantic.

    This method doesn't look in the :attr:`instance_attrs` dictionary
    since it is done by an :class:`Instance` proxy at inference time.
    It may return an :class:`Uninferable` object if
    the attribute has not been
    found, but a ``__getattr__`` or ``__getattribute__`` method is defined.
    If ``class_context`` is given, then it is considered that the
    attribute is accessed from a class context,
    e.g. ClassDef.attribute, otherwise it might have been accessed
    from an instance as well. If ``class_context`` is used in that
    case, then a lookup in the implicit metaclass and the explicit
    metaclass will be done.

    :param name: The attribute to look for.
    :type name: str

    :param class_context: Whether the attribute can be accessed statically.
    :type class_context: bool

    :returns: The attribute.
    :rtype: list(NodeNG)

    :raises AttributeInferenceError: If the attribute cannot be inferred.
    """
    values = self.locals.get(name, [])
    if name in self.special_attributes and class_context and not values:
        result = [self.special_attributes.lookup(name)]
        if name == "__bases__":
            # Need special treatment, since they are mutable
            # and we need to return all the values.
            result += values
        return result

    # don't modify the list in self.locals!
    values = list(values)
    # Collect the attribute from all ancestor classes too (MRO walk).
    for classnode in self.ancestors(recurs=True, context=context):
        values += classnode.locals.get(name, [])

    if class_context:
        # Static access: also consult the implicit/explicit metaclass.
        values += self._metaclass_lookup_attribute(name, context)

    if not values:
        raise exceptions.AttributeInferenceError(
            target=self, attribute=name, context=context
        )

    # Look for AnnAssigns, which are not attributes in the purest sense.
    # A bare annotation (``x: int`` with no value) declares a name but
    # does not create a real class attribute.
    for value in values:
        if isinstance(value, node_classes.AssignName):
            stmt = value.statement()
            if isinstance(stmt, node_classes.AnnAssign) and stmt.value is None:
                raise exceptions.AttributeInferenceError(
                    target=self, attribute=name, context=context
                )
    return values
|
[
"def",
"getattr",
"(",
"self",
",",
"name",
",",
"context",
"=",
"None",
",",
"class_context",
"=",
"True",
")",
":",
"values",
"=",
"self",
".",
"locals",
".",
"get",
"(",
"name",
",",
"[",
"]",
")",
"if",
"name",
"in",
"self",
".",
"special_attributes",
"and",
"class_context",
"and",
"not",
"values",
":",
"result",
"=",
"[",
"self",
".",
"special_attributes",
".",
"lookup",
"(",
"name",
")",
"]",
"if",
"name",
"==",
"\"__bases__\"",
":",
"# Need special treatment, since they are mutable",
"# and we need to return all the values.",
"result",
"+=",
"values",
"return",
"result",
"# don't modify the list in self.locals!",
"values",
"=",
"list",
"(",
"values",
")",
"for",
"classnode",
"in",
"self",
".",
"ancestors",
"(",
"recurs",
"=",
"True",
",",
"context",
"=",
"context",
")",
":",
"values",
"+=",
"classnode",
".",
"locals",
".",
"get",
"(",
"name",
",",
"[",
"]",
")",
"if",
"class_context",
":",
"values",
"+=",
"self",
".",
"_metaclass_lookup_attribute",
"(",
"name",
",",
"context",
")",
"if",
"not",
"values",
":",
"raise",
"exceptions",
".",
"AttributeInferenceError",
"(",
"target",
"=",
"self",
",",
"attribute",
"=",
"name",
",",
"context",
"=",
"context",
")",
"# Look for AnnAssigns, which are not attributes in the purest sense.",
"for",
"value",
"in",
"values",
":",
"if",
"isinstance",
"(",
"value",
",",
"node_classes",
".",
"AssignName",
")",
":",
"stmt",
"=",
"value",
".",
"statement",
"(",
")",
"if",
"isinstance",
"(",
"stmt",
",",
"node_classes",
".",
"AnnAssign",
")",
"and",
"stmt",
".",
"value",
"is",
"None",
":",
"raise",
"exceptions",
".",
"AttributeInferenceError",
"(",
"target",
"=",
"self",
",",
"attribute",
"=",
"name",
",",
"context",
"=",
"context",
")",
"return",
"values"
] | 42.561404
| 22.315789
|
def leaves(value, prefix=None):
    """
    LIKE items() BUT RECURSIVE, AND ONLY FOR THE LEAVES (non dict) VALUES
    SEE wrap_leaves FOR THE INVERSE

    :param value: THE Mapping TO TRAVERSE
    :param prefix:  OPTIONAL PREFIX GIVEN TO EACH KEY
    :return: Data, WHICH EACH KEY BEING A PATH INTO value TREE
    """
    prefix = coalesce(prefix, "")
    output = []
    for k, v in value.items():
        try:
            if _get(v, CLASS) in data_types:
                # Nested mapping: recurse, extending the dotted path prefix.
                output.extend(leaves(v, prefix=prefix + literal_field(k) + "."))
            else:
                # Leaf value: record (full dotted path, unwrapped value).
                output.append((prefix + literal_field(k), unwrap(v)))
        except Exception as e:
            # Broad catch is deliberate: one bad value should not abort
            # the whole traversal; the error is logged instead.
            get_logger().error("Do not know how to handle", cause=e)
    return output
|
[
"def",
"leaves",
"(",
"value",
",",
"prefix",
"=",
"None",
")",
":",
"prefix",
"=",
"coalesce",
"(",
"prefix",
",",
"\"\"",
")",
"output",
"=",
"[",
"]",
"for",
"k",
",",
"v",
"in",
"value",
".",
"items",
"(",
")",
":",
"try",
":",
"if",
"_get",
"(",
"v",
",",
"CLASS",
")",
"in",
"data_types",
":",
"output",
".",
"extend",
"(",
"leaves",
"(",
"v",
",",
"prefix",
"=",
"prefix",
"+",
"literal_field",
"(",
"k",
")",
"+",
"\".\"",
")",
")",
"else",
":",
"output",
".",
"append",
"(",
"(",
"prefix",
"+",
"literal_field",
"(",
"k",
")",
",",
"unwrap",
"(",
"v",
")",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"get_logger",
"(",
")",
".",
"error",
"(",
"\"Do not know how to handle\"",
",",
"cause",
"=",
"e",
")",
"return",
"output"
] | 36.2
| 17.5
|
async def handle_server_error(self, req, res, mw_generator, error, err_count=0):
    """
    Entry point for handling an exception that occured during
    execution of the middleware chain.

    This loops through the middleware generator - which should
    produce error-handling functions of signature `(req, res, error)`.

    If an error occurs during this method,
    There is currently no way to do error recovery of the middleware
    chain.

    Args:
        req (growler.HTTPRequest): The incoming request, containing
            all information about the client.
        res (growler.HTTPResponse): The outgoing response, containing
            methods for sending headers and data back to the client.
        mw_generator (Generator): The generator producing middleware.
            This has already been 'notified' of the error and should
            now only yield error handling middleware.
        error (Exception): The exception raised during middleware
            processing.
        err_count (int): A value for keeping track of recursive calls
            to the error handler.
            If this value equals self.error_recursion_max_depth, a
            new exception is raised, potentially confusing everyone
            involved.
    """
    if err_count >= self.error_recursion_max_depth:
        # BUG FIX: the original concatenated a str with the Exception
        # object, which raised a TypeError and masked the real error.
        raise Exception("Too many exceptions: " + str(error))

    for mw in mw_generator:
        try:
            # Support both coroutine and plain-function error middleware.
            if inspect.iscoroutinefunction(mw):
                await mw(req, res, error)
            else:
                mw(req, res, error)
        except Exception as new_error:
            # An error-handler itself failed: recurse with a bumped depth
            # counter so a broken handler chain cannot loop forever.
            await self.handle_server_error(
                req,
                res,
                mw_generator,
                new_error,
                err_count + 1,
            )
        finally:
            # Stop as soon as some handler has sent a response.
            if res.has_ended:
                break
    else:
        # Generator exhausted without any handler ending the response.
        self.default_error_handler(req, res, error)
        if not res.has_ended:  # noqa pragma: no cover
            print("Default error handler did not send a response to "
                  "client!", file=sys.stderr)
|
[
"async",
"def",
"handle_server_error",
"(",
"self",
",",
"req",
",",
"res",
",",
"mw_generator",
",",
"error",
",",
"err_count",
"=",
"0",
")",
":",
"if",
"err_count",
">=",
"self",
".",
"error_recursion_max_depth",
":",
"raise",
"Exception",
"(",
"\"Too many exceptions:\"",
"+",
"error",
")",
"for",
"mw",
"in",
"mw_generator",
":",
"try",
":",
"if",
"inspect",
".",
"iscoroutinefunction",
"(",
"mw",
")",
":",
"await",
"mw",
"(",
"req",
",",
"res",
",",
"error",
")",
"else",
":",
"mw",
"(",
"req",
",",
"res",
",",
"error",
")",
"except",
"Exception",
"as",
"new_error",
":",
"await",
"self",
".",
"handle_server_error",
"(",
"req",
",",
"res",
",",
"mw_generator",
",",
"new_error",
",",
"err_count",
"+",
"1",
",",
")",
"finally",
":",
"if",
"res",
".",
"has_ended",
":",
"break",
"else",
":",
"self",
".",
"default_error_handler",
"(",
"req",
",",
"res",
",",
"error",
")",
"if",
"not",
"res",
".",
"has_ended",
":",
"# noqa pragma: no cover",
"print",
"(",
"\"Default error handler did not send a response to \"",
"\"client!\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")"
] | 39.767857
| 20.946429
|
def compile(self, limit=None, params=None):
    """
    Compile expression to whatever execution target, to verify

    Returns
    -------
    compiled : value or list
      query representation or list thereof
    """
    # Import lazily, and under an alias so the client-level compile
    # function does not shadow this method's own name.
    from ibis.client import compile as compile_expr

    return compile_expr(self, limit=limit, params=params)
|
[
"def",
"compile",
"(",
"self",
",",
"limit",
"=",
"None",
",",
"params",
"=",
"None",
")",
":",
"from",
"ibis",
".",
"client",
"import",
"compile",
"return",
"compile",
"(",
"self",
",",
"limit",
"=",
"limit",
",",
"params",
"=",
"params",
")"
] | 27.916667
| 15.916667
|
def get_next_seed(key, seed):
    """Derive the next seed in the sequence as HMAC-SHA256(key, seed).

    :param key: the key to use for the HMAC (bytes)
    :param seed: the seed to permutate (bytes)
    :return: the next seed (32-byte digest)
    """
    digest_maker = hmac.new(key, seed, hashlib.sha256)
    return digest_maker.digest()
|
[
"def",
"get_next_seed",
"(",
"key",
",",
"seed",
")",
":",
"return",
"hmac",
".",
"new",
"(",
"key",
",",
"seed",
",",
"hashlib",
".",
"sha256",
")",
".",
"digest",
"(",
")"
] | 39.666667
| 14.777778
|
def cancel(context, jobs, analysis_id):
    """Cancel all jobs in a run.

    Looks up the analysis, requires it to be in the 'running' state,
    reads the MIP log referenced by its config to collect SLURM job ids,
    then cancels each id with ``scancel`` and marks the analysis as
    'canceled'.  When ``jobs`` is truthy, the job ids are only printed
    (dry-run) and nothing is cancelled.

    Aborts via ``context.abort()`` when the analysis is missing, not
    running, or the MIP log file does not exist.
    """
    analysis_obj = context.obj['store'].analysis(analysis_id)
    if analysis_obj is None:
        click.echo('analysis not found')
        context.abort()
    elif analysis_obj.status != 'running':
        click.echo(f"analysis not running: {analysis_obj.status}")
        context.abort()
    config_path = Path(analysis_obj.config_path)
    with config_path.open() as config_stream:
        config_raw = ruamel.yaml.safe_load(config_stream)
        config_data = parse_config(config_raw)
    log_path = Path(f"{config_data['log_path']}")
    if not log_path.exists():
        click.echo(f"missing MIP log file: {log_path}")
        context.abort()
    with log_path.open() as log_stream:
        all_jobs = job_ids(log_stream)
    if jobs:
        # Dry-run: list the job ids without touching the scheduler.
        for job_id in all_jobs:
            click.echo(job_id)
    else:
        for job_id in all_jobs:
            LOG.debug(f"cancelling job: {job_id}")
            process = subprocess.Popen(['scancel', job_id])
            process.wait()
        analysis_obj.status = 'canceled'
        context.obj['store'].commit()
        click.echo('cancelled analysis successfully!')
|
[
"def",
"cancel",
"(",
"context",
",",
"jobs",
",",
"analysis_id",
")",
":",
"analysis_obj",
"=",
"context",
".",
"obj",
"[",
"'store'",
"]",
".",
"analysis",
"(",
"analysis_id",
")",
"if",
"analysis_obj",
"is",
"None",
":",
"click",
".",
"echo",
"(",
"'analysis not found'",
")",
"context",
".",
"abort",
"(",
")",
"elif",
"analysis_obj",
".",
"status",
"!=",
"'running'",
":",
"click",
".",
"echo",
"(",
"f\"analysis not running: {analysis_obj.status}\"",
")",
"context",
".",
"abort",
"(",
")",
"config_path",
"=",
"Path",
"(",
"analysis_obj",
".",
"config_path",
")",
"with",
"config_path",
".",
"open",
"(",
")",
"as",
"config_stream",
":",
"config_raw",
"=",
"ruamel",
".",
"yaml",
".",
"safe_load",
"(",
"config_stream",
")",
"config_data",
"=",
"parse_config",
"(",
"config_raw",
")",
"log_path",
"=",
"Path",
"(",
"f\"{config_data['log_path']}\"",
")",
"if",
"not",
"log_path",
".",
"exists",
"(",
")",
":",
"click",
".",
"echo",
"(",
"f\"missing MIP log file: {log_path}\"",
")",
"context",
".",
"abort",
"(",
")",
"with",
"log_path",
".",
"open",
"(",
")",
"as",
"log_stream",
":",
"all_jobs",
"=",
"job_ids",
"(",
"log_stream",
")",
"if",
"jobs",
":",
"for",
"job_id",
"in",
"all_jobs",
":",
"click",
".",
"echo",
"(",
"job_id",
")",
"else",
":",
"for",
"job_id",
"in",
"all_jobs",
":",
"LOG",
".",
"debug",
"(",
"f\"cancelling job: {job_id}\"",
")",
"process",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"'scancel'",
",",
"job_id",
"]",
")",
"process",
".",
"wait",
"(",
")",
"analysis_obj",
".",
"status",
"=",
"'canceled'",
"context",
".",
"obj",
"[",
"'store'",
"]",
".",
"commit",
"(",
")",
"click",
".",
"echo",
"(",
"'cancelled analysis successfully!'",
")"
] | 33.171429
| 15.142857
|
def set_iprouting(self, value=None, default=False, disable=False):
    """Configures the state of global ip routing

    EosVersion:
        4.13.7M

    Args:
        value(bool): True if ip routing should be enabled or False if
            ip routing should be disabled
        default (bool): Controls the use of the default keyword
        disable (bool): Controls the use of the no keyword

    Returns:
        bool: True if the commands completed successfully otherwise False
    """
    # An explicit value of False means "turn routing off", which is
    # expressed on EOS as the negated ('no') form of the command.
    if value is False:
        disable = True
    command = self.command_builder('ip routing', value=value,
                                   default=default, disable=disable)
    return self.configure(command)
|
[
"def",
"set_iprouting",
"(",
"self",
",",
"value",
"=",
"None",
",",
"default",
"=",
"False",
",",
"disable",
"=",
"False",
")",
":",
"if",
"value",
"is",
"False",
":",
"disable",
"=",
"True",
"cmd",
"=",
"self",
".",
"command_builder",
"(",
"'ip routing'",
",",
"value",
"=",
"value",
",",
"default",
"=",
"default",
",",
"disable",
"=",
"disable",
")",
"return",
"self",
".",
"configure",
"(",
"cmd",
")"
] | 36.75
| 22.3
|
def start_discovery(add_callback=None, remove_callback=None):
    """
    Start discovering chromecasts on the network.

    This method will start discovering chromecasts on a separate thread. When
    a chromecast is discovered, the callback will be called with the
    discovered chromecast's zeroconf name. This is the dictionary key to find
    the chromecast metadata in listener.services.

    This method returns the CastListener object and the zeroconf ServiceBrowser
    object. The CastListener object will contain information for the discovered
    chromecasts. To stop discovery, call the stop_discovery method with the
    ServiceBrowser object.
    """
    listener = CastListener(add_callback, remove_callback)
    # Falls back to False when the browser could not be started, so
    # callers can distinguish "no browser" from a live ServiceBrowser.
    browser = False
    try:
        browser = zeroconf.ServiceBrowser(
            zeroconf.Zeroconf(),
            "_googlecast._tcp.local.",
            listener)
    except (zeroconf.BadTypeInNameException,
            NotImplementedError,
            OSError,
            socket.error,
            zeroconf.NonUniqueNameException):
        # Discovery is best-effort: swallow known startup failures and
        # return the listener with browser=False.
        pass

    return listener, browser
|
[
"def",
"start_discovery",
"(",
"add_callback",
"=",
"None",
",",
"remove_callback",
"=",
"None",
")",
":",
"listener",
"=",
"CastListener",
"(",
"add_callback",
",",
"remove_callback",
")",
"service_browser",
"=",
"False",
"try",
":",
"service_browser",
"=",
"zeroconf",
".",
"ServiceBrowser",
"(",
"zeroconf",
".",
"Zeroconf",
"(",
")",
",",
"\"_googlecast._tcp.local.\"",
",",
"listener",
")",
"except",
"(",
"zeroconf",
".",
"BadTypeInNameException",
",",
"NotImplementedError",
",",
"OSError",
",",
"socket",
".",
"error",
",",
"zeroconf",
".",
"NonUniqueNameException",
")",
":",
"pass",
"return",
"listener",
",",
"service_browser"
] | 41.642857
| 22.142857
|
def get(self, rule, default=None):
    """Return the existing version of the given rule. If the rule is
    not present in the classifier set, return the default. If no
    default was given, use None. This is useful for eliminating
    duplicate copies of rules.

    Usage:
        unique_rule = model.get(possible_duplicate, possible_duplicate)

    Arguments:
        rule: The ClassifierRule instance which may be a duplicate of
            another already contained in the classifier set.
        default: The value returned if the rule is not a duplicate of
            another already contained in the classifier set.
    Return:
        If the rule is a duplicate of another already contained in the
        classifier set, the existing one is returned. Otherwise, the
        value of default is returned.
    """
    assert isinstance(rule, ClassifierRule)
    by_condition = self._population
    if rule.condition in by_condition:
        by_action = by_condition[rule.condition]
        if rule.action in by_action:
            return by_action[rule.action]
    return default
|
[
"def",
"get",
"(",
"self",
",",
"rule",
",",
"default",
"=",
"None",
")",
":",
"assert",
"isinstance",
"(",
"rule",
",",
"ClassifierRule",
")",
"if",
"(",
"rule",
".",
"condition",
"not",
"in",
"self",
".",
"_population",
"or",
"rule",
".",
"action",
"not",
"in",
"self",
".",
"_population",
"[",
"rule",
".",
"condition",
"]",
")",
":",
"return",
"default",
"return",
"self",
".",
"_population",
"[",
"rule",
".",
"condition",
"]",
"[",
"rule",
".",
"action",
"]"
] | 44.96
| 22.36
|
def __check_command_completion(self, testsemicolon=True):
    """Check for command(s) completion

    This function should be called each time a new argument is
    seen by the parser in order to check a command is complete. As
    not only one command can be ended when receiving a new
    argument (nested commands case), we apply the same work to
    parent commands.

    :param testsemicolon: if True, indicates that the next
    expected token must be a semicolon (for commands that need one)
    :return: True if command is
    considered as complete, False otherwise.
    """
    # An incomplete command cannot close anything yet.
    if not self.__curcommand.iscomplete():
        return True

    ctype = self.__curcommand.get_type()
    if ctype == "action" or \
       (ctype == "control" and
        not self.__curcommand.accept_children):
        # Leaf command (action, or childless control): it ends with a
        # semicolon, so that is the next expected token when requested.
        if testsemicolon:
            self.__set_expected("semicolon")
        return True

    # Walk up the parent chain, closing every command completed by the
    # child that just finished.
    while self.__curcommand.parent:
        cmd = self.__curcommand
        self.__curcommand = self.__curcommand.parent
        if self.__curcommand.get_type() in ["control", "test"]:
            if self.__curcommand.iscomplete():
                if self.__curcommand.get_type() == "control":
                    break
                continue
            # Feed the finished child to the parent as a test argument
            # (add=False: only validate, the child is already attached).
            if not self.__curcommand.check_next_arg("test", cmd, add=False):
                return False
            if not self.__curcommand.iscomplete():
                if self.__curcommand.variable_args_nb:
                    self.__set_expected("comma", "right_parenthesis")
                break
    return True
|
[
"def",
"__check_command_completion",
"(",
"self",
",",
"testsemicolon",
"=",
"True",
")",
":",
"if",
"not",
"self",
".",
"__curcommand",
".",
"iscomplete",
"(",
")",
":",
"return",
"True",
"ctype",
"=",
"self",
".",
"__curcommand",
".",
"get_type",
"(",
")",
"if",
"ctype",
"==",
"\"action\"",
"or",
"(",
"ctype",
"==",
"\"control\"",
"and",
"not",
"self",
".",
"__curcommand",
".",
"accept_children",
")",
":",
"if",
"testsemicolon",
":",
"self",
".",
"__set_expected",
"(",
"\"semicolon\"",
")",
"return",
"True",
"while",
"self",
".",
"__curcommand",
".",
"parent",
":",
"cmd",
"=",
"self",
".",
"__curcommand",
"self",
".",
"__curcommand",
"=",
"self",
".",
"__curcommand",
".",
"parent",
"if",
"self",
".",
"__curcommand",
".",
"get_type",
"(",
")",
"in",
"[",
"\"control\"",
",",
"\"test\"",
"]",
":",
"if",
"self",
".",
"__curcommand",
".",
"iscomplete",
"(",
")",
":",
"if",
"self",
".",
"__curcommand",
".",
"get_type",
"(",
")",
"==",
"\"control\"",
":",
"break",
"continue",
"if",
"not",
"self",
".",
"__curcommand",
".",
"check_next_arg",
"(",
"\"test\"",
",",
"cmd",
",",
"add",
"=",
"False",
")",
":",
"return",
"False",
"if",
"not",
"self",
".",
"__curcommand",
".",
"iscomplete",
"(",
")",
":",
"if",
"self",
".",
"__curcommand",
".",
"variable_args_nb",
":",
"self",
".",
"__set_expected",
"(",
"\"comma\"",
",",
"\"right_parenthesis\"",
")",
"break",
"return",
"True"
] | 40.609756
| 18.195122
|
def List(validator):
    """
    Creates a validator that runs the given validator on every item in a list
    or other collection. The validator can mutate the values.

    Any raised errors will be collected into a single ``Invalid`` error. Their
    paths will be replaced with the index of the item. Will raise an error if
    the input value is not iterable.
    """
    @wraps(List)
    def built(value):
        if not hasattr(value, '__iter__'):
            raise Error("Must be a list")
        invalid = Invalid()
        for i, item in enumerate(value):
            try:
                # Item validators may transform values; write the result
                # back in place (requires an indexable, mutable value).
                value[i] = validator(item)
            except Invalid as e:
                # Invalid is handled before Error: it is a collection of
                # errors, each of which gets the item index prepended.
                # NOTE(review): presumably Invalid subclasses Error, so
                # this order matters — confirm in the exception hierarchy.
                for error in e:
                    error.path.insert(0, i)
                    invalid.append(error)
            except Error as e:
                e.path.insert(0, i)
                invalid.append(e)
        if len(invalid):
            raise invalid
        return value
    return built
|
[
"def",
"List",
"(",
"validator",
")",
":",
"@",
"wraps",
"(",
"List",
")",
"def",
"built",
"(",
"value",
")",
":",
"if",
"not",
"hasattr",
"(",
"value",
",",
"'__iter__'",
")",
":",
"raise",
"Error",
"(",
"\"Must be a list\"",
")",
"invalid",
"=",
"Invalid",
"(",
")",
"for",
"i",
",",
"item",
"in",
"enumerate",
"(",
"value",
")",
":",
"try",
":",
"value",
"[",
"i",
"]",
"=",
"validator",
"(",
"item",
")",
"except",
"Invalid",
"as",
"e",
":",
"for",
"error",
"in",
"e",
":",
"error",
".",
"path",
".",
"insert",
"(",
"0",
",",
"i",
")",
"invalid",
".",
"append",
"(",
"error",
")",
"except",
"Error",
"as",
"e",
":",
"e",
".",
"path",
".",
"insert",
"(",
"0",
",",
"i",
")",
"invalid",
".",
"append",
"(",
"e",
")",
"if",
"len",
"(",
"invalid",
")",
":",
"raise",
"invalid",
"return",
"value",
"return",
"built"
] | 31.266667
| 16
|
def release(major=False, minor=False, patch=True, pypi_index=None):
    """Overall process flow for performing a release.

    Computes the next version from the bump flags, runs the
    release-branch workflow (start, prepare, finish), then publishes.

    :param major: bump the major version component
    :param minor: bump the minor version component
    :param patch: bump the patch version component (the default)
    :param pypi_index: optional package index to publish to
    """
    relver = next_release(major, minor, patch)
    start_rel_branch(relver)
    prepare_release(relver)
    finish_rel_branch(relver)
    publish(pypi_index)
|
[
"def",
"release",
"(",
"major",
"=",
"False",
",",
"minor",
"=",
"False",
",",
"patch",
"=",
"True",
",",
"pypi_index",
"=",
"None",
")",
":",
"relver",
"=",
"next_release",
"(",
"major",
",",
"minor",
",",
"patch",
")",
"start_rel_branch",
"(",
"relver",
")",
"prepare_release",
"(",
"relver",
")",
"finish_rel_branch",
"(",
"relver",
")",
"publish",
"(",
"pypi_index",
")"
] | 39.285714
| 12.285714
|
def assert_is_substring(substring, subject, message=None, extra=None):
    """Raise an AssertionError unless *substring* occurs within *subject*.

    Both arguments must be non-None.  The failure message is produced by
    ``_assert_fail_message`` and, being the assert's second operand, is only
    evaluated when the check actually fails.
    """
    found = (
        subject is not None
        and substring is not None
        and subject.find(substring) != -1
    )
    assert found, _assert_fail_message(
        message, substring, subject, "is not in", extra)
|
[
"def",
"assert_is_substring",
"(",
"substring",
",",
"subject",
",",
"message",
"=",
"None",
",",
"extra",
"=",
"None",
")",
":",
"assert",
"(",
"(",
"subject",
"is",
"not",
"None",
")",
"and",
"(",
"substring",
"is",
"not",
"None",
")",
"and",
"(",
"subject",
".",
"find",
"(",
"substring",
")",
"!=",
"-",
"1",
")",
")",
",",
"_assert_fail_message",
"(",
"message",
",",
"substring",
",",
"subject",
",",
"\"is not in\"",
",",
"extra",
")"
] | 49
| 16.142857
|
def as_struct_array(*columns):
    """Pack a sequence of columns into a recarray.

    Parameters
    ----------
    columns : sequence of key objects
        Columns to pack; all must share the same length along axis 0.

    Returns
    -------
    data : recarray
        Struct array whose field ``'f<i>'`` holds the i-th input column
        (trailing dimensions of each column become the field's subshape).
    """
    arrays = [np.asarray(col) for col in columns]
    length = len(arrays[0])
    field_names = ['f' + str(idx) for idx in range(len(arrays))]
    descr = [
        (field_names[idx], arr.dtype, arr.shape[1:])
        for idx, arr in enumerate(arrays)
    ]
    packed = np.empty(length, descr)
    for name, arr in zip(field_names, arrays):
        packed[name] = arr
    return packed
|
[
"def",
"as_struct_array",
"(",
"*",
"columns",
")",
":",
"columns",
"=",
"[",
"np",
".",
"asarray",
"(",
"c",
")",
"for",
"c",
"in",
"columns",
"]",
"rows",
"=",
"len",
"(",
"columns",
"[",
"0",
"]",
")",
"names",
"=",
"[",
"'f'",
"+",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"columns",
")",
")",
"]",
"dtype",
"=",
"[",
"(",
"names",
"[",
"i",
"]",
",",
"c",
".",
"dtype",
",",
"c",
".",
"shape",
"[",
"1",
":",
"]",
")",
"for",
"i",
",",
"c",
"in",
"enumerate",
"(",
"columns",
")",
"]",
"data",
"=",
"np",
".",
"empty",
"(",
"rows",
",",
"dtype",
")",
"for",
"i",
",",
"c",
"in",
"enumerate",
"(",
"columns",
")",
":",
"data",
"[",
"names",
"[",
"i",
"]",
"]",
"=",
"c",
"return",
"data"
] | 26.857143
| 19.428571
|
def make_display_lines(self):
    """Build the list of lines to render on the terminal.

    Note: concurrent terminal output from multiple threads can shift the
    start column, so every line is terminated with '\r' to re-anchor it.
    """
    # Current terminal size (rows, columns).
    self.screen_height, self.screen_width = self.linesnum()
    lines = ['\r', self._title + '\r']
    first = self.topline
    last = self.topline + self.screen_height - 3
    for offset, text in enumerate(self._lines[first:last]):
        # Arrow marker on the cursor line, highlighted.
        if offset == self.markline:
            lead = self._prefix_selected
            text = color_func(self.c['LINE']['highlight'])(text)
        else:
            lead = self._prefix_deselected
        # Suffix marker on the currently selected entry.
        if offset + self.topline == self.displayline:
            tail = self._suffix_selected
        else:
            tail = self._suffix_deselected
        rendered = color_func(self.c['LINE']['line'])(
            '%s %s %s' % (lead, text, tail))
        lines.append(rendered + '\r')
    # Pad with blank lines when there are fewer entries than screen rows.
    for _ in range(self.screen_height - 3 - len(self._lines)):
        lines.append('\r')
    self.display_lines = lines
|
[
"def",
"make_display_lines",
"(",
"self",
")",
":",
"self",
".",
"screen_height",
",",
"self",
".",
"screen_width",
"=",
"self",
".",
"linesnum",
"(",
")",
"# 屏幕显示行数",
"display_lines",
"=",
"[",
"'\\r'",
"]",
"display_lines",
".",
"append",
"(",
"self",
".",
"_title",
"+",
"'\\r'",
")",
"top",
"=",
"self",
".",
"topline",
"bottom",
"=",
"self",
".",
"topline",
"+",
"self",
".",
"screen_height",
"-",
"3",
"for",
"index",
",",
"i",
"in",
"enumerate",
"(",
"self",
".",
"_lines",
"[",
"top",
":",
"bottom",
"]",
")",
":",
"# 箭头指向",
"if",
"index",
"==",
"self",
".",
"markline",
":",
"prefix",
"=",
"self",
".",
"_prefix_selected",
"i",
"=",
"color_func",
"(",
"self",
".",
"c",
"[",
"'LINE'",
"]",
"[",
"'highlight'",
"]",
")",
"(",
"i",
")",
"else",
":",
"prefix",
"=",
"self",
".",
"_prefix_deselected",
"# 选择频道",
"if",
"index",
"+",
"self",
".",
"topline",
"==",
"self",
".",
"displayline",
":",
"suffix",
"=",
"self",
".",
"_suffix_selected",
"else",
":",
"suffix",
"=",
"self",
".",
"_suffix_deselected",
"line",
"=",
"'%s %s %s'",
"%",
"(",
"prefix",
",",
"i",
",",
"suffix",
")",
"line",
"=",
"color_func",
"(",
"self",
".",
"c",
"[",
"'LINE'",
"]",
"[",
"'line'",
"]",
")",
"(",
"line",
")",
"display_lines",
".",
"append",
"(",
"line",
"+",
"'\\r'",
")",
"return_num",
"=",
"self",
".",
"screen_height",
"-",
"3",
"-",
"len",
"(",
"self",
".",
"_lines",
")",
"for",
"_",
"in",
"range",
"(",
"return_num",
")",
":",
"display_lines",
".",
"append",
"(",
"'\\r'",
")",
"self",
".",
"display_lines",
"=",
"display_lines"
] | 32.742857
| 17.314286
|
def new_tag(self, label, cfrom=-1, cto=-1, tagtype='', **kwargs):
    '''Create a sentence-level tag and attach it to this sentence.

    All arguments are forwarded verbatim to the ``Tag`` constructor; the
    resulting tag is registered via ``add_tag`` and its result returned.
    '''
    return self.add_tag(Tag(label, cfrom, cto, tagtype=tagtype, **kwargs))
|
[
"def",
"new_tag",
"(",
"self",
",",
"label",
",",
"cfrom",
"=",
"-",
"1",
",",
"cto",
"=",
"-",
"1",
",",
"tagtype",
"=",
"''",
",",
"*",
"*",
"kwargs",
")",
":",
"tag_obj",
"=",
"Tag",
"(",
"label",
",",
"cfrom",
",",
"cto",
",",
"tagtype",
"=",
"tagtype",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"add_tag",
"(",
"tag_obj",
")"
] | 52.75
| 14.75
|
def _resolve_non_literal_route(self, method, path):
"""Resolve a request to a wildcard or regex route handler.
Arguments:
method (str): HTTP method name, e.g. GET, POST, etc.
path (str): Request path
Returns:
tuple or None: A tuple of three items:
1. Route handler (callable)
2. Positional arguments (list)
3. Keyword arguments (dict)
``None`` if no route matches the request.
"""
for route_dict in (self._wildcard, self._regex):
if method in route_dict:
for route in reversed(route_dict[method]):
callback_data = route.match(path)
if callback_data is not None:
return callback_data
return None
|
[
"def",
"_resolve_non_literal_route",
"(",
"self",
",",
"method",
",",
"path",
")",
":",
"for",
"route_dict",
"in",
"(",
"self",
".",
"_wildcard",
",",
"self",
".",
"_regex",
")",
":",
"if",
"method",
"in",
"route_dict",
":",
"for",
"route",
"in",
"reversed",
"(",
"route_dict",
"[",
"method",
"]",
")",
":",
"callback_data",
"=",
"route",
".",
"match",
"(",
"path",
")",
"if",
"callback_data",
"is",
"not",
"None",
":",
"return",
"callback_data",
"return",
"None"
] | 34.434783
| 15.347826
|
def get_icon_by_extension(fname, scale_factor):
    """Return the icon depending on the file extension.

    Directories get a fixed icon; files are resolved by extension tables
    first (OFFICE_FILES, LANGUAGE_ICONS), then by the guessed mime type,
    falling back to a generic file icon.
    """
    # Merged lookup: mime subtype ("application/<name>") -> icon name.
    application_icons = {}
    application_icons.update(BIN_FILES)
    application_icons.update(DOCUMENT_FILES)
    if osp.isdir(fname):
        return icon('DirOpenIcon', scale_factor)
    else:
        basename = osp.basename(fname)
        __, extension = osp.splitext(basename.lower())
        mime_type, __ = mime.guess_type(basename)
        # Generic fallback used when no rule below matches.
        icon_by_extension = icon('FileIcon', scale_factor)
        if extension in OFFICE_FILES:
            icon_by_extension = icon(OFFICE_FILES[extension], scale_factor)
        # NOTE(review): this `if` is not chained (`elif`) with the one
        # above, so an extension that is in OFFICE_FILES but not in
        # LANGUAGE_ICONS falls into the `else` branch below, which can
        # overwrite the office icon with a mime-type-based one — confirm
        # this is intended.
        if extension in LANGUAGE_ICONS:
            icon_by_extension = icon(LANGUAGE_ICONS[extension], scale_factor)
        else:
            if extension == '.ipynb':
                # Notebook icon depends on the current interface theme.
                if is_dark_interface():
                    icon_by_extension = QIcon(
                        get_image_path('notebook_dark.svg'))
                else:
                    icon_by_extension = QIcon(
                        get_image_path('notebook_light.svg'))
            elif mime_type is not None:
                try:
                    # Fix for issue 5080. Even though
                    # mimetypes.guess_type documentation states that
                    # the return value will be None or a tuple of
                    # the form type/subtype, in the Windows registry,
                    # .sql has a mimetype of text\plain
                    # instead of text/plain therefore mimetypes is
                    # returning it incorrectly.
                    file_type, bin_name = mime_type.split('/')
                except ValueError:
                    file_type = 'text'
                if file_type == 'text':
                    icon_by_extension = icon('TextFileIcon', scale_factor)
                elif file_type == 'audio':
                    icon_by_extension = icon('AudioFileIcon', scale_factor)
                elif file_type == 'video':
                    icon_by_extension = icon('VideoFileIcon', scale_factor)
                elif file_type == 'image':
                    icon_by_extension = icon('ImageFileIcon', scale_factor)
                elif file_type == 'application':
                    # bin_name is bound only when split() succeeded; on the
                    # ValueError path file_type is 'text', so this branch is
                    # never reached with bin_name unbound.
                    if bin_name in application_icons:
                        icon_by_extension = icon(
                            application_icons[bin_name], scale_factor)
        return icon_by_extension
|
[
"def",
"get_icon_by_extension",
"(",
"fname",
",",
"scale_factor",
")",
":",
"application_icons",
"=",
"{",
"}",
"application_icons",
".",
"update",
"(",
"BIN_FILES",
")",
"application_icons",
".",
"update",
"(",
"DOCUMENT_FILES",
")",
"if",
"osp",
".",
"isdir",
"(",
"fname",
")",
":",
"return",
"icon",
"(",
"'DirOpenIcon'",
",",
"scale_factor",
")",
"else",
":",
"basename",
"=",
"osp",
".",
"basename",
"(",
"fname",
")",
"__",
",",
"extension",
"=",
"osp",
".",
"splitext",
"(",
"basename",
".",
"lower",
"(",
")",
")",
"mime_type",
",",
"__",
"=",
"mime",
".",
"guess_type",
"(",
"basename",
")",
"icon_by_extension",
"=",
"icon",
"(",
"'FileIcon'",
",",
"scale_factor",
")",
"if",
"extension",
"in",
"OFFICE_FILES",
":",
"icon_by_extension",
"=",
"icon",
"(",
"OFFICE_FILES",
"[",
"extension",
"]",
",",
"scale_factor",
")",
"if",
"extension",
"in",
"LANGUAGE_ICONS",
":",
"icon_by_extension",
"=",
"icon",
"(",
"LANGUAGE_ICONS",
"[",
"extension",
"]",
",",
"scale_factor",
")",
"else",
":",
"if",
"extension",
"==",
"'.ipynb'",
":",
"if",
"is_dark_interface",
"(",
")",
":",
"icon_by_extension",
"=",
"QIcon",
"(",
"get_image_path",
"(",
"'notebook_dark.svg'",
")",
")",
"else",
":",
"icon_by_extension",
"=",
"QIcon",
"(",
"get_image_path",
"(",
"'notebook_light.svg'",
")",
")",
"elif",
"mime_type",
"is",
"not",
"None",
":",
"try",
":",
"# Fix for issue 5080. Even though",
"# mimetypes.guess_type documentation states that",
"# the return value will be None or a tuple of",
"# the form type/subtype, in the Windows registry,",
"# .sql has a mimetype of text\\plain",
"# instead of text/plain therefore mimetypes is",
"# returning it incorrectly.",
"file_type",
",",
"bin_name",
"=",
"mime_type",
".",
"split",
"(",
"'/'",
")",
"except",
"ValueError",
":",
"file_type",
"=",
"'text'",
"if",
"file_type",
"==",
"'text'",
":",
"icon_by_extension",
"=",
"icon",
"(",
"'TextFileIcon'",
",",
"scale_factor",
")",
"elif",
"file_type",
"==",
"'audio'",
":",
"icon_by_extension",
"=",
"icon",
"(",
"'AudioFileIcon'",
",",
"scale_factor",
")",
"elif",
"file_type",
"==",
"'video'",
":",
"icon_by_extension",
"=",
"icon",
"(",
"'VideoFileIcon'",
",",
"scale_factor",
")",
"elif",
"file_type",
"==",
"'image'",
":",
"icon_by_extension",
"=",
"icon",
"(",
"'ImageFileIcon'",
",",
"scale_factor",
")",
"elif",
"file_type",
"==",
"'application'",
":",
"if",
"bin_name",
"in",
"application_icons",
":",
"icon_by_extension",
"=",
"icon",
"(",
"application_icons",
"[",
"bin_name",
"]",
",",
"scale_factor",
")",
"return",
"icon_by_extension"
] | 46.509804
| 15.588235
|
def get_requires_for_build_sdist(config_settings):
    """Invoke the optional ``get_requires_for_build_sdist`` backend hook.

    Returns [] if the backend does not define the hook.
    """
    backend = _build_backend()
    hook = getattr(backend, 'get_requires_for_build_sdist', None)
    if hook is None:
        return []
    return hook(config_settings)
|
[
"def",
"get_requires_for_build_sdist",
"(",
"config_settings",
")",
":",
"backend",
"=",
"_build_backend",
"(",
")",
"try",
":",
"hook",
"=",
"backend",
".",
"get_requires_for_build_sdist",
"except",
"AttributeError",
":",
"return",
"[",
"]",
"else",
":",
"return",
"hook",
"(",
"config_settings",
")"
] | 28
| 14.75
|
def find_files_cmd(data_path, minutes, start_time, end_time):
    """Build the find command that locates log files by modification time.

    Exactly one criterion is used, checked in this order: ``minutes``,
    then ``start_time`` (optionally bounded by ``end_time``).

    :param data_path: the path to the Kafka data directory
    :type data_path: str
    :param minutes: check the files modified in the last N minutes
    :type minutes: int
    :param start_time: check the files modified after start_time
    :type start_time: str
    :param end_time: check the files modified before end_time
    :type end_time: str
    :returns: the find command (implicitly ``None`` if no criterion given)
    :rtype: str
    """
    if minutes:
        return FIND_MINUTES_COMMAND.format(
            data_path=data_path,
            minutes=minutes,
        )
    if start_time and end_time:
        return FIND_RANGE_COMMAND.format(
            data_path=data_path,
            start_time=start_time,
            end_time=end_time,
        )
    if start_time:
        return FIND_START_COMMAND.format(
            data_path=data_path,
            start_time=start_time,
        )
|
[
"def",
"find_files_cmd",
"(",
"data_path",
",",
"minutes",
",",
"start_time",
",",
"end_time",
")",
":",
"if",
"minutes",
":",
"return",
"FIND_MINUTES_COMMAND",
".",
"format",
"(",
"data_path",
"=",
"data_path",
",",
"minutes",
"=",
"minutes",
",",
")",
"if",
"start_time",
":",
"if",
"end_time",
":",
"return",
"FIND_RANGE_COMMAND",
".",
"format",
"(",
"data_path",
"=",
"data_path",
",",
"start_time",
"=",
"start_time",
",",
"end_time",
"=",
"end_time",
",",
")",
"else",
":",
"return",
"FIND_START_COMMAND",
".",
"format",
"(",
"data_path",
"=",
"data_path",
",",
"start_time",
"=",
"start_time",
",",
")"
] | 32.096774
| 15.516129
|
def assign(self, link_type, product, linked_product, data=None,
           identifierType=None):
    """
    Assign a product link.

    :param link_type: type of link, one of 'cross_sell', 'up_sell',
                      'related' or 'grouped'
    :param product: ID or SKU of product
    :param linked_product: ID or SKU of linked product
    :param data: dictionary of link data (position, qty, etc.),
                 e.g. ``{'position': '0', 'qty': 1}``
    :param identifierType: whether "product" holds an ID or a SKU
    :return: boolean
    """
    args = [link_type, product, linked_product, data, identifierType]
    return bool(self.call('catalog_product_link.assign', args))
|
[
"def",
"assign",
"(",
"self",
",",
"link_type",
",",
"product",
",",
"linked_product",
",",
"data",
"=",
"None",
",",
"identifierType",
"=",
"None",
")",
":",
"return",
"bool",
"(",
"self",
".",
"call",
"(",
"'catalog_product_link.assign'",
",",
"[",
"link_type",
",",
"product",
",",
"linked_product",
",",
"data",
",",
"identifierType",
"]",
")",
")"
] | 43.333333
| 19.111111
|
def hex(x):
    '''Hex-encode a byte sequence.

    x --> bytes | bytearray
    Returns --> bytes: hex-encoded

    NOTE: shadows the builtin ``hex`` in this module.
    '''
    data = bytes(x) if isinstance(x, bytearray) else x
    return encode(data, 'hex')
|
[
"def",
"hex",
"(",
"x",
")",
":",
"if",
"isinstance",
"(",
"x",
",",
"bytearray",
")",
":",
"x",
"=",
"bytes",
"(",
"x",
")",
"return",
"encode",
"(",
"x",
",",
"'hex'",
")"
] | 20.125
| 19.875
|
def raw_messages(self):
    '''A generator yielding a :class:`MacMailMessage` binary object for
    each message in this folder, based on message index information in
    :class:`MacIndex` and content in :class:`MacMail`.'''
    if not self.data:
        return
    # First message starts right after the 24-byte Mail data file header.
    previous_end = 24
    self.skipped_chunks = 0
    for info in self.index.messages:
        message = self.data.get_message(info.offset, info.size)
        # The index references messages in offset order; detect and count
        # gaps of unreferenced data between consecutive messages.
        gap = info.offset - previous_end
        if gap > 0:
            logger.debug('Skipped %d bytes between %s (%s) and %s (%s)',
                         gap,
                         previous_end, hex(previous_end),
                         info.offset, hex(info.offset))
            self.skipped_chunks += 1
        previous_end = info.offset + info.size
        yield message
|
[
"def",
"raw_messages",
"(",
"self",
")",
":",
"if",
"self",
".",
"data",
":",
"# offset for first message, at end of Mail data file header",
"last_offset",
"=",
"24",
"self",
".",
"skipped_chunks",
"=",
"0",
"for",
"msginfo",
"in",
"self",
".",
"index",
".",
"messages",
":",
"msg",
"=",
"self",
".",
"data",
".",
"get_message",
"(",
"msginfo",
".",
"offset",
",",
"msginfo",
".",
"size",
")",
"# Index file seems to references messages in order by",
"# offset; check for data skipped between messages.",
"if",
"msginfo",
".",
"offset",
">",
"last_offset",
":",
"logger",
".",
"debug",
"(",
"'Skipped %d bytes between %s (%s) and %s (%s)'",
",",
"msginfo",
".",
"offset",
"-",
"last_offset",
",",
"last_offset",
",",
"hex",
"(",
"last_offset",
")",
",",
"msginfo",
".",
"offset",
",",
"hex",
"(",
"msginfo",
".",
"offset",
")",
")",
"self",
".",
"skipped_chunks",
"+=",
"1",
"last_offset",
"=",
"msginfo",
".",
"offset",
"+",
"msginfo",
".",
"size",
"yield",
"msg"
] | 45.083333
| 21.75
|
def directional_poisson_ratio(self, n, m, tol=1e-8):
    """
    Calculate the poisson ratio for a principal direction relative to a
    second, orthogonal direction.

    Args:
        n (3-d vector): principal direction
        m (3-d vector): secondary direction orthogonal to n
        tol (float): tolerance for the orthogonality test

    Raises:
        ValueError: if n and m are not orthogonal within ``tol``.
    """
    n, m = get_uvec(n), get_uvec(m)
    if not np.abs(np.dot(n, m)) < tol:
        raise ValueError("n and m must be orthogonal")
    # -S_{nnmm} / S_{nnnn} from the compliance tensor.
    ratio = self.compliance_tensor.einsum_sequence([n, n, m, m])
    ratio *= -1 / self.compliance_tensor.einsum_sequence([n, n, n, n])
    return ratio
|
[
"def",
"directional_poisson_ratio",
"(",
"self",
",",
"n",
",",
"m",
",",
"tol",
"=",
"1e-8",
")",
":",
"n",
",",
"m",
"=",
"get_uvec",
"(",
"n",
")",
",",
"get_uvec",
"(",
"m",
")",
"if",
"not",
"np",
".",
"abs",
"(",
"np",
".",
"dot",
"(",
"n",
",",
"m",
")",
")",
"<",
"tol",
":",
"raise",
"ValueError",
"(",
"\"n and m must be orthogonal\"",
")",
"v",
"=",
"self",
".",
"compliance_tensor",
".",
"einsum_sequence",
"(",
"[",
"n",
"]",
"*",
"2",
"+",
"[",
"m",
"]",
"*",
"2",
")",
"v",
"*=",
"-",
"1",
"/",
"self",
".",
"compliance_tensor",
".",
"einsum_sequence",
"(",
"[",
"n",
"]",
"*",
"4",
")",
"return",
"v"
] | 40.875
| 16
|
def modules_and_args(modules=True, states=False, names_only=False):
    '''
    Walk the Salt install tree and return a dictionary or a list
    of the functions therein as well as their arguments.

    :param modules: Walk the modules directory if True
    :param states: Walk the states directory if True
    :param names_only: Return only a list of the callable functions
        instead of a dictionary with arguments
    :return: An OrderedDict with callable function names as keys and lists
        of arguments as values (if ``names_only`` is False) or simply a
        sorted list of the callable function names (if ``names_only``
        is True).

    CLI Example:

    .. code-block:: bash

        salt myminion baredoc.modules_and_args
    '''
    module_dir = os.path.dirname(os.path.realpath(__file__))
    state_dir = os.path.join(os.path.dirname(module_dir), 'states')
    search_dirs = []
    if modules:
        search_dirs.append(module_dir)
    if states:
        search_dirs.append(state_dir)
    found = _mods_with_args(search_dirs)
    if names_only:
        return sorted(found)
    return OrderedDict(sorted(found.items()))
|
[
"def",
"modules_and_args",
"(",
"modules",
"=",
"True",
",",
"states",
"=",
"False",
",",
"names_only",
"=",
"False",
")",
":",
"dirs",
"=",
"[",
"]",
"module_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
"state_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"module_dir",
")",
",",
"'states'",
")",
"if",
"modules",
":",
"dirs",
".",
"append",
"(",
"module_dir",
")",
"if",
"states",
":",
"dirs",
".",
"append",
"(",
"state_dir",
")",
"ret",
"=",
"_mods_with_args",
"(",
"dirs",
")",
"if",
"names_only",
":",
"return",
"sorted",
"(",
"ret",
".",
"keys",
"(",
")",
")",
"else",
":",
"return",
"OrderedDict",
"(",
"sorted",
"(",
"ret",
".",
"items",
"(",
")",
")",
")"
] | 28.75
| 22.928571
|
def get_media_list(self, media_type, offset, count):
    """
    Fetch a page of the permanent-material list from the WeChat API.

    :param media_type: material type: image, video, voice or news
    :param offset: zero-based offset of the first item to return
    :param count: number of items to return (between 1 and 20)
    :return: parsed JSON response
    """
    payload = {
        "type": media_type,
        "offset": offset,
        "count": count
    }
    return self.post(
        url="https://api.weixin.qq.com/cgi-bin/material/batchget_material",
        data=payload,
    )
|
[
"def",
"get_media_list",
"(",
"self",
",",
"media_type",
",",
"offset",
",",
"count",
")",
":",
"return",
"self",
".",
"post",
"(",
"url",
"=",
"\"https://api.weixin.qq.com/cgi-bin/material/batchget_material\"",
",",
"data",
"=",
"{",
"\"type\"",
":",
"media_type",
",",
"\"offset\"",
":",
"offset",
",",
"\"count\"",
":",
"count",
"}",
")"
] | 30.647059
| 16.882353
|
def sum_over_energy(self):
    """Reduce a 3D counts cube to a 2D counts map.

    The array uses the opposite axis convention from the WCS object, so
    summing over array axis 0 corresponds to dropping WCS axis 2.
    """
    collapsed = np.sum(self.counts, axis=0)
    return Map(collapsed, self.wcs.dropaxis(2))
|
[
"def",
"sum_over_energy",
"(",
"self",
")",
":",
"# Note that the array is using the opposite convention from WCS",
"# so we sum over axis 0 in the array, but drop axis 2 in the WCS object",
"return",
"Map",
"(",
"np",
".",
"sum",
"(",
"self",
".",
"counts",
",",
"axis",
"=",
"0",
")",
",",
"self",
".",
"wcs",
".",
"dropaxis",
"(",
"2",
")",
")"
] | 51.666667
| 18.833333
|
def load(self, commit=None):
    """Load a stored result for the given commit from the database."""
    git_info = self.record_git_info(commit)
    LOGGER.info("Loading result from '%s'.", git_info.hexsha)
    row = (
        self.session.query(Result.memote_result)
        .filter_by(hexsha=git_info.hexsha)
        .one()
    )
    result = MemoteResult(row.memote_result)
    # Attach git metadata so the object matches the one returned by the
    # RepoResultManager.
    self.add_git(result.meta, git_info)
    return result
|
[
"def",
"load",
"(",
"self",
",",
"commit",
"=",
"None",
")",
":",
"git_info",
"=",
"self",
".",
"record_git_info",
"(",
"commit",
")",
"LOGGER",
".",
"info",
"(",
"\"Loading result from '%s'.\"",
",",
"git_info",
".",
"hexsha",
")",
"result",
"=",
"MemoteResult",
"(",
"self",
".",
"session",
".",
"query",
"(",
"Result",
".",
"memote_result",
")",
".",
"filter_by",
"(",
"hexsha",
"=",
"git_info",
".",
"hexsha",
")",
".",
"one",
"(",
")",
".",
"memote_result",
")",
"# Add git info so the object is equivalent to the one returned by the",
"# RepoResultManager.",
"self",
".",
"add_git",
"(",
"result",
".",
"meta",
",",
"git_info",
")",
"return",
"result"
] | 43.083333
| 12.583333
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.