text
stringlengths 89
104k
| code_tokens
list | avg_line_len
float64 7.91
980
| score
float64 0
630
|
|---|---|---|---|
def timed_call(self, ms, callback, *args, **kwargs):
    """ Schedule a callable to run on the main event loop thread after
    a delay.
    Parameters
    ----------
    ms : int
        The time to delay, in milliseconds, before executing the
        callable.
    callback : callable
        The callable object to execute at some point in the future.
    *args, **kwargs
        Any additional positional and keyword arguments to pass to
        the callback.
    """
    # Delegates straight to the underlying event loop implementation.
    loop = self.loop
    return loop.timed_call(ms, callback, *args, **kwargs)
|
[
"def",
"timed_call",
"(",
"self",
",",
"ms",
",",
"callback",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"loop",
".",
"timed_call",
"(",
"ms",
",",
"callback",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 30.894737
| 22.210526
|
def do_it(self, dbg):
    '''Evaluate the stored console expression and send the result to the client.

    Executes ``self.expression`` in the context of ``self.thread_id`` /
    ``self.frame_id`` via the console, encodes the result as XML and queues
    it on the debugger's writer.  On any failure, an error message carrying
    the traceback text is queued instead of the result.

    :param dbg: the active debugger; its ``cmd_factory`` builds the reply
        message and its ``writer`` delivers it.
    '''
    try:
        try:
            # don't trace new threads created by console command
            disable_trace_thread_modules()
            result = pydevconsole.console_exec(self.thread_id, self.frame_id, self.expression, dbg)
            # Wrap the evaluated value in an <xml> envelope for the client.
            xml = "<xml>"
            xml += pydevd_xml.var_to_xml(result, "")
            xml += "</xml>"
            cmd = dbg.cmd_factory.make_evaluate_expression_message(self.sequence, xml)
            dbg.writer.add_command(cmd)
        except:
            # Bare except is intentional: any failure (including exceptions
            # not derived from Exception) must be reported to the client
            # rather than kill the debugger worker.
            exc = get_exception_traceback_str()
            sys.stderr.write('%s\n' % (exc,))
            cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating console expression " + exc)
            dbg.writer.add_command(cmd)
    finally:
        # Re-enable tracing and flush both streams regardless of outcome.
        enable_trace_thread_modules()
        sys.stderr.flush()
        sys.stdout.flush()
|
[
"def",
"do_it",
"(",
"self",
",",
"dbg",
")",
":",
"try",
":",
"try",
":",
"# don't trace new threads created by console command",
"disable_trace_thread_modules",
"(",
")",
"result",
"=",
"pydevconsole",
".",
"console_exec",
"(",
"self",
".",
"thread_id",
",",
"self",
".",
"frame_id",
",",
"self",
".",
"expression",
",",
"dbg",
")",
"xml",
"=",
"\"<xml>\"",
"xml",
"+=",
"pydevd_xml",
".",
"var_to_xml",
"(",
"result",
",",
"\"\"",
")",
"xml",
"+=",
"\"</xml>\"",
"cmd",
"=",
"dbg",
".",
"cmd_factory",
".",
"make_evaluate_expression_message",
"(",
"self",
".",
"sequence",
",",
"xml",
")",
"dbg",
".",
"writer",
".",
"add_command",
"(",
"cmd",
")",
"except",
":",
"exc",
"=",
"get_exception_traceback_str",
"(",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"'%s\\n'",
"%",
"(",
"exc",
",",
")",
")",
"cmd",
"=",
"dbg",
".",
"cmd_factory",
".",
"make_error_message",
"(",
"self",
".",
"sequence",
",",
"\"Error evaluating console expression \"",
"+",
"exc",
")",
"dbg",
".",
"writer",
".",
"add_command",
"(",
"cmd",
")",
"finally",
":",
"enable_trace_thread_modules",
"(",
")",
"sys",
".",
"stderr",
".",
"flush",
"(",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")"
] | 41.913043
| 22.434783
|
def get_conn(self):
    """
    Retrieves connection to Cloud Vision, creating the client lazily on
    first use and caching it for subsequent calls.
    :return: Google Cloud Vision client object.
    :rtype: google.cloud.vision_v1.ProductSearchClient
    """
    client = self._client
    if not client:
        client = ProductSearchClient(credentials=self._get_credentials())
        self._client = client
    return client
|
[
"def",
"get_conn",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_client",
":",
"self",
".",
"_client",
"=",
"ProductSearchClient",
"(",
"credentials",
"=",
"self",
".",
"_get_credentials",
"(",
")",
")",
"return",
"self",
".",
"_client"
] | 33.3
| 16.3
|
def stringify_device_meta(device_object):
    """ Input: Portals device object.
    Output: The same object, mutated in place so that a dict-valued
    ``info.description.meta`` is replaced by its JSON string form. """
    try:
        description = device_object['info']['description']
        if isinstance(description['meta'], dict):
            description['meta'] = json.dumps(description['meta'])
    except ValueError as err:
        # Best-effort: report serialization problems but still hand back
        # the (possibly unmodified) object.
        print("stringify: {0}".format(err))
    return device_object
|
[
"def",
"stringify_device_meta",
"(",
"device_object",
")",
":",
"try",
":",
"if",
"isinstance",
"(",
"device_object",
"[",
"'info'",
"]",
"[",
"'description'",
"]",
"[",
"'meta'",
"]",
",",
"dict",
")",
":",
"device_object",
"[",
"'info'",
"]",
"[",
"'description'",
"]",
"[",
"'meta'",
"]",
"=",
"json",
".",
"dumps",
"(",
"device_object",
"[",
"'info'",
"]",
"[",
"'description'",
"]",
"[",
"'meta'",
"]",
")",
"except",
"ValueError",
"as",
"err",
":",
"print",
"(",
"\"stringify: {0}\"",
".",
"format",
"(",
"err",
")",
")",
"return",
"device_object"
] | 45
| 15.181818
|
def str_to_bool(value):
    """
    Converts truthy/falsey strings to a bool.
    Empty strings are False; non-string values are returned unchanged.

    :param value: value to convert
    :return: True/False for recognised strings, otherwise ``value`` as-is
    :raises NotImplementedError: for an unrecognised non-empty string
    """
    # Fixed: the original tested `basestring`, which only exists on
    # Python 2 and raises NameError on Python 3.
    if isinstance(value, str):
        value = value.strip().lower()
        if value in ('true', 't', 'yes', 'y'):
            return True
        if value in ('false', 'f', 'no', 'n', ''):
            return False
        raise NotImplementedError("Unknown bool %s" % value)
    return value
|
[
"def",
"str_to_bool",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"basestring",
")",
":",
"value",
"=",
"value",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"if",
"value",
"in",
"[",
"'true'",
",",
"'t'",
",",
"'yes'",
",",
"'y'",
"]",
":",
"return",
"True",
"elif",
"value",
"in",
"[",
"'false'",
",",
"'f'",
",",
"'no'",
",",
"'n'",
",",
"''",
"]",
":",
"return",
"False",
"else",
":",
"raise",
"NotImplementedError",
"(",
"\"Unknown bool %s\"",
"%",
"value",
")",
"return",
"value"
] | 28.466667
| 14.2
|
def save_or_update(self, cluster):
    """Save or update the cluster to persistent state.

    :param cluster: cluster to save or update
    :type cluster: :py:class:`elasticluster.cluster.Cluster`
    """
    # exist_ok=True avoids the check-then-create race the previous
    # os.path.exists() guard was subject to (another process could create
    # the directory between the check and the makedirs call).
    os.makedirs(self.storage_path, exist_ok=True)
    path = self._get_cluster_storage_path(cluster.name)
    # Remember where this cluster was persisted.
    cluster.storage_file = path
    with open(path, 'wb') as storage:
        self.dump(cluster, storage)
|
[
"def",
"save_or_update",
"(",
"self",
",",
"cluster",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"storage_path",
")",
":",
"os",
".",
"makedirs",
"(",
"self",
".",
"storage_path",
")",
"path",
"=",
"self",
".",
"_get_cluster_storage_path",
"(",
"cluster",
".",
"name",
")",
"cluster",
".",
"storage_file",
"=",
"path",
"with",
"open",
"(",
"path",
",",
"'wb'",
")",
"as",
"storage",
":",
"self",
".",
"dump",
"(",
"cluster",
",",
"storage",
")"
] | 37
| 12
|
def async_process(fn):
    """ Decorator that launches the wrapped callable in a separate process.

    The returned wrapper starts a ``multiprocessing.Process`` targeting
    ``fn`` with the supplied arguments and returns the started process
    object (the caller may ``join`` it if desired). """
    def wrapper(*args, **kwargs):
        worker = mp.Process(target=fn, args=args, kwargs=kwargs)
        worker.start()
        return worker
    return wrapper
|
[
"def",
"async_process",
"(",
"fn",
")",
":",
"def",
"run",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"proc",
"=",
"mp",
".",
"Process",
"(",
"target",
"=",
"fn",
",",
"args",
"=",
"args",
",",
"kwargs",
"=",
"kwargs",
")",
"proc",
".",
"start",
"(",
")",
"return",
"proc",
"return",
"run"
] | 26.555556
| 22
|
def run(self, command, variables=None, board=None, packages=None):
    """Executes scons for building.

    :param command: scons command to run
    :param variables: extra scons variables/flags (list); defaults to an
        empty list per call
    :param board: target board, forwarded to scons
    :param packages: packages that must be resolved before building
    :raises Exception: if a required package is not installed

    Fixed: the old signature used mutable defaults (``variables=[]``),
    and ``variables += ['-f']`` mutated the shared default list, so the
    ``-f`` flag accumulated across successive calls.
    """
    variables = [] if variables is None else variables
    packages = [] if packages is None else packages
    # -- Check for the SConstruct file
    if not isfile(util.safe_join(util.get_project_dir(), 'SConstruct')):
        variables += ['-f']
        variables += [util.safe_join(
            util.get_folder('resources'), 'SConstruct')]
    else:
        click.secho('Info: use custom SConstruct file')
    # -- Resolve packages
    if self.profile.check_exe_default():
        # Run on `default` config mode
        if not util.resolve_packages(
            packages,
            self.profile.packages,
            self.resources.distribution.get('packages')
        ):
            # Exit if a package is not installed
            raise Exception
    else:
        click.secho('Info: native config mode')
    # -- Execute scons
    return self._execute_scons(command, variables, board)
|
[
"def",
"run",
"(",
"self",
",",
"command",
",",
"variables",
"=",
"[",
"]",
",",
"board",
"=",
"None",
",",
"packages",
"=",
"[",
"]",
")",
":",
"# -- Check for the SConstruct file",
"if",
"not",
"isfile",
"(",
"util",
".",
"safe_join",
"(",
"util",
".",
"get_project_dir",
"(",
")",
",",
"'SConstruct'",
")",
")",
":",
"variables",
"+=",
"[",
"'-f'",
"]",
"variables",
"+=",
"[",
"util",
".",
"safe_join",
"(",
"util",
".",
"get_folder",
"(",
"'resources'",
")",
",",
"'SConstruct'",
")",
"]",
"else",
":",
"click",
".",
"secho",
"(",
"'Info: use custom SConstruct file'",
")",
"# -- Resolve packages",
"if",
"self",
".",
"profile",
".",
"check_exe_default",
"(",
")",
":",
"# Run on `default` config mode",
"if",
"not",
"util",
".",
"resolve_packages",
"(",
"packages",
",",
"self",
".",
"profile",
".",
"packages",
",",
"self",
".",
"resources",
".",
"distribution",
".",
"get",
"(",
"'packages'",
")",
")",
":",
"# Exit if a package is not installed",
"raise",
"Exception",
"else",
":",
"click",
".",
"secho",
"(",
"'Info: native config mode'",
")",
"# -- Execute scons",
"return",
"self",
".",
"_execute_scons",
"(",
"command",
",",
"variables",
",",
"board",
")"
] | 36.384615
| 16.576923
|
async def get_details(self):
    """Fetch the machine's hardware details from the handler.

    :returns: Mapping of hardware details.
    """
    raw = await self._handler.details(system_id=self.system_id)
    # The handler returns BSON; the first decoded document is the mapping.
    decoded = bson.decode_all(raw)
    return decoded[0]
|
[
"async",
"def",
"get_details",
"(",
"self",
")",
":",
"data",
"=",
"await",
"self",
".",
"_handler",
".",
"details",
"(",
"system_id",
"=",
"self",
".",
"system_id",
")",
"return",
"bson",
".",
"decode_all",
"(",
"data",
")",
"[",
"0",
"]"
] | 33.571429
| 12.428571
|
def new_graph(self, name, data=None, **attr):
    """Create, register and return a new Graph instance.

    :arg name: a name for the graph
    :arg data: dictionary or NetworkX graph object providing initial state
    """
    self._init_graph(name, 'Graph')
    graph_obj = Graph(self, name, data, **attr)
    # Keep a handle so the graph can be looked up by name later.
    self._graph_objs[name] = graph_obj
    return graph_obj
|
[
"def",
"new_graph",
"(",
"self",
",",
"name",
",",
"data",
"=",
"None",
",",
"*",
"*",
"attr",
")",
":",
"self",
".",
"_init_graph",
"(",
"name",
",",
"'Graph'",
")",
"g",
"=",
"Graph",
"(",
"self",
",",
"name",
",",
"data",
",",
"*",
"*",
"attr",
")",
"self",
".",
"_graph_objs",
"[",
"name",
"]",
"=",
"g",
"return",
"g"
] | 33.666667
| 14.416667
|
def mountain_car_trajectories(num_traj):
    '''Collect data using random hard-coded linear policies on MountainCar.
    num_traj : int, number of trajectories to collect
    Returns (trajectories, traces)
    '''
    domain = MountainCar()
    slopes = np.random.normal(0, 0.01, size=num_traj)
    v0s = np.random.normal(0, 0.005, size=num_traj)
    trajectories = []
    traces = []
    # Per-dimension scale used to normalize (position, velocity) pairs.
    scale = np.array((domain.MAX_POS - domain.MIN_POS,
                      domain.MAX_VEL - domain.MIN_VEL))
    for slope, intercept in zip(slopes, v0s):
        # Linear threshold policy; bound per-iteration via defaults.
        def policy(s, _m=slope, _b=intercept):
            return 0 if s[0] * _m + s[1] + _b > 0 else 2
        start = (np.random.uniform(domain.MIN_POS, domain.MAX_POS),
                 np.random.uniform(domain.MIN_VEL, domain.MAX_VEL))
        samples = _run_episode(policy, domain, start, max_iters=40)
        # normalize
        samples.state /= scale
        samples.next_state /= scale
        traces.append(samples)
        if samples.reward[-1] == 0:
            # Don't include the warp to the final state.
            trajectories.append(samples.state[:-1])
        else:
            trajectories.append(samples.state)
    return trajectories, traces
|
[
"def",
"mountain_car_trajectories",
"(",
"num_traj",
")",
":",
"domain",
"=",
"MountainCar",
"(",
")",
"slopes",
"=",
"np",
".",
"random",
".",
"normal",
"(",
"0",
",",
"0.01",
",",
"size",
"=",
"num_traj",
")",
"v0s",
"=",
"np",
".",
"random",
".",
"normal",
"(",
"0",
",",
"0.005",
",",
"size",
"=",
"num_traj",
")",
"trajectories",
"=",
"[",
"]",
"traces",
"=",
"[",
"]",
"norm",
"=",
"np",
".",
"array",
"(",
"(",
"domain",
".",
"MAX_POS",
"-",
"domain",
".",
"MIN_POS",
",",
"domain",
".",
"MAX_VEL",
"-",
"domain",
".",
"MIN_VEL",
")",
")",
"for",
"m",
",",
"b",
"in",
"zip",
"(",
"slopes",
",",
"v0s",
")",
":",
"mcar_policy",
"=",
"lambda",
"s",
":",
"0",
"if",
"s",
"[",
"0",
"]",
"*",
"m",
"+",
"s",
"[",
"1",
"]",
"+",
"b",
">",
"0",
"else",
"2",
"start",
"=",
"(",
"np",
".",
"random",
".",
"uniform",
"(",
"domain",
".",
"MIN_POS",
",",
"domain",
".",
"MAX_POS",
")",
",",
"np",
".",
"random",
".",
"uniform",
"(",
"domain",
".",
"MIN_VEL",
",",
"domain",
".",
"MAX_VEL",
")",
")",
"samples",
"=",
"_run_episode",
"(",
"mcar_policy",
",",
"domain",
",",
"start",
",",
"max_iters",
"=",
"40",
")",
"# normalize",
"samples",
".",
"state",
"/=",
"norm",
"samples",
".",
"next_state",
"/=",
"norm",
"traces",
".",
"append",
"(",
"samples",
")",
"if",
"samples",
".",
"reward",
"[",
"-",
"1",
"]",
"==",
"0",
":",
"# Don't include the warp to the final state.",
"trajectories",
".",
"append",
"(",
"samples",
".",
"state",
"[",
":",
"-",
"1",
"]",
")",
"else",
":",
"trajectories",
".",
"append",
"(",
"samples",
".",
"state",
")",
"return",
"trajectories",
",",
"traces"
] | 34.4
| 17.866667
|
def git_pull(repo_dir, remote="origin", ref=None, update_head_ok=False):
    """Do a git pull of `ref` from `remote`."""
    cmd = ['git', 'pull']
    if update_head_ok:
        cmd += ['--update-head-ok']
    cmd += [pipes.quote(remote)]
    if ref:
        cmd += [ref]
    return execute_git_command(cmd, repo_dir=repo_dir)
|
[
"def",
"git_pull",
"(",
"repo_dir",
",",
"remote",
"=",
"\"origin\"",
",",
"ref",
"=",
"None",
",",
"update_head_ok",
"=",
"False",
")",
":",
"command",
"=",
"[",
"'git'",
",",
"'pull'",
"]",
"if",
"update_head_ok",
":",
"command",
".",
"append",
"(",
"'--update-head-ok'",
")",
"command",
".",
"append",
"(",
"pipes",
".",
"quote",
"(",
"remote",
")",
")",
"if",
"ref",
":",
"command",
".",
"append",
"(",
"ref",
")",
"return",
"execute_git_command",
"(",
"command",
",",
"repo_dir",
"=",
"repo_dir",
")"
] | 38.555556
| 13.777778
|
def task_list(task_array):
    """Render a Markdown-style task list.
    Each entry of `task_array` is a two-item sequence: the task text and
    its boolean completion state.
    >>> task_list([["Be born", True], ["Be dead", False]])
    '- [X] Be born\\n- [ ] Be dead'
    When displayed using `print`, this will appear as:
    - [X] Be born
    - [ ] Be dead
    """
    rendered = []
    for text, done in task_array:
        prefix = "- [X] " if done else "- [ ] "
        rendered.append(prefix + esc_format(text))
    return "\n".join(rendered)
|
[
"def",
"task_list",
"(",
"task_array",
")",
":",
"tasks",
"=",
"[",
"]",
"for",
"item",
",",
"completed",
"in",
"task_array",
":",
"task",
"=",
"\"- [ ] \"",
"+",
"esc_format",
"(",
"item",
")",
"if",
"completed",
":",
"task",
"=",
"task",
"[",
":",
"3",
"]",
"+",
"\"X\"",
"+",
"task",
"[",
"4",
":",
"]",
"tasks",
".",
"append",
"(",
"task",
")",
"return",
"\"\\n\"",
".",
"join",
"(",
"tasks",
")"
] | 28.142857
| 18.095238
|
def flush(self):
    """Flush message queue if there's an active connection running"""
    self._pending_flush = False
    handler = self.handler
    # Nothing to do without a live handler or with an empty queue.
    if handler is None or not handler.active or not self.send_queue:
        return
    handler.send_pack('a[%s]' % self.send_queue)
    self.send_queue = ''
|
[
"def",
"flush",
"(",
"self",
")",
":",
"self",
".",
"_pending_flush",
"=",
"False",
"if",
"self",
".",
"handler",
"is",
"None",
"or",
"not",
"self",
".",
"handler",
".",
"active",
"or",
"not",
"self",
".",
"send_queue",
":",
"return",
"self",
".",
"handler",
".",
"send_pack",
"(",
"'a[%s]'",
"%",
"self",
".",
"send_queue",
")",
"self",
".",
"send_queue",
"=",
"''"
] | 34.333333
| 22.444444
|
def _run_eos_cmds(self, commands, commands_to_log=None):
    """Execute/sends a CAPI (Command API) command to EOS.

    In this method, list of commands is appended with prefix and
    postfix commands - to make it understandable by EOS.

    :param commands: List of commands to be executed on EOS.
    :param commands_to_log: This should be set to the command that is
                            logged. If it is None, then the commands
                            param is logged.
    """
    # Always figure out who is master (starting with the last known val)
    try:
        if self._get_eos_master() is None:
            msg = "Failed to identify CVX master"
            self.set_cvx_unavailable()
            raise arista_exc.AristaRpcError(msg=msg)
    except Exception:
        # Mark CVX unavailable on any failure during master discovery,
        # then re-raise the original exception unchanged.
        self.set_cvx_unavailable()
        raise
    self.set_cvx_available()
    # Substitute the loggable command list if one was supplied.
    log_cmds = commands
    if commands_to_log:
        log_cmds = commands_to_log
    LOG.info(_LI('Executing command on Arista EOS: %s'), log_cmds)
    # this returns array of return values for every command in
    # full_command list
    try:
        response = self._send_eapi_req(cmds=commands,
                                       commands_to_log=log_cmds)
        if response is None:
            # Reset the server as we failed communicating with it
            self._server_ip = None
            self.set_cvx_unavailable()
            msg = "Failed to communicate with CVX master"
            raise arista_exc.AristaRpcError(msg=msg)
        return response
    except arista_exc.AristaRpcError:
        raise
|
[
"def",
"_run_eos_cmds",
"(",
"self",
",",
"commands",
",",
"commands_to_log",
"=",
"None",
")",
":",
"# Always figure out who is master (starting with the last known val)",
"try",
":",
"if",
"self",
".",
"_get_eos_master",
"(",
")",
"is",
"None",
":",
"msg",
"=",
"\"Failed to identify CVX master\"",
"self",
".",
"set_cvx_unavailable",
"(",
")",
"raise",
"arista_exc",
".",
"AristaRpcError",
"(",
"msg",
"=",
"msg",
")",
"except",
"Exception",
":",
"self",
".",
"set_cvx_unavailable",
"(",
")",
"raise",
"self",
".",
"set_cvx_available",
"(",
")",
"log_cmds",
"=",
"commands",
"if",
"commands_to_log",
":",
"log_cmds",
"=",
"commands_to_log",
"LOG",
".",
"info",
"(",
"_LI",
"(",
"'Executing command on Arista EOS: %s'",
")",
",",
"log_cmds",
")",
"# this returns array of return values for every command in",
"# full_command list",
"try",
":",
"response",
"=",
"self",
".",
"_send_eapi_req",
"(",
"cmds",
"=",
"commands",
",",
"commands_to_log",
"=",
"log_cmds",
")",
"if",
"response",
"is",
"None",
":",
"# Reset the server as we failed communicating with it",
"self",
".",
"_server_ip",
"=",
"None",
"self",
".",
"set_cvx_unavailable",
"(",
")",
"msg",
"=",
"\"Failed to communicate with CVX master\"",
"raise",
"arista_exc",
".",
"AristaRpcError",
"(",
"msg",
"=",
"msg",
")",
"return",
"response",
"except",
"arista_exc",
".",
"AristaRpcError",
":",
"raise"
] | 40.166667
| 18.97619
|
def _RunScript(self, script):
"""Run a script and log the streamed script output.
Args:
script: string, the file location of an executable script.
"""
process = subprocess.Popen(
script, shell=True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
while True:
for line in iter(process.stdout.readline, b''):
self.logger.info(line.decode('utf-8').rstrip('\n'))
if process.poll() is not None:
break
|
[
"def",
"_RunScript",
"(",
"self",
",",
"script",
")",
":",
"process",
"=",
"subprocess",
".",
"Popen",
"(",
"script",
",",
"shell",
"=",
"True",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"while",
"True",
":",
"for",
"line",
"in",
"iter",
"(",
"process",
".",
"stdout",
".",
"readline",
",",
"b''",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"line",
".",
"decode",
"(",
"'utf-8'",
")",
".",
"rstrip",
"(",
"'\\n'",
")",
")",
"if",
"process",
".",
"poll",
"(",
")",
"is",
"not",
"None",
":",
"break"
] | 34.461538
| 18.461538
|
def calcdelay(self, ant1, ant2, skyfreq, pol):
    """ Calculates the relative delay (d1-d2) for a pair of antennas in ns.
    Returns a length-0-safe array: [0] when the pair is not found.
    """
    # Restrict the selection to rows matching this skyfreq/polarization.
    freq_match = self.skyfreq[self.select] == skyfreq
    pol_match = self.polarization[self.select] == pol
    sel = self.select[n.where(freq_match & pol_match)[0]]
    # Locate each antenna within the restricted selection.
    idx1 = n.where(ant1 == self.antnum[sel])
    idx2 = n.where(ant2 == self.antnum[sel])
    diff = self.delay[sel][idx1] - self.delay[sel][idx2]
    if len(diff) > 0:
        return diff
    return n.array([0])
|
[
"def",
"calcdelay",
"(",
"self",
",",
"ant1",
",",
"ant2",
",",
"skyfreq",
",",
"pol",
")",
":",
"select",
"=",
"self",
".",
"select",
"[",
"n",
".",
"where",
"(",
"(",
"self",
".",
"skyfreq",
"[",
"self",
".",
"select",
"]",
"==",
"skyfreq",
")",
"&",
"(",
"self",
".",
"polarization",
"[",
"self",
".",
"select",
"]",
"==",
"pol",
")",
")",
"[",
"0",
"]",
"]",
"ind1",
"=",
"n",
".",
"where",
"(",
"ant1",
"==",
"self",
".",
"antnum",
"[",
"select",
"]",
")",
"ind2",
"=",
"n",
".",
"where",
"(",
"ant2",
"==",
"self",
".",
"antnum",
"[",
"select",
"]",
")",
"d1",
"=",
"self",
".",
"delay",
"[",
"select",
"]",
"[",
"ind1",
"]",
"d2",
"=",
"self",
".",
"delay",
"[",
"select",
"]",
"[",
"ind2",
"]",
"if",
"len",
"(",
"d1",
"-",
"d2",
")",
">",
"0",
":",
"return",
"d1",
"-",
"d2",
"else",
":",
"return",
"n",
".",
"array",
"(",
"[",
"0",
"]",
")"
] | 37.857143
| 18.857143
|
def start(builtins=False, profile_threads=True):
    """Starts profiling all threads and all greenlets.
    This function can be called from any thread at any time.
    Resumes profiling if stop() was called previously.
    * `builtins`: Profile builtin functions used by standard Python modules.
    * `profile_threads`: Profile all threads if ``True``, else profile only
      the calling thread.
    """
    # TODO: what about builtins False or profile_threads False?
    def _context_id():
        # Identify execution contexts by greenlet identity (0 if absent).
        return greenlet and id(greenlet.getcurrent()) or 0

    def _context_name():
        return greenlet and greenlet.getcurrent().__class__.__name__ or ''

    yappi_mod = _vendorized_yappi.yappi
    yappi_mod.set_context_id_callback(_context_id)
    yappi_mod.set_context_name_callback(_context_name)
    yappi_mod.start(builtins, profile_threads)
|
[
"def",
"start",
"(",
"builtins",
"=",
"False",
",",
"profile_threads",
"=",
"True",
")",
":",
"# TODO: what about builtins False or profile_threads False?",
"_vendorized_yappi",
".",
"yappi",
".",
"set_context_id_callback",
"(",
"lambda",
":",
"greenlet",
"and",
"id",
"(",
"greenlet",
".",
"getcurrent",
"(",
")",
")",
"or",
"0",
")",
"_vendorized_yappi",
".",
"yappi",
".",
"set_context_name_callback",
"(",
"lambda",
":",
"greenlet",
"and",
"greenlet",
".",
"getcurrent",
"(",
")",
".",
"__class__",
".",
"__name__",
"or",
"''",
")",
"_vendorized_yappi",
".",
"yappi",
".",
"start",
"(",
"builtins",
",",
"profile_threads",
")"
] | 42.444444
| 23.388889
|
def recv_rpc(self, context, payload):
    """Queue an inbound RPC and wake any waiters; callable from any thread."""
    logger.debug("Adding RPC payload to ControlBuffer queue: %s", payload)
    item = ('rpc', (context, payload))
    self.buf.put(item)
    with self.cv:
        self.cv.notifyAll()
|
[
"def",
"recv_rpc",
"(",
"self",
",",
"context",
",",
"payload",
")",
":",
"logger",
".",
"debug",
"(",
"\"Adding RPC payload to ControlBuffer queue: %s\"",
",",
"payload",
")",
"self",
".",
"buf",
".",
"put",
"(",
"(",
"'rpc'",
",",
"(",
"context",
",",
"payload",
")",
")",
")",
"with",
"self",
".",
"cv",
":",
"self",
".",
"cv",
".",
"notifyAll",
"(",
")"
] | 41.666667
| 13
|
def exit_statistics(hostname, start_time, count_sent, count_received, min_time, avg_time, max_time, deviation):
    """
    Print ping exit statistics.

    :param hostname: host the statistics are for
    :param start_time: datetime when pinging started
    :param count_sent: number of packets sent
    :param count_received: number of packets received
    :param min_time: timedelta of the fastest round trip
    :param avg_time: average round-trip time in microseconds
    :param max_time: timedelta of the slowest round trip
    :param deviation: round-trip time deviation in ms
    """
    end_time = datetime.datetime.now()
    duration = end_time - start_time
    duration_sec = float(duration.seconds * 1000)
    duration_ms = float(duration.microseconds / 1000)
    duration = duration_sec + duration_ms
    print(f'\b\b--- {hostname} ping statistics ---')
    try:
        # Fixed: the division used to happen *before* the try block, so the
        # ZeroDivisionError handler below was unreachable and count_sent == 0
        # crashed the function.
        package_loss = 100 - ((float(count_received) / float(count_sent)) * 100)
        print(f'{count_sent} packages transmitted, {count_received} received, {package_loss}% package loss, time {duration}ms')
    except ZeroDivisionError:
        print(f'{count_sent} packets transmitted, {count_received} received, 100% packet loss, time {duration}ms')
    print(
        'rtt min/avg/max/dev = %.2f/%.2f/%.2f/%.2f ms' % (
            min_time.seconds*1000 + float(min_time.microseconds)/1000,
            float(avg_time) / 1000,
            max_time.seconds*1000 + float(max_time.microseconds)/1000,
            float(deviation)
        )
    )
|
[
"def",
"exit_statistics",
"(",
"hostname",
",",
"start_time",
",",
"count_sent",
",",
"count_received",
",",
"min_time",
",",
"avg_time",
",",
"max_time",
",",
"deviation",
")",
":",
"end_time",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"duration",
"=",
"end_time",
"-",
"start_time",
"duration_sec",
"=",
"float",
"(",
"duration",
".",
"seconds",
"*",
"1000",
")",
"duration_ms",
"=",
"float",
"(",
"duration",
".",
"microseconds",
"/",
"1000",
")",
"duration",
"=",
"duration_sec",
"+",
"duration_ms",
"package_loss",
"=",
"100",
"-",
"(",
"(",
"float",
"(",
"count_received",
")",
"/",
"float",
"(",
"count_sent",
")",
")",
"*",
"100",
")",
"print",
"(",
"f'\\b\\b--- {hostname} ping statistics ---'",
")",
"try",
":",
"print",
"(",
"f'{count_sent} packages transmitted, {count_received} received, {package_loss}% package loss, time {duration}ms'",
")",
"except",
"ZeroDivisionError",
":",
"print",
"(",
"f'{count_sent} packets transmitted, {count_received} received, 100% packet loss, time {duration}ms'",
")",
"print",
"(",
"'rtt min/avg/max/dev = %.2f/%.2f/%.2f/%.2f ms'",
"%",
"(",
"min_time",
".",
"seconds",
"*",
"1000",
"+",
"float",
"(",
"min_time",
".",
"microseconds",
")",
"/",
"1000",
",",
"float",
"(",
"avg_time",
")",
"/",
"1000",
",",
"max_time",
".",
"seconds",
"*",
"1000",
"+",
"float",
"(",
"max_time",
".",
"microseconds",
")",
"/",
"1000",
",",
"float",
"(",
"deviation",
")",
")",
")"
] | 46.217391
| 24.043478
|
def prepare(self):
    """Prepare for monitoring - install agents etc"""
    # Parse config
    agent_configs = []
    if self.config:
        agent_configs = self.config_manager.getconfig(
            self.config, self.default_target)
    # Creating agent for hosts
    for cfg in agent_configs:
        factory_kwargs = {'kill_old': self.kill_old}
        if cfg['host'] in ('localhost', '127.0.0.1', '::1'):
            factory = self.clients['localhost']
        else:
            factory = self.clients['ssh']
            factory_kwargs['timeout'] = 5
        client = factory(cfg, self.old_style_configs, **factory_kwargs)
        logger.debug('Installing monitoring agent. Host: %s', client.host)
        agent_config, startup_config, customs_script = client.install()
        # Only a successful install (agent_config present) registers the
        # client; each produced artifact is collected for later retrieval.
        if agent_config:
            self.agents.append(client)
            self.artifact_files.append(agent_config)
        if startup_config:
            self.artifact_files.append(startup_config)
        if customs_script:
            self.artifact_files.append(customs_script)
|
[
"def",
"prepare",
"(",
"self",
")",
":",
"# Parse config",
"agent_configs",
"=",
"[",
"]",
"if",
"self",
".",
"config",
":",
"agent_configs",
"=",
"self",
".",
"config_manager",
".",
"getconfig",
"(",
"self",
".",
"config",
",",
"self",
".",
"default_target",
")",
"# Creating agent for hosts",
"for",
"config",
"in",
"agent_configs",
":",
"if",
"config",
"[",
"'host'",
"]",
"in",
"[",
"'localhost'",
",",
"'127.0.0.1'",
",",
"'::1'",
"]",
":",
"client",
"=",
"self",
".",
"clients",
"[",
"'localhost'",
"]",
"(",
"config",
",",
"self",
".",
"old_style_configs",
",",
"kill_old",
"=",
"self",
".",
"kill_old",
")",
"else",
":",
"client",
"=",
"self",
".",
"clients",
"[",
"'ssh'",
"]",
"(",
"config",
",",
"self",
".",
"old_style_configs",
",",
"timeout",
"=",
"5",
",",
"kill_old",
"=",
"self",
".",
"kill_old",
")",
"logger",
".",
"debug",
"(",
"'Installing monitoring agent. Host: %s'",
",",
"client",
".",
"host",
")",
"agent_config",
",",
"startup_config",
",",
"customs_script",
"=",
"client",
".",
"install",
"(",
")",
"if",
"agent_config",
":",
"self",
".",
"agents",
".",
"append",
"(",
"client",
")",
"self",
".",
"artifact_files",
".",
"append",
"(",
"agent_config",
")",
"if",
"startup_config",
":",
"self",
".",
"artifact_files",
".",
"append",
"(",
"startup_config",
")",
"if",
"customs_script",
":",
"self",
".",
"artifact_files",
".",
"append",
"(",
"customs_script",
")"
] | 43.038462
| 19
|
def sort_query(self, query, sort_info):
    """Sort query according to jsonapi 1.0
    :param Query query: sqlalchemy query to sort
    :param list sort_info: sort information
    :return Query: the sorted query
    """
    model = self.model
    for option in sort_info:
        field = option['field']
        if not hasattr(model, field):
            raise InvalidSort("{} has no attribute {}".format(model.__name__, field))
        # e.g. getattr(Model.title, 'asc')() -> Model.title.asc()
        order_clause = getattr(getattr(model, field), option['order'])()
        query = query.order_by(order_clause)
    return query
|
[
"def",
"sort_query",
"(",
"self",
",",
"query",
",",
"sort_info",
")",
":",
"for",
"sort_opt",
"in",
"sort_info",
":",
"field",
"=",
"sort_opt",
"[",
"'field'",
"]",
"if",
"not",
"hasattr",
"(",
"self",
".",
"model",
",",
"field",
")",
":",
"raise",
"InvalidSort",
"(",
"\"{} has no attribute {}\"",
".",
"format",
"(",
"self",
".",
"model",
".",
"__name__",
",",
"field",
")",
")",
"query",
"=",
"query",
".",
"order_by",
"(",
"getattr",
"(",
"getattr",
"(",
"self",
".",
"model",
",",
"field",
")",
",",
"sort_opt",
"[",
"'order'",
"]",
")",
"(",
")",
")",
"return",
"query"
] | 42.846154
| 15.538462
|
def select_profile(self, index):
    """Handle selection of a profile from the profile combo box.
    Slot for when profile is selected.
    :param index: The selected item's index
    :type index: int
    """
    chosen = self.profile_combo.itemText(index)
    # Rebuild the resource view from the newly selected profile,
    # then persist the change.
    self.resources_list.clear()
    self.minimum_needs.load_profile(chosen)
    self.clear_resource_list()
    self.populate_resource_list()
    self.minimum_needs.save()
|
[
"def",
"select_profile",
"(",
"self",
",",
"index",
")",
":",
"new_profile",
"=",
"self",
".",
"profile_combo",
".",
"itemText",
"(",
"index",
")",
"self",
".",
"resources_list",
".",
"clear",
"(",
")",
"self",
".",
"minimum_needs",
".",
"load_profile",
"(",
"new_profile",
")",
"self",
".",
"clear_resource_list",
"(",
")",
"self",
".",
"populate_resource_list",
"(",
")",
"self",
".",
"minimum_needs",
".",
"save",
"(",
")"
] | 31.857143
| 11.571429
|
def calc_gradient(beta,
design,
alt_IDs,
rows_to_obs,
rows_to_alts,
choice_vector,
utility_transform,
transform_first_deriv_c,
transform_first_deriv_v,
transform_deriv_alpha,
intercept_params,
shape_params,
ridge,
weights):
"""
Parameters
----------
beta : 1D ndarray.
All elements should by ints, floats, or longs. Should have 1 element
for each utility coefficient being estimated (i.e. num_features).
design : 2D ndarray.
Tjere should be one row per observation per available alternative.
There should be one column per utility coefficient being estimated. All
elements should be ints, floats, or longs.
alt_IDs : 1D ndarray.
All elements should be ints. There should be one row per obervation per
available alternative for the given observation. Elements denote the
alternative corresponding to the given row of the design matrix.
rows_to_obs : 2D scipy sparse array.
There should be one row per observation per available alternative and
one column per observation. This matrix maps the rows of the design
matrix to the unique observations (on the columns).
rows_to_alts : 2D scipy sparse array
There should be one row per observation per available alternative and
one column per possible alternative. This matrix maps the rows of the
design matrix to the possible alternatives for this dataset.
choice_vector : 1D ndarray.
All elements should be either ones or zeros. There should be one row
per observation per available alternative for the given observation.
Elements denote the alternative which is chosen by the given
observation with a 1 and a zero otherwise.
utility_transform : callable.
Must accept a 1D array of systematic utility values, a 1D array of
alternative IDs, and miscellaneous args and kwargs. Should return a 1D
array whose elements contain the appropriately transformed systematic
utility values, based on the current model being evaluated.
transform_first_deriv_c : callable.
Must accept a 1D array of systematic utility values, a 1D array of
alternative IDs, the `rows_to_alts` array, (shape parameters if there
are any) and miscellaneous args and kwargs. Should return a 2D matrix
or sparse array whose elements contain the derivative of the tranformed
utility vector with respect to the vector of shape parameters. The
dimensions of the returned vector should be
`(design.shape[0], num_alternatives)`. If there are no shape parameters
then the callable should return None.
transform_first_deriv_v : callable.
Must accept a 1D array of systematic utility values, a 1D array of
alternative IDs, (shape parameters if there are any) and miscellaneous
args and kwargs. Should return a 2D array whose elements contain the
derivative of the tranformed utility vector with respect to the vector
of systematic utilities. The dimensions of the returned vector should
be `(design.shape[0], design.shape[0])`.
transform_deriv_alpha : callable.
Must accept a 1D array of systematic utility values, a 1D array of
alternative IDs, the `rows_to_alts` array, (intercept parameters if
there are any) and miscellaneous args and kwargs. Should return a 2D
array whose elements contain the derivative of the tranformed utility
vector with respect to the vector of shape parameters. The dimensions
of the returned vector should be
`(design.shape[0], num_alternatives - 1)`. If there are no intercept
parameters, the callable should return None.
intercept_params : 1D numpy array or None.
If an array, each element should be an int, float, or long. For
identifiability, there should be J- 1 elements where J is the total
number of observed alternatives for this dataset. Default == None.
shape_params : 1D ndarray or None.
If an array, each element should be an int, float, or long. There should
be one value per shape parameter of the model being used.
Default == None.
ridge : int, float, long, or None.
Determines whether or not ridge regression is performed. If an int,
float or long is passed, then that scalar determines the ridge penalty
for the optimization. Default = None.
weights : 1D ndarray or None.
Allows for the calculation of weighted log-likelihoods. The weights can
represent various things. In stratified samples, the weights may be
the proportion of the observations in a given strata for a sample in
relation to the proportion of observations in that strata in the
population. In latent class models, the weights may be the probability
of being a particular class.
Returns
-------
gradient : 1D ndarray.
It's shape is (beta.shape[0], ). It is the second derivative of the log-
likelihood with respect to beta.
"""
# Calculate the systematic utility for each alternative for each individual
sys_utilities = design.dot(beta)
# Calculate the probability of each individual choosing each available
# alternative for that individual.
long_probs = calc_probabilities(beta,
design,
alt_IDs,
rows_to_obs,
rows_to_alts,
utility_transform,
intercept_params=intercept_params,
shape_params=shape_params,
return_long_probs=True)
# Calculate the weights for the sample
if weights is None:
weights = 1
##########
# Get the required matrices
##########
# Differentiate the transformed utilities with respect to the shape params
# Note that dh_dc should be a sparse array
dh_dc = transform_first_deriv_c(sys_utilities, alt_IDs,
rows_to_alts, shape_params)
# Differentiate the transformed utilities by the intercept params
# Note that dh_d_alpha should be a sparse array
dh_d_alpha = transform_deriv_alpha(sys_utilities, alt_IDs,
rows_to_alts, intercept_params)
# Differentiate the transformed utilities with respect to the systematic
# utilities. Note that dh_dv should be a sparse matrix
dh_dv = transform_first_deriv_v(sys_utilities, alt_IDs,
rows_to_alts, shape_params)
# Differentiate the transformed utilities with respect to the utility
# coefficients. Note that dh_db should be a dense **matrix**, not a dense
# 2D array. This is because the dot product of a 2D scipy sparse array and
# a 2D dense numpy array yields a 2D dense numpy matrix
dh_db = dh_dv.dot(design)
# Differentiate the log likelihood w/ respect to the transformed utilities
# Note that d_ll_dh will be a dense 2D numpy array.
d_ll_dh = np.multiply(weights, choice_vector - long_probs)[np.newaxis, :]
# Calculate the gradient of the log-likelihood with respect to the betas
d_ll_d_beta = d_ll_dh.dot(dh_db)
##########
# Form and return the gradient
##########
if shape_params is not None and intercept_params is not None:
# Note that we use d_ll_dh * dh_dc and d_ll_dh * dh_d_alpha because
# that is how one computes the dot product between a dense 2D numpy
# array and a 2D sparse matrix. This is due to numpy ndarrays and
# scipy sparse matrices not playing nicely together. However, numpy
# ndarrays and numpy matrices can be dot producted together,
# hence d_ll_dh.dot(dh_db).
# Note that the 'np.asarray' is because dll_dh * dh_dc will be a row
# matrix, but we want a 1D numpy array.
gradient = np.concatenate((np.asarray(d_ll_dh * hstack((dh_dc,
dh_d_alpha),
format='csr')),
d_ll_d_beta), axis=1).ravel()
params = np.concatenate((shape_params, intercept_params, beta),
axis=0)
elif shape_params is not None and intercept_params is None:
# Note that we use d_ll_dh * dh_dc because that is how one computes
# the dot product between a dense 2D numpy array and a 2D sparse matrix
# This is due to numpy ndarrays and scipy sparse matrices not playing
# nicely together. However, numpy ndarrays and numpy matrices can be
# dot producted together, hence d_ll_dh.dot(dh_db).
# Note that the 'np.asarray' is because dll_dh * dh_dc will be a row
# matrix, but we want a 1D numpy array.
gradient = np.concatenate((np.asarray(d_ll_dh * dh_dc), d_ll_d_beta),
axis=1).ravel()
params = np.concatenate((shape_params, beta), axis=0)
elif shape_params is None and intercept_params is not None:
# Note that we use d_ll_dh * dh_d_alpha because that's how one computes
# the dot product between a dense 2D numpy array and a 2D sparse matrix
# This is due to numpy ndarrays and scipy sparse matrices not playing
# nicely together. However, numpy ndarrays and numpy matrices can be
# dot producted together, hence d_ll_dh.dot(dh_db).
# Note 'np.asarray' is used because dll_dh * dh_d_alpha will be a row
# matrix, but we want a 1D numpy array.
gradient = np.concatenate((np.asarray(d_ll_dh * dh_d_alpha),
d_ll_d_beta), axis=1).ravel()
params = np.concatenate((intercept_params, beta), axis=0)
else:
gradient = d_ll_d_beta.ravel()
params = beta
if ridge is not None:
gradient -= 2 * ridge * params
return gradient
|
[
"def",
"calc_gradient",
"(",
"beta",
",",
"design",
",",
"alt_IDs",
",",
"rows_to_obs",
",",
"rows_to_alts",
",",
"choice_vector",
",",
"utility_transform",
",",
"transform_first_deriv_c",
",",
"transform_first_deriv_v",
",",
"transform_deriv_alpha",
",",
"intercept_params",
",",
"shape_params",
",",
"ridge",
",",
"weights",
")",
":",
"# Calculate the systematic utility for each alternative for each individual",
"sys_utilities",
"=",
"design",
".",
"dot",
"(",
"beta",
")",
"# Calculate the probability of each individual choosing each available",
"# alternative for that individual.",
"long_probs",
"=",
"calc_probabilities",
"(",
"beta",
",",
"design",
",",
"alt_IDs",
",",
"rows_to_obs",
",",
"rows_to_alts",
",",
"utility_transform",
",",
"intercept_params",
"=",
"intercept_params",
",",
"shape_params",
"=",
"shape_params",
",",
"return_long_probs",
"=",
"True",
")",
"# Calculate the weights for the sample",
"if",
"weights",
"is",
"None",
":",
"weights",
"=",
"1",
"##########",
"# Get the required matrices",
"##########",
"# Differentiate the transformed utilities with respect to the shape params",
"# Note that dh_dc should be a sparse array",
"dh_dc",
"=",
"transform_first_deriv_c",
"(",
"sys_utilities",
",",
"alt_IDs",
",",
"rows_to_alts",
",",
"shape_params",
")",
"# Differentiate the transformed utilities by the intercept params",
"# Note that dh_d_alpha should be a sparse array",
"dh_d_alpha",
"=",
"transform_deriv_alpha",
"(",
"sys_utilities",
",",
"alt_IDs",
",",
"rows_to_alts",
",",
"intercept_params",
")",
"# Differentiate the transformed utilities with respect to the systematic",
"# utilities. Note that dh_dv should be a sparse matrix",
"dh_dv",
"=",
"transform_first_deriv_v",
"(",
"sys_utilities",
",",
"alt_IDs",
",",
"rows_to_alts",
",",
"shape_params",
")",
"# Differentiate the transformed utilities with respect to the utility",
"# coefficients. Note that dh_db should be a dense **matrix**, not a dense",
"# 2D array. This is because the dot product of a 2D scipy sparse array and",
"# a 2D dense numpy array yields a 2D dense numpy matrix",
"dh_db",
"=",
"dh_dv",
".",
"dot",
"(",
"design",
")",
"# Differentiate the log likelihood w/ respect to the transformed utilities",
"# Note that d_ll_dh will be a dense 2D numpy array.",
"d_ll_dh",
"=",
"np",
".",
"multiply",
"(",
"weights",
",",
"choice_vector",
"-",
"long_probs",
")",
"[",
"np",
".",
"newaxis",
",",
":",
"]",
"# Calculate the gradient of the log-likelihood with respect to the betas",
"d_ll_d_beta",
"=",
"d_ll_dh",
".",
"dot",
"(",
"dh_db",
")",
"##########",
"# Form and return the gradient",
"##########",
"if",
"shape_params",
"is",
"not",
"None",
"and",
"intercept_params",
"is",
"not",
"None",
":",
"# Note that we use d_ll_dh * dh_dc and d_ll_dh * dh_d_alpha because",
"# that is how one computes the dot product between a dense 2D numpy",
"# array and a 2D sparse matrix. This is due to numpy ndarrays and",
"# scipy sparse matrices not playing nicely together. However, numpy",
"# ndarrays and numpy matrices can be dot producted together,",
"# hence d_ll_dh.dot(dh_db).",
"# Note that the 'np.asarray' is because dll_dh * dh_dc will be a row",
"# matrix, but we want a 1D numpy array.",
"gradient",
"=",
"np",
".",
"concatenate",
"(",
"(",
"np",
".",
"asarray",
"(",
"d_ll_dh",
"*",
"hstack",
"(",
"(",
"dh_dc",
",",
"dh_d_alpha",
")",
",",
"format",
"=",
"'csr'",
")",
")",
",",
"d_ll_d_beta",
")",
",",
"axis",
"=",
"1",
")",
".",
"ravel",
"(",
")",
"params",
"=",
"np",
".",
"concatenate",
"(",
"(",
"shape_params",
",",
"intercept_params",
",",
"beta",
")",
",",
"axis",
"=",
"0",
")",
"elif",
"shape_params",
"is",
"not",
"None",
"and",
"intercept_params",
"is",
"None",
":",
"# Note that we use d_ll_dh * dh_dc because that is how one computes",
"# the dot product between a dense 2D numpy array and a 2D sparse matrix",
"# This is due to numpy ndarrays and scipy sparse matrices not playing",
"# nicely together. However, numpy ndarrays and numpy matrices can be",
"# dot producted together, hence d_ll_dh.dot(dh_db).",
"# Note that the 'np.asarray' is because dll_dh * dh_dc will be a row",
"# matrix, but we want a 1D numpy array.",
"gradient",
"=",
"np",
".",
"concatenate",
"(",
"(",
"np",
".",
"asarray",
"(",
"d_ll_dh",
"*",
"dh_dc",
")",
",",
"d_ll_d_beta",
")",
",",
"axis",
"=",
"1",
")",
".",
"ravel",
"(",
")",
"params",
"=",
"np",
".",
"concatenate",
"(",
"(",
"shape_params",
",",
"beta",
")",
",",
"axis",
"=",
"0",
")",
"elif",
"shape_params",
"is",
"None",
"and",
"intercept_params",
"is",
"not",
"None",
":",
"# Note that we use d_ll_dh * dh_d_alpha because that's how one computes",
"# the dot product between a dense 2D numpy array and a 2D sparse matrix",
"# This is due to numpy ndarrays and scipy sparse matrices not playing",
"# nicely together. However, numpy ndarrays and numpy matrices can be",
"# dot producted together, hence d_ll_dh.dot(dh_db).",
"# Note 'np.asarray' is used because dll_dh * dh_d_alpha will be a row",
"# matrix, but we want a 1D numpy array.",
"gradient",
"=",
"np",
".",
"concatenate",
"(",
"(",
"np",
".",
"asarray",
"(",
"d_ll_dh",
"*",
"dh_d_alpha",
")",
",",
"d_ll_d_beta",
")",
",",
"axis",
"=",
"1",
")",
".",
"ravel",
"(",
")",
"params",
"=",
"np",
".",
"concatenate",
"(",
"(",
"intercept_params",
",",
"beta",
")",
",",
"axis",
"=",
"0",
")",
"else",
":",
"gradient",
"=",
"d_ll_d_beta",
".",
"ravel",
"(",
")",
"params",
"=",
"beta",
"if",
"ridge",
"is",
"not",
"None",
":",
"gradient",
"-=",
"2",
"*",
"ridge",
"*",
"params",
"return",
"gradient"
] | 51.639594
| 23.964467
|
def listen(self, timeout=10):
    """
    Listen for incoming messages. Timeout is used to check if the server must be switched off.

    :param timeout: Socket Timeout in seconds
    """
    self._socket.settimeout(float(timeout))
    while not self.stopped.isSet():
        try:
            data, client_address = self._socket.recvfrom(4096)
            # Addresses longer than 2 elements (e.g. IPv6 4-tuples) are
            # normalized to a plain (host, port) pair.
            if len(client_address) > 2:
                client_address = (client_address[0], client_address[1])
        except socket.timeout:
            # A timeout just means "re-check the stop flag" — not an error.
            continue
        except Exception as e:
            # BUG FIX: the original used ``collections.Callable``, which was
            # removed in Python 3.10 (it lives in ``collections.abc``). The
            # builtin ``callable()`` performs the same check on all versions.
            if self._cb_ignore_listen_exception is not None and callable(self._cb_ignore_listen_exception):
                if self._cb_ignore_listen_exception(e, self):
                    continue
            raise
        try:
            serializer = Serializer()
            message = serializer.deserialize(data, client_address)
            if isinstance(message, int):
                # Deserialization failed; the returned int is the error code
                # to send back in a RST message.
                logger.error("receive_datagram - BAD REQUEST")

                rst = Message()
                rst.destination = client_address
                rst.type = defines.Types["RST"]
                rst.code = message
                rst.mid = self._messageLayer.fetch_mid()
                self.send_datagram(rst)
                continue

            logger.debug("receive_datagram - " + str(message))
            if isinstance(message, Request):
                transaction = self._messageLayer.receive_request(message)
                if transaction.request.duplicated and transaction.completed:
                    logger.debug("message duplicated, transaction completed")
                    if transaction.response is not None:
                        self.send_datagram(transaction.response)
                    continue
                elif transaction.request.duplicated and not transaction.completed:
                    logger.debug("message duplicated, transaction NOT completed")
                    self._send_ack(transaction)
                    continue
                # Handle each request on its own thread so listen() keeps
                # receiving datagrams.
                args = (transaction, )
                t = threading.Thread(target=self.receive_request, args=args)
                t.start()
            # self.receive_datagram(data, client_address)
            elif isinstance(message, Response):
                logger.error("Received response from %s", message.source)

            else:  # is Message
                transaction = self._messageLayer.receive_empty(message)
                if transaction is not None:
                    with transaction:
                        self._blockLayer.receive_empty(message, transaction)
                        self._observeLayer.receive_empty(message, transaction)

        except RuntimeError:
            logger.exception("Exception with Executor")
    self._socket.close()
|
[
"def",
"listen",
"(",
"self",
",",
"timeout",
"=",
"10",
")",
":",
"self",
".",
"_socket",
".",
"settimeout",
"(",
"float",
"(",
"timeout",
")",
")",
"while",
"not",
"self",
".",
"stopped",
".",
"isSet",
"(",
")",
":",
"try",
":",
"data",
",",
"client_address",
"=",
"self",
".",
"_socket",
".",
"recvfrom",
"(",
"4096",
")",
"if",
"len",
"(",
"client_address",
")",
">",
"2",
":",
"client_address",
"=",
"(",
"client_address",
"[",
"0",
"]",
",",
"client_address",
"[",
"1",
"]",
")",
"except",
"socket",
".",
"timeout",
":",
"continue",
"except",
"Exception",
"as",
"e",
":",
"if",
"self",
".",
"_cb_ignore_listen_exception",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"self",
".",
"_cb_ignore_listen_exception",
",",
"collections",
".",
"Callable",
")",
":",
"if",
"self",
".",
"_cb_ignore_listen_exception",
"(",
"e",
",",
"self",
")",
":",
"continue",
"raise",
"try",
":",
"serializer",
"=",
"Serializer",
"(",
")",
"message",
"=",
"serializer",
".",
"deserialize",
"(",
"data",
",",
"client_address",
")",
"if",
"isinstance",
"(",
"message",
",",
"int",
")",
":",
"logger",
".",
"error",
"(",
"\"receive_datagram - BAD REQUEST\"",
")",
"rst",
"=",
"Message",
"(",
")",
"rst",
".",
"destination",
"=",
"client_address",
"rst",
".",
"type",
"=",
"defines",
".",
"Types",
"[",
"\"RST\"",
"]",
"rst",
".",
"code",
"=",
"message",
"rst",
".",
"mid",
"=",
"self",
".",
"_messageLayer",
".",
"fetch_mid",
"(",
")",
"self",
".",
"send_datagram",
"(",
"rst",
")",
"continue",
"logger",
".",
"debug",
"(",
"\"receive_datagram - \"",
"+",
"str",
"(",
"message",
")",
")",
"if",
"isinstance",
"(",
"message",
",",
"Request",
")",
":",
"transaction",
"=",
"self",
".",
"_messageLayer",
".",
"receive_request",
"(",
"message",
")",
"if",
"transaction",
".",
"request",
".",
"duplicated",
"and",
"transaction",
".",
"completed",
":",
"logger",
".",
"debug",
"(",
"\"message duplicated, transaction completed\"",
")",
"if",
"transaction",
".",
"response",
"is",
"not",
"None",
":",
"self",
".",
"send_datagram",
"(",
"transaction",
".",
"response",
")",
"continue",
"elif",
"transaction",
".",
"request",
".",
"duplicated",
"and",
"not",
"transaction",
".",
"completed",
":",
"logger",
".",
"debug",
"(",
"\"message duplicated, transaction NOT completed\"",
")",
"self",
".",
"_send_ack",
"(",
"transaction",
")",
"continue",
"args",
"=",
"(",
"transaction",
",",
")",
"t",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"receive_request",
",",
"args",
"=",
"args",
")",
"t",
".",
"start",
"(",
")",
"# self.receive_datagram(data, client_address)",
"elif",
"isinstance",
"(",
"message",
",",
"Response",
")",
":",
"logger",
".",
"error",
"(",
"\"Received response from %s\"",
",",
"message",
".",
"source",
")",
"else",
":",
"# is Message",
"transaction",
"=",
"self",
".",
"_messageLayer",
".",
"receive_empty",
"(",
"message",
")",
"if",
"transaction",
"is",
"not",
"None",
":",
"with",
"transaction",
":",
"self",
".",
"_blockLayer",
".",
"receive_empty",
"(",
"message",
",",
"transaction",
")",
"self",
".",
"_observeLayer",
".",
"receive_empty",
"(",
"message",
",",
"transaction",
")",
"except",
"RuntimeError",
":",
"logger",
".",
"exception",
"(",
"\"Exception with Executor\"",
")",
"self",
".",
"_socket",
".",
"close",
"(",
")"
] | 47.709677
| 21.096774
|
def select(self, selector):
    """
    Like :meth:`find_all`, but takes a CSS selector string as input.
    """
    # Defer the actual ``.select(selector)`` call to each wrapped element.
    call_select = operator.methodcaller('select', selector)
    return self._wrap_multi(call_select)
|
[
"def",
"select",
"(",
"self",
",",
"selector",
")",
":",
"op",
"=",
"operator",
".",
"methodcaller",
"(",
"'select'",
",",
"selector",
")",
"return",
"self",
".",
"_wrap_multi",
"(",
"op",
")"
] | 35
| 10.666667
|
def get_conn(self):
    """
    Retrieves connection to Cloud Speech.

    :return: Google Cloud Speech client object.
    :rtype: google.cloud.speech_v1.SpeechClient
    """
    # Build the client lazily on first use and cache it on the instance.
    client = self._client
    if not client:
        client = SpeechClient(credentials=self._get_credentials())
        self._client = client
    return client
|
[
"def",
"get_conn",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_client",
":",
"self",
".",
"_client",
"=",
"SpeechClient",
"(",
"credentials",
"=",
"self",
".",
"_get_credentials",
"(",
")",
")",
"return",
"self",
".",
"_client"
] | 31.9
| 14.9
|
def run_transaction(self, command_list, do_commit=True):
        '''This can be used to stage multiple commands and roll back the transaction if an error occurs. This is useful
        if you want to remove multiple records in multiple tables for one entity but do not want the deletion to occur
        if the entity is tied to table not specified in the list of commands. Performing this as a transaction avoids
        the situation where the records are partially removed. If do_commit is false, the entire transaction is cancelled.

        NOTE(review): this method is an abandoned stub -- see the comments below.
        '''
        pass
        # I decided against creating this for now.
        # It may be more useful to create a stored procedure like in e.g. _create_protein_deletion_stored_procedure
        # in the DDGadmin project and then use callproc
        # NOTE(review): ``pass`` is a no-op, NOT an early exit, so everything
        # below still executes: the injection check can raise, while the
        # assembled ``sql`` string is built but never sent to the database.
        for c in command_list:
            # str.find returns -1 when the substring is absent, so this fires
            # whenever a command contains ";" or "\G".
            if c.find(";") != -1 or c.find("\\G") != -1:
                # Catches *some* injections
                raise Exception("The SQL command '%s' contains a semi-colon or \\G. This is a potential SQL injection." % c)
        # Dead code in this stub: ``sql`` is assembled but never executed.
        if do_commit:
            sql = "START TRANSACTION;\n%s;\nCOMMIT" % "\n".join(command_list)
        else:
            sql = "START TRANSACTION;\n%s;" % "\n".join(command_list)
        #print(sql)
        return
|
[
"def",
"run_transaction",
"(",
"self",
",",
"command_list",
",",
"do_commit",
"=",
"True",
")",
":",
"pass",
"# I decided against creating this for now.",
"# It may be more useful to create a stored procedure like in e.g. _create_protein_deletion_stored_procedure",
"# in the DDGadmin project and then use callproc",
"for",
"c",
"in",
"command_list",
":",
"if",
"c",
".",
"find",
"(",
"\";\"",
")",
"!=",
"-",
"1",
"or",
"c",
".",
"find",
"(",
"\"\\\\G\"",
")",
"!=",
"-",
"1",
":",
"# Catches *some* injections",
"raise",
"Exception",
"(",
"\"The SQL command '%s' contains a semi-colon or \\\\G. This is a potential SQL injection.\"",
"%",
"c",
")",
"if",
"do_commit",
":",
"sql",
"=",
"\"START TRANSACTION;\\n%s;\\nCOMMIT\"",
"%",
"\"\\n\"",
".",
"join",
"(",
"command_list",
")",
"else",
":",
"sql",
"=",
"\"START TRANSACTION;\\n%s;\"",
"%",
"\"\\n\"",
".",
"join",
"(",
"command_list",
")",
"#print(sql)",
"return"
] | 59.285714
| 39.380952
|
def get(self, name):
    """Fetch variable `name` from the MATLAB workspace.

    Parameters
    ----------
    name : str
        Name of the variable in MATLAB workspace.

    Returns
    -------
    array_like
        Value of the variable `name`.
    """
    mx_array = self._libeng.engGetVariable(self._ep, name)
    value = mxarray_to_ndarray(self._libmx, mx_array)
    # Free the engine's mxArray copy once it has been converted.
    self._libmx.mxDestroyArray(mx_array)
    return value
|
[
"def",
"get",
"(",
"self",
",",
"name",
")",
":",
"pm",
"=",
"self",
".",
"_libeng",
".",
"engGetVariable",
"(",
"self",
".",
"_ep",
",",
"name",
")",
"out",
"=",
"mxarray_to_ndarray",
"(",
"self",
".",
"_libmx",
",",
"pm",
")",
"self",
".",
"_libmx",
".",
"mxDestroyArray",
"(",
"pm",
")",
"return",
"out"
] | 20.045455
| 22.772727
|
def to_python(self):
    """The string ``'True'`` (case insensitive) will be converted
    to ``True``, as will any positive integers.
    """
    raw = self.data
    if isinstance(raw, str):
        # Only the trimmed, case-insensitive word 'true' counts as True.
        return raw.strip().lower() == 'true'
    if isinstance(raw, int):
        return raw > 0
    return bool(raw)
|
[
"def",
"to_python",
"(",
"self",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"data",
",",
"str",
")",
":",
"return",
"self",
".",
"data",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"==",
"'true'",
"if",
"isinstance",
"(",
"self",
".",
"data",
",",
"int",
")",
":",
"return",
"self",
".",
"data",
">",
"0",
"return",
"bool",
"(",
"self",
".",
"data",
")"
] | 34.3
| 10.7
|
def delete(self):
    """Remove this resource or collection (recursive).

    See DAVResource.delete()
    """
    # Read-only providers must never be modified.
    if self.provider.readonly:
        raise DAVError(HTTP_FORBIDDEN)
    target = self._file_path
    shutil.rmtree(target, ignore_errors=False)
    # Drop any dead properties and locks attached to the removed subtree.
    for cleanup in (self.remove_all_properties, self.remove_all_locks):
        cleanup(True)
|
[
"def",
"delete",
"(",
"self",
")",
":",
"if",
"self",
".",
"provider",
".",
"readonly",
":",
"raise",
"DAVError",
"(",
"HTTP_FORBIDDEN",
")",
"shutil",
".",
"rmtree",
"(",
"self",
".",
"_file_path",
",",
"ignore_errors",
"=",
"False",
")",
"self",
".",
"remove_all_properties",
"(",
"True",
")",
"self",
".",
"remove_all_locks",
"(",
"True",
")"
] | 32.8
| 10.3
|
def __add_query_comment(sql):
    """
    Adds a comment line to the query to be executed containing the file and
    line number of the calling function. This is useful for debugging slow
    queries, as the comment will show in the slow query log.

    @type sql: str
    @param sql: sql needing comment
    @return: the sql string prefixed with a source-location comment
    """
    # Inspect the call stack for the originating call
    file_name = ''
    line_number = ''
    caller_frames = inspect.getouterframes(inspect.currentframe())
    for frame in caller_frames:
        # Skip frames inside this DB wrapper module to find the real caller.
        if "ShapewaysDb" not in frame[1]:
            file_name = frame[1]
            line_number = str(frame[2])
            break
    comment = "/*COYOTE: Q_SRC: {file}:{line} */\n".format(file=file_name, line=line_number)
    # BUG FIX: the original line ended with ``return comment + sql,`` -- the
    # trailing comma made this return a 1-tuple instead of the documented str.
    return comment + sql
|
[
"def",
"__add_query_comment",
"(",
"sql",
")",
":",
"# Inspect the call stack for the originating call",
"file_name",
"=",
"''",
"line_number",
"=",
"''",
"caller_frames",
"=",
"inspect",
".",
"getouterframes",
"(",
"inspect",
".",
"currentframe",
"(",
")",
")",
"for",
"frame",
"in",
"caller_frames",
":",
"if",
"\"ShapewaysDb\"",
"not",
"in",
"frame",
"[",
"1",
"]",
":",
"file_name",
"=",
"frame",
"[",
"1",
"]",
"line_number",
"=",
"str",
"(",
"frame",
"[",
"2",
"]",
")",
"break",
"comment",
"=",
"\"/*COYOTE: Q_SRC: {file}:{line} */\\n\"",
".",
"format",
"(",
"file",
"=",
"file_name",
",",
"line",
"=",
"line_number",
")",
"return",
"comment",
"+",
"sql",
","
] | 37.181818
| 20.727273
|
async def check_ping_timeout(self):
    """Make sure the client is still sending pings.

    This helps detect disconnections for long-polling clients.
    """
    if self.closed:
        raise exceptions.SocketIsClosedError()
    # The client is healthy while its last ping is within the configured
    # interval plus a small grace period.
    if time.time() - self.last_ping <= self.server.ping_interval + 5:
        return True
    self.server.logger.info('%s: Client is gone, closing socket',
                            self.sid)
    # Passing abort=False here will cause close() to write a
    # CLOSE packet. This has the effect of updating half-open sockets
    # to their correct state of disconnected
    await self.close(wait=False, abort=False)
    return False
|
[
"async",
"def",
"check_ping_timeout",
"(",
"self",
")",
":",
"if",
"self",
".",
"closed",
":",
"raise",
"exceptions",
".",
"SocketIsClosedError",
"(",
")",
"if",
"time",
".",
"time",
"(",
")",
"-",
"self",
".",
"last_ping",
">",
"self",
".",
"server",
".",
"ping_interval",
"+",
"5",
":",
"self",
".",
"server",
".",
"logger",
".",
"info",
"(",
"'%s: Client is gone, closing socket'",
",",
"self",
".",
"sid",
")",
"# Passing abort=False here will cause close() to write a",
"# CLOSE packet. This has the effect of updating half-open sockets",
"# to their correct state of disconnected",
"await",
"self",
".",
"close",
"(",
"wait",
"=",
"False",
",",
"abort",
"=",
"False",
")",
"return",
"False",
"return",
"True"
] | 45.1875
| 18.4375
|
def summary(self):
    """Get Crates.io summary"""
    # Build the summary endpoint URL and fetch its raw payload.
    endpoint = urijoin(CRATES_API_URL, CATEGORY_SUMMARY)
    return self.fetch(endpoint)
|
[
"def",
"summary",
"(",
"self",
")",
":",
"path",
"=",
"urijoin",
"(",
"CRATES_API_URL",
",",
"CATEGORY_SUMMARY",
")",
"raw_content",
"=",
"self",
".",
"fetch",
"(",
"path",
")",
"return",
"raw_content"
] | 24.714286
| 19.142857
|
def _get_model_cost(self, formula, model):
"""
Given a WCNF formula and a model, the method computes the MaxSAT
cost of the model, i.e. the sum of weights of soft clauses that are
unsatisfied by the model.
:param formula: an input MaxSAT formula
:param model: a satisfying assignment
:type formula: :class:`.WCNF`
:type model: list(int)
:rtype: int
"""
model_set = set(model)
cost = 0
for i, cl in enumerate(formula.soft):
cost += formula.wght[i] if all(l not in model_set for l in filter(lambda l: abs(l) <= self.formula.nv, cl)) else 0
return cost
|
[
"def",
"_get_model_cost",
"(",
"self",
",",
"formula",
",",
"model",
")",
":",
"model_set",
"=",
"set",
"(",
"model",
")",
"cost",
"=",
"0",
"for",
"i",
",",
"cl",
"in",
"enumerate",
"(",
"formula",
".",
"soft",
")",
":",
"cost",
"+=",
"formula",
".",
"wght",
"[",
"i",
"]",
"if",
"all",
"(",
"l",
"not",
"in",
"model_set",
"for",
"l",
"in",
"filter",
"(",
"lambda",
"l",
":",
"abs",
"(",
"l",
")",
"<=",
"self",
".",
"formula",
".",
"nv",
",",
"cl",
")",
")",
"else",
"0",
"return",
"cost"
] | 31.363636
| 23.181818
|
def delete_expired_requests():
    """Delete expired inclusion requests.

    Issues a bulk DELETE against ``InclusionRequest`` and commits the session.
    """
    # BUG FIX: ``filter_by`` accepts only keyword arguments (column=value);
    # passing a SQL criterion expression raises a TypeError. ``filter`` is
    # the Query API for criterion expressions.
    # NOTE(review): the comparison direction looks inverted for "expired"
    # rows (``expiry_date > utcnow`` matches requests that have NOT yet
    # expired) -- confirm the intended semantics before changing it.
    InclusionRequest.query.filter(
        InclusionRequest.expiry_date > datetime.utcnow()).delete()
    db.session.commit()
|
[
"def",
"delete_expired_requests",
"(",
")",
":",
"InclusionRequest",
".",
"query",
".",
"filter_by",
"(",
"InclusionRequest",
".",
"expiry_date",
">",
"datetime",
".",
"utcnow",
"(",
")",
")",
".",
"delete",
"(",
")",
"db",
".",
"session",
".",
"commit",
"(",
")"
] | 40
| 11.2
|
def draw_capitan_stroke_images(self, symbols: List[CapitanSymbol],
                               destination_directory: str,
                               stroke_thicknesses: List[int]) -> None:
    """
    Creates a visual representation of the Capitan strokes by drawing lines that connect the points
    from each stroke of each symbol.

    :param symbols: The list of parsed Capitan-symbols
    :param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
                                  symbol category will be generated automatically
    :param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
                               specified, multiple images will be generated that have a different suffix, e.g.
                               1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16
    """
    # One image is rendered per (symbol, thickness) combination.
    total_image_count = len(symbols) * len(stroke_thicknesses)
    print("Generating {0} images with {1} symbols in {2} different stroke thicknesses ({3})".format(
        total_image_count, len(symbols), len(stroke_thicknesses), stroke_thicknesses))
    print("In directory {0}".format(os.path.abspath(destination_directory)), flush=True)

    progress_bar = tqdm(total=total_image_count, mininterval=0.25, desc="Rendering strokes")
    for file_counter, symbol in enumerate(symbols, start=1):
        # One sub-folder per symbol class.
        class_directory = os.path.join(destination_directory, symbol.symbol_class)
        os.makedirs(class_directory, exist_ok=True)
        base_name = "capitan-{0}-{1}-stroke".format(symbol.symbol_class, file_counter)
        for thickness in stroke_thicknesses:
            export_path = ExportPath(destination_directory, symbol.symbol_class, base_name,
                                     'png', thickness)
            symbol.draw_capitan_stroke_onto_canvas(export_path, thickness, 0)
            progress_bar.update(1)
    progress_bar.close()
|
[
"def",
"draw_capitan_stroke_images",
"(",
"self",
",",
"symbols",
":",
"List",
"[",
"CapitanSymbol",
"]",
",",
"destination_directory",
":",
"str",
",",
"stroke_thicknesses",
":",
"List",
"[",
"int",
"]",
")",
"->",
"None",
":",
"total_number_of_symbols",
"=",
"len",
"(",
"symbols",
")",
"*",
"len",
"(",
"stroke_thicknesses",
")",
"output",
"=",
"\"Generating {0} images with {1} symbols in {2} different stroke thicknesses ({3})\"",
".",
"format",
"(",
"total_number_of_symbols",
",",
"len",
"(",
"symbols",
")",
",",
"len",
"(",
"stroke_thicknesses",
")",
",",
"stroke_thicknesses",
")",
"print",
"(",
"output",
")",
"print",
"(",
"\"In directory {0}\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"destination_directory",
")",
")",
",",
"flush",
"=",
"True",
")",
"progress_bar",
"=",
"tqdm",
"(",
"total",
"=",
"total_number_of_symbols",
",",
"mininterval",
"=",
"0.25",
",",
"desc",
"=",
"\"Rendering strokes\"",
")",
"capitan_file_name_counter",
"=",
"0",
"for",
"symbol",
"in",
"symbols",
":",
"capitan_file_name_counter",
"+=",
"1",
"target_directory",
"=",
"os",
".",
"path",
".",
"join",
"(",
"destination_directory",
",",
"symbol",
".",
"symbol_class",
")",
"os",
".",
"makedirs",
"(",
"target_directory",
",",
"exist_ok",
"=",
"True",
")",
"raw_file_name_without_extension",
"=",
"\"capitan-{0}-{1}-stroke\"",
".",
"format",
"(",
"symbol",
".",
"symbol_class",
",",
"capitan_file_name_counter",
")",
"for",
"stroke_thickness",
"in",
"stroke_thicknesses",
":",
"export_path",
"=",
"ExportPath",
"(",
"destination_directory",
",",
"symbol",
".",
"symbol_class",
",",
"raw_file_name_without_extension",
",",
"'png'",
",",
"stroke_thickness",
")",
"symbol",
".",
"draw_capitan_stroke_onto_canvas",
"(",
"export_path",
",",
"stroke_thickness",
",",
"0",
")",
"progress_bar",
".",
"update",
"(",
"1",
")",
"progress_bar",
".",
"close",
"(",
")"
] | 59.230769
| 37.435897
|
def autoconf(self):
    """Implements Munin Plugin Auto-Configuration Option.

    @return: True if plugin can be auto-configured, False otherwise.
    """
    # Constructing the info object is the probe: a non-None result means the
    # memcached server responded.
    return MemcachedInfo(self._host, self._port, self._socket_file) is not None
|
[
"def",
"autoconf",
"(",
"self",
")",
":",
"serverInfo",
"=",
"MemcachedInfo",
"(",
"self",
".",
"_host",
",",
"self",
".",
"_port",
",",
"self",
".",
"_socket_file",
")",
"return",
"(",
"serverInfo",
"is",
"not",
"None",
")"
] | 38.25
| 18.5
|
def signed_token_generator(private_pem, **kwargs):
    """Build a token generator bound to ``private_pem``.

    :param private_pem: private key in PEM format used to sign tokens.

    Any extra keyword arguments are attached to each request as its claims.
    """
    def generate(request):
        # Stash the configured claims on the request before signing.
        request.claims = kwargs
        return common.generate_signed_token(private_pem, request)

    return generate
|
[
"def",
"signed_token_generator",
"(",
"private_pem",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"signed_token_generator",
"(",
"request",
")",
":",
"request",
".",
"claims",
"=",
"kwargs",
"return",
"common",
".",
"generate_signed_token",
"(",
"private_pem",
",",
"request",
")",
"return",
"signed_token_generator"
] | 28.444444
| 12
|
def ChunkedAttentionSelector(x, params, selector=None, **kwargs):
"""Select which chunks to attend to in chunked attention.
Args:
x: inputs, a list of elements of the form (q, k, v), mask for each chunk.
params: parameters (unused).
selector: a function from chunk_number -> list of chunk numbers that says
which other chunks should be appended to the given one (previous if None).
**kwargs: unused other arguments.
Returns:
a list of elements of the form (q, k', v'), mask' where k', v' and mask' are
concatenations of k, v and identity-extended masks from selected chunks.
"""
del params, kwargs
selector = selector or (lambda x: [] if x < 1 else [x-1])
triples, masks = zip(*x)
(queries, keys, values) = zip(*triples)
result = []
for i in range(len(x)):
selected = selector(i)
# Since keys and values are [batch, length, depth] we concatenate on axis=1.
# We also always include the current key or value at the end.
new_key_list = [keys[j] for j in selected]
new_key = np.concatenate(new_key_list + [keys[i]], axis=1)
new_value = np.concatenate(
[values[j] for j in selected] + [values[i]], axis=1)
# Masks are (1, query-len, key-len) so we concatenate on axis=2.
new_mask_shapes = [(1, queries[i].shape[1], key.shape[1])
for key in new_key_list]
cur_mask = masks[i]
# Masks are all-1 for the added chunks (no masking).
new_mask_list = [np.ones(s, dtype=cur_mask.dtype) for s in new_mask_shapes]
# We still use the current (often causal) mask for the final chunk.
new_mask = np.concatenate(new_mask_list + [cur_mask], axis=2)
result.append(((queries[i], new_key, new_value), new_mask))
return tuple(result)
|
[
"def",
"ChunkedAttentionSelector",
"(",
"x",
",",
"params",
",",
"selector",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"del",
"params",
",",
"kwargs",
"selector",
"=",
"selector",
"or",
"(",
"lambda",
"x",
":",
"[",
"]",
"if",
"x",
"<",
"1",
"else",
"[",
"x",
"-",
"1",
"]",
")",
"triples",
",",
"masks",
"=",
"zip",
"(",
"*",
"x",
")",
"(",
"queries",
",",
"keys",
",",
"values",
")",
"=",
"zip",
"(",
"*",
"triples",
")",
"result",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"x",
")",
")",
":",
"selected",
"=",
"selector",
"(",
"i",
")",
"# Since keys and values are [batch, length, depth] we concatenate on axis=1.",
"# We also always include the current key or value at the end.",
"new_key_list",
"=",
"[",
"keys",
"[",
"j",
"]",
"for",
"j",
"in",
"selected",
"]",
"new_key",
"=",
"np",
".",
"concatenate",
"(",
"new_key_list",
"+",
"[",
"keys",
"[",
"i",
"]",
"]",
",",
"axis",
"=",
"1",
")",
"new_value",
"=",
"np",
".",
"concatenate",
"(",
"[",
"values",
"[",
"j",
"]",
"for",
"j",
"in",
"selected",
"]",
"+",
"[",
"values",
"[",
"i",
"]",
"]",
",",
"axis",
"=",
"1",
")",
"# Masks are (1, query-len, key-len) so we concatenate on axis=2.",
"new_mask_shapes",
"=",
"[",
"(",
"1",
",",
"queries",
"[",
"i",
"]",
".",
"shape",
"[",
"1",
"]",
",",
"key",
".",
"shape",
"[",
"1",
"]",
")",
"for",
"key",
"in",
"new_key_list",
"]",
"cur_mask",
"=",
"masks",
"[",
"i",
"]",
"# Masks are all-1 for the added chunks (no masking).",
"new_mask_list",
"=",
"[",
"np",
".",
"ones",
"(",
"s",
",",
"dtype",
"=",
"cur_mask",
".",
"dtype",
")",
"for",
"s",
"in",
"new_mask_shapes",
"]",
"# We still use the current (often causal) mask for the final chunk.",
"new_mask",
"=",
"np",
".",
"concatenate",
"(",
"new_mask_list",
"+",
"[",
"cur_mask",
"]",
",",
"axis",
"=",
"2",
")",
"result",
".",
"append",
"(",
"(",
"(",
"queries",
"[",
"i",
"]",
",",
"new_key",
",",
"new_value",
")",
",",
"new_mask",
")",
")",
"return",
"tuple",
"(",
"result",
")"
] | 46.324324
| 22.324324
|
def impact_table_pdf_extractor(impact_report, component_metadata):
"""Extracting impact summary of the impact layer.
For PDF generations
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0
"""
# QGIS Composer needed certain context to generate the output
# - Map Settings
# - Substitution maps
# - Element settings, such as icon for picture file or image source
context = QGISComposerContext()
extra_args = component_metadata.extra_args
html_report_component_key = resolve_from_dictionary(
extra_args, ['html_report_component_key'])
# we only have html elements for this
html_frame_elements = [
{
'id': 'impact-report',
'mode': 'text',
'text': jinja2_output_as_string(
impact_report, html_report_component_key),
'margin_left': 10,
'margin_top': 10,
}
]
context.html_frame_elements = html_frame_elements
return context
|
[
"def",
"impact_table_pdf_extractor",
"(",
"impact_report",
",",
"component_metadata",
")",
":",
"# QGIS Composer needed certain context to generate the output",
"# - Map Settings",
"# - Substitution maps",
"# - Element settings, such as icon for picture file or image source",
"context",
"=",
"QGISComposerContext",
"(",
")",
"extra_args",
"=",
"component_metadata",
".",
"extra_args",
"html_report_component_key",
"=",
"resolve_from_dictionary",
"(",
"extra_args",
",",
"[",
"'html_report_component_key'",
"]",
")",
"# we only have html elements for this",
"html_frame_elements",
"=",
"[",
"{",
"'id'",
":",
"'impact-report'",
",",
"'mode'",
":",
"'text'",
",",
"'text'",
":",
"jinja2_output_as_string",
"(",
"impact_report",
",",
"html_report_component_key",
")",
",",
"'margin_left'",
":",
"10",
",",
"'margin_top'",
":",
"10",
",",
"}",
"]",
"context",
".",
"html_frame_elements",
"=",
"html_frame_elements",
"return",
"context"
] | 32
| 20.186047
|
def get_all_response(self, sort_order=None, sort_target='key',
keys_only=False):
"""Get all keys currently stored in etcd."""
range_request = self._build_get_range_request(
key=b'\0',
range_end=b'\0',
sort_order=sort_order,
sort_target=sort_target,
keys_only=keys_only,
)
return self.kvstub.Range(
range_request,
self.timeout,
credentials=self.call_credentials,
metadata=self.metadata
)
|
[
"def",
"get_all_response",
"(",
"self",
",",
"sort_order",
"=",
"None",
",",
"sort_target",
"=",
"'key'",
",",
"keys_only",
"=",
"False",
")",
":",
"range_request",
"=",
"self",
".",
"_build_get_range_request",
"(",
"key",
"=",
"b'\\0'",
",",
"range_end",
"=",
"b'\\0'",
",",
"sort_order",
"=",
"sort_order",
",",
"sort_target",
"=",
"sort_target",
",",
"keys_only",
"=",
"keys_only",
",",
")",
"return",
"self",
".",
"kvstub",
".",
"Range",
"(",
"range_request",
",",
"self",
".",
"timeout",
",",
"credentials",
"=",
"self",
".",
"call_credentials",
",",
"metadata",
"=",
"self",
".",
"metadata",
")"
] | 32
| 13.882353
|
def is_expired(self, time_offset_seconds=0):
"""
Checks to see if the Session Token is expired or not. By default
it will check to see if the Session Token is expired as of the
moment the method is called. However, you can supply an
optional parameter which is the number of seconds of offset
into the future for the check. For example, if you supply
a value of 5, this method will return a True if the Session
Token will be expired 5 seconds from this moment.
:type time_offset_seconds: int
:param time_offset_seconds: The number of seconds into the future
to test the Session Token for expiration.
"""
now = datetime.datetime.utcnow()
if time_offset_seconds:
now = now + datetime.timedelta(seconds=time_offset_seconds)
ts = boto.utils.parse_ts(self.expiration)
delta = ts - now
return delta.total_seconds() <= 0
|
[
"def",
"is_expired",
"(",
"self",
",",
"time_offset_seconds",
"=",
"0",
")",
":",
"now",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"if",
"time_offset_seconds",
":",
"now",
"=",
"now",
"+",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"time_offset_seconds",
")",
"ts",
"=",
"boto",
".",
"utils",
".",
"parse_ts",
"(",
"self",
".",
"expiration",
")",
"delta",
"=",
"ts",
"-",
"now",
"return",
"delta",
".",
"total_seconds",
"(",
")",
"<=",
"0"
] | 47.5
| 17.1
|
def set_game_score(
self,
user_id: Union[int, str],
score: int,
force: bool = None,
disable_edit_message: bool = None,
chat_id: Union[int, str] = None,
message_id: int = None
):
# inline_message_id: str = None): TODO Add inline_message_id
"""Use this method to set the score of the specified user in a game.
Args:
user_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
score (``int``):
New score, must be non-negative.
force (``bool``, *optional*):
Pass True, if the high score is allowed to decrease.
This can be useful when fixing mistakes or banning cheaters.
disable_edit_message (``bool``, *optional*):
Pass True, if the game message should not be automatically edited to include the current scoreboard.
chat_id (``int`` | ``str``, *optional*):
Unique identifier (int) or username (str) of the target chat.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
Required if inline_message_id is not specified.
message_id (``int``, *optional*):
Identifier of the sent message.
Required if inline_message_id is not specified.
Returns:
On success, if the message was sent by the bot, returns the edited :obj:`Message <pyrogram.Message>`,
otherwise returns True.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
:class:`BotScoreNotModified` if the new score is not greater than the user's current score in the chat and force is False.
"""
r = self.send(
functions.messages.SetGameScore(
peer=self.resolve_peer(chat_id),
score=score,
id=message_id,
user_id=self.resolve_peer(user_id),
force=force or None,
edit_message=not disable_edit_message or None
)
)
for i in r.updates:
if isinstance(i, (types.UpdateEditMessage, types.UpdateEditChannelMessage)):
return pyrogram.Message._parse(
self, i.message,
{i.id: i for i in r.users},
{i.id: i for i in r.chats}
)
return True
|
[
"def",
"set_game_score",
"(",
"self",
",",
"user_id",
":",
"Union",
"[",
"int",
",",
"str",
"]",
",",
"score",
":",
"int",
",",
"force",
":",
"bool",
"=",
"None",
",",
"disable_edit_message",
":",
"bool",
"=",
"None",
",",
"chat_id",
":",
"Union",
"[",
"int",
",",
"str",
"]",
"=",
"None",
",",
"message_id",
":",
"int",
"=",
"None",
")",
":",
"# inline_message_id: str = None): TODO Add inline_message_id",
"r",
"=",
"self",
".",
"send",
"(",
"functions",
".",
"messages",
".",
"SetGameScore",
"(",
"peer",
"=",
"self",
".",
"resolve_peer",
"(",
"chat_id",
")",
",",
"score",
"=",
"score",
",",
"id",
"=",
"message_id",
",",
"user_id",
"=",
"self",
".",
"resolve_peer",
"(",
"user_id",
")",
",",
"force",
"=",
"force",
"or",
"None",
",",
"edit_message",
"=",
"not",
"disable_edit_message",
"or",
"None",
")",
")",
"for",
"i",
"in",
"r",
".",
"updates",
":",
"if",
"isinstance",
"(",
"i",
",",
"(",
"types",
".",
"UpdateEditMessage",
",",
"types",
".",
"UpdateEditChannelMessage",
")",
")",
":",
"return",
"pyrogram",
".",
"Message",
".",
"_parse",
"(",
"self",
",",
"i",
".",
"message",
",",
"{",
"i",
".",
"id",
":",
"i",
"for",
"i",
"in",
"r",
".",
"users",
"}",
",",
"{",
"i",
".",
"id",
":",
"i",
"for",
"i",
"in",
"r",
".",
"chats",
"}",
")",
"return",
"True"
] | 41.742424
| 25.545455
|
def LockScanNode(self, path_spec):
"""Marks a scan node as locked.
Args:
path_spec (PathSpec): path specification.
Raises:
KeyError: if the scan node does not exists.
"""
scan_node = self._scan_nodes.get(path_spec, None)
if not scan_node:
raise KeyError('Scan node does not exist.')
self._locked_scan_nodes[path_spec] = scan_node
|
[
"def",
"LockScanNode",
"(",
"self",
",",
"path_spec",
")",
":",
"scan_node",
"=",
"self",
".",
"_scan_nodes",
".",
"get",
"(",
"path_spec",
",",
"None",
")",
"if",
"not",
"scan_node",
":",
"raise",
"KeyError",
"(",
"'Scan node does not exist.'",
")",
"self",
".",
"_locked_scan_nodes",
"[",
"path_spec",
"]",
"=",
"scan_node"
] | 26.071429
| 18.071429
|
def GetMemBalloonMaxMB(self):
'''Undocumented.'''
counter = c_uint()
ret = vmGuestLib.VMGuestLib_GetMemBalloonMaxMB(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value
|
[
"def",
"GetMemBalloonMaxMB",
"(",
"self",
")",
":",
"counter",
"=",
"c_uint",
"(",
")",
"ret",
"=",
"vmGuestLib",
".",
"VMGuestLib_GetMemBalloonMaxMB",
"(",
"self",
".",
"handle",
".",
"value",
",",
"byref",
"(",
"counter",
")",
")",
"if",
"ret",
"!=",
"VMGUESTLIB_ERROR_SUCCESS",
":",
"raise",
"VMGuestLibException",
"(",
"ret",
")",
"return",
"counter",
".",
"value"
] | 45.5
| 22.166667
|
def _write_string(self, string, pos_x, pos_y, height, color, bold=False, align_right=False, depth=0.):
"""Write a string
Writes a string with a simple OpenGL method in the given size at the given position.
:param string: The string to draw
:param pos_x: x starting position
:param pos_y: y starting position
:param height: desired height
:param bold: flag whether to use a bold font
:param depth: the Z layer
"""
stroke_width = height / 8.
if bold:
stroke_width = height / 5.
color.set()
self._set_closest_stroke_width(stroke_width)
glMatrixMode(GL_MODELVIEW)
glPushMatrix()
pos_y -= height
if not align_right:
glTranslatef(pos_x, pos_y, depth)
else:
width = self._string_width(string, height)
glTranslatef(pos_x - width, pos_y, depth)
font_height = 119.5 # According to https://www.opengl.org/resources/libraries/glut/spec3/node78.html
scale_factor = height / font_height
glScalef(scale_factor, scale_factor, scale_factor)
for c in string:
# glTranslatef(0, 0, 0)
glutStrokeCharacter(GLUT_STROKE_ROMAN, ord(c))
# width = glutStrokeWidth(GLUT_STROKE_ROMAN, ord(c))
glPopMatrix()
|
[
"def",
"_write_string",
"(",
"self",
",",
"string",
",",
"pos_x",
",",
"pos_y",
",",
"height",
",",
"color",
",",
"bold",
"=",
"False",
",",
"align_right",
"=",
"False",
",",
"depth",
"=",
"0.",
")",
":",
"stroke_width",
"=",
"height",
"/",
"8.",
"if",
"bold",
":",
"stroke_width",
"=",
"height",
"/",
"5.",
"color",
".",
"set",
"(",
")",
"self",
".",
"_set_closest_stroke_width",
"(",
"stroke_width",
")",
"glMatrixMode",
"(",
"GL_MODELVIEW",
")",
"glPushMatrix",
"(",
")",
"pos_y",
"-=",
"height",
"if",
"not",
"align_right",
":",
"glTranslatef",
"(",
"pos_x",
",",
"pos_y",
",",
"depth",
")",
"else",
":",
"width",
"=",
"self",
".",
"_string_width",
"(",
"string",
",",
"height",
")",
"glTranslatef",
"(",
"pos_x",
"-",
"width",
",",
"pos_y",
",",
"depth",
")",
"font_height",
"=",
"119.5",
"# According to https://www.opengl.org/resources/libraries/glut/spec3/node78.html",
"scale_factor",
"=",
"height",
"/",
"font_height",
"glScalef",
"(",
"scale_factor",
",",
"scale_factor",
",",
"scale_factor",
")",
"for",
"c",
"in",
"string",
":",
"# glTranslatef(0, 0, 0)",
"glutStrokeCharacter",
"(",
"GLUT_STROKE_ROMAN",
",",
"ord",
"(",
"c",
")",
")",
"# width = glutStrokeWidth(GLUT_STROKE_ROMAN, ord(c))",
"glPopMatrix",
"(",
")"
] | 38.735294
| 17.911765
|
def mtxmg(m1, m2, ncol1, nr1r2, ncol2):
"""
Multiply the transpose of a matrix with
another matrix, both of arbitrary size.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/mtxmg_c.html
:param m1: nr1r2 X ncol1 double precision matrix.
:type m1: NxM-Element Array of floats
:param m2: nr1r2 X ncol2 double precision matrix.
:type m2: NxM-Element Array of floats
:param ncol1: Column dimension of m1 and row dimension of mout.
:type ncol1: int
:param nr1r2: Row dimension of m1 and m2.
:type nr1r2: int
:param ncol2: Column dimension of m2.
:type ncol2: int
:return: Transpose of m1 times m2.
:rtype: NxM-Element Array of floats
"""
m1 = stypes.toDoubleMatrix(m1)
m2 = stypes.toDoubleMatrix(m2)
mout = stypes.emptyDoubleMatrix(x=ncol2, y=ncol1)
ncol1 = ctypes.c_int(ncol1)
nr1r2 = ctypes.c_int(nr1r2)
ncol2 = ctypes.c_int(ncol2)
libspice.mtxmg_c(m1, m2, ncol1, nr1r2, ncol2, mout)
return stypes.cMatrixToNumpy(mout)
|
[
"def",
"mtxmg",
"(",
"m1",
",",
"m2",
",",
"ncol1",
",",
"nr1r2",
",",
"ncol2",
")",
":",
"m1",
"=",
"stypes",
".",
"toDoubleMatrix",
"(",
"m1",
")",
"m2",
"=",
"stypes",
".",
"toDoubleMatrix",
"(",
"m2",
")",
"mout",
"=",
"stypes",
".",
"emptyDoubleMatrix",
"(",
"x",
"=",
"ncol2",
",",
"y",
"=",
"ncol1",
")",
"ncol1",
"=",
"ctypes",
".",
"c_int",
"(",
"ncol1",
")",
"nr1r2",
"=",
"ctypes",
".",
"c_int",
"(",
"nr1r2",
")",
"ncol2",
"=",
"ctypes",
".",
"c_int",
"(",
"ncol2",
")",
"libspice",
".",
"mtxmg_c",
"(",
"m1",
",",
"m2",
",",
"ncol1",
",",
"nr1r2",
",",
"ncol2",
",",
"mout",
")",
"return",
"stypes",
".",
"cMatrixToNumpy",
"(",
"mout",
")"
] | 35.571429
| 11.142857
|
def gapsplit(args):
"""
%prog gapsplit gffile > split.gff
Read in the gff (normally generated by GMAP) and print it out after splitting
each feature into one parent and multiple child features based on alignment
information encoded in CIGAR string.
"""
p = OptionParser(gapsplit.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
gffile, = args
gff = Gff(gffile)
for g in gff:
if re.match("EST_match", g.type):
"""
hacky implementation:
since the standard urlparse.parse_qsl() replaces all "+" symbols with spaces
we will write a regex to check either for a "-" or a " " (space)
"""
match = re.search(r'\S+ (\d+) \d+ ([\s{1}\-])', g.attributes["Target"][0])
if match.group(2) == "-":
strand = match.group(2)
else:
strand = "+"
g.attributes["Target"][0] = " ".join(str(x) \
for x in [g.attributes["Target"][0].rstrip(), strand])
if g.strand == "?":
g.strand = strand
else:
match = re.match(r'\S+ (\d+) \d+', g.attributes["Target"][0])
target_start = int(match.group(1))
re_cigar = re.compile(r'(\D+)(\d+)');
cigar = g.attributes["Gap"][0].split(" ")
g.attributes["Gap"] = None
parts = []
if g.strand == "+":
for event in cigar:
match = re_cigar.match(event)
op, count = match.group(1), int(match.group(2))
if op in "IHS":
target_start += count
elif op in "DN":
g.start += count
elif op == "P":
continue
else:
parts.append([g.start, g.start + count - 1, \
target_start, target_start + count - 1])
g.start += count
target_start += count
else:
for event in cigar:
match = re_cigar.match(event)
op, count = match.group(1), int(match.group(2))
if op in "IHS":
target_start += count
elif op in "DN":
g.end -= count
elif op == "P":
continue
else:
parts.append([g.end - count + 1, g.end, \
target_start, target_start + count - 1])
g.end -= count
target_start += count
g.update_attributes()
print(g)
parent = g.attributes["Name"][0]
g.type = "match_part"
g.attributes.clear()
for part in parts:
g.start, g.end = part[0], part[1]
g.score, g.strand, g.phase = ".", g.strand, "."
if re.match("EST", g.type):
target_list = [parent, part[2], part[3], g.strand]
else:
target_list = [parent, part[2], part[3]]
target = " ".join(str(x) for x in target_list)
g.attributes["Parent"] = [parent]
g.attributes["Target"] = [target]
g.update_attributes()
print(g)
|
[
"def",
"gapsplit",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"gapsplit",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"gffile",
",",
"=",
"args",
"gff",
"=",
"Gff",
"(",
"gffile",
")",
"for",
"g",
"in",
"gff",
":",
"if",
"re",
".",
"match",
"(",
"\"EST_match\"",
",",
"g",
".",
"type",
")",
":",
"\"\"\"\n hacky implementation:\n since the standard urlparse.parse_qsl() replaces all \"+\" symbols with spaces\n we will write a regex to check either for a \"-\" or a \" \" (space)\n \"\"\"",
"match",
"=",
"re",
".",
"search",
"(",
"r'\\S+ (\\d+) \\d+ ([\\s{1}\\-])'",
",",
"g",
".",
"attributes",
"[",
"\"Target\"",
"]",
"[",
"0",
"]",
")",
"if",
"match",
".",
"group",
"(",
"2",
")",
"==",
"\"-\"",
":",
"strand",
"=",
"match",
".",
"group",
"(",
"2",
")",
"else",
":",
"strand",
"=",
"\"+\"",
"g",
".",
"attributes",
"[",
"\"Target\"",
"]",
"[",
"0",
"]",
"=",
"\" \"",
".",
"join",
"(",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"[",
"g",
".",
"attributes",
"[",
"\"Target\"",
"]",
"[",
"0",
"]",
".",
"rstrip",
"(",
")",
",",
"strand",
"]",
")",
"if",
"g",
".",
"strand",
"==",
"\"?\"",
":",
"g",
".",
"strand",
"=",
"strand",
"else",
":",
"match",
"=",
"re",
".",
"match",
"(",
"r'\\S+ (\\d+) \\d+'",
",",
"g",
".",
"attributes",
"[",
"\"Target\"",
"]",
"[",
"0",
"]",
")",
"target_start",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
"re_cigar",
"=",
"re",
".",
"compile",
"(",
"r'(\\D+)(\\d+)'",
")",
"cigar",
"=",
"g",
".",
"attributes",
"[",
"\"Gap\"",
"]",
"[",
"0",
"]",
".",
"split",
"(",
"\" \"",
")",
"g",
".",
"attributes",
"[",
"\"Gap\"",
"]",
"=",
"None",
"parts",
"=",
"[",
"]",
"if",
"g",
".",
"strand",
"==",
"\"+\"",
":",
"for",
"event",
"in",
"cigar",
":",
"match",
"=",
"re_cigar",
".",
"match",
"(",
"event",
")",
"op",
",",
"count",
"=",
"match",
".",
"group",
"(",
"1",
")",
",",
"int",
"(",
"match",
".",
"group",
"(",
"2",
")",
")",
"if",
"op",
"in",
"\"IHS\"",
":",
"target_start",
"+=",
"count",
"elif",
"op",
"in",
"\"DN\"",
":",
"g",
".",
"start",
"+=",
"count",
"elif",
"op",
"==",
"\"P\"",
":",
"continue",
"else",
":",
"parts",
".",
"append",
"(",
"[",
"g",
".",
"start",
",",
"g",
".",
"start",
"+",
"count",
"-",
"1",
",",
"target_start",
",",
"target_start",
"+",
"count",
"-",
"1",
"]",
")",
"g",
".",
"start",
"+=",
"count",
"target_start",
"+=",
"count",
"else",
":",
"for",
"event",
"in",
"cigar",
":",
"match",
"=",
"re_cigar",
".",
"match",
"(",
"event",
")",
"op",
",",
"count",
"=",
"match",
".",
"group",
"(",
"1",
")",
",",
"int",
"(",
"match",
".",
"group",
"(",
"2",
")",
")",
"if",
"op",
"in",
"\"IHS\"",
":",
"target_start",
"+=",
"count",
"elif",
"op",
"in",
"\"DN\"",
":",
"g",
".",
"end",
"-=",
"count",
"elif",
"op",
"==",
"\"P\"",
":",
"continue",
"else",
":",
"parts",
".",
"append",
"(",
"[",
"g",
".",
"end",
"-",
"count",
"+",
"1",
",",
"g",
".",
"end",
",",
"target_start",
",",
"target_start",
"+",
"count",
"-",
"1",
"]",
")",
"g",
".",
"end",
"-=",
"count",
"target_start",
"+=",
"count",
"g",
".",
"update_attributes",
"(",
")",
"print",
"(",
"g",
")",
"parent",
"=",
"g",
".",
"attributes",
"[",
"\"Name\"",
"]",
"[",
"0",
"]",
"g",
".",
"type",
"=",
"\"match_part\"",
"g",
".",
"attributes",
".",
"clear",
"(",
")",
"for",
"part",
"in",
"parts",
":",
"g",
".",
"start",
",",
"g",
".",
"end",
"=",
"part",
"[",
"0",
"]",
",",
"part",
"[",
"1",
"]",
"g",
".",
"score",
",",
"g",
".",
"strand",
",",
"g",
".",
"phase",
"=",
"\".\"",
",",
"g",
".",
"strand",
",",
"\".\"",
"if",
"re",
".",
"match",
"(",
"\"EST\"",
",",
"g",
".",
"type",
")",
":",
"target_list",
"=",
"[",
"parent",
",",
"part",
"[",
"2",
"]",
",",
"part",
"[",
"3",
"]",
",",
"g",
".",
"strand",
"]",
"else",
":",
"target_list",
"=",
"[",
"parent",
",",
"part",
"[",
"2",
"]",
",",
"part",
"[",
"3",
"]",
"]",
"target",
"=",
"\" \"",
".",
"join",
"(",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"target_list",
")",
"g",
".",
"attributes",
"[",
"\"Parent\"",
"]",
"=",
"[",
"parent",
"]",
"g",
".",
"attributes",
"[",
"\"Target\"",
"]",
"=",
"[",
"target",
"]",
"g",
".",
"update_attributes",
"(",
")",
"print",
"(",
"g",
")"
] | 33.298969
| 17.360825
|
def process(self, salt_data, token, opts):
'''
Process events and publish data
'''
parts = salt_data['tag'].split('/')
if len(parts) < 2:
return
# TBD: Simplify these conditional expressions
if parts[1] == 'job':
if parts[3] == 'new':
self.process_new_job_event(salt_data)
if salt_data['data']['fun'] == 'grains.items':
self.minions = {}
elif parts[3] == 'ret':
self.process_ret_job_event(salt_data)
if salt_data['data']['fun'] == 'grains.items':
self.process_minion_update(salt_data)
if parts[1] == 'key':
self.process_key_event(salt_data)
if parts[1] == 'presence':
self.process_presence_events(salt_data, token, opts)
|
[
"def",
"process",
"(",
"self",
",",
"salt_data",
",",
"token",
",",
"opts",
")",
":",
"parts",
"=",
"salt_data",
"[",
"'tag'",
"]",
".",
"split",
"(",
"'/'",
")",
"if",
"len",
"(",
"parts",
")",
"<",
"2",
":",
"return",
"# TBD: Simplify these conditional expressions",
"if",
"parts",
"[",
"1",
"]",
"==",
"'job'",
":",
"if",
"parts",
"[",
"3",
"]",
"==",
"'new'",
":",
"self",
".",
"process_new_job_event",
"(",
"salt_data",
")",
"if",
"salt_data",
"[",
"'data'",
"]",
"[",
"'fun'",
"]",
"==",
"'grains.items'",
":",
"self",
".",
"minions",
"=",
"{",
"}",
"elif",
"parts",
"[",
"3",
"]",
"==",
"'ret'",
":",
"self",
".",
"process_ret_job_event",
"(",
"salt_data",
")",
"if",
"salt_data",
"[",
"'data'",
"]",
"[",
"'fun'",
"]",
"==",
"'grains.items'",
":",
"self",
".",
"process_minion_update",
"(",
"salt_data",
")",
"if",
"parts",
"[",
"1",
"]",
"==",
"'key'",
":",
"self",
".",
"process_key_event",
"(",
"salt_data",
")",
"if",
"parts",
"[",
"1",
"]",
"==",
"'presence'",
":",
"self",
".",
"process_presence_events",
"(",
"salt_data",
",",
"token",
",",
"opts",
")"
] | 38
| 14.181818
|
def _set_Cpu(self, v, load=False):
"""
Setter method for Cpu, mapped from YANG variable /rbridge_id/threshold_monitor/Cpu (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_Cpu is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_Cpu() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=Cpu.Cpu, is_container='container', presence=False, yang_name="Cpu", rest_name="Cpu", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure settings for component:CPU', u'cli-compact-syntax': None, u'callpoint': u'CpuMonitor', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """Cpu must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=Cpu.Cpu, is_container='container', presence=False, yang_name="Cpu", rest_name="Cpu", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure settings for component:CPU', u'cli-compact-syntax': None, u'callpoint': u'CpuMonitor', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='container', is_config=True)""",
})
self.__Cpu = t
if hasattr(self, '_set'):
self._set()
|
[
"def",
"_set_Cpu",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"Cpu",
".",
"Cpu",
",",
"is_container",
"=",
"'container'",
",",
"presence",
"=",
"False",
",",
"yang_name",
"=",
"\"Cpu\"",
",",
"rest_name",
"=",
"\"Cpu\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'Configure settings for component:CPU'",
",",
"u'cli-compact-syntax'",
":",
"None",
",",
"u'callpoint'",
":",
"u'CpuMonitor'",
",",
"u'cli-incomplete-no'",
":",
"None",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-threshold-monitor'",
",",
"defining_module",
"=",
"'brocade-threshold-monitor'",
",",
"yang_type",
"=",
"'container'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"Cpu must be of a type compatible with container\"\"\"",
",",
"'defined-type'",
":",
"\"container\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=Cpu.Cpu, is_container='container', presence=False, yang_name=\"Cpu\", rest_name=\"Cpu\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure settings for component:CPU', u'cli-compact-syntax': None, u'callpoint': u'CpuMonitor', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='container', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__Cpu",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | 79.363636
| 37.5
|
def increase_writes_in_percent(
current_provisioning, percent, max_provisioned_writes,
consumed_write_units_percent, log_tag):
""" Increase the current_provisioning with percent %
:type current_provisioning: int
:param current_provisioning: The current provisioning
:type percent: int
:param percent: How many percent should we increase with
:returns: int -- New provisioning value
:type max_provisioned_writes: int
:param max_provisioned_writes: Configured max provisioned writes
:type consumed_write_units_percent: float
:param consumed_write_units_percent: Number of consumed write units
:type log_tag: str
:param log_tag: Prefix for the log
"""
current_provisioning = float(current_provisioning)
consumed_write_units_percent = float(consumed_write_units_percent)
percent = float(percent)
consumption_based_current_provisioning = \
int(math.ceil(current_provisioning*(consumed_write_units_percent/100)))
if consumption_based_current_provisioning > current_provisioning:
increase = int(
math.ceil(consumption_based_current_provisioning*(percent/100)))
updated_provisioning = consumption_based_current_provisioning + increase
else:
increase = int(math.ceil(current_provisioning*(float(percent)/100)))
updated_provisioning = current_provisioning + increase
if max_provisioned_writes > 0:
if updated_provisioning > max_provisioned_writes:
logger.info(
'{0} - Reached provisioned writes max limit: {1}'.format(
log_tag,
max_provisioned_writes))
return max_provisioned_writes
logger.debug(
'{0} - Write provisioning will be increased to {1:d} units'.format(
log_tag,
int(updated_provisioning)))
return updated_provisioning
|
[
"def",
"increase_writes_in_percent",
"(",
"current_provisioning",
",",
"percent",
",",
"max_provisioned_writes",
",",
"consumed_write_units_percent",
",",
"log_tag",
")",
":",
"current_provisioning",
"=",
"float",
"(",
"current_provisioning",
")",
"consumed_write_units_percent",
"=",
"float",
"(",
"consumed_write_units_percent",
")",
"percent",
"=",
"float",
"(",
"percent",
")",
"consumption_based_current_provisioning",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"current_provisioning",
"*",
"(",
"consumed_write_units_percent",
"/",
"100",
")",
")",
")",
"if",
"consumption_based_current_provisioning",
">",
"current_provisioning",
":",
"increase",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"consumption_based_current_provisioning",
"*",
"(",
"percent",
"/",
"100",
")",
")",
")",
"updated_provisioning",
"=",
"consumption_based_current_provisioning",
"+",
"increase",
"else",
":",
"increase",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"current_provisioning",
"*",
"(",
"float",
"(",
"percent",
")",
"/",
"100",
")",
")",
")",
"updated_provisioning",
"=",
"current_provisioning",
"+",
"increase",
"if",
"max_provisioned_writes",
">",
"0",
":",
"if",
"updated_provisioning",
">",
"max_provisioned_writes",
":",
"logger",
".",
"info",
"(",
"'{0} - Reached provisioned writes max limit: {1}'",
".",
"format",
"(",
"log_tag",
",",
"max_provisioned_writes",
")",
")",
"return",
"max_provisioned_writes",
"logger",
".",
"debug",
"(",
"'{0} - Write provisioning will be increased to {1:d} units'",
".",
"format",
"(",
"log_tag",
",",
"int",
"(",
"updated_provisioning",
")",
")",
")",
"return",
"updated_provisioning"
] | 39.489362
| 20.361702
|
def setup_symbol_list(self, filter_text, current_path):
    """Setup list widget content for symbol list display.

    Parameters
    ----------
    filter_text : str
        The raw switcher text; expected to contain a '@' separating the
        file-filter part from the symbol query (e.g. ``"file@symbol"``).
        Raises ValueError if no/multiple '@' are present — callers are
        assumed to guarantee exactly one.  # NOTE(review): confirm caller contract
    current_path : str
        Path of the current file.  # NOTE(review): unused in this method
    """
    # Get optional symbol name
    filter_text, symbol_text = filter_text.split('@')
    # Fetch the Outline explorer data, get the icons and values
    oedata = self.get_symbol_list()
    icons = get_python_symbol_icons(oedata)
    # The list of paths here is needed in order to have the same
    # point of measurement for the list widget size as in the file list
    # See issue 4648
    paths = self.paths
    # Update list size
    self.fix_size(paths)
    symbol_list = process_python_symbol_data(oedata)
    # Each symbol entry is (line, label, fold_level, token); keep the
    # non-label fields aligned with the score results by index.
    line_fold_token = [(item[0], item[2], item[3]) for item in symbol_list]
    choices = [item[1] for item in symbol_list]
    # Fuzzy-match the symbol query; matched fragments are wrapped in <b> tags.
    scores = get_search_scores(symbol_text, choices, template="<b>{0}</b>")
    # Build the text that will appear on the list widget
    results = []
    lines = []
    self.filtered_symbol_lines = []
    for index, score in enumerate(scores):
        text, rich_text, score_value = score
        line, fold_level, token = line_fold_token[index]
        lines.append(text)
        # A score of -1 marks a non-match; only matches are displayed.
        if score_value != -1:
            results.append((score_value, line, text, rich_text,
                            fold_level, icons[index], token))
    # Outer {{0}}/{{1}} survive the .format() and are filled per item below.
    template = '{{0}}<span style="color:{0}">{{1}}</span>'.format(
        ima.MAIN_FG_COLOR)
    # sorted() orders by score first (tuple ordering), i.e. best match last.
    for (score, line, text, rich_text, fold_level, icon,
         token) in sorted(results):
        # Indent the entry proportionally to its fold (nesting) level.
        fold_space = ' '*(fold_level)
        line_number = line + 1
        self.filtered_symbol_lines.append(line_number)
        textline = template.format(fold_space, rich_text)
        item = QListWidgetItem(icon, textline)
        item.setSizeHint(QSize(0, 16))
        self.list.addItem(item)
    # To adjust the delegate layout for KDE themes
    self.list.files_list = False
    # Select edit line when using symbol search initially.
    # See issue 5661
    self.edit.setFocus()
|
[
"def",
"setup_symbol_list",
"(",
"self",
",",
"filter_text",
",",
"current_path",
")",
":",
"# Get optional symbol name",
"filter_text",
",",
"symbol_text",
"=",
"filter_text",
".",
"split",
"(",
"'@'",
")",
"# Fetch the Outline explorer data, get the icons and values",
"oedata",
"=",
"self",
".",
"get_symbol_list",
"(",
")",
"icons",
"=",
"get_python_symbol_icons",
"(",
"oedata",
")",
"# The list of paths here is needed in order to have the same",
"# point of measurement for the list widget size as in the file list",
"# See issue 4648",
"paths",
"=",
"self",
".",
"paths",
"# Update list size",
"self",
".",
"fix_size",
"(",
"paths",
")",
"symbol_list",
"=",
"process_python_symbol_data",
"(",
"oedata",
")",
"line_fold_token",
"=",
"[",
"(",
"item",
"[",
"0",
"]",
",",
"item",
"[",
"2",
"]",
",",
"item",
"[",
"3",
"]",
")",
"for",
"item",
"in",
"symbol_list",
"]",
"choices",
"=",
"[",
"item",
"[",
"1",
"]",
"for",
"item",
"in",
"symbol_list",
"]",
"scores",
"=",
"get_search_scores",
"(",
"symbol_text",
",",
"choices",
",",
"template",
"=",
"\"<b>{0}</b>\"",
")",
"# Build the text that will appear on the list widget",
"results",
"=",
"[",
"]",
"lines",
"=",
"[",
"]",
"self",
".",
"filtered_symbol_lines",
"=",
"[",
"]",
"for",
"index",
",",
"score",
"in",
"enumerate",
"(",
"scores",
")",
":",
"text",
",",
"rich_text",
",",
"score_value",
"=",
"score",
"line",
",",
"fold_level",
",",
"token",
"=",
"line_fold_token",
"[",
"index",
"]",
"lines",
".",
"append",
"(",
"text",
")",
"if",
"score_value",
"!=",
"-",
"1",
":",
"results",
".",
"append",
"(",
"(",
"score_value",
",",
"line",
",",
"text",
",",
"rich_text",
",",
"fold_level",
",",
"icons",
"[",
"index",
"]",
",",
"token",
")",
")",
"template",
"=",
"'{{0}}<span style=\"color:{0}\">{{1}}</span>'",
".",
"format",
"(",
"ima",
".",
"MAIN_FG_COLOR",
")",
"for",
"(",
"score",
",",
"line",
",",
"text",
",",
"rich_text",
",",
"fold_level",
",",
"icon",
",",
"token",
")",
"in",
"sorted",
"(",
"results",
")",
":",
"fold_space",
"=",
"' '",
"*",
"(",
"fold_level",
")",
"line_number",
"=",
"line",
"+",
"1",
"self",
".",
"filtered_symbol_lines",
".",
"append",
"(",
"line_number",
")",
"textline",
"=",
"template",
".",
"format",
"(",
"fold_space",
",",
"rich_text",
")",
"item",
"=",
"QListWidgetItem",
"(",
"icon",
",",
"textline",
")",
"item",
".",
"setSizeHint",
"(",
"QSize",
"(",
"0",
",",
"16",
")",
")",
"self",
".",
"list",
".",
"addItem",
"(",
"item",
")",
"# To adjust the delegate layout for KDE themes",
"self",
".",
"list",
".",
"files_list",
"=",
"False",
"# Select edit line when using symbol search initially.",
"# See issue 5661",
"self",
".",
"edit",
".",
"setFocus",
"(",
")"
] | 40.269231
| 18.769231
|
def repository_data(self, repo):
    """Collect summary information for one repository.

    Parses the repository's ``PACKAGES.TXT`` and returns a list of
    ``[package_count, compressed_sizes, uncompressed_sizes, last_update]``.
    For the ``salix`` and ``slackl`` repositories the update stamp is
    taken from the first line of their ``ChangeLog.txt`` instead.
    """
    count = 0
    comp_sizes = []
    uncomp_sizes = []
    updated = ""
    packages_txt = Utils().read_file(
        self.meta.lib_path + repo + "_repo/PACKAGES.TXT")
    for row in packages_txt.splitlines():
        if row.startswith("PACKAGES.TXT;"):
            updated = row[14:].strip()
        if row.startswith("PACKAGE NAME:"):
            count += 1
        if row.startswith("PACKAGE SIZE (compressed): "):
            comp_sizes.append(row[28:-2].strip())
        if row.startswith("PACKAGE SIZE (uncompressed): "):
            uncomp_sizes.append(row[30:-2].strip())
    if repo in ["salix", "slackl"]:
        changelog = Utils().read_file(
            self.meta.log_path + "{0}/ChangeLog.txt".format(repo))
        updated = changelog.split("\n", 1)[0]
    return [count, comp_sizes, uncomp_sizes, updated]
|
[
"def",
"repository_data",
"(",
"self",
",",
"repo",
")",
":",
"sum_pkgs",
",",
"size",
",",
"unsize",
",",
"last_upd",
"=",
"0",
",",
"[",
"]",
",",
"[",
"]",
",",
"\"\"",
"for",
"line",
"in",
"(",
"Utils",
"(",
")",
".",
"read_file",
"(",
"self",
".",
"meta",
".",
"lib_path",
"+",
"repo",
"+",
"\"_repo/PACKAGES.TXT\"",
")",
".",
"splitlines",
"(",
")",
")",
":",
"if",
"line",
".",
"startswith",
"(",
"\"PACKAGES.TXT;\"",
")",
":",
"last_upd",
"=",
"line",
"[",
"14",
":",
"]",
".",
"strip",
"(",
")",
"if",
"line",
".",
"startswith",
"(",
"\"PACKAGE NAME:\"",
")",
":",
"sum_pkgs",
"+=",
"1",
"if",
"line",
".",
"startswith",
"(",
"\"PACKAGE SIZE (compressed): \"",
")",
":",
"size",
".",
"append",
"(",
"line",
"[",
"28",
":",
"-",
"2",
"]",
".",
"strip",
"(",
")",
")",
"if",
"line",
".",
"startswith",
"(",
"\"PACKAGE SIZE (uncompressed): \"",
")",
":",
"unsize",
".",
"append",
"(",
"line",
"[",
"30",
":",
"-",
"2",
"]",
".",
"strip",
"(",
")",
")",
"if",
"repo",
"in",
"[",
"\"salix\"",
",",
"\"slackl\"",
"]",
":",
"log",
"=",
"Utils",
"(",
")",
".",
"read_file",
"(",
"self",
".",
"meta",
".",
"log_path",
"+",
"\"{0}/ChangeLog.txt\"",
".",
"format",
"(",
"repo",
")",
")",
"last_upd",
"=",
"log",
".",
"split",
"(",
"\"\\n\"",
",",
"1",
")",
"[",
"0",
"]",
"return",
"[",
"sum_pkgs",
",",
"size",
",",
"unsize",
",",
"last_upd",
"]"
] | 44.4
| 11.2
|
def create_subnet(network, cidr, name=None,
                  ip_version=4, profile=None):
    '''
    Create a new subnet on an existing network.

    CLI Example:

    .. code-block:: bash

        salt '*' neutron.create_subnet network-name 192.168.1.0/24

    :param network: Network ID or name this subnet belongs to
    :param cidr: CIDR of subnet to create (Ex. '192.168.1.0/24')
    :param name: Name of the subnet to create (Optional)
    :param ip_version: IP version to use, default is 4 (IPv4) (Optional)
    :param profile: Profile to build on (Optional)
    :return: Created subnet information
    '''
    neutron_interface = _auth(profile)
    return neutron_interface.create_subnet(network, cidr, name, ip_version)
|
[
"def",
"create_subnet",
"(",
"network",
",",
"cidr",
",",
"name",
"=",
"None",
",",
"ip_version",
"=",
"4",
",",
"profile",
"=",
"None",
")",
":",
"conn",
"=",
"_auth",
"(",
"profile",
")",
"return",
"conn",
".",
"create_subnet",
"(",
"network",
",",
"cidr",
",",
"name",
",",
"ip_version",
")"
] | 32.9
| 22.7
|
def resume(self):
    """Resume process execution.

    Raises NoSuchProcess if the PID no longer refers to this process
    (the kernel may have reused it).  On Windows the platform layer
    exposes ``resume_process``; elsewhere SIGCONT is sent.
    """
    # Guard against acting on a recycled PID.
    if not self.is_running():
        raise NoSuchProcess(self.pid, self._platform_impl._process_name)
    platform_resume = getattr(self._platform_impl, "resume_process", None)
    if platform_resume is not None:
        # windows
        platform_resume()
    else:
        # posix
        self.send_signal(signal.SIGCONT)
|
[
"def",
"resume",
"(",
"self",
")",
":",
"# safety measure in case the current process has been killed in",
"# meantime and the kernel reused its PID",
"if",
"not",
"self",
".",
"is_running",
"(",
")",
":",
"name",
"=",
"self",
".",
"_platform_impl",
".",
"_process_name",
"raise",
"NoSuchProcess",
"(",
"self",
".",
"pid",
",",
"name",
")",
"# windows",
"if",
"hasattr",
"(",
"self",
".",
"_platform_impl",
",",
"\"resume_process\"",
")",
":",
"self",
".",
"_platform_impl",
".",
"resume_process",
"(",
")",
"else",
":",
"# posix",
"self",
".",
"send_signal",
"(",
"signal",
".",
"SIGCONT",
")"
] | 38.923077
| 14.538462
|
def initialize(self, **kwargs):
    """!
    @brief Calculates initial centers using the K-Means++ method.
    @param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'return_index').
    <b>Keyword Args:</b><br>
        - return_index (bool): If True then indexes of points from the input
          data are returned instead of the points themselves.
    @return (list) List of initialized initial centers (points, or indexes
            when 'return_index' is True).
    """
    return_index = kwargs.get('return_index', False)

    # Seed with the very first center and mark its index as used.
    seed_index = self.__get_initial_center(True)
    centers = [seed_index]
    self.__free_indexes.remove(seed_index)

    # Grow the center set until the requested amount is reached.
    while len(centers) < self.__amount:
        candidate = self.__get_next_center(centers, True)
        centers.append(candidate)
        self.__free_indexes.remove(candidate)

    if return_index:
        return centers
    return [self.__data[idx] for idx in centers]
|
[
"def",
"initialize",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"return_index",
"=",
"kwargs",
".",
"get",
"(",
"'return_index'",
",",
"False",
")",
"index_point",
"=",
"self",
".",
"__get_initial_center",
"(",
"True",
")",
"centers",
"=",
"[",
"index_point",
"]",
"self",
".",
"__free_indexes",
".",
"remove",
"(",
"index_point",
")",
"# For each next center",
"for",
"_",
"in",
"range",
"(",
"1",
",",
"self",
".",
"__amount",
")",
":",
"index_point",
"=",
"self",
".",
"__get_next_center",
"(",
"centers",
",",
"True",
")",
"centers",
".",
"append",
"(",
"index_point",
")",
"self",
".",
"__free_indexes",
".",
"remove",
"(",
"index_point",
")",
"if",
"not",
"return_index",
":",
"centers",
"=",
"[",
"self",
".",
"__data",
"[",
"index",
"]",
"for",
"index",
"in",
"centers",
"]",
"return",
"centers"
] | 36
| 24.870968
|
def get_content_unscoped_package(self, feed_id, package_name, package_version, **kwargs):
    """GetContentUnscopedPackage.
    [Preview API] Get an unscoped npm package.
    :param str feed_id: Name or ID of the feed.
    :param str package_name: Name of the package.
    :param str package_version: Version of the package.
    :rtype: object
    """
    # Build route values, skipping any parameter the caller omitted.
    route_params = (('feedId', 'feed_id', feed_id),
                    ('packageName', 'package_name', package_name),
                    ('packageVersion', 'package_version', package_version))
    route_values = {}
    for route_key, param_name, param_value in route_params:
        if param_value is not None:
            route_values[route_key] = self._serialize.url(
                param_name, param_value, 'str')
    response = self._send(http_method='GET',
                          location_id='75caa482-cb1e-47cd-9f2c-c048a4b7a43e',
                          version='5.0-preview.1',
                          route_values=route_values,
                          accept_media_type='application/octet-stream')
    # Optional progress callback for the streamed download.
    callback = kwargs.get("callback")
    return self._client.stream_download(response, callback=callback)
|
[
"def",
"get_content_unscoped_package",
"(",
"self",
",",
"feed_id",
",",
"package_name",
",",
"package_version",
",",
"*",
"*",
"kwargs",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"feed_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'feedId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'feed_id'",
",",
"feed_id",
",",
"'str'",
")",
"if",
"package_name",
"is",
"not",
"None",
":",
"route_values",
"[",
"'packageName'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'package_name'",
",",
"package_name",
",",
"'str'",
")",
"if",
"package_version",
"is",
"not",
"None",
":",
"route_values",
"[",
"'packageVersion'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'package_version'",
",",
"package_version",
",",
"'str'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'GET'",
",",
"location_id",
"=",
"'75caa482-cb1e-47cd-9f2c-c048a4b7a43e'",
",",
"version",
"=",
"'5.0-preview.1'",
",",
"route_values",
"=",
"route_values",
",",
"accept_media_type",
"=",
"'application/octet-stream'",
")",
"if",
"\"callback\"",
"in",
"kwargs",
":",
"callback",
"=",
"kwargs",
"[",
"\"callback\"",
"]",
"else",
":",
"callback",
"=",
"None",
"return",
"self",
".",
"_client",
".",
"stream_download",
"(",
"response",
",",
"callback",
"=",
"callback",
")"
] | 51.6
| 20.48
|
def montage(data, ref_chan=None, ref_to_avg=False, bipolar=None,
            method='average'):
    """Apply linear transformation to the channels.
    Parameters
    ----------
    data : instance of DataRaw
        the data to filter
    ref_chan : list of str
        list of channels used as reference
    ref_to_avg : bool
        if re-reference to average or not
    bipolar : float
        distance in mm to consider two channels as neighbors and then compute
        the bipolar montage between them.
    method : str
        'average' or 'regression'. 'average' takes the
        average across the channels selected as reference (it can be all) and
        subtract it from each channel. 'regression' keeps the residuals after
        regressing out the mean across channels.
    Returns
    -------
    filtered_data : instance of DataRaw
        filtered data
    Raises
    ------
    TypeError
        if both ref_to_avg and ref_chan are given, or ref_chan is not a
        list/tuple of strings.
    ValueError
        if bipolar is requested without channel info in attr, or the 'chan'
        axis is not the first dimension.
    Notes
    -----
    If you don't change anything, it returns the same instance of data.
    """
    # ref_to_avg and an explicit reference list are mutually exclusive.
    if ref_to_avg and ref_chan is not None:
        raise TypeError('You cannot specify reference to the average and '
                        'the channels to use as reference')
    if ref_chan is not None:
        if (not isinstance(ref_chan, (list, tuple)) or
                not all(isinstance(x, str) for x in ref_chan)):
            raise TypeError('chan should be a list of strings')
    if ref_chan is None:
        ref_chan = []  # TODO: check bool for ref_chan
    if bipolar:
        if not data.attr['chan']:
            raise ValueError('Data should have Chan information in attr')
        _assert_equal_channels(data.axis['chan'])
        chan_in_data = data.axis['chan'][0]
        chan = data.attr['chan']
        # Chan appears to be callable and filters itself by label;
        # NOTE(review): confirm against the Chan class API.
        chan = chan(lambda x: x.label in chan_in_data)
        # trans is the linear operator mapping original to bipolar channels.
        chan, trans = create_bipolar_chan(chan, bipolar)
        data.attr['chan'] = chan
    if ref_to_avg or ref_chan or bipolar:
        mdata = data._copy()
        idx_chan = mdata.index_of('chan')
        for i in range(mdata.number_of('trial')):
            if ref_to_avg or ref_chan:
                if ref_to_avg:
                    # Average reference: use every channel of this trial.
                    ref_chan = data.axis['chan'][i]
                ref_data = data(trial=i, chan=ref_chan)
                if method == 'average':
                    # Subtract the mean of the reference channels.
                    mdata.data[i] = (data(trial=i) - mean(ref_data, axis=idx_chan))
                elif method == 'regression':
                    mdata.data[i] = compute_average_regress(data(trial=i), idx_chan)
            elif bipolar:
                if not data.index_of('chan') == 0:
                    raise ValueError('For matrix multiplication to work, '
                                     'the first dimension should be chan')
                # Apply the bipolar transformation matrix to this trial.
                mdata.data[i] = dot(trans, data(trial=i))
                mdata.axis['chan'][i] = asarray(chan.return_label(),
                                                dtype='U')
    else:
        # Nothing to do: return the input instance unchanged.
        mdata = data
    return mdata
|
[
"def",
"montage",
"(",
"data",
",",
"ref_chan",
"=",
"None",
",",
"ref_to_avg",
"=",
"False",
",",
"bipolar",
"=",
"None",
",",
"method",
"=",
"'average'",
")",
":",
"if",
"ref_to_avg",
"and",
"ref_chan",
"is",
"not",
"None",
":",
"raise",
"TypeError",
"(",
"'You cannot specify reference to the average and '",
"'the channels to use as reference'",
")",
"if",
"ref_chan",
"is",
"not",
"None",
":",
"if",
"(",
"not",
"isinstance",
"(",
"ref_chan",
",",
"(",
"list",
",",
"tuple",
")",
")",
"or",
"not",
"all",
"(",
"isinstance",
"(",
"x",
",",
"str",
")",
"for",
"x",
"in",
"ref_chan",
")",
")",
":",
"raise",
"TypeError",
"(",
"'chan should be a list of strings'",
")",
"if",
"ref_chan",
"is",
"None",
":",
"ref_chan",
"=",
"[",
"]",
"# TODO: check bool for ref_chan",
"if",
"bipolar",
":",
"if",
"not",
"data",
".",
"attr",
"[",
"'chan'",
"]",
":",
"raise",
"ValueError",
"(",
"'Data should have Chan information in attr'",
")",
"_assert_equal_channels",
"(",
"data",
".",
"axis",
"[",
"'chan'",
"]",
")",
"chan_in_data",
"=",
"data",
".",
"axis",
"[",
"'chan'",
"]",
"[",
"0",
"]",
"chan",
"=",
"data",
".",
"attr",
"[",
"'chan'",
"]",
"chan",
"=",
"chan",
"(",
"lambda",
"x",
":",
"x",
".",
"label",
"in",
"chan_in_data",
")",
"chan",
",",
"trans",
"=",
"create_bipolar_chan",
"(",
"chan",
",",
"bipolar",
")",
"data",
".",
"attr",
"[",
"'chan'",
"]",
"=",
"chan",
"if",
"ref_to_avg",
"or",
"ref_chan",
"or",
"bipolar",
":",
"mdata",
"=",
"data",
".",
"_copy",
"(",
")",
"idx_chan",
"=",
"mdata",
".",
"index_of",
"(",
"'chan'",
")",
"for",
"i",
"in",
"range",
"(",
"mdata",
".",
"number_of",
"(",
"'trial'",
")",
")",
":",
"if",
"ref_to_avg",
"or",
"ref_chan",
":",
"if",
"ref_to_avg",
":",
"ref_chan",
"=",
"data",
".",
"axis",
"[",
"'chan'",
"]",
"[",
"i",
"]",
"ref_data",
"=",
"data",
"(",
"trial",
"=",
"i",
",",
"chan",
"=",
"ref_chan",
")",
"if",
"method",
"==",
"'average'",
":",
"mdata",
".",
"data",
"[",
"i",
"]",
"=",
"(",
"data",
"(",
"trial",
"=",
"i",
")",
"-",
"mean",
"(",
"ref_data",
",",
"axis",
"=",
"idx_chan",
")",
")",
"elif",
"method",
"==",
"'regression'",
":",
"mdata",
".",
"data",
"[",
"i",
"]",
"=",
"compute_average_regress",
"(",
"data",
"(",
"trial",
"=",
"i",
")",
",",
"idx_chan",
")",
"elif",
"bipolar",
":",
"if",
"not",
"data",
".",
"index_of",
"(",
"'chan'",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"'For matrix multiplication to work, '",
"'the first dimension should be chan'",
")",
"mdata",
".",
"data",
"[",
"i",
"]",
"=",
"dot",
"(",
"trans",
",",
"data",
"(",
"trial",
"=",
"i",
")",
")",
"mdata",
".",
"axis",
"[",
"'chan'",
"]",
"[",
"i",
"]",
"=",
"asarray",
"(",
"chan",
".",
"return_label",
"(",
")",
",",
"dtype",
"=",
"'U'",
")",
"else",
":",
"mdata",
"=",
"data",
"return",
"mdata"
] | 34.5
| 21.47561
|
def calculate_integral(self, T1, T2, method):
    r'''Method to calculate the integral of a property with respect to
    temperature, using a specified method. Implements the
    analytical integrals of all available methods except for tabular data,
    the case of multiple coefficient sets needed to encompass the temperature
    range of any of the ZABRANSKY methods, and the CSP methods using the
    vapor phase properties.
    Parameters
    ----------
    T1 : float
        Lower limit of integration, [K]
    T2 : float
        Upper limit of integration, [K]
    method : str
        Method for which to find the integral
    Returns
    -------
    integral : float
        Calculated integral of the property over the given range,
        [`units*K`]
    '''
    if method == ZABRANSKY_SPLINE:
        return self.Zabransky_spline.calculate_integral(T1, T2)
    elif method == ZABRANSKY_SPLINE_C:
        return self.Zabransky_spline_iso.calculate_integral(T1, T2)
    elif method == ZABRANSKY_SPLINE_SAT:
        return self.Zabransky_spline_sat.calculate_integral(T1, T2)
    elif method == ZABRANSKY_QUASIPOLYNOMIAL:
        return self.Zabransky_quasipolynomial.calculate_integral(T1, T2)
    elif method == ZABRANSKY_QUASIPOLYNOMIAL_C:
        return self.Zabransky_quasipolynomial_iso.calculate_integral(T1, T2)
    elif method == ZABRANSKY_QUASIPOLYNOMIAL_SAT:
        return self.Zabransky_quasipolynomial_sat.calculate_integral(T1, T2)
    elif method == POLING_CONST:
        # Constant heat capacity: integral is Cp*(T2 - T1).
        return (T2 - T1)*self.POLING_constant
    elif method == CRCSTD:
        return (T2 - T1)*self.CRCSTD_constant
    elif method == DADGOSTAR_SHAW:
        dH = (Dadgostar_Shaw_integral(T2, self.similarity_variable)
              - Dadgostar_Shaw_integral(T1, self.similarity_variable))
        # Dadgostar-Shaw works on a mass basis; convert to molar units.
        return property_mass_to_molar(dH, self.MW)
    elif method in self.tabular_data or method == COOLPROP or method in [ROWLINSON_POLING, ROWLINSON_BONDI]:
        # No analytical form: integrate numerically. `args` must be a
        # tuple per the scipy.integrate.quad API; the previous
        # `args=(method)` passed a bare string and only worked because
        # quad normalizes non-tuple args.
        return float(quad(self.calculate, T1, T2, args=(method,))[0])
    else:
        raise Exception('Method not valid')
|
[
"def",
"calculate_integral",
"(",
"self",
",",
"T1",
",",
"T2",
",",
"method",
")",
":",
"if",
"method",
"==",
"ZABRANSKY_SPLINE",
":",
"return",
"self",
".",
"Zabransky_spline",
".",
"calculate_integral",
"(",
"T1",
",",
"T2",
")",
"elif",
"method",
"==",
"ZABRANSKY_SPLINE_C",
":",
"return",
"self",
".",
"Zabransky_spline_iso",
".",
"calculate_integral",
"(",
"T1",
",",
"T2",
")",
"elif",
"method",
"==",
"ZABRANSKY_SPLINE_SAT",
":",
"return",
"self",
".",
"Zabransky_spline_sat",
".",
"calculate_integral",
"(",
"T1",
",",
"T2",
")",
"elif",
"method",
"==",
"ZABRANSKY_QUASIPOLYNOMIAL",
":",
"return",
"self",
".",
"Zabransky_quasipolynomial",
".",
"calculate_integral",
"(",
"T1",
",",
"T2",
")",
"elif",
"method",
"==",
"ZABRANSKY_QUASIPOLYNOMIAL_C",
":",
"return",
"self",
".",
"Zabransky_quasipolynomial_iso",
".",
"calculate_integral",
"(",
"T1",
",",
"T2",
")",
"elif",
"method",
"==",
"ZABRANSKY_QUASIPOLYNOMIAL_SAT",
":",
"return",
"self",
".",
"Zabransky_quasipolynomial_sat",
".",
"calculate_integral",
"(",
"T1",
",",
"T2",
")",
"elif",
"method",
"==",
"POLING_CONST",
":",
"return",
"(",
"T2",
"-",
"T1",
")",
"*",
"self",
".",
"POLING_constant",
"elif",
"method",
"==",
"CRCSTD",
":",
"return",
"(",
"T2",
"-",
"T1",
")",
"*",
"self",
".",
"CRCSTD_constant",
"elif",
"method",
"==",
"DADGOSTAR_SHAW",
":",
"dH",
"=",
"(",
"Dadgostar_Shaw_integral",
"(",
"T2",
",",
"self",
".",
"similarity_variable",
")",
"-",
"Dadgostar_Shaw_integral",
"(",
"T1",
",",
"self",
".",
"similarity_variable",
")",
")",
"return",
"property_mass_to_molar",
"(",
"dH",
",",
"self",
".",
"MW",
")",
"elif",
"method",
"in",
"self",
".",
"tabular_data",
"or",
"method",
"==",
"COOLPROP",
"or",
"method",
"in",
"[",
"ROWLINSON_POLING",
",",
"ROWLINSON_BONDI",
"]",
":",
"return",
"float",
"(",
"quad",
"(",
"self",
".",
"calculate",
",",
"T1",
",",
"T2",
",",
"args",
"=",
"(",
"method",
")",
")",
"[",
"0",
"]",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Method not valid'",
")"
] | 46.851064
| 21.914894
|
def generate_submission_id():
    """
    Executor for `globus task generate-submission-id`
    """
    # Ask the Transfer service for a fresh submission id and print just
    # its value as raw text.
    submission_doc = get_client().get_submission_id()
    formatted_print(submission_doc, text_format=FORMAT_TEXT_RAW,
                    response_key="value")
|
[
"def",
"generate_submission_id",
"(",
")",
":",
"client",
"=",
"get_client",
"(",
")",
"res",
"=",
"client",
".",
"get_submission_id",
"(",
")",
"formatted_print",
"(",
"res",
",",
"text_format",
"=",
"FORMAT_TEXT_RAW",
",",
"response_key",
"=",
"\"value\"",
")"
] | 29
| 14.75
|
def printUniqueTFAM(tfam, samples, prefix):
    """Prints a new TFAM with only unique samples.

    Writes the rows of *tfam* selected by the positions stored in
    *samples* (in ascending position order) to
    ``<prefix>.unique_samples.tfam``, tab separated, one sample per line.

    :param tfam: a representation of a TFAM file.
    :param samples: the position of the samples
    :param prefix: the prefix of the output file name
    :type tfam: list
    :type samples: dict
    :type prefix: str

    :raises ProgramError: if the output file cannot be written.
    """
    fileName = prefix + ".unique_samples.tfam"
    try:
        with open(fileName, "w") as outputFile:
            for i in sorted(samples.values()):
                # write() is portable across Python 2/3, unlike `print >>`.
                outputFile.write("\t".join(tfam[i]) + "\n")
    except IOError:
        # Interpolate the file name: the original message was never
        # %-formatted and showed the literal "%(fileName)s" placeholder.
        msg = "%(fileName)s: no such file" % {"fileName": fileName}
        raise ProgramError(msg)
|
[
"def",
"printUniqueTFAM",
"(",
"tfam",
",",
"samples",
",",
"prefix",
")",
":",
"fileName",
"=",
"prefix",
"+",
"\".unique_samples.tfam\"",
"try",
":",
"with",
"open",
"(",
"fileName",
",",
"\"w\"",
")",
"as",
"outputFile",
":",
"for",
"i",
"in",
"sorted",
"(",
"samples",
".",
"values",
"(",
")",
")",
":",
"print",
">>",
"outputFile",
",",
"\"\\t\"",
".",
"join",
"(",
"tfam",
"[",
"i",
"]",
")",
"except",
"IOError",
":",
"msg",
"=",
"\"%(fileName)s: no such file\"",
"raise",
"ProgramError",
"(",
"msg",
")"
] | 30.3
| 15.25
|
def to_list(self, csv_file):
    """ Concatenate daily rows, oldest to newest, into a tuple.

    Cleans each CSV row (strips whitespace, removes thousands
    separators) and converts the numeric columns to float, then slices
    off the header/footer rows depending on the data source (TWSE vs
    OTC).  Also caches the stock info and the raw header row on the
    instance as side effects.

    :param csv csv_file: csv files (an iterable of rows)
    :rtype: tuple
    """
    tolist = []
    for i in csv_file:
        # Normalize every cell: trim and drop thousands separators.
        i = [value.strip().replace(',', '') for value in i]
        try:
            # Columns 1-6 and 8 are numeric; header/short rows simply
            # keep their string values (IndexError/ValueError ignored).
            for value in (1, 2, 3, 4, 5, 6, 8):
                i[value] = float(i[value])
        except (IndexError, ValueError):
            pass
        tolist.append(i)
    if self._twse:
        if tolist:
            # First cell looks like "<date> <no><name>"; split off the
            # stock number (4 chars) and its name.
            # NOTE: .decode('utf-8') on a str implies Python 2 here.
            _stock_info = tolist[0][0].split(' ')[1].strip()
            self.__info = (_stock_info[:4],
                           _stock_info[4:].decode('utf-8'))
            # Row 1 holds the column names; data starts at row 2.
            self.__raw_rows_name = tolist[1]
            return tuple(tolist[2:])
        return tuple([])
    else:
        # OTC layout: header block is 5 rows, last row is a footer.
        if len(tolist) > 6:
            self.__raw_rows_name = tolist[4]
            self.__info = (self.__get_no, OTCNo().all_stock[self.__get_no])
            if len(tolist[5:]) > 1:
                return tuple(tolist[5:-1])
        return tuple([])
|
[
"def",
"to_list",
"(",
"self",
",",
"csv_file",
")",
":",
"tolist",
"=",
"[",
"]",
"for",
"i",
"in",
"csv_file",
":",
"i",
"=",
"[",
"value",
".",
"strip",
"(",
")",
".",
"replace",
"(",
"','",
",",
"''",
")",
"for",
"value",
"in",
"i",
"]",
"try",
":",
"for",
"value",
"in",
"(",
"1",
",",
"2",
",",
"3",
",",
"4",
",",
"5",
",",
"6",
",",
"8",
")",
":",
"i",
"[",
"value",
"]",
"=",
"float",
"(",
"i",
"[",
"value",
"]",
")",
"except",
"(",
"IndexError",
",",
"ValueError",
")",
":",
"pass",
"tolist",
".",
"append",
"(",
"i",
")",
"if",
"self",
".",
"_twse",
":",
"if",
"tolist",
":",
"_stock_info",
"=",
"tolist",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"split",
"(",
"' '",
")",
"[",
"1",
"]",
".",
"strip",
"(",
")",
"self",
".",
"__info",
"=",
"(",
"_stock_info",
"[",
":",
"4",
"]",
",",
"_stock_info",
"[",
"4",
":",
"]",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"self",
".",
"__raw_rows_name",
"=",
"tolist",
"[",
"1",
"]",
"return",
"tuple",
"(",
"tolist",
"[",
"2",
":",
"]",
")",
"return",
"tuple",
"(",
"[",
"]",
")",
"else",
":",
"if",
"len",
"(",
"tolist",
")",
">",
"6",
":",
"self",
".",
"__raw_rows_name",
"=",
"tolist",
"[",
"4",
"]",
"self",
".",
"__info",
"=",
"(",
"self",
".",
"__get_no",
",",
"OTCNo",
"(",
")",
".",
"all_stock",
"[",
"self",
".",
"__get_no",
"]",
")",
"if",
"len",
"(",
"tolist",
"[",
"5",
":",
"]",
")",
">",
"1",
":",
"return",
"tuple",
"(",
"tolist",
"[",
"5",
":",
"-",
"1",
"]",
")",
"return",
"tuple",
"(",
"[",
"]",
")"
] | 35.266667
| 13.9
|
def cmd_rollback(context):
    """
    Roll back by finding the most recent "stable" tagged version, and putting it again, so that
    it's the new "current" version.
    Args:
      context: a populated EFVersionContext object
    """
    stable_versions = get_versions(context, return_stable=True)
    # Exactly one stable version must exist to roll back to.
    if len(stable_versions) != 1:
        fail("Didn't find a version marked stable for key: {} in env/service: {}/{}".format(
            context.key, context.env, context.service_name))
    previous = stable_versions[0]
    # Copy the stable record's fields onto the context, then re-set it.
    context.value = previous.value
    context.commit_hash = previous.commit_hash
    context.build_number = previous.build_number
    context.location = previous.location
    context.stable = True
    cmd_set(context)
|
[
"def",
"cmd_rollback",
"(",
"context",
")",
":",
"last_stable",
"=",
"get_versions",
"(",
"context",
",",
"return_stable",
"=",
"True",
")",
"if",
"len",
"(",
"last_stable",
")",
"!=",
"1",
":",
"fail",
"(",
"\"Didn't find a version marked stable for key: {} in env/service: {}/{}\"",
".",
"format",
"(",
"context",
".",
"key",
",",
"context",
".",
"env",
",",
"context",
".",
"service_name",
")",
")",
"context",
".",
"value",
"=",
"last_stable",
"[",
"0",
"]",
".",
"value",
"context",
".",
"commit_hash",
"=",
"last_stable",
"[",
"0",
"]",
".",
"commit_hash",
"context",
".",
"build_number",
"=",
"last_stable",
"[",
"0",
"]",
".",
"build_number",
"context",
".",
"location",
"=",
"last_stable",
"[",
"0",
"]",
".",
"location",
"context",
".",
"stable",
"=",
"True",
"cmd_set",
"(",
"context",
")"
] | 39.470588
| 16.294118
|
def get_printable(iterable):
    """
    Keep only the printable characters of the specified string.

    Returns '' for empty or falsy input.  Note that str.isprintable()
    is not available in Python 2, hence the string.printable test.
    """
    if not iterable:
        return ''
    return ''.join(ch for ch in iterable if ch in string.printable)
|
[
"def",
"get_printable",
"(",
"iterable",
")",
":",
"if",
"iterable",
":",
"return",
"''",
".",
"join",
"(",
"i",
"for",
"i",
"in",
"iterable",
"if",
"i",
"in",
"string",
".",
"printable",
")",
"return",
"''"
] | 31.875
| 15.875
|
def facade(factory):
    """Mark *factory* as a facade factory.

    The factory is wrapped in a FacadeDescriptor that keeps the
    original function's metadata (name, docstring, ...).
    """
    descriptor = FacadeDescriptor(factory.__name__, factory)
    update_wrapper(descriptor, factory)
    return descriptor
|
[
"def",
"facade",
"(",
"factory",
")",
":",
"wrapper",
"=",
"FacadeDescriptor",
"(",
"factory",
".",
"__name__",
",",
"factory",
")",
"return",
"update_wrapper",
"(",
"wrapper",
",",
"factory",
")"
] | 41.75
| 10
|
def _print_unix(objects, sep, end, file, flush):
    """A print_() implementation which writes bytes.

    Encodes *sep*, *end* and every object with the module-level
    `_encoding` and writes the joined bytes to *file* (preferring its
    binary `.buffer` when present), with decoded-text fallbacks for
    text-only file objects.  Relies on the module globals `_encoding`,
    `PY2`, `PY3` and `text_type`.
    """
    encoding = _encoding
    # Normalize the separator to bytes; reject non-str/bytes values.
    if isinstance(sep, text_type):
        sep = sep.encode(encoding, "replace")
    if not isinstance(sep, bytes):
        raise TypeError
    # Same normalization for the line terminator.
    if isinstance(end, text_type):
        end = end.encode(encoding, "replace")
    if not isinstance(end, bytes):
        raise TypeError
    if end == b"\n":
        # Use the platform line separator for the default terminator.
        end = os.linesep
        if PY3:
            end = end.encode("ascii")
    parts = []
    for obj in objects:
        # Anything that is not already text/bytes goes through str().
        if not isinstance(obj, text_type) and not isinstance(obj, bytes):
            obj = text_type(obj)
        if isinstance(obj, text_type):
            if PY2:
                obj = obj.encode(encoding, "replace")
            else:
                # Prefer surrogateescape so undecodable input bytes
                # round-trip; fall back to replacement characters.
                try:
                    obj = obj.encode(encoding, "surrogateescape")
                except UnicodeEncodeError:
                    obj = obj.encode(encoding, "replace")
        assert isinstance(obj, bytes)
        parts.append(obj)
    data = sep.join(parts) + end
    assert isinstance(data, bytes)
    # Write raw bytes when the file exposes a binary buffer (Py3 text IO).
    file = getattr(file, "buffer", file)
    try:
        file.write(data)
    except TypeError:
        if PY3:
            # For StringIO, first try with surrogates
            surr_data = data.decode(encoding, "surrogateescape")
            try:
                file.write(surr_data)
            except (TypeError, ValueError):
                file.write(data.decode(encoding, "replace"))
        else:
            # for file like objects with don't support bytes
            file.write(data.decode(encoding, "replace"))
    if flush:
        file.flush()
|
[
"def",
"_print_unix",
"(",
"objects",
",",
"sep",
",",
"end",
",",
"file",
",",
"flush",
")",
":",
"encoding",
"=",
"_encoding",
"if",
"isinstance",
"(",
"sep",
",",
"text_type",
")",
":",
"sep",
"=",
"sep",
".",
"encode",
"(",
"encoding",
",",
"\"replace\"",
")",
"if",
"not",
"isinstance",
"(",
"sep",
",",
"bytes",
")",
":",
"raise",
"TypeError",
"if",
"isinstance",
"(",
"end",
",",
"text_type",
")",
":",
"end",
"=",
"end",
".",
"encode",
"(",
"encoding",
",",
"\"replace\"",
")",
"if",
"not",
"isinstance",
"(",
"end",
",",
"bytes",
")",
":",
"raise",
"TypeError",
"if",
"end",
"==",
"b\"\\n\"",
":",
"end",
"=",
"os",
".",
"linesep",
"if",
"PY3",
":",
"end",
"=",
"end",
".",
"encode",
"(",
"\"ascii\"",
")",
"parts",
"=",
"[",
"]",
"for",
"obj",
"in",
"objects",
":",
"if",
"not",
"isinstance",
"(",
"obj",
",",
"text_type",
")",
"and",
"not",
"isinstance",
"(",
"obj",
",",
"bytes",
")",
":",
"obj",
"=",
"text_type",
"(",
"obj",
")",
"if",
"isinstance",
"(",
"obj",
",",
"text_type",
")",
":",
"if",
"PY2",
":",
"obj",
"=",
"obj",
".",
"encode",
"(",
"encoding",
",",
"\"replace\"",
")",
"else",
":",
"try",
":",
"obj",
"=",
"obj",
".",
"encode",
"(",
"encoding",
",",
"\"surrogateescape\"",
")",
"except",
"UnicodeEncodeError",
":",
"obj",
"=",
"obj",
".",
"encode",
"(",
"encoding",
",",
"\"replace\"",
")",
"assert",
"isinstance",
"(",
"obj",
",",
"bytes",
")",
"parts",
".",
"append",
"(",
"obj",
")",
"data",
"=",
"sep",
".",
"join",
"(",
"parts",
")",
"+",
"end",
"assert",
"isinstance",
"(",
"data",
",",
"bytes",
")",
"file",
"=",
"getattr",
"(",
"file",
",",
"\"buffer\"",
",",
"file",
")",
"try",
":",
"file",
".",
"write",
"(",
"data",
")",
"except",
"TypeError",
":",
"if",
"PY3",
":",
"# For StringIO, first try with surrogates",
"surr_data",
"=",
"data",
".",
"decode",
"(",
"encoding",
",",
"\"surrogateescape\"",
")",
"try",
":",
"file",
".",
"write",
"(",
"surr_data",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"file",
".",
"write",
"(",
"data",
".",
"decode",
"(",
"encoding",
",",
"\"replace\"",
")",
")",
"else",
":",
"# for file like objects with don't support bytes",
"file",
".",
"write",
"(",
"data",
".",
"decode",
"(",
"encoding",
",",
"\"replace\"",
")",
")",
"if",
"flush",
":",
"file",
".",
"flush",
"(",
")"
] | 28.892857
| 18.642857
|
def groupedby(collection, fn):
    """
    Group the items of *collection* by the key computed with *fn*.

    Yields ``(key, list_of_items)`` pairs, similar to itertools.groupby.

    :note: This function does not need initial sorting like itertools.groupby
    :attention: Order of pairs is not deterministic.
    """
    buckets = {}
    for element in collection:
        buckets.setdefault(fn(element), []).append(element)
    yield from buckets.items()
|
[
"def",
"groupedby",
"(",
"collection",
",",
"fn",
")",
":",
"d",
"=",
"{",
"}",
"for",
"item",
"in",
"collection",
":",
"k",
"=",
"fn",
"(",
"item",
")",
"try",
":",
"arr",
"=",
"d",
"[",
"k",
"]",
"except",
"KeyError",
":",
"arr",
"=",
"[",
"]",
"d",
"[",
"k",
"]",
"=",
"arr",
"arr",
".",
"append",
"(",
"item",
")",
"yield",
"from",
"d",
".",
"items",
"(",
")"
] | 21.578947
| 20.210526
|
def replay_sgf(sgf_contents):
    """Wrapper for sgf files, returning go.PositionWithContext instances.

    The very final position is NOT yielded (there is no follow-up move).
    To reach it, call pwc.position.play_move(pwc.next_move) on the last
    PositionWithContext returned.

    Example usage:
        with open(filename) as f:
            for position_w_context in replay_sgf(f.read()):
                print(position_w_context.position)
    """
    root = get_sgf_root_node(sgf_contents)
    props = root.properties
    assert int(sgf_prop(props.get('GM', ['1']))) == 1, "Not a Go SGF!"
    km_prop = props.get('KM')
    komi = float(sgf_prop(km_prop)) if km_prop is not None else 0
    result = utils.parse_game_result(sgf_prop(props.get('RE', '')))
    position = Position(komi=komi)
    node = root
    # Walk the main line, yielding each position together with the move
    # that follows it and the final game result.
    while position is not None and node.next is not None:
        position = handle_node(position, node)
        maybe_correct_next(position, node.next)
        yield PositionWithContext(position, get_next_move(node), result)
        node = node.next
|
[
"def",
"replay_sgf",
"(",
"sgf_contents",
")",
":",
"root_node",
"=",
"get_sgf_root_node",
"(",
"sgf_contents",
")",
"props",
"=",
"root_node",
".",
"properties",
"assert",
"int",
"(",
"sgf_prop",
"(",
"props",
".",
"get",
"(",
"'GM'",
",",
"[",
"'1'",
"]",
")",
")",
")",
"==",
"1",
",",
"\"Not a Go SGF!\"",
"komi",
"=",
"0",
"if",
"props",
".",
"get",
"(",
"'KM'",
")",
"is",
"not",
"None",
":",
"komi",
"=",
"float",
"(",
"sgf_prop",
"(",
"props",
".",
"get",
"(",
"'KM'",
")",
")",
")",
"result",
"=",
"utils",
".",
"parse_game_result",
"(",
"sgf_prop",
"(",
"props",
".",
"get",
"(",
"'RE'",
",",
"''",
")",
")",
")",
"pos",
"=",
"Position",
"(",
"komi",
"=",
"komi",
")",
"current_node",
"=",
"root_node",
"while",
"pos",
"is",
"not",
"None",
"and",
"current_node",
".",
"next",
"is",
"not",
"None",
":",
"pos",
"=",
"handle_node",
"(",
"pos",
",",
"current_node",
")",
"maybe_correct_next",
"(",
"pos",
",",
"current_node",
".",
"next",
")",
"next_move",
"=",
"get_next_move",
"(",
"current_node",
")",
"yield",
"PositionWithContext",
"(",
"pos",
",",
"next_move",
",",
"result",
")",
"current_node",
"=",
"current_node",
".",
"next"
] | 38.37931
| 16.862069
|
def all_minutes(self):
        """
        Return a DatetimeIndex covering every minute in this calendar.
        """
        def _to_ns(values):
            # View the datetime64 payload as raw int64 nanosecond counts.
            return values.astype('datetime64[ns]').view('int64')

        opens_ns = _to_ns(self._opens.values)
        closes_ns = _to_ns(self._closes.values)
        return DatetimeIndex(compute_all_minutes(opens_ns, closes_ns), tz=UTC)
|
[
"def",
"all_minutes",
"(",
"self",
")",
":",
"opens_in_ns",
"=",
"self",
".",
"_opens",
".",
"values",
".",
"astype",
"(",
"'datetime64[ns]'",
",",
")",
".",
"view",
"(",
"'int64'",
")",
"closes_in_ns",
"=",
"self",
".",
"_closes",
".",
"values",
".",
"astype",
"(",
"'datetime64[ns]'",
",",
")",
".",
"view",
"(",
"'int64'",
")",
"return",
"DatetimeIndex",
"(",
"compute_all_minutes",
"(",
"opens_in_ns",
",",
"closes_in_ns",
")",
",",
"tz",
"=",
"UTC",
",",
")"
] | 27.5
| 18.25
|
async def edit_message_caption(self, chat_id: typing.Union[base.Integer, base.String, None] = None,
                               message_id: typing.Union[base.Integer, None] = None,
                               inline_message_id: typing.Union[base.String, None] = None,
                               caption: typing.Union[base.String, None] = None,
                               parse_mode: typing.Union[base.String, None] = None,
                               reply_markup: typing.Union[types.InlineKeyboardMarkup,
                                                          None] = None) -> types.Message or base.Boolean:
        """
        Use this method to edit captions of messages sent by the bot or via the bot (for inline bots).

        Source: https://core.telegram.org/bots/api#editmessagecaption

        :param chat_id: Required if inline_message_id is not specified
            Unique identifier for the target chat or username of the target channel
        :type chat_id: :obj:`typing.Union[base.Integer, base.String, None]`
        :param message_id: Required if inline_message_id is not specified. Identifier of the sent message
        :type message_id: :obj:`typing.Union[base.Integer, None]`
        :param inline_message_id: Required if chat_id and message_id are not specified. Identifier of the inline message
        :type inline_message_id: :obj:`typing.Union[base.String, None]`
        :param caption: New caption of the message
        :type caption: :obj:`typing.Union[base.String, None]`
        :param parse_mode: Send Markdown or HTML, if you want Telegram apps to show bold, italic,
            fixed-width text or inline URLs in your bot's message.
        :type parse_mode: :obj:`typing.Union[base.String, None]`
        :param reply_markup: A JSON-serialized object for an inline keyboard
        :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup, None]`
        :return: On success, if edited message is sent by the bot, the edited Message is returned,
            otherwise True is returned.
        :rtype: :obj:`typing.Union[types.Message, base.Boolean]`
        """
        reply_markup = prepare_arg(reply_markup)
        # CAUTION: generate_payload(**locals()) snapshots every local defined
        # so far as the request payload -- do not add or rename locals above
        # this line without auditing the payload contents.
        payload = generate_payload(**locals())
        if self.parse_mode:
            # Fall back to the bot-wide default parse mode when the caller
            # did not pass one explicitly.
            payload.setdefault('parse_mode', self.parse_mode)

        result = await self.request(api.Methods.EDIT_MESSAGE_CAPTION, payload)
        if isinstance(result, bool):
            # Inline-message edits return a bare True instead of a Message.
            return result
        return types.Message(**result)
|
[
"async",
"def",
"edit_message_caption",
"(",
"self",
",",
"chat_id",
":",
"typing",
".",
"Union",
"[",
"base",
".",
"Integer",
",",
"base",
".",
"String",
",",
"None",
"]",
"=",
"None",
",",
"message_id",
":",
"typing",
".",
"Union",
"[",
"base",
".",
"Integer",
",",
"None",
"]",
"=",
"None",
",",
"inline_message_id",
":",
"typing",
".",
"Union",
"[",
"base",
".",
"String",
",",
"None",
"]",
"=",
"None",
",",
"caption",
":",
"typing",
".",
"Union",
"[",
"base",
".",
"String",
",",
"None",
"]",
"=",
"None",
",",
"parse_mode",
":",
"typing",
".",
"Union",
"[",
"base",
".",
"String",
",",
"None",
"]",
"=",
"None",
",",
"reply_markup",
":",
"typing",
".",
"Union",
"[",
"types",
".",
"InlineKeyboardMarkup",
",",
"None",
"]",
"=",
"None",
")",
"->",
"types",
".",
"Message",
"or",
"base",
".",
"Boolean",
":",
"reply_markup",
"=",
"prepare_arg",
"(",
"reply_markup",
")",
"payload",
"=",
"generate_payload",
"(",
"*",
"*",
"locals",
"(",
")",
")",
"if",
"self",
".",
"parse_mode",
":",
"payload",
".",
"setdefault",
"(",
"'parse_mode'",
",",
"self",
".",
"parse_mode",
")",
"result",
"=",
"await",
"self",
".",
"request",
"(",
"api",
".",
"Methods",
".",
"EDIT_MESSAGE_CAPTION",
",",
"payload",
")",
"if",
"isinstance",
"(",
"result",
",",
"bool",
")",
":",
"return",
"result",
"return",
"types",
".",
"Message",
"(",
"*",
"*",
"result",
")"
] | 63.666667
| 33.102564
|
def get_groups(self):
        """
        Return every group in the user pool, wrapped as instances of
        ``self.group_class``.

        :return: list of instances
        """
        data = self.client.list_groups(UserPoolId=self.user_pool_id)
        groups = []
        for group_data in data.get('Groups'):
            groups.append(self.get_group_obj(group_data))
        return groups
|
[
"def",
"get_groups",
"(",
"self",
")",
":",
"response",
"=",
"self",
".",
"client",
".",
"list_groups",
"(",
"UserPoolId",
"=",
"self",
".",
"user_pool_id",
")",
"return",
"[",
"self",
".",
"get_group_obj",
"(",
"group_data",
")",
"for",
"group_data",
"in",
"response",
".",
"get",
"(",
"'Groups'",
")",
"]"
] | 38.333333
| 13.666667
|
def xslt(request):
    """Render all Foo objects as XML transformed by the standard XSLT stylesheet."""
    queryset = foobar_models.Foo.objects.all()
    return render_xslt_to_response(
        'xslt/model-to-xml.xsl', queryset, mimetype='text/xml')
|
[
"def",
"xslt",
"(",
"request",
")",
":",
"foos",
"=",
"foobar_models",
".",
"Foo",
".",
"objects",
".",
"all",
"(",
")",
"return",
"render_xslt_to_response",
"(",
"'xslt/model-to-xml.xsl'",
",",
"foos",
",",
"mimetype",
"=",
"'text/xml'",
")"
] | 50.75
| 17.5
|
def get_file_from_cghub(job, cghub_xml, cghub_key, univ_options, write_to_jobstore=True):
    """
    This function will download the file from cghub using the xml specified by cghub_xml

    ARGUMENTS
    1. cghub_xml: Path to an xml file for cghub.
    2. cghub_key: Credentials for a cghub download operation.
    3. write_to_jobstore: Flag indicating whether the final product should be written to jobStore.

    RETURN VALUES
    1. A path to the prefix for the fastqs that is compatible with the pipeline.

    NOTE(review): write_to_jobstore is never read in this body -- confirm
    whether it was meant to gate a jobStore write at the end.
    """
    work_dir = job.fileStore.getLocalTempDir()
    # Get from S3 if required
    if cghub_xml.startswith('http'):
        assert cghub_xml.startswith('https://s3'), 'Not an S3 link'
        cghub_xml = get_file_from_s3(job, cghub_xml, encryption_key=univ_options['sse_key'],
                                     write_to_jobstore=False)
    else:
        # Local path: stage a copy of the xml inside the job's work dir.
        assert os.path.exists(cghub_xml), 'Could not find file: %s' % cghub_xml
        shutil.copy(cghub_xml, os.path.join(work_dir, 'cghub.xml'))
        cghub_xml = os.path.join(work_dir, 'cghub.xml')
    # The key is always expected to be a local file; stage it as well.
    assert os.path.exists(cghub_key), 'Could not find file: %s' % cghub_key
    shutil.copy(cghub_key, os.path.join(work_dir, 'cghub.key'))
    cghub_key = os.path.join(work_dir, 'cghub.key')
    temp_fastqdir = os.path.join(work_dir, 'temp_fastqdir')
    os.mkdir(temp_fastqdir)
    base_parameters = ['-d', docker_path(cghub_xml),
                       '-c', docker_path(cghub_key),
                       '-p', docker_path(temp_fastqdir)]
    # Retry the genetorrent download up to 3 times, sleeping 10 minutes
    # between attempts.
    attemptNumber = 0
    while True:
        # timeout increases by 10 mins per try
        parameters = base_parameters + ['-k', str((attemptNumber + 1) * 10)]
        try:
            docker_call('genetorrent', tool_parameters=parameters, work_dir=work_dir,
                        dockerhub=univ_options['dockerhub'])
        except RuntimeError as err:
            time.sleep(600)
            # NOTE(review): err.message is Python 2 only; str(err) is the
            # portable spelling -- confirm the target interpreter.
            job.fileStore.logToMaster(err.message)
            attemptNumber += 1
            if attemptNumber == 3:
                # Out of retries: propagate the original RuntimeError.
                raise
            else:
                continue
        else:
            break
    # The download lands in a single analysis-id directory; ignore hidden
    # files and genetorrent's .gto bookkeeping file.
    analysis_id = [x for x in os.listdir(temp_fastqdir)
                   if not (x.startswith('.') or x.endswith('.gto'))][0]
    files = [x for x in os.listdir(os.path.join(temp_fastqdir, analysis_id))
             if not x.startswith('.')]
    if len(files) == 2:
        # Two files: expected to be a bam + bai pair (WXS archives).
        prefixes = [os.path.splitext(x)[1] for x in files]
        if {'.bam', '.bai'} - set(prefixes):
            raise RuntimeError('This is probably not a TCGA archive for WXS or RSQ. If you are ' +
                               'sure it is, email aarao@ucsc.edu with details.')
        else:
            bamfile = os.path.join(temp_fastqdir, analysis_id,
                                   [x for x in files if x.endswith('.bam')][0])
            return bam2fastq(job, bamfile, univ_options)
    elif len(files) == 1:
        # One file: expected to be a fastq tarball (RSQ archives).
        if not files[0].endswith('.tar.gz'):
            raise RuntimeError('This is probably not a TCGA archive for WXS or RSQ. If you are ' +
                               'sure it is, email aarao@ucsc.edu with details.')
        else:
            outFastqDir = os.path.join(work_dir, 'fastqs')
            os.mkdir(outFastqDir)
            fastq_file = untargz(os.path.join(temp_fastqdir, analysis_id, files[0]), outFastqDir)
            if fastq_file.endswith(('.fastq', '.fastq.gz')):
                # Normalize to the read-1 prefix expected by the pipeline.
                return re.sub('_2.fastq', '_1.fastq', fastq_file)
            else:
                raise RuntimeError('This is probably not a TCGA archive for WXS or RSQ. If you ' +
                                   'are sure it is, email aarao@ucsc.edu with details.')
    else:
        raise RuntimeError('This is probably not a TCGA archive for WXS or RSQ. If you are sure ' +
                           'it is, email aarao@ucsc.edu with details.')
|
[
"def",
"get_file_from_cghub",
"(",
"job",
",",
"cghub_xml",
",",
"cghub_key",
",",
"univ_options",
",",
"write_to_jobstore",
"=",
"True",
")",
":",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"# Get from S3 if required",
"if",
"cghub_xml",
".",
"startswith",
"(",
"'http'",
")",
":",
"assert",
"cghub_xml",
".",
"startswith",
"(",
"'https://s3'",
")",
",",
"'Not an S3 link'",
"cghub_xml",
"=",
"get_file_from_s3",
"(",
"job",
",",
"cghub_xml",
",",
"encryption_key",
"=",
"univ_options",
"[",
"'sse_key'",
"]",
",",
"write_to_jobstore",
"=",
"False",
")",
"else",
":",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"cghub_xml",
")",
",",
"'Could not find file: %s'",
"%",
"cghub_xml",
"shutil",
".",
"copy",
"(",
"cghub_xml",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'cghub.xml'",
")",
")",
"cghub_xml",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'cghub.xml'",
")",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"cghub_key",
")",
",",
"'Could not find file: %s'",
"%",
"cghub_key",
"shutil",
".",
"copy",
"(",
"cghub_key",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'cghub.key'",
")",
")",
"cghub_key",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'cghub.key'",
")",
"temp_fastqdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'temp_fastqdir'",
")",
"os",
".",
"mkdir",
"(",
"temp_fastqdir",
")",
"base_parameters",
"=",
"[",
"'-d'",
",",
"docker_path",
"(",
"cghub_xml",
")",
",",
"'-c'",
",",
"docker_path",
"(",
"cghub_key",
")",
",",
"'-p'",
",",
"docker_path",
"(",
"temp_fastqdir",
")",
"]",
"attemptNumber",
"=",
"0",
"while",
"True",
":",
"# timeout increases by 10 mins per try",
"parameters",
"=",
"base_parameters",
"+",
"[",
"'-k'",
",",
"str",
"(",
"(",
"attemptNumber",
"+",
"1",
")",
"*",
"10",
")",
"]",
"try",
":",
"docker_call",
"(",
"'genetorrent'",
",",
"tool_parameters",
"=",
"parameters",
",",
"work_dir",
"=",
"work_dir",
",",
"dockerhub",
"=",
"univ_options",
"[",
"'dockerhub'",
"]",
")",
"except",
"RuntimeError",
"as",
"err",
":",
"time",
".",
"sleep",
"(",
"600",
")",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"err",
".",
"message",
")",
"attemptNumber",
"+=",
"1",
"if",
"attemptNumber",
"==",
"3",
":",
"raise",
"else",
":",
"continue",
"else",
":",
"break",
"analysis_id",
"=",
"[",
"x",
"for",
"x",
"in",
"os",
".",
"listdir",
"(",
"temp_fastqdir",
")",
"if",
"not",
"(",
"x",
".",
"startswith",
"(",
"'.'",
")",
"or",
"x",
".",
"endswith",
"(",
"'.gto'",
")",
")",
"]",
"[",
"0",
"]",
"files",
"=",
"[",
"x",
"for",
"x",
"in",
"os",
".",
"listdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"temp_fastqdir",
",",
"analysis_id",
")",
")",
"if",
"not",
"x",
".",
"startswith",
"(",
"'.'",
")",
"]",
"if",
"len",
"(",
"files",
")",
"==",
"2",
":",
"prefixes",
"=",
"[",
"os",
".",
"path",
".",
"splitext",
"(",
"x",
")",
"[",
"1",
"]",
"for",
"x",
"in",
"files",
"]",
"if",
"{",
"'.bam'",
",",
"'.bai'",
"}",
"-",
"set",
"(",
"prefixes",
")",
":",
"raise",
"RuntimeError",
"(",
"'This is probably not a TCGA archive for WXS or RSQ. If you are '",
"+",
"'sure it is, email aarao@ucsc.edu with details.'",
")",
"else",
":",
"bamfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"temp_fastqdir",
",",
"analysis_id",
",",
"[",
"x",
"for",
"x",
"in",
"files",
"if",
"x",
".",
"endswith",
"(",
"'.bam'",
")",
"]",
"[",
"0",
"]",
")",
"return",
"bam2fastq",
"(",
"job",
",",
"bamfile",
",",
"univ_options",
")",
"elif",
"len",
"(",
"files",
")",
"==",
"1",
":",
"if",
"not",
"files",
"[",
"0",
"]",
".",
"endswith",
"(",
"'.tar.gz'",
")",
":",
"raise",
"RuntimeError",
"(",
"'This is probably not a TCGA archive for WXS or RSQ. If you are '",
"+",
"'sure it is, email aarao@ucsc.edu with details.'",
")",
"else",
":",
"outFastqDir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'fastqs'",
")",
"os",
".",
"mkdir",
"(",
"outFastqDir",
")",
"fastq_file",
"=",
"untargz",
"(",
"os",
".",
"path",
".",
"join",
"(",
"temp_fastqdir",
",",
"analysis_id",
",",
"files",
"[",
"0",
"]",
")",
",",
"outFastqDir",
")",
"if",
"fastq_file",
".",
"endswith",
"(",
"(",
"'.fastq'",
",",
"'.fastq.gz'",
")",
")",
":",
"return",
"re",
".",
"sub",
"(",
"'_2.fastq'",
",",
"'_1.fastq'",
",",
"fastq_file",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"'This is probably not a TCGA archive for WXS or RSQ. If you '",
"+",
"'are sure it is, email aarao@ucsc.edu with details.'",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"'This is probably not a TCGA archive for WXS or RSQ. If you are sure '",
"+",
"'it is, email aarao@ucsc.edu with details.'",
")"
] | 49.447368
| 24.842105
|
def run_selected_clicked(self):
        """Run every scenario the user has selected in the table."""
        # Collect the distinct selected row numbers, in ascending order.
        selected_rows = sorted({index.row() for index in
                                self.table.selectedIndexes()})
        self.enable_busy_cursor()
        for current_row in selected_rows:
            scenario_item = self.table.item(current_row, 0)
            status_item = self.table.item(current_row, 1)
            self.run_task(scenario_item, status_item)
        self.disable_busy_cursor()
|
[
"def",
"run_selected_clicked",
"(",
"self",
")",
":",
"# get all selected rows",
"rows",
"=",
"sorted",
"(",
"set",
"(",
"index",
".",
"row",
"(",
")",
"for",
"index",
"in",
"self",
".",
"table",
".",
"selectedIndexes",
"(",
")",
")",
")",
"self",
".",
"enable_busy_cursor",
"(",
")",
"# iterate over selected rows",
"for",
"row",
"in",
"rows",
":",
"current_row",
"=",
"row",
"item",
"=",
"self",
".",
"table",
".",
"item",
"(",
"current_row",
",",
"0",
")",
"status_item",
"=",
"self",
".",
"table",
".",
"item",
"(",
"current_row",
",",
"1",
")",
"self",
".",
"run_task",
"(",
"item",
",",
"status_item",
")",
"self",
".",
"disable_busy_cursor",
"(",
")"
] | 39.615385
| 9.153846
|
def overlay_gateway_site_bfd_enable(self, **kwargs):
        """Enable BFD on an overlay-gateway site.

        Builds the brocade-tunnels netconf payload
        ``overlay-gateway/<name>/site/<name>/bfd-enable`` and passes it to
        the callback.

        :param name: Name used for both the overlay-gateway and its site.
        :param callback: Optional callable invoked with the assembled
            element tree; defaults to ``self._callback``.
        :return: Whatever the callback returns.
        """
        config = ET.Element("config")
        overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
        # Bug fix: the auto-generated code popped 'name' twice, so the
        # second pop always raised KeyError. Pop it once and reuse it.
        name = kwargs.pop('name')
        name_key = ET.SubElement(overlay_gateway, "name")
        name_key.text = name
        site = ET.SubElement(overlay_gateway, "site")
        name_key = ET.SubElement(site, "name")
        name_key.text = name
        ET.SubElement(site, "bfd-enable")
        callback = kwargs.pop('callback', self._callback)
        return callback(config)
|
[
"def",
"overlay_gateway_site_bfd_enable",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"overlay_gateway",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"overlay-gateway\"",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:brocade-tunnels\"",
")",
"name_key",
"=",
"ET",
".",
"SubElement",
"(",
"overlay_gateway",
",",
"\"name\"",
")",
"name_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'name'",
")",
"site",
"=",
"ET",
".",
"SubElement",
"(",
"overlay_gateway",
",",
"\"site\"",
")",
"name_key",
"=",
"ET",
".",
"SubElement",
"(",
"site",
",",
"\"name\"",
")",
"name_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'name'",
")",
"bfd_enable",
"=",
"ET",
".",
"SubElement",
"(",
"site",
",",
"\"bfd-enable\"",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | 44.571429
| 14.785714
|
def getSensors(self):
        """ Returns the currently visible state of the world as a numpy array
        of doubles.
        """
        # Real power demand at every PQ (load) bus, in case order.
        demand = [bus.p_demand for bus in self.case.buses if bus.type == PQ]
        Pd = array(demand)
        logger.info("State: %s" % list(Pd))
        return Pd
|
[
"def",
"getSensors",
"(",
"self",
")",
":",
"Pd",
"=",
"array",
"(",
"[",
"b",
".",
"p_demand",
"for",
"b",
"in",
"self",
".",
"case",
".",
"buses",
"if",
"b",
".",
"type",
"==",
"PQ",
"]",
")",
"logger",
".",
"info",
"(",
"\"State: %s\"",
"%",
"list",
"(",
"Pd",
")",
")",
"return",
"Pd"
] | 37.285714
| 14.142857
|
def read_namespaced_stateful_set_scale(self, name, namespace, **kwargs):  # noqa: E501
        """read_namespaced_stateful_set_scale  # noqa: E501

        Read the scale subresource of the specified StatefulSet.  # noqa: E501
        Synchronous by default; pass async_req=True to make the HTTP
        request asynchronously, in which case the request thread is
        returned instead of the data.

        :param async_req bool
        :param str name: name of the Scale (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :return: V1Scale
        """
        kwargs['_return_http_data_only'] = True
        call = self.read_namespaced_stateful_set_scale_with_http_info
        if kwargs.get('async_req'):
            # Asynchronous: hand the request thread back to the caller.
            return call(name, namespace, **kwargs)  # noqa: E501
        # Synchronous: unwrap and return the response data.
        (data) = call(name, namespace, **kwargs)  # noqa: E501
        return data
|
[
"def",
"read_namespaced_stateful_set_scale",
"(",
"self",
",",
"name",
",",
"namespace",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"read_namespaced_stateful_set_scale_with_http_info",
"(",
"name",
",",
"namespace",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"read_namespaced_stateful_set_scale_with_http_info",
"(",
"name",
",",
"namespace",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
] | 52.304348
| 26.913043
|
def com_google_fonts_check_smart_dropout(ttFont):
  """Font enables smart dropout control in "prep" table instructions?

  B8 01 FF    PUSHW 0x01FF
  85          SCANCTRL (unconditionally turn on dropout control mode)
  B0 04       PUSHB 0x04
  8D          SCANTYPE (enable smart dropout control)

  Smart dropout control means activating rules 1, 2 and 5:
  Rule 1: If a pixel's center falls within the glyph outline,
          that pixel is turned on.
  Rule 2: If a contour falls exactly on a pixel's center,
          that pixel is turned on.
  Rule 5: If a scan line between two adjacent pixel centers
          (either vertical or horizontal) is intersected
          by both an on-Transition contour and an off-Transition
          contour and neither of the pixels was already turned on
          by rules 1 and 2, turn on the pixel which is closer to
          the midpoint between the on-Transition contour and
          off-Transition contour. This is "Smart" dropout control.
  """
  INSTRUCTIONS = b"\xb8\x01\xff\x85\xb0\x04\x8d"

  prep_enables_dropout = (
      "prep" in ttFont
      and INSTRUCTIONS in ttFont["prep"].program.getBytecode())
  if prep_enables_dropout:
    yield PASS, ("'prep' table contains instructions"
                 " enabling smart dropout control.")
  else:
    yield FAIL, ("'prep' table does not contain TrueType "
                 " instructions enabling smart dropout control."
                 " To fix, export the font with autohinting enabled,"
                 " or run ttfautohint on the font, or run the "
                 " `gftools fix-nonhinting` script.")
|
[
"def",
"com_google_fonts_check_smart_dropout",
"(",
"ttFont",
")",
":",
"INSTRUCTIONS",
"=",
"b\"\\xb8\\x01\\xff\\x85\\xb0\\x04\\x8d\"",
"if",
"(",
"\"prep\"",
"in",
"ttFont",
"and",
"INSTRUCTIONS",
"in",
"ttFont",
"[",
"\"prep\"",
"]",
".",
"program",
".",
"getBytecode",
"(",
")",
")",
":",
"yield",
"PASS",
",",
"(",
"\"'prep' table contains instructions\"",
"\" enabling smart dropout control.\"",
")",
"else",
":",
"yield",
"FAIL",
",",
"(",
"\"'prep' table does not contain TrueType \"",
"\" instructions enabling smart dropout control.\"",
"\" To fix, export the font with autohinting enabled,\"",
"\" or run ttfautohint on the font, or run the \"",
"\" `gftools fix-nonhinting` script.\"",
")"
] | 45.676471
| 18.147059
|
def add_annotator(self, doc, annotator):
        """Adds an annotator to the SPDX Document.

        Annotator is an entity created by an EntityBuilder.
        Raises SPDXValueError if not a valid annotator type.
        """
        # Each annotator marks the start of a new annotation object.
        # FIXME: this state does not make sense
        self.reset_annotations()
        if not validations.validate_annotator(annotator):
            raise SPDXValueError('Annotation::Annotator')
        doc.add_annotation(annotation.Annotation(annotator=annotator))
        return True
|
[
"def",
"add_annotator",
"(",
"self",
",",
"doc",
",",
"annotator",
")",
":",
"# Each annotator marks the start of a new annotation object.",
"# FIXME: this state does not make sense",
"self",
".",
"reset_annotations",
"(",
")",
"if",
"validations",
".",
"validate_annotator",
"(",
"annotator",
")",
":",
"doc",
".",
"add_annotation",
"(",
"annotation",
".",
"Annotation",
"(",
"annotator",
"=",
"annotator",
")",
")",
"return",
"True",
"else",
":",
"raise",
"SPDXValueError",
"(",
"'Annotation::Annotator'",
")"
] | 45.153846
| 14.615385
|
def calendar_tuple(jd_float, offset=0.0):
    """Return a (year, month, day, hour, minute, second.fraction) tuple.

    The `offset` is added to the time before it is split into its
    components.  This is useful if the user is going to round the
    result before displaying it.  If the result is going to be
    displayed as seconds, for example, set `offset` to half a second
    and then throw away the fraction; if the result is going to be
    displayed as minutes, set `offset` to thirty seconds and then
    throw away the seconds; and so forth.
    """
    jd_float = _to_array(jd_float)
    # Bug fix: `offset` was documented but previously ignored; apply it
    # before splitting the Julian date (+0.5 shifts noon-based JD to
    # midnight-based calendar days).
    whole, fraction = divmod(jd_float + offset + 0.5, 1.0)
    whole = whole.astype(int)
    year, month, day = calendar_date(whole)
    hour, hfrac = divmod(fraction * 24.0, 1.0)
    minute, second = divmod(hfrac * 3600.0, 60.0)
    return year, month, day, hour.astype(int), minute.astype(int), second
|
[
"def",
"calendar_tuple",
"(",
"jd_float",
",",
"offset",
"=",
"0.0",
")",
":",
"jd_float",
"=",
"_to_array",
"(",
"jd_float",
")",
"whole",
",",
"fraction",
"=",
"divmod",
"(",
"jd_float",
"+",
"0.5",
",",
"1.0",
")",
"whole",
"=",
"whole",
".",
"astype",
"(",
"int",
")",
"year",
",",
"month",
",",
"day",
"=",
"calendar_date",
"(",
"whole",
")",
"hour",
",",
"hfrac",
"=",
"divmod",
"(",
"fraction",
"*",
"24.0",
",",
"1.0",
")",
"minute",
",",
"second",
"=",
"divmod",
"(",
"hfrac",
"*",
"3600.0",
",",
"60.0",
")",
"return",
"year",
",",
"month",
",",
"day",
",",
"hour",
".",
"astype",
"(",
"int",
")",
",",
"minute",
".",
"astype",
"(",
"int",
")",
",",
"second"
] | 46.052632
| 16.315789
|
def _get_repo_options(fromrepo=None, packagesite=None):
    '''
    Return a dict used to seed the "env" list, which is used to set
    environment variables for any pkg_add commands that are spawned.
    If ``fromrepo`` or ``packagesite`` are None, then their corresponding
    config parameter will be looked up with config.get.
    If both ``fromrepo`` and ``packagesite`` are None, and neither
    freebsdpkg.PACKAGEROOT nor freebsdpkg.PACKAGESITE are specified, then an
    empty dict is returned, and it is assumed that the system defaults (or
    environment variables) will be used.
    '''
    # Explicit arguments win; otherwise fall back to the minion config.
    root = fromrepo if fromrepo is not None \
        else __salt__['config.get']('freebsdpkg.PACKAGEROOT', None)
    site = packagesite if packagesite is not None \
        else __salt__['config.get']('freebsdpkg.PACKAGESITE', None)
    ret = {}
    if root is not None:
        ret['PACKAGEROOT'] = root
    if site is not None:
        ret['PACKAGESITE'] = site
    return ret
|
[
"def",
"_get_repo_options",
"(",
"fromrepo",
"=",
"None",
",",
"packagesite",
"=",
"None",
")",
":",
"root",
"=",
"fromrepo",
"if",
"fromrepo",
"is",
"not",
"None",
"else",
"__salt__",
"[",
"'config.get'",
"]",
"(",
"'freebsdpkg.PACKAGEROOT'",
",",
"None",
")",
"site",
"=",
"packagesite",
"if",
"packagesite",
"is",
"not",
"None",
"else",
"__salt__",
"[",
"'config.get'",
"]",
"(",
"'freebsdpkg.PACKAGESITE'",
",",
"None",
")",
"ret",
"=",
"{",
"}",
"if",
"root",
"is",
"not",
"None",
":",
"ret",
"[",
"'PACKAGEROOT'",
"]",
"=",
"root",
"if",
"site",
"is",
"not",
"None",
":",
"ret",
"[",
"'PACKAGESITE'",
"]",
"=",
"site",
"return",
"ret"
] | 41.869565
| 23.26087
|
def parse_market(self, market, split_char='_'):
        """
        Split a service-native market identifier into its crypto and fiat
        symbols, converted to moneywagon format.
        """
        raw_crypto, raw_fiat = market.lower().split(split_char)
        crypto = self.fix_symbol(raw_crypto, reverse=True)
        fiat = self.fix_symbol(raw_fiat, reverse=True)
        return (crypto, fiat)
|
[
"def",
"parse_market",
"(",
"self",
",",
"market",
",",
"split_char",
"=",
"'_'",
")",
":",
"crypto",
",",
"fiat",
"=",
"market",
".",
"lower",
"(",
")",
".",
"split",
"(",
"split_char",
")",
"return",
"(",
"self",
".",
"fix_symbol",
"(",
"crypto",
",",
"reverse",
"=",
"True",
")",
",",
"self",
".",
"fix_symbol",
"(",
"fiat",
",",
"reverse",
"=",
"True",
")",
")"
] | 38.3
| 15.1
|
def ctc_beam_search_decoder(probs_seq,
                            alphabet,
                            beam_size,
                            cutoff_prob=1.0,
                            cutoff_top_n=40,
                            scorer=None):
    """Wrapper for the CTC Beam Search Decoder.

    :param probs_seq: 2-D list of normalized probability distributions
        over alphabet and blank, one row per time step.
    :type probs_seq: 2-D list
    :param alphabet: alphabet list.
    :alphabet: Alphabet
    :param beam_size: Width for beam search.
    :type beam_size: int
    :param cutoff_prob: Cutoff probability in pruning; default 1.0 (no pruning).
    :type cutoff_prob: float
    :param cutoff_top_n: Only the top cutoff_top_n highest-probability
        characters are considered during search; default 40.
    :type cutoff_top_n: int
    :param scorer: External scorer for partially decoded sentences,
        e.g. word count or language model.
    :type scorer: Scorer
    :return: List of (log probability, sentence) tuples, in descending
        order of probability.
    :rtype: list
    """
    raw_results = swigwrapper.ctc_beam_search_decoder(
        probs_seq, alphabet.config_file(), beam_size, cutoff_prob,
        cutoff_top_n, scorer)
    decoded = []
    for res in raw_results:
        decoded.append((res.probability, alphabet.decode(res.tokens)))
    return decoded
|
[
"def",
"ctc_beam_search_decoder",
"(",
"probs_seq",
",",
"alphabet",
",",
"beam_size",
",",
"cutoff_prob",
"=",
"1.0",
",",
"cutoff_top_n",
"=",
"40",
",",
"scorer",
"=",
"None",
")",
":",
"beam_results",
"=",
"swigwrapper",
".",
"ctc_beam_search_decoder",
"(",
"probs_seq",
",",
"alphabet",
".",
"config_file",
"(",
")",
",",
"beam_size",
",",
"cutoff_prob",
",",
"cutoff_top_n",
",",
"scorer",
")",
"beam_results",
"=",
"[",
"(",
"res",
".",
"probability",
",",
"alphabet",
".",
"decode",
"(",
"res",
".",
"tokens",
")",
")",
"for",
"res",
"in",
"beam_results",
"]",
"return",
"beam_results"
] | 44.914286
| 17.257143
|
def get_available_name(self, name):
        """
        Return a filename, derived from *name*, that is free on the target
        storage system and available for new content to be written to.
        """
        dir_name, file_name = os.path.split(name)
        file_root, file_ext = os.path.splitext(file_name)
        # On collision, append "_<n>" before the extension (file_ext keeps
        # the dot), counting up until an unused name is found.
        counter = itertools.count(1)
        while self.exists(name):
            candidate = "%s_%s%s" % (file_root, next(counter), file_ext)
            name = os.path.join(dir_name, candidate)
        return name
|
[
"def",
"get_available_name",
"(",
"self",
",",
"name",
")",
":",
"dir_name",
",",
"file_name",
"=",
"os",
".",
"path",
".",
"split",
"(",
"name",
")",
"file_root",
",",
"file_ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"file_name",
")",
"# If the filename already exists, add an underscore and a number (before",
"# the file extension, if one exists) to the filename until the generated",
"# filename doesn't exist.",
"count",
"=",
"itertools",
".",
"count",
"(",
"1",
")",
"while",
"self",
".",
"exists",
"(",
"name",
")",
":",
"# file_ext includes the dot.",
"name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_name",
",",
"\"%s_%s%s\"",
"%",
"(",
"file_root",
",",
"next",
"(",
"count",
")",
",",
"file_ext",
")",
")",
"return",
"name"
] | 43.3125
| 17.8125
|
def splitPath(self, path):
        """
        Splits the path into its components.

        :param      path | <str>

        :return     [<str>, ..]
        """
        sep = self.model().separator()
        parts = nativestring(path).lstrip(sep).split(sep)
        # A trailing separator leaves an empty last component; poke the
        # model so the item for this path gets loaded.
        if parts and not parts[-1]:
            self.model().itemByPath(path)
        return parts
|
[
"def",
"splitPath",
"(",
"self",
",",
"path",
")",
":",
"sep",
"=",
"self",
".",
"model",
"(",
")",
".",
"separator",
"(",
")",
"splt",
"=",
"nativestring",
"(",
"path",
")",
".",
"lstrip",
"(",
"sep",
")",
".",
"split",
"(",
"sep",
")",
"if",
"(",
"splt",
"and",
"not",
"splt",
"[",
"-",
"1",
"]",
")",
":",
"self",
".",
"model",
"(",
")",
".",
"itemByPath",
"(",
"path",
")",
"return",
"splt"
] | 25.6
| 13.333333
|
def maybe_convert_ix(*args):
    """
    We likely want to take the cross-product: when every argument is
    array-like, convert them to an open mesh with np.ix_; otherwise
    return the arguments unchanged.
    """
    array_like = (np.ndarray, list, ABCSeries, Index)
    if all(isinstance(arg, array_like) for arg in args):
        return np.ix_(*args)
    return args
|
[
"def",
"maybe_convert_ix",
"(",
"*",
"args",
")",
":",
"ixify",
"=",
"True",
"for",
"arg",
"in",
"args",
":",
"if",
"not",
"isinstance",
"(",
"arg",
",",
"(",
"np",
".",
"ndarray",
",",
"list",
",",
"ABCSeries",
",",
"Index",
")",
")",
":",
"ixify",
"=",
"False",
"if",
"ixify",
":",
"return",
"np",
".",
"ix_",
"(",
"*",
"args",
")",
"else",
":",
"return",
"args"
] | 20.357143
| 19.642857
|
def _post_json(self, instance, space=None, rel_path=None, extra_params=None):
    """
    Base level method for creating data via the API (HTTP POST).

    :param instance: model instance whose ``data`` dict is serialized and
        POSTed to the service.
    :param space: the owning Space; required unless ``instance`` is a
        Space or an Event.
    :param rel_path: optional override for the model's relative API path.
    :param extra_params: optional dict of extra query-string parameters.
    :returns: a fresh instance of the same model built from the API
        response JSON, with ``api`` (and ``space``, when given) attached.
    :raises Exception: when ``space`` is missing but required, or when the
        service returns any status code other than 201.
    :raises AttributeError: when the instance already carries a 'number'.
    """
    model = type(instance)
    # Only API.spaces and API.event should not provide
    # the `space` argument -- everything else belongs to a space.
    if space is None and model not in (Space, Event):
        raise Exception(
            'In general, `API._post_json` should always '
            'be called with a `space` argument.'
        )
    # A 'number' means the ticket already exists server-side.
    if 'number' in instance.data:
        raise AttributeError(
            'You cannot create a ticket which already has a number'
        )
    if not extra_params:
        extra_params = {}
    # Generate the url to hit.
    # NOTE(review): `urllib.urlencode` is the Python 2 location; on
    # Python 3 this would be `urllib.parse.urlencode` -- confirm runtime.
    url = '{0}/{1}/{2}?{3}'.format(
        settings.API_ROOT_PATH,
        settings.API_VERSION,
        rel_path or model.rel_path,
        urllib.urlencode(extra_params),
    )
    # POST the instance data with the API credentials in the headers.
    response = requests.post(
        url=url,
        data=json.dumps(instance.data),
        headers={
            'X-Api-Key': self.key,
            'X-Api-Secret': self.secret,
            'Content-type': "application/json",
        },
    )
    if response.status_code == 201:  # Created
        # Rebuild a model instance from the server's representation.
        instance = model(data=response.json())
        instance.api = self
        if space:
            instance.space = space
        return instance
    else:  # Most likely a 404 Not Found
        raise Exception(
            'Code {0} returned from `{1}`. Response text: "{2}".'.format(
                response.status_code,
                url,
                response.text
            )
        )
|
[
"def",
"_post_json",
"(",
"self",
",",
"instance",
",",
"space",
"=",
"None",
",",
"rel_path",
"=",
"None",
",",
"extra_params",
"=",
"None",
")",
":",
"model",
"=",
"type",
"(",
"instance",
")",
"# Only API.spaces and API.event should not provide",
"# the `space argument",
"if",
"space",
"is",
"None",
"and",
"model",
"not",
"in",
"(",
"Space",
",",
"Event",
")",
":",
"raise",
"Exception",
"(",
"'In general, `API._post_json` should always '",
"'be called with a `space` argument.'",
")",
"if",
"'number'",
"in",
"instance",
".",
"data",
":",
"raise",
"AttributeError",
"(",
"'You cannot create a ticket which already has a number'",
")",
"if",
"not",
"extra_params",
":",
"extra_params",
"=",
"{",
"}",
"# Generate the url to hit",
"url",
"=",
"'{0}/{1}/{2}?{3}'",
".",
"format",
"(",
"settings",
".",
"API_ROOT_PATH",
",",
"settings",
".",
"API_VERSION",
",",
"rel_path",
"or",
"model",
".",
"rel_path",
",",
"urllib",
".",
"urlencode",
"(",
"extra_params",
")",
",",
")",
"# Fetch the data",
"response",
"=",
"requests",
".",
"post",
"(",
"url",
"=",
"url",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"instance",
".",
"data",
")",
",",
"headers",
"=",
"{",
"'X-Api-Key'",
":",
"self",
".",
"key",
",",
"'X-Api-Secret'",
":",
"self",
".",
"secret",
",",
"'Content-type'",
":",
"\"application/json\"",
",",
"}",
",",
")",
"if",
"response",
".",
"status_code",
"==",
"201",
":",
"# OK",
"instance",
"=",
"model",
"(",
"data",
"=",
"response",
".",
"json",
"(",
")",
")",
"instance",
".",
"api",
"=",
"self",
"if",
"space",
":",
"instance",
".",
"space",
"=",
"space",
"return",
"instance",
"else",
":",
"# Most likely a 404 Not Found",
"raise",
"Exception",
"(",
"'Code {0} returned from `{1}`. Response text: \"{2}\".'",
".",
"format",
"(",
"response",
".",
"status_code",
",",
"url",
",",
"response",
".",
"text",
")",
")"
] | 30.535714
| 16.607143
|
def apply(self, node):
    """Run this transformation on *node*.

    Returns a ``(updated, new_node)`` pair, where ``updated`` reflects
    whether the pass changed anything.
    """
    transformed = self.run(node)
    return self.update, transformed
|
[
"def",
"apply",
"(",
"self",
",",
"node",
")",
":",
"new_node",
"=",
"self",
".",
"run",
"(",
"node",
")",
"return",
"self",
".",
"update",
",",
"new_node"
] | 40.25
| 7.25
|
def fit(self, X, y=None):
    """
    Primary drawing input for the frequency distribution visualization.

    Takes vectorized documents (rows) over a shared vocabulary (columns);
    the feature names themselves label the x-axis ticks elsewhere.

    Parameters
    ----------
    X : ndarray or DataFrame of shape n x m
        A matrix of n instances with m features representing the corpus
        of frequency vectorized documents.

    y : ndarray or DataFrame of shape n
        Labels for the documents for conditional frequency distribution.

    .. note:: Text documents must be vectorized before ``fit()``.
    """
    if y is not None:
        # Per-class (conditional) frequency distributions.
        labels = [str(label) for label in set(y)]
        self.classes_ = labels
        self.conditional_freqdist_ = {
            label: self.count(X[y == label]) for label in labels
        }
    else:
        # No labels given: skip the conditional distributions.
        self.conditional_freqdist_ = None
    # Frequency distribution over the entire corpus.
    self.freqdist_ = self.count(X)
    self.sorted_ = self.freqdist_.argsort()[::-1]  # descending frequency
    # Summary statistics: vocabulary size, total tokens, hapax count.
    self.vocab_ = self.freqdist_.shape[0]
    self.words_ = self.freqdist_.sum()
    self.hapaxes_ = sum(1 for c in self.freqdist_ if c == 1)
    # Draw and ensure that we return self.
    self.draw()
    return self
|
[
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
")",
":",
"# Compute the conditional word frequency",
"if",
"y",
"is",
"not",
"None",
":",
"# Fit the frequencies",
"self",
".",
"conditional_freqdist_",
"=",
"{",
"}",
"# Conditional frequency distribution",
"self",
".",
"classes_",
"=",
"[",
"str",
"(",
"label",
")",
"for",
"label",
"in",
"set",
"(",
"y",
")",
"]",
"for",
"label",
"in",
"self",
".",
"classes_",
":",
"self",
".",
"conditional_freqdist_",
"[",
"label",
"]",
"=",
"self",
".",
"count",
"(",
"X",
"[",
"y",
"==",
"label",
"]",
")",
"else",
":",
"# No conditional frequencies",
"self",
".",
"conditional_freqdist_",
"=",
"None",
"# Frequency distribution of entire corpus.",
"self",
".",
"freqdist_",
"=",
"self",
".",
"count",
"(",
"X",
")",
"self",
".",
"sorted_",
"=",
"self",
".",
"freqdist_",
".",
"argsort",
"(",
")",
"[",
":",
":",
"-",
"1",
"]",
"# Descending order",
"# Compute the number of words, vocab, and hapaxes",
"self",
".",
"vocab_",
"=",
"self",
".",
"freqdist_",
".",
"shape",
"[",
"0",
"]",
"self",
".",
"words_",
"=",
"self",
".",
"freqdist_",
".",
"sum",
"(",
")",
"self",
".",
"hapaxes_",
"=",
"sum",
"(",
"1",
"for",
"c",
"in",
"self",
".",
"freqdist_",
"if",
"c",
"==",
"1",
")",
"# Draw and ensure that we return self",
"self",
".",
"draw",
"(",
")",
"return",
"self"
] | 37.113636
| 19.931818
|
def get_sanitized_endpoint(url):
    """
    Sanitize an endpoint URL, removing unneeded parts.

    Strips trailing whitespace, and for ESRI endpoints truncates
    everything after the first ``/rest/services`` marker.

    :param str url: the endpoint URL to sanitize.
    :return: the sanitized URL.
    :rtype: str
    """
    # Strip once and work on the stripped string consistently.
    # (Previously the unstripped `url` was sliced while the stripped
    # copy was searched -- harmless only by accident.)
    sanitized_url = url.rstrip()
    esri_marker = '/rest/services'
    # Plain find() on a literal replaces the regex search.
    idx = sanitized_url.find(esri_marker)
    if idx != -1:
        sanitized_url = sanitized_url[:idx + len(esri_marker)]
    return sanitized_url
|
[
"def",
"get_sanitized_endpoint",
"(",
"url",
")",
":",
"# sanitize esri",
"sanitized_url",
"=",
"url",
".",
"rstrip",
"(",
")",
"esri_string",
"=",
"'/rest/services'",
"if",
"esri_string",
"in",
"url",
":",
"match",
"=",
"re",
".",
"search",
"(",
"esri_string",
",",
"sanitized_url",
")",
"sanitized_url",
"=",
"url",
"[",
"0",
":",
"(",
"match",
".",
"start",
"(",
"0",
")",
"+",
"len",
"(",
"esri_string",
")",
")",
"]",
"return",
"sanitized_url"
] | 32.272727
| 11.545455
|
def filter_query(filter_dict, required_keys):
    """Return the keys of ``filter_dict`` that appear in ``required_keys``.

    :param dict filter_dict: mapping whose keys are checked.
    :param list required_keys: the keys considered available.
    :raises TypeError: if either argument has the wrong type.
    :return: list of matching keys, in ``filter_dict`` order.
    """
    if not isinstance(filter_dict, dict):
        # Fixed message: previously complained "dict_list is not a list"
        # even though this branch checks for a dict.
        raise TypeError("filter_dict is not a dict. Please try again")
    if not isinstance(required_keys, list):
        raise TypeError("required_keys is not a list. Please try again")
    # Values were never used, so iterate keys only.
    return [k for k in filter_dict if k in required_keys]
|
[
"def",
"filter_query",
"(",
"filter_dict",
",",
"required_keys",
")",
":",
"if",
"not",
"isinstance",
"(",
"filter_dict",
",",
"dict",
")",
":",
"raise",
"TypeError",
"(",
"\"dict_list is not a list. Please try again\"",
")",
"if",
"not",
"isinstance",
"(",
"required_keys",
",",
"list",
")",
":",
"raise",
"TypeError",
"(",
"\"dict_list is not a list. Please try again\"",
")",
"available",
"=",
"[",
"]",
"for",
"k",
",",
"v",
"in",
"filter_dict",
".",
"items",
"(",
")",
":",
"# print(k, v)",
"if",
"k",
"in",
"required_keys",
":",
"available",
".",
"append",
"(",
"k",
")",
"return",
"available"
] | 36.642857
| 16.428571
|
def update(self, byts):
    '''
    Update all the hashes in the set with the given bytes.

    :param byts: the bytes to feed into every hasher; also added to
                 the running ``size`` total.
    '''
    self.size += len(byts)
    # A plain loop replaces the old list comprehension, which was used
    # purely for its side effects and built a throwaway list.
    for _name, hasher in self.hashes:
        hasher.update(byts)
|
[
"def",
"update",
"(",
"self",
",",
"byts",
")",
":",
"self",
".",
"size",
"+=",
"len",
"(",
"byts",
")",
"[",
"h",
"[",
"1",
"]",
".",
"update",
"(",
"byts",
")",
"for",
"h",
"in",
"self",
".",
"hashes",
"]"
] | 30.833333
| 19.166667
|
def get_file_to_path(self, share_name, directory_name, file_name, file_path,
                     open_mode='wb', start_range=None, end_range=None,
                     range_get_content_md5=None, progress_callback=None,
                     max_connections=1, max_retries=5, retry_wait=1.0, timeout=None):
    '''
    Downloads a file to a local file path, with automatic chunking and
    progress notifications. Returns an instance of File with properties
    and metadata.

    :param str share_name:
        Name of existing share.
    :param str directory_name:
        The path to the directory.
    :param str file_name:
        Name of existing file.
    :param str file_path:
        Path of file to write to.
    :param str open_mode:
        Mode to use when opening the file.
    :param int start_range:
        Start of the inclusive byte range to download. If no end_range is
        given, all bytes after start_range are downloaded.
        Ex: start_range=0, end_range=511 downloads the first 512 bytes.
    :param int end_range:
        End of the inclusive byte range to download; requires start_range.
        Ex: start_range=0, end_range=511 downloads the first 512 bytes.
    :param bool range_get_content_md5:
        When True and combined with a range, the service returns the MD5
        hash for the range, as long as it is at most 4 MB in size.
    :param progress_callback:
        Callback for progress with signature function(current, total)
        where current is the number of bytes transfered so far, and total
        is the size of the file if known.
    :type progress_callback: callback function in format of func(current, total)
    :param int max_connections:
        Set to 1 to download the file sequentially. Set to 2 or greater to
        download a file larger than 64MB in parallel chunks; smaller files
        are downloaded in one chunk.
    :param int max_retries:
        Number of times to retry download of file chunk if an error occurs.
    :param int retry_wait:
        Sleep time in secs between retries.
    :param int timeout:
        The timeout parameter is expressed in seconds. This method may
        make multiple calls to the Azure service and the timeout applies
        to each call individually.
    :return: A File with properties and metadata.
    :rtype: :class:`~azure.storage.file.models.File`
    '''
    # Validate the required arguments (same order as before).
    for arg_name, arg_value in (('share_name', share_name),
                                ('file_name', file_name),
                                ('file_path', file_path),
                                ('open_mode', open_mode)):
        _validate_not_none(arg_name, arg_value)
    # Open the destination and delegate the actual transfer to the
    # stream-based implementation.
    with open(file_path, open_mode) as stream:
        return self.get_file_to_stream(
            share_name, directory_name, file_name, stream,
            start_range, end_range, range_get_content_md5,
            progress_callback, max_connections, max_retries,
            retry_wait, timeout)
|
[
"def",
"get_file_to_path",
"(",
"self",
",",
"share_name",
",",
"directory_name",
",",
"file_name",
",",
"file_path",
",",
"open_mode",
"=",
"'wb'",
",",
"start_range",
"=",
"None",
",",
"end_range",
"=",
"None",
",",
"range_get_content_md5",
"=",
"None",
",",
"progress_callback",
"=",
"None",
",",
"max_connections",
"=",
"1",
",",
"max_retries",
"=",
"5",
",",
"retry_wait",
"=",
"1.0",
",",
"timeout",
"=",
"None",
")",
":",
"_validate_not_none",
"(",
"'share_name'",
",",
"share_name",
")",
"_validate_not_none",
"(",
"'file_name'",
",",
"file_name",
")",
"_validate_not_none",
"(",
"'file_path'",
",",
"file_path",
")",
"_validate_not_none",
"(",
"'open_mode'",
",",
"open_mode",
")",
"with",
"open",
"(",
"file_path",
",",
"open_mode",
")",
"as",
"stream",
":",
"file",
"=",
"self",
".",
"get_file_to_stream",
"(",
"share_name",
",",
"directory_name",
",",
"file_name",
",",
"stream",
",",
"start_range",
",",
"end_range",
",",
"range_get_content_md5",
",",
"progress_callback",
",",
"max_connections",
",",
"max_retries",
",",
"retry_wait",
",",
"timeout",
")",
"return",
"file"
] | 52.092308
| 22.461538
|
def bucket_dict_to_policy(args, bucket_name, d):
    """
    Create a bucket policy document from a permissions dict.

    The dictionary d maps (user, prefix) to 'R' or 'W'.

    :param args: CLI/config namespace passed to ``get_resource`` to build
        the IAM resource handle.
    :param bucket_name: name of the bucket the policy applies to.
    :param d: mapping of (user, prefix) -> 'R' (read) or 'W' (write).
    :return: the policy as a JSON string (Version 2012-10-17).
    """
    import json
    # IAM handle, used below to resolve user names to ARNs.
    iam = get_resource(args, 'iam')
    # Skeleton statements keyed by id; assumed to include 'list',
    # 'bucket' and per-prefix 'Read<Prefix>'/'List<Prefix>'/'Write<Prefix>'
    # entries -- TODO confirm against make_bucket_policy_statements.
    statements = make_bucket_policy_statements(bucket_name)
    user_stats = set()  # (user, statement-id) pairs
    for (user, prefix), mode in d.items():
        # Every user gets the baseline list/bucket statements.
        user_stats.add((user, 'list'))
        user_stats.add((user, 'bucket'))
        if mode == 'R':
            user_stats.add((user, 'Read' + prefix.title()))
            user_stats.add((user, 'List' + prefix.title()))
        elif mode == 'W':
            # Write implies list and read as well.
            user_stats.add((user, 'List' + prefix.title()))
            user_stats.add((user, 'Read' + prefix.title()))
            user_stats.add((user, 'Write' + prefix.title()))
    users_arns = {}  # cache of user name -> iam.User (avoids re-fetching)
    for user_name, section in user_stats:
        section = statements[section]
        if user_name not in users_arns:
            user = iam.User(user_name)
            users_arns[user.name] = user
        else:
            user = users_arns[user_name]
        # NOTE: append order follows set iteration and is not deterministic.
        section['Principal']['AWS'].append(user.arn)
    # Drop statements that ended up with no principals.
    for sid in list(statements.keys()):
        if not statements[sid]['Principal']['AWS']:
            del statements[sid]
    return json.dumps(dict(Version="2012-10-17", Statement=list(statements.values())), indent=4)
|
[
"def",
"bucket_dict_to_policy",
"(",
"args",
",",
"bucket_name",
",",
"d",
")",
":",
"import",
"json",
"iam",
"=",
"get_resource",
"(",
"args",
",",
"'iam'",
")",
"statements",
"=",
"make_bucket_policy_statements",
"(",
"bucket_name",
")",
"user_stats",
"=",
"set",
"(",
")",
"# statement tripples",
"for",
"(",
"user",
",",
"prefix",
")",
",",
"mode",
"in",
"d",
".",
"items",
"(",
")",
":",
"user_stats",
".",
"add",
"(",
"(",
"user",
",",
"'list'",
")",
")",
"user_stats",
".",
"add",
"(",
"(",
"user",
",",
"'bucket'",
")",
")",
"if",
"mode",
"==",
"'R'",
":",
"user_stats",
".",
"add",
"(",
"(",
"user",
",",
"'Read'",
"+",
"prefix",
".",
"title",
"(",
")",
")",
")",
"user_stats",
".",
"add",
"(",
"(",
"user",
",",
"'List'",
"+",
"prefix",
".",
"title",
"(",
")",
")",
")",
"elif",
"mode",
"==",
"'W'",
":",
"user_stats",
".",
"add",
"(",
"(",
"user",
",",
"'List'",
"+",
"prefix",
".",
"title",
"(",
")",
")",
")",
"user_stats",
".",
"add",
"(",
"(",
"user",
",",
"'Read'",
"+",
"prefix",
".",
"title",
"(",
")",
")",
")",
"user_stats",
".",
"add",
"(",
"(",
"user",
",",
"'Write'",
"+",
"prefix",
".",
"title",
"(",
")",
")",
")",
"users_arns",
"=",
"{",
"}",
"for",
"user_name",
",",
"section",
"in",
"user_stats",
":",
"section",
"=",
"statements",
"[",
"section",
"]",
"if",
"user_name",
"not",
"in",
"users_arns",
":",
"user",
"=",
"iam",
".",
"User",
"(",
"user_name",
")",
"users_arns",
"[",
"user",
".",
"name",
"]",
"=",
"user",
"else",
":",
"user",
"=",
"users_arns",
"[",
"user_name",
"]",
"section",
"[",
"'Principal'",
"]",
"[",
"'AWS'",
"]",
".",
"append",
"(",
"user",
".",
"arn",
")",
"for",
"sid",
"in",
"list",
"(",
"statements",
".",
"keys",
"(",
")",
")",
":",
"if",
"not",
"statements",
"[",
"sid",
"]",
"[",
"'Principal'",
"]",
"[",
"'AWS'",
"]",
":",
"del",
"statements",
"[",
"sid",
"]",
"return",
"json",
".",
"dumps",
"(",
"dict",
"(",
"Version",
"=",
"\"2012-10-17\"",
",",
"Statement",
"=",
"list",
"(",
"statements",
".",
"values",
"(",
")",
")",
")",
",",
"indent",
"=",
"4",
")"
] | 27.56
| 20.88
|
def update(self, password=values.unset):
    """
    Update the CredentialInstance.

    :param unicode password: The password will not be returned in the response

    :returns: Updated CredentialInstance
    :rtype: twilio.rest.api.v2010.account.sip.credential_list.credential.CredentialInstance
    """
    # Delegate to the context proxy, which performs the actual request.
    return self._proxy.update(password=password)
|
[
"def",
"update",
"(",
"self",
",",
"password",
"=",
"values",
".",
"unset",
")",
":",
"return",
"self",
".",
"_proxy",
".",
"update",
"(",
"password",
"=",
"password",
",",
")"
] | 37.4
| 19.8
|
def longest_run_1d(arr):
    """Return the length of the longest consecutive run of identical values.

    Parameters
    ----------
    arr : bool array
      Input array

    Returns
    -------
    int
      Length of longest run.
    """
    values, run_lengths = rle_1d(arr)[:2]
    # Zero out the runs of falsy values, then take the longest remaining run.
    return np.where(values, run_lengths, 0).max()
|
[
"def",
"longest_run_1d",
"(",
"arr",
")",
":",
"v",
",",
"rl",
"=",
"rle_1d",
"(",
"arr",
")",
"[",
":",
"2",
"]",
"return",
"np",
".",
"where",
"(",
"v",
",",
"rl",
",",
"0",
")",
".",
"max",
"(",
")"
] | 19.4
| 20.8
|
def throw_random_private( lengths, regions, save_interval_func, allow_overlap=False, three_args=True ):
    """
    (Internal function; we expect calls only through the interface functions
    above)

    `lengths`: A list containing the length of each interval to be generated.
    `regions`: A list of regions in which intervals can be placed, sorted by
               decreasing length. Elements are triples of the form (length,
               start, extra). This list CAN BE MODIFIED by this function
               (unless `allow_overlap` is set).
    `save_interval_func`: A function accepting three arguments which will be
                          passed the (start,stop,extra) for each generated
                          interval (two arguments when `three_args` is False).

    :raises MaxtriesException: when no remaining region can hold the
                               current length.
    """
    # Implementation:
    # We keep a list of the regions, sorted from largest to smallest. We then
    # place each length by following steps:
    # (1) construct a candidate counts array (cc array)
    # (2) choose a candidate at random
    # (3) find region containing that candidate
    # (4) map candidate to position in that region
    # (5) split region if not allowing overlaps
    # (6) report placed segment
    #
    # The cc array is only constructed if there's a change (different length
    # to place, or the region list has changed). It contains, for each
    # region, the total number of candidate positions in regions
    # *preceding* it in the region list:
    #     cc[i] = sum over k in 0..(i-1) of length[i] - L + 1
    # where L is the length being thrown.
    # At the same time, we determine the total number of candidates (the total
    # number of places the current length can be placed) and the index range
    # of regions into which the length will fit.
    #
    # The candidate is chosen in (0..candidates-1). The candidate counts
    # array allows a binary search to locate the region holding that
    # candidate: if s < cc[mid] the region is below mid, else at/above it.
    min_length = min( lengths )
    prev_length = None # (force initial cc array construction)
    cc = [0] * (len( regions ) + len(lengths) - 1)
    num_thrown = 0
    for length in lengths:
        # construct cc array (only needed if length has changed or region list has
        # changed)
        if length != prev_length:
            prev_length = length
            assert len( cc ) >= len( regions )
            candidates = 0
            hi_rgn = 0
            for region in regions:
                rgn_len = region[0]
                if rgn_len < length:
                    break
                cc[hi_rgn] = candidates
                candidates += rgn_len - length + 1
                hi_rgn += 1
            if candidates == 0:
                raise MaxtriesException( "No region can fit an interval of length %d (we threw %d of %d)" \
                                       % ( length, num_thrown,len( lengths ) ) )
            hi_rgn -= 1
        # Select a candidate
        s = random.randrange( candidates )
        # Locate region containing that candidate, by binary search
        lo = 0
        hi = hi_rgn
        while hi > lo:
            # BUGFIX: use floor division; on Python 3 the old `/ 2` produced
            # a float, which is not a valid list index. We round up to
            # prevent an infinite loop.
            mid = (lo + hi + 1) // 2
            if s < cc[mid]: hi = mid-1 # (s <  num candidates from 0..mid-1)
            else:           lo = mid   # (s >= num candidates from 0..mid-1)
        s -= cc[lo]
        # If we are not allowing overlaps we will remove the placed interval
        # from the region list
        if allow_overlap:
            rgn_length, rgn_start, rgn_extra = regions[lo]
        else:
            # Remove the chosen region and split
            rgn_length, rgn_start, rgn_extra = regions.pop( lo )
            rgn_end = rgn_start + rgn_length
            assert s >= 0
            assert rgn_start + s + length <= rgn_end, "Expected: %d + %d + %d == %d <= %d" % ( rgn_start, s, length, rgn_start + s + length, rgn_end )
            regions.reverse()
            if s >= min_length:
                bisect.insort( regions, ( s, rgn_start, rgn_extra ) )
            if s + length <= rgn_length - min_length:
                bisect.insort( regions, ( rgn_length - ( s + length ), rgn_start + s + length, rgn_extra ) )
            regions.reverse()
            prev_length = None # (force cc array construction)
        # Save the new interval
        if (three_args):
            save_interval_func( rgn_start + s, rgn_start + s + length, rgn_extra )
        else:
            save_interval_func( rgn_start + s, rgn_start + s + length )
        num_thrown += 1
|
[
"def",
"throw_random_private",
"(",
"lengths",
",",
"regions",
",",
"save_interval_func",
",",
"allow_overlap",
"=",
"False",
",",
"three_args",
"=",
"True",
")",
":",
"# Implementation:",
"# We keep a list of the regions, sorted from largest to smallest. We then",
"# place each length by following steps:",
"# (1) construct a candidate counts array (cc array)",
"# (2) choose a candidate at random",
"# (3) find region containing that candidate",
"# (4) map candidate to position in that region",
"# (5) split region if not allowing overlaps",
"# (6) report placed segment",
"#",
"# The cc array is only constructed if there's a change (different length",
"# to place, or the region list has changed). It contains, for each",
"# region, the total number of number of candidate positions in regions",
"# *preceding* it in the region list:",
"# cc[i] = sum over k in 0..(i-1) of length[i] - L + 1",
"# where N is the number of regions and L is the length being thrown.",
"# At the same time, we determine the total number of candidates (the total",
"# number of places the current length can be placed) and the index range",
"# of regions into which the length will fit.",
"#",
"# example:",
"# for L = 20",
"# i = 0 1 2 3 4 5 6 7 8 9",
"# length[i] = 96 66 56 50 48 40 29 17 11 8",
"# cc[i] = 0 77 124 161 192 221 242 X X X",
"# candidates = 252",
"# lo_rgn = 0",
"# hi_rgn = 6",
"#",
"# The candidate is chosen in (0..candidates-1). The candidate counts",
"# array allows us to do a binary search to locate the region that holds that",
"# candidate. Continuing the example above, we choose a random candidate",
"# s in (0..251). If s happens to be in (124..160), it will be mapped to",
"# region 2 at start position s-124.",
"#",
"# During the binary search, if we are looking at region 3, if s < cc[3]",
"# then the desired region is region 2 or lower. Otherwise it is region 3 or",
"# higher.",
"min_length",
"=",
"min",
"(",
"lengths",
")",
"prev_length",
"=",
"None",
"# (force initial cc array construction)",
"cc",
"=",
"[",
"0",
"]",
"*",
"(",
"len",
"(",
"regions",
")",
"+",
"len",
"(",
"lengths",
")",
"-",
"1",
")",
"num_thrown",
"=",
"0",
"for",
"length",
"in",
"lengths",
":",
"# construct cc array (only needed if length has changed or region list has",
"# changed)",
"if",
"length",
"!=",
"prev_length",
":",
"prev_length",
"=",
"length",
"assert",
"len",
"(",
"cc",
")",
">=",
"len",
"(",
"regions",
")",
"candidates",
"=",
"0",
"hi_rgn",
"=",
"0",
"for",
"region",
"in",
"regions",
":",
"rgn_len",
"=",
"region",
"[",
"0",
"]",
"if",
"rgn_len",
"<",
"length",
":",
"break",
"cc",
"[",
"hi_rgn",
"]",
"=",
"candidates",
"candidates",
"+=",
"rgn_len",
"-",
"length",
"+",
"1",
"hi_rgn",
"+=",
"1",
"if",
"candidates",
"==",
"0",
":",
"raise",
"MaxtriesException",
"(",
"\"No region can fit an interval of length %d (we threw %d of %d)\"",
"%",
"(",
"length",
",",
"num_thrown",
",",
"len",
"(",
"lengths",
")",
")",
")",
"hi_rgn",
"-=",
"1",
"# Select a candidate",
"s",
"=",
"random",
".",
"randrange",
"(",
"candidates",
")",
"#..",
"#..for ix in range( len( regions ) ):",
"#.. region = regions[ix]",
"#.. if ix <= hi_rgn: print \"%2s: %5s %5s %5s\" % ( ix, region[1], region[0], cc[ix] )",
"#.. else: print \"%2s: %5s %5s %5s\" % ( ix, region[1], region[0], \"X\" )",
"#..print \"s = %s (of %s candidates)\" % ( s, candidates )",
"# Locate region containing that candidate, by binary search",
"lo",
"=",
"0",
"hi",
"=",
"hi_rgn",
"while",
"hi",
">",
"lo",
":",
"mid",
"=",
"(",
"lo",
"+",
"hi",
"+",
"1",
")",
"/",
"2",
"# (we round up to prevent infinite loop)",
"if",
"s",
"<",
"cc",
"[",
"mid",
"]",
":",
"hi",
"=",
"mid",
"-",
"1",
"# (s < num candidates from 0..mid-1)",
"else",
":",
"lo",
"=",
"mid",
"# (s >= num candidates from 0..mid-1)",
"s",
"-=",
"cc",
"[",
"lo",
"]",
"# If we are not allowing overlaps we will remove the placed interval",
"# from the region list",
"if",
"allow_overlap",
":",
"rgn_length",
",",
"rgn_start",
",",
"rgn_extra",
"=",
"regions",
"[",
"lo",
"]",
"else",
":",
"# Remove the chosen region and split",
"rgn_length",
",",
"rgn_start",
",",
"rgn_extra",
"=",
"regions",
".",
"pop",
"(",
"lo",
")",
"rgn_end",
"=",
"rgn_start",
"+",
"rgn_length",
"assert",
"s",
">=",
"0",
"assert",
"rgn_start",
"+",
"s",
"+",
"length",
"<=",
"rgn_end",
",",
"\"Expected: %d + %d + %d == %d <= %d\"",
"%",
"(",
"rgn_start",
",",
"s",
",",
"length",
",",
"rgn_start",
"+",
"s",
"+",
"length",
",",
"rgn_end",
")",
"regions",
".",
"reverse",
"(",
")",
"if",
"s",
">=",
"min_length",
":",
"bisect",
".",
"insort",
"(",
"regions",
",",
"(",
"s",
",",
"rgn_start",
",",
"rgn_extra",
")",
")",
"if",
"s",
"+",
"length",
"<=",
"rgn_length",
"-",
"min_length",
":",
"bisect",
".",
"insort",
"(",
"regions",
",",
"(",
"rgn_length",
"-",
"(",
"s",
"+",
"length",
")",
",",
"rgn_start",
"+",
"s",
"+",
"length",
",",
"rgn_extra",
")",
")",
"regions",
".",
"reverse",
"(",
")",
"prev_length",
"=",
"None",
"# (force cc array construction)",
"# Save the new interval",
"if",
"(",
"three_args",
")",
":",
"save_interval_func",
"(",
"rgn_start",
"+",
"s",
",",
"rgn_start",
"+",
"s",
"+",
"length",
",",
"rgn_extra",
")",
"else",
":",
"save_interval_func",
"(",
"rgn_start",
"+",
"s",
",",
"rgn_start",
"+",
"s",
"+",
"length",
")",
"num_thrown",
"+=",
"1"
] | 47.46087
| 24.052174
|
def path_split(self, path):
    """
    Split ``path`` into the part matched by this middleware and the rest.

    Args:
        path (str): The url to split

    Returns:
        Tuple
            match (re.Match or None): the match object covering the
                beginning of ``path`` that this middleware matches, or
                None if it does not match
            remaining_path (str or None): the rest of the path after the
                matching part, or None on no match
    """
    match = self.path.match(path)
    if match is None:
        return None, None
    remainder = path[match.end():]
    # Only accept a split on a '/' boundary: either the matched part must
    # end with '/', or the remainder must begin with one.
    if remainder and not (match.group().endswith('/') or remainder.startswith('/')):
        return None, None
    if self.IGNORE_TRAILING_SLASH and remainder == '/':
        remainder = ''
    return match, remainder
|
[
"def",
"path_split",
"(",
"self",
",",
"path",
")",
":",
"match",
"=",
"self",
".",
"path",
".",
"match",
"(",
"path",
")",
"if",
"match",
"is",
"None",
":",
"return",
"None",
",",
"None",
"# split string at position",
"the_rest",
"=",
"path",
"[",
"match",
".",
"end",
"(",
")",
":",
"]",
"# ensure we split at a '/' character",
"if",
"the_rest",
":",
"if",
"match",
".",
"group",
"(",
")",
".",
"endswith",
"(",
"'/'",
")",
":",
"pass",
"elif",
"the_rest",
".",
"startswith",
"(",
"'/'",
")",
":",
"pass",
"else",
":",
"return",
"None",
",",
"None",
"if",
"self",
".",
"IGNORE_TRAILING_SLASH",
"and",
"the_rest",
"==",
"'/'",
":",
"the_rest",
"=",
"''",
"return",
"match",
",",
"the_rest"
] | 31.083333
| 21.583333
|
def runPlink(options):
    """Runs Plink with the geno option.

    :param options: the options (must provide ``bfile``, ``geno`` and
                    ``out`` attributes).
    :type options: argparse.Namespace

    :raises ProgramError: if the ``plink`` subprocess exits with a
                          non-zero status.
    """
    # The plink command; a list argv with shell=False avoids any shell
    # injection through the option values.
    plinkCommand = ["plink", "--noweb", "--bfile", options.bfile, "--geno",
                    str(options.geno), "--make-bed", "--out", options.out]
    try:
        # Capture stdout+stderr so plink's chatter doesn't leak; the
        # previously-assigned (and never used) `output` local is gone.
        subprocess.check_output(plinkCommand,
                                stderr=subprocess.STDOUT, shell=False)
    except subprocess.CalledProcessError:
        msg = "plink: couldn't run plink"
        raise ProgramError(msg)
|
[
"def",
"runPlink",
"(",
"options",
")",
":",
"# The plink command",
"plinkCommand",
"=",
"[",
"\"plink\"",
",",
"\"--noweb\"",
",",
"\"--bfile\"",
",",
"options",
".",
"bfile",
",",
"\"--geno\"",
",",
"str",
"(",
"options",
".",
"geno",
")",
",",
"\"--make-bed\"",
",",
"\"--out\"",
",",
"options",
".",
"out",
"]",
"output",
"=",
"None",
"try",
":",
"output",
"=",
"subprocess",
".",
"check_output",
"(",
"plinkCommand",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
",",
"shell",
"=",
"False",
")",
"except",
"subprocess",
".",
"CalledProcessError",
":",
"msg",
"=",
"\"plink: couldn't run plink\"",
"raise",
"ProgramError",
"(",
"msg",
")"
] | 30.526316
| 20.736842
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.