text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def print_fn(results, niter, ncall, add_live_it=None,
dlogz=None, stop_val=None, nbatch=None,
logl_min=-np.inf, logl_max=np.inf):
"""
The default function used to print out results in real time.
Parameters
----------
results : tuple
Collection of variables output from the current state of the sampler.
Currently includes:
(1) particle index,
(2) unit cube position,
(3) parameter position,
(4) ln(likelihood),
(5) ln(volume),
(6) ln(weight),
(7) ln(evidence),
(8) Var[ln(evidence)],
(9) information,
(10) number of (current) function calls,
(11) iteration when the point was originally proposed,
(12) index of the bounding object originally proposed from,
(13) index of the bounding object active at a given iteration,
(14) cumulative efficiency, and
(15) estimated remaining ln(evidence).
niter : int
The current iteration of the sampler.
ncall : int
The total number of function calls at the current iteration.
add_live_it : int, optional
If the last set of live points are being added explicitly, this
quantity tracks the sorted index of the current live point being added.
dlogz : float, optional
The evidence stopping criterion. If not provided, the provided
stopping value will be used instead.
stop_val : float, optional
The current stopping criterion (for dynamic nested sampling). Used if
the `dlogz` value is not specified.
nbatch : int, optional
The current batch (for dynamic nested sampling).
logl_min : float, optional
The minimum log-likelihood used when starting sampling. Default is
`-np.inf`.
logl_max : float, optional
The maximum log-likelihood used when stopping sampling. Default is
`np.inf`.
"""
# Extract results at the current iteration.
(worst, ustar, vstar, loglstar, logvol, logwt,
logz, logzvar, h, nc, worst_it, boundidx, bounditer,
eff, delta_logz) = results
# Adjusting outputs for printing.
if delta_logz > 1e6:
delta_logz = np.inf
if logzvar >= 0. and logzvar <= 1e6:
logzerr = np.sqrt(logzvar)
else:
logzerr = np.nan
if logz <= -1e6:
logz = -np.inf
if loglstar <= -1e6:
loglstar = -np.inf
# Constructing output.
long_str = []
long_str.append("iter: {:d}".format(niter))
if add_live_it is not None:
long_str.append("+{:d}".format(add_live_it))
short_str = list(long_str)
if nbatch is not None:
long_str.append("batch: {:d}".format(nbatch))
long_str.append("bound: {:d}".format(bounditer))
long_str.append("nc: {:d}".format(nc))
long_str.append("ncall: {:d}".format(ncall))
long_str.append("eff(%): {:6.3f}".format(eff))
short_str.append(long_str[-1])
long_str.append("loglstar: {:6.3f} < {:6.3f} < {:6.3f}".format(logl_min,
loglstar,
logl_max))
short_str.append("logl*: {:6.1f}<{:6.1f}<{:6.1f}".format(logl_min,
loglstar,
logl_max))
long_str.append("logz: {:6.3f} +/- {:6.3f}".format(logz, logzerr))
short_str.append("logz: {:6.1f}+/-{:.1f}".format(logz, logzerr))
mid_str = list(short_str)
if dlogz is not None:
long_str.append("dlogz: {:6.3f} > {:6.3f}".format(delta_logz, dlogz))
mid_str.append("dlogz: {:6.1f}>{:6.1f}".format(delta_logz, dlogz))
else:
long_str.append("stop: {:6.3f}".format(stop_val))
mid_str.append("stop: {:6.3f}".format(stop_val))
# Printing.
long_str = ' | '.join(long_str)
mid_str = ' | '.join(mid_str)
short_str = '|'.join(short_str)
if sys.stderr.isatty() and hasattr(shutil, 'get_terminal_size'):
columns = shutil.get_terminal_size(fallback=(80, 25))[0]
else:
columns = 200
if columns > len(long_str):
sys.stderr.write("\r" + long_str + ' '*(columns-len(long_str)-2))
elif columns > len(mid_str):
sys.stderr.write("\r" + mid_str + ' '*(columns-len(mid_str)-2))
else:
sys.stderr.write("\r" + short_str + ' '*(columns-len(short_str)-2))
sys.stderr.flush() | [
"def",
"print_fn",
"(",
"results",
",",
"niter",
",",
"ncall",
",",
"add_live_it",
"=",
"None",
",",
"dlogz",
"=",
"None",
",",
"stop_val",
"=",
"None",
",",
"nbatch",
"=",
"None",
",",
"logl_min",
"=",
"-",
"np",
".",
"inf",
",",
"logl_max",
"=",
"np",
".",
"inf",
")",
":",
"# Extract results at the current iteration.",
"(",
"worst",
",",
"ustar",
",",
"vstar",
",",
"loglstar",
",",
"logvol",
",",
"logwt",
",",
"logz",
",",
"logzvar",
",",
"h",
",",
"nc",
",",
"worst_it",
",",
"boundidx",
",",
"bounditer",
",",
"eff",
",",
"delta_logz",
")",
"=",
"results",
"# Adjusting outputs for printing.",
"if",
"delta_logz",
">",
"1e6",
":",
"delta_logz",
"=",
"np",
".",
"inf",
"if",
"logzvar",
">=",
"0.",
"and",
"logzvar",
"<=",
"1e6",
":",
"logzerr",
"=",
"np",
".",
"sqrt",
"(",
"logzvar",
")",
"else",
":",
"logzerr",
"=",
"np",
".",
"nan",
"if",
"logz",
"<=",
"-",
"1e6",
":",
"logz",
"=",
"-",
"np",
".",
"inf",
"if",
"loglstar",
"<=",
"-",
"1e6",
":",
"loglstar",
"=",
"-",
"np",
".",
"inf",
"# Constructing output.",
"long_str",
"=",
"[",
"]",
"long_str",
".",
"append",
"(",
"\"iter: {:d}\"",
".",
"format",
"(",
"niter",
")",
")",
"if",
"add_live_it",
"is",
"not",
"None",
":",
"long_str",
".",
"append",
"(",
"\"+{:d}\"",
".",
"format",
"(",
"add_live_it",
")",
")",
"short_str",
"=",
"list",
"(",
"long_str",
")",
"if",
"nbatch",
"is",
"not",
"None",
":",
"long_str",
".",
"append",
"(",
"\"batch: {:d}\"",
".",
"format",
"(",
"nbatch",
")",
")",
"long_str",
".",
"append",
"(",
"\"bound: {:d}\"",
".",
"format",
"(",
"bounditer",
")",
")",
"long_str",
".",
"append",
"(",
"\"nc: {:d}\"",
".",
"format",
"(",
"nc",
")",
")",
"long_str",
".",
"append",
"(",
"\"ncall: {:d}\"",
".",
"format",
"(",
"ncall",
")",
")",
"long_str",
".",
"append",
"(",
"\"eff(%): {:6.3f}\"",
".",
"format",
"(",
"eff",
")",
")",
"short_str",
".",
"append",
"(",
"long_str",
"[",
"-",
"1",
"]",
")",
"long_str",
".",
"append",
"(",
"\"loglstar: {:6.3f} < {:6.3f} < {:6.3f}\"",
".",
"format",
"(",
"logl_min",
",",
"loglstar",
",",
"logl_max",
")",
")",
"short_str",
".",
"append",
"(",
"\"logl*: {:6.1f}<{:6.1f}<{:6.1f}\"",
".",
"format",
"(",
"logl_min",
",",
"loglstar",
",",
"logl_max",
")",
")",
"long_str",
".",
"append",
"(",
"\"logz: {:6.3f} +/- {:6.3f}\"",
".",
"format",
"(",
"logz",
",",
"logzerr",
")",
")",
"short_str",
".",
"append",
"(",
"\"logz: {:6.1f}+/-{:.1f}\"",
".",
"format",
"(",
"logz",
",",
"logzerr",
")",
")",
"mid_str",
"=",
"list",
"(",
"short_str",
")",
"if",
"dlogz",
"is",
"not",
"None",
":",
"long_str",
".",
"append",
"(",
"\"dlogz: {:6.3f} > {:6.3f}\"",
".",
"format",
"(",
"delta_logz",
",",
"dlogz",
")",
")",
"mid_str",
".",
"append",
"(",
"\"dlogz: {:6.1f}>{:6.1f}\"",
".",
"format",
"(",
"delta_logz",
",",
"dlogz",
")",
")",
"else",
":",
"long_str",
".",
"append",
"(",
"\"stop: {:6.3f}\"",
".",
"format",
"(",
"stop_val",
")",
")",
"mid_str",
".",
"append",
"(",
"\"stop: {:6.3f}\"",
".",
"format",
"(",
"stop_val",
")",
")",
"# Printing.",
"long_str",
"=",
"' | '",
".",
"join",
"(",
"long_str",
")",
"mid_str",
"=",
"' | '",
".",
"join",
"(",
"mid_str",
")",
"short_str",
"=",
"'|'",
".",
"join",
"(",
"short_str",
")",
"if",
"sys",
".",
"stderr",
".",
"isatty",
"(",
")",
"and",
"hasattr",
"(",
"shutil",
",",
"'get_terminal_size'",
")",
":",
"columns",
"=",
"shutil",
".",
"get_terminal_size",
"(",
"fallback",
"=",
"(",
"80",
",",
"25",
")",
")",
"[",
"0",
"]",
"else",
":",
"columns",
"=",
"200",
"if",
"columns",
">",
"len",
"(",
"long_str",
")",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"\\r\"",
"+",
"long_str",
"+",
"' '",
"*",
"(",
"columns",
"-",
"len",
"(",
"long_str",
")",
"-",
"2",
")",
")",
"elif",
"columns",
">",
"len",
"(",
"mid_str",
")",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"\\r\"",
"+",
"mid_str",
"+",
"' '",
"*",
"(",
"columns",
"-",
"len",
"(",
"mid_str",
")",
"-",
"2",
")",
")",
"else",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"\\r\"",
"+",
"short_str",
"+",
"' '",
"*",
"(",
"columns",
"-",
"len",
"(",
"short_str",
")",
"-",
"2",
")",
")",
"sys",
".",
"stderr",
".",
"flush",
"(",
")"
] | 36.441667 | 20.658333 |
def _setup_tunnel(
self):
"""
*setup ssh tunnel if required*
"""
from subprocess import Popen, PIPE, STDOUT
import pymysql as ms
# SETUP TUNNEL IF REQUIRED
if "ssh tunnel" in self.settings:
# TEST TUNNEL DOES NOT ALREADY EXIST
sshPort = self.settings["ssh tunnel"]["port"]
connected = self._checkServer(
self.settings["database settings"]["host"], sshPort)
if connected:
pass
else:
# GRAB TUNNEL SETTINGS FROM SETTINGS FILE
ru = self.settings["ssh tunnel"]["remote user"]
rip = self.settings["ssh tunnel"]["remote ip"]
rh = self.settings["ssh tunnel"]["remote datbase host"]
cmd = "ssh -fnN %(ru)s@%(rip)s -L %(sshPort)s:%(rh)s:3306" % locals()
p = Popen(cmd, shell=True, close_fds=True)
output = p.communicate()[0]
# TEST CONNECTION - QUIT AFTER SO MANY TRIES
connected = False
count = 0
while not connected:
connected = self._checkServer(
self.settings["database settings"]["host"], sshPort)
time.sleep(1)
count += 1
if count == 5:
self.log.error(
'cound not setup tunnel to remote datbase' % locals())
sys.exit(0)
if "tunnel" in self.settings["database settings"] and self.settings["database settings"]["tunnel"]:
# TEST TUNNEL DOES NOT ALREADY EXIST
sshPort = self.settings["database settings"]["tunnel"]["port"]
connected = self._checkServer(
self.settings["database settings"]["host"], sshPort)
if connected:
pass
else:
# GRAB TUNNEL SETTINGS FROM SETTINGS FILE
ru = self.settings["database settings"][
"tunnel"]["remote user"]
rip = self.settings["database settings"]["tunnel"]["remote ip"]
rh = self.settings["database settings"][
"tunnel"]["remote datbase host"]
cmd = "ssh -fnN %(ru)s@%(rip)s -L %(sshPort)s:%(rh)s:3306" % locals()
p = Popen(cmd, shell=True, close_fds=True)
output = p.communicate()[0]
# TEST CONNECTION - QUIT AFTER SO MANY TRIES
connected = False
count = 0
while not connected:
connected = self._checkServer(
self.settings["database settings"]["host"], sshPort)
time.sleep(1)
count += 1
if count == 5:
self.log.error(
'cound not setup tunnel to remote datbase' % locals())
sys.exit(0)
# SETUP A DATABASE CONNECTION FOR THE remote database
host = self.settings["database settings"]["host"]
user = self.settings["database settings"]["user"]
passwd = self.settings["database settings"]["password"]
dbName = self.settings["database settings"]["db"]
thisConn = ms.connect(
host=host,
user=user,
passwd=passwd,
db=dbName,
port=sshPort,
use_unicode=True,
charset='utf8',
local_infile=1,
client_flag=ms.constants.CLIENT.MULTI_STATEMENTS,
connect_timeout=36000,
max_allowed_packet=51200000
)
thisConn.autocommit(True)
self.remoteDBConn = thisConn
return None | [
"def",
"_setup_tunnel",
"(",
"self",
")",
":",
"from",
"subprocess",
"import",
"Popen",
",",
"PIPE",
",",
"STDOUT",
"import",
"pymysql",
"as",
"ms",
"# SETUP TUNNEL IF REQUIRED",
"if",
"\"ssh tunnel\"",
"in",
"self",
".",
"settings",
":",
"# TEST TUNNEL DOES NOT ALREADY EXIST",
"sshPort",
"=",
"self",
".",
"settings",
"[",
"\"ssh tunnel\"",
"]",
"[",
"\"port\"",
"]",
"connected",
"=",
"self",
".",
"_checkServer",
"(",
"self",
".",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"host\"",
"]",
",",
"sshPort",
")",
"if",
"connected",
":",
"pass",
"else",
":",
"# GRAB TUNNEL SETTINGS FROM SETTINGS FILE",
"ru",
"=",
"self",
".",
"settings",
"[",
"\"ssh tunnel\"",
"]",
"[",
"\"remote user\"",
"]",
"rip",
"=",
"self",
".",
"settings",
"[",
"\"ssh tunnel\"",
"]",
"[",
"\"remote ip\"",
"]",
"rh",
"=",
"self",
".",
"settings",
"[",
"\"ssh tunnel\"",
"]",
"[",
"\"remote datbase host\"",
"]",
"cmd",
"=",
"\"ssh -fnN %(ru)s@%(rip)s -L %(sshPort)s:%(rh)s:3306\"",
"%",
"locals",
"(",
")",
"p",
"=",
"Popen",
"(",
"cmd",
",",
"shell",
"=",
"True",
",",
"close_fds",
"=",
"True",
")",
"output",
"=",
"p",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
"# TEST CONNECTION - QUIT AFTER SO MANY TRIES",
"connected",
"=",
"False",
"count",
"=",
"0",
"while",
"not",
"connected",
":",
"connected",
"=",
"self",
".",
"_checkServer",
"(",
"self",
".",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"host\"",
"]",
",",
"sshPort",
")",
"time",
".",
"sleep",
"(",
"1",
")",
"count",
"+=",
"1",
"if",
"count",
"==",
"5",
":",
"self",
".",
"log",
".",
"error",
"(",
"'cound not setup tunnel to remote datbase'",
"%",
"locals",
"(",
")",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"if",
"\"tunnel\"",
"in",
"self",
".",
"settings",
"[",
"\"database settings\"",
"]",
"and",
"self",
".",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"tunnel\"",
"]",
":",
"# TEST TUNNEL DOES NOT ALREADY EXIST",
"sshPort",
"=",
"self",
".",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"tunnel\"",
"]",
"[",
"\"port\"",
"]",
"connected",
"=",
"self",
".",
"_checkServer",
"(",
"self",
".",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"host\"",
"]",
",",
"sshPort",
")",
"if",
"connected",
":",
"pass",
"else",
":",
"# GRAB TUNNEL SETTINGS FROM SETTINGS FILE",
"ru",
"=",
"self",
".",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"tunnel\"",
"]",
"[",
"\"remote user\"",
"]",
"rip",
"=",
"self",
".",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"tunnel\"",
"]",
"[",
"\"remote ip\"",
"]",
"rh",
"=",
"self",
".",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"tunnel\"",
"]",
"[",
"\"remote datbase host\"",
"]",
"cmd",
"=",
"\"ssh -fnN %(ru)s@%(rip)s -L %(sshPort)s:%(rh)s:3306\"",
"%",
"locals",
"(",
")",
"p",
"=",
"Popen",
"(",
"cmd",
",",
"shell",
"=",
"True",
",",
"close_fds",
"=",
"True",
")",
"output",
"=",
"p",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
"# TEST CONNECTION - QUIT AFTER SO MANY TRIES",
"connected",
"=",
"False",
"count",
"=",
"0",
"while",
"not",
"connected",
":",
"connected",
"=",
"self",
".",
"_checkServer",
"(",
"self",
".",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"host\"",
"]",
",",
"sshPort",
")",
"time",
".",
"sleep",
"(",
"1",
")",
"count",
"+=",
"1",
"if",
"count",
"==",
"5",
":",
"self",
".",
"log",
".",
"error",
"(",
"'cound not setup tunnel to remote datbase'",
"%",
"locals",
"(",
")",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"# SETUP A DATABASE CONNECTION FOR THE remote database",
"host",
"=",
"self",
".",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"host\"",
"]",
"user",
"=",
"self",
".",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"user\"",
"]",
"passwd",
"=",
"self",
".",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"password\"",
"]",
"dbName",
"=",
"self",
".",
"settings",
"[",
"\"database settings\"",
"]",
"[",
"\"db\"",
"]",
"thisConn",
"=",
"ms",
".",
"connect",
"(",
"host",
"=",
"host",
",",
"user",
"=",
"user",
",",
"passwd",
"=",
"passwd",
",",
"db",
"=",
"dbName",
",",
"port",
"=",
"sshPort",
",",
"use_unicode",
"=",
"True",
",",
"charset",
"=",
"'utf8'",
",",
"local_infile",
"=",
"1",
",",
"client_flag",
"=",
"ms",
".",
"constants",
".",
"CLIENT",
".",
"MULTI_STATEMENTS",
",",
"connect_timeout",
"=",
"36000",
",",
"max_allowed_packet",
"=",
"51200000",
")",
"thisConn",
".",
"autocommit",
"(",
"True",
")",
"self",
".",
"remoteDBConn",
"=",
"thisConn",
"return",
"None"
] | 39.978495 | 18.021505 |
def asList(self):
""" returns the value as the list object"""
return [self._red, self._green, self._blue, self._alpha] | [
"def",
"asList",
"(",
"self",
")",
":",
"return",
"[",
"self",
".",
"_red",
",",
"self",
".",
"_green",
",",
"self",
".",
"_blue",
",",
"self",
".",
"_alpha",
"]"
] | 44 | 15.666667 |
def mac_access_list_extended_hide_mac_acl_ext_seq_action(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
mac = ET.SubElement(config, "mac", xmlns="urn:brocade.com:mgmt:brocade-mac-access-list")
access_list = ET.SubElement(mac, "access-list")
extended = ET.SubElement(access_list, "extended")
name_key = ET.SubElement(extended, "name")
name_key.text = kwargs.pop('name')
hide_mac_acl_ext = ET.SubElement(extended, "hide-mac-acl-ext")
seq = ET.SubElement(hide_mac_acl_ext, "seq")
seq_id_key = ET.SubElement(seq, "seq-id")
seq_id_key.text = kwargs.pop('seq_id')
action = ET.SubElement(seq, "action")
action.text = kwargs.pop('action')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"mac_access_list_extended_hide_mac_acl_ext_seq_action",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"mac",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"mac\"",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:brocade-mac-access-list\"",
")",
"access_list",
"=",
"ET",
".",
"SubElement",
"(",
"mac",
",",
"\"access-list\"",
")",
"extended",
"=",
"ET",
".",
"SubElement",
"(",
"access_list",
",",
"\"extended\"",
")",
"name_key",
"=",
"ET",
".",
"SubElement",
"(",
"extended",
",",
"\"name\"",
")",
"name_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'name'",
")",
"hide_mac_acl_ext",
"=",
"ET",
".",
"SubElement",
"(",
"extended",
",",
"\"hide-mac-acl-ext\"",
")",
"seq",
"=",
"ET",
".",
"SubElement",
"(",
"hide_mac_acl_ext",
",",
"\"seq\"",
")",
"seq_id_key",
"=",
"ET",
".",
"SubElement",
"(",
"seq",
",",
"\"seq-id\"",
")",
"seq_id_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'seq_id'",
")",
"action",
"=",
"ET",
".",
"SubElement",
"(",
"seq",
",",
"\"action\"",
")",
"action",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'action'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | 46.833333 | 14.777778 |
def start(self, hostname=None, port=None):
""" Spawns a new HTTP server, residing on defined hostname and port
:param hostname: the default hostname the server should listen on.
:param port: the default port of the server.
"""
if hostname is None:
hostname = settings.settings['mx_host']
if port is None:
port = settings.settings['mx_port']
reloader = False # use_reloader: the default setting for the reloader.
debugger = False #
evalex = True # should the exception evaluation feature be enabled?
threaded = False # True if each request is handled in a separate thread
processes = 1 # if greater than 1 then handle each request in a new process
reloader_interval = 1 # the interval for the reloader in seconds.
static_files = None # static_files: optional dict of static files.
extra_files = None # extra_files: optional list of extra files to track for reloading.
ssl_context = None # ssl_context: optional SSL context for running server in HTTPS mode.
self.mx_thread = Thread(target=run_simple(hostname=hostname,
port=port,
application=self,
use_debugger=debugger,
use_evalex=evalex,
extra_files=extra_files,
use_reloader=reloader,
reloader_interval=reloader_interval,
threaded=threaded,
processes=processes,
static_files=static_files,
ssl_context=ssl_context))
self.mx_thread.daemon = True
self.mx_thread.start() | [
"def",
"start",
"(",
"self",
",",
"hostname",
"=",
"None",
",",
"port",
"=",
"None",
")",
":",
"if",
"hostname",
"is",
"None",
":",
"hostname",
"=",
"settings",
".",
"settings",
"[",
"'mx_host'",
"]",
"if",
"port",
"is",
"None",
":",
"port",
"=",
"settings",
".",
"settings",
"[",
"'mx_port'",
"]",
"reloader",
"=",
"False",
"# use_reloader: the default setting for the reloader.",
"debugger",
"=",
"False",
"#",
"evalex",
"=",
"True",
"# should the exception evaluation feature be enabled?",
"threaded",
"=",
"False",
"# True if each request is handled in a separate thread",
"processes",
"=",
"1",
"# if greater than 1 then handle each request in a new process",
"reloader_interval",
"=",
"1",
"# the interval for the reloader in seconds.",
"static_files",
"=",
"None",
"# static_files: optional dict of static files.",
"extra_files",
"=",
"None",
"# extra_files: optional list of extra files to track for reloading.",
"ssl_context",
"=",
"None",
"# ssl_context: optional SSL context for running server in HTTPS mode.",
"self",
".",
"mx_thread",
"=",
"Thread",
"(",
"target",
"=",
"run_simple",
"(",
"hostname",
"=",
"hostname",
",",
"port",
"=",
"port",
",",
"application",
"=",
"self",
",",
"use_debugger",
"=",
"debugger",
",",
"use_evalex",
"=",
"evalex",
",",
"extra_files",
"=",
"extra_files",
",",
"use_reloader",
"=",
"reloader",
",",
"reloader_interval",
"=",
"reloader_interval",
",",
"threaded",
"=",
"threaded",
",",
"processes",
"=",
"processes",
",",
"static_files",
"=",
"static_files",
",",
"ssl_context",
"=",
"ssl_context",
")",
")",
"self",
".",
"mx_thread",
".",
"daemon",
"=",
"True",
"self",
".",
"mx_thread",
".",
"start",
"(",
")"
] | 60.617647 | 28.029412 |
def ignored_double_corner(
intersection, tangent_s, tangent_t, edge_nodes1, edge_nodes2
):
"""Check if an intersection is an "ignored" double corner.
.. note::
This is a helper used only by :func:`ignored_corner`, which in turn is
only used by :func:`classify_intersection`.
Helper for :func:`ignored_corner` where both ``s`` and
``t`` are ``0``.
Does so by checking if either edge through the ``t`` corner goes
through the interior of the other surface. An interior check
is done by checking that a few cross products are positive.
Args:
intersection (.Intersection): An intersection to "diagnose".
tangent_s (numpy.ndarray): The tangent vector (``2 x 1`` array) to
the first curve at the intersection.
tangent_t (numpy.ndarray): The tangent vector (``2 x 1`` array) to
the second curve at the intersection.
edge_nodes1 (Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]): The
nodes of the three edges of the first surface being intersected.
edge_nodes2 (Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]): The
nodes of the three edges of the second surface being intersected.
Returns:
bool: Indicates if the corner is to be ignored.
"""
# Compute the other edge for the ``s`` surface.
prev_index = (intersection.index_first - 1) % 3
prev_edge = edge_nodes1[prev_index]
alt_tangent_s = _curve_helpers.evaluate_hodograph(1.0, prev_edge)
# First check if ``tangent_t`` is interior to the ``s`` surface.
cross_prod1 = _helpers.cross_product(
tangent_s.ravel(order="F"), tangent_t.ravel(order="F")
)
# A positive cross product indicates that ``tangent_t`` is
# interior to ``tangent_s``. Similar for ``alt_tangent_s``.
# If ``tangent_t`` is interior to both, then the surfaces
# do more than just "kiss" at the corner, so the corner should
# not be ignored.
if cross_prod1 >= 0.0:
# Only compute ``cross_prod2`` if we need to.
cross_prod2 = _helpers.cross_product(
alt_tangent_s.ravel(order="F"), tangent_t.ravel(order="F")
)
if cross_prod2 >= 0.0:
return False
# If ``tangent_t`` is not interior, we check the other ``t``
# edge that ends at the corner.
prev_index = (intersection.index_second - 1) % 3
prev_edge = edge_nodes2[prev_index]
alt_tangent_t = _curve_helpers.evaluate_hodograph(1.0, prev_edge)
# Change the direction of the "in" tangent so that it points "out".
alt_tangent_t *= -1.0
cross_prod3 = _helpers.cross_product(
tangent_s.ravel(order="F"), alt_tangent_t.ravel(order="F")
)
if cross_prod3 >= 0.0:
# Only compute ``cross_prod4`` if we need to.
cross_prod4 = _helpers.cross_product(
alt_tangent_s.ravel(order="F"), alt_tangent_t.ravel(order="F")
)
if cross_prod4 >= 0.0:
return False
# If neither of ``tangent_t`` or ``alt_tangent_t`` are interior
# to the ``s`` surface, one of two things is true. Either
# the two surfaces have no interior intersection (1) or the
# ``s`` surface is bounded by both edges of the ``t`` surface
# at the corner intersection (2). To detect (2), we only need
# check if ``tangent_s`` is interior to both ``tangent_t``
# and ``alt_tangent_t``. ``cross_prod1`` contains
# (tangent_s) x (tangent_t), so it's negative will tell if
# ``tangent_s`` is interior. Similarly, ``cross_prod3``
# contains (tangent_s) x (alt_tangent_t), but we also reversed
# the sign on ``alt_tangent_t`` so switching the sign back
# and reversing the arguments in the cross product cancel out.
return cross_prod1 > 0.0 or cross_prod3 < 0.0 | [
"def",
"ignored_double_corner",
"(",
"intersection",
",",
"tangent_s",
",",
"tangent_t",
",",
"edge_nodes1",
",",
"edge_nodes2",
")",
":",
"# Compute the other edge for the ``s`` surface.",
"prev_index",
"=",
"(",
"intersection",
".",
"index_first",
"-",
"1",
")",
"%",
"3",
"prev_edge",
"=",
"edge_nodes1",
"[",
"prev_index",
"]",
"alt_tangent_s",
"=",
"_curve_helpers",
".",
"evaluate_hodograph",
"(",
"1.0",
",",
"prev_edge",
")",
"# First check if ``tangent_t`` is interior to the ``s`` surface.",
"cross_prod1",
"=",
"_helpers",
".",
"cross_product",
"(",
"tangent_s",
".",
"ravel",
"(",
"order",
"=",
"\"F\"",
")",
",",
"tangent_t",
".",
"ravel",
"(",
"order",
"=",
"\"F\"",
")",
")",
"# A positive cross product indicates that ``tangent_t`` is",
"# interior to ``tangent_s``. Similar for ``alt_tangent_s``.",
"# If ``tangent_t`` is interior to both, then the surfaces",
"# do more than just \"kiss\" at the corner, so the corner should",
"# not be ignored.",
"if",
"cross_prod1",
">=",
"0.0",
":",
"# Only compute ``cross_prod2`` if we need to.",
"cross_prod2",
"=",
"_helpers",
".",
"cross_product",
"(",
"alt_tangent_s",
".",
"ravel",
"(",
"order",
"=",
"\"F\"",
")",
",",
"tangent_t",
".",
"ravel",
"(",
"order",
"=",
"\"F\"",
")",
")",
"if",
"cross_prod2",
">=",
"0.0",
":",
"return",
"False",
"# If ``tangent_t`` is not interior, we check the other ``t``",
"# edge that ends at the corner.",
"prev_index",
"=",
"(",
"intersection",
".",
"index_second",
"-",
"1",
")",
"%",
"3",
"prev_edge",
"=",
"edge_nodes2",
"[",
"prev_index",
"]",
"alt_tangent_t",
"=",
"_curve_helpers",
".",
"evaluate_hodograph",
"(",
"1.0",
",",
"prev_edge",
")",
"# Change the direction of the \"in\" tangent so that it points \"out\".",
"alt_tangent_t",
"*=",
"-",
"1.0",
"cross_prod3",
"=",
"_helpers",
".",
"cross_product",
"(",
"tangent_s",
".",
"ravel",
"(",
"order",
"=",
"\"F\"",
")",
",",
"alt_tangent_t",
".",
"ravel",
"(",
"order",
"=",
"\"F\"",
")",
")",
"if",
"cross_prod3",
">=",
"0.0",
":",
"# Only compute ``cross_prod4`` if we need to.",
"cross_prod4",
"=",
"_helpers",
".",
"cross_product",
"(",
"alt_tangent_s",
".",
"ravel",
"(",
"order",
"=",
"\"F\"",
")",
",",
"alt_tangent_t",
".",
"ravel",
"(",
"order",
"=",
"\"F\"",
")",
")",
"if",
"cross_prod4",
">=",
"0.0",
":",
"return",
"False",
"# If neither of ``tangent_t`` or ``alt_tangent_t`` are interior",
"# to the ``s`` surface, one of two things is true. Either",
"# the two surfaces have no interior intersection (1) or the",
"# ``s`` surface is bounded by both edges of the ``t`` surface",
"# at the corner intersection (2). To detect (2), we only need",
"# check if ``tangent_s`` is interior to both ``tangent_t``",
"# and ``alt_tangent_t``. ``cross_prod1`` contains",
"# (tangent_s) x (tangent_t), so it's negative will tell if",
"# ``tangent_s`` is interior. Similarly, ``cross_prod3``",
"# contains (tangent_s) x (alt_tangent_t), but we also reversed",
"# the sign on ``alt_tangent_t`` so switching the sign back",
"# and reversing the arguments in the cross product cancel out.",
"return",
"cross_prod1",
">",
"0.0",
"or",
"cross_prod3",
"<",
"0.0"
] | 44.746988 | 22.493976 |
def generate_nucmer_commands(
filenames,
outdir=".",
nucmer_exe=pyani_config.NUCMER_DEFAULT,
filter_exe=pyani_config.FILTER_DEFAULT,
maxmatch=False,
):
"""Return a tuple of lists of NUCmer command-lines for ANIm
The first element is a list of NUCmer commands, the second a list
of delta_filter_wrapper.py commands. These are ordered such that
commands are paired. The NUCmer commands should be run before
the delta-filter commands.
- filenames - a list of paths to input FASTA files
- outdir - path to output directory
- nucmer_exe - location of the nucmer binary
- maxmatch - Boolean flag indicating to use NUCmer's -maxmatch option
Loop over all FASTA files generating NUCmer command lines for each
pairwise comparison.
"""
nucmer_cmdlines, delta_filter_cmdlines = [], []
for idx, fname1 in enumerate(filenames[:-1]):
for fname2 in filenames[idx + 1 :]:
ncmd, dcmd = construct_nucmer_cmdline(
fname1, fname2, outdir, nucmer_exe, filter_exe, maxmatch
)
nucmer_cmdlines.append(ncmd)
delta_filter_cmdlines.append(dcmd)
return (nucmer_cmdlines, delta_filter_cmdlines) | [
"def",
"generate_nucmer_commands",
"(",
"filenames",
",",
"outdir",
"=",
"\".\"",
",",
"nucmer_exe",
"=",
"pyani_config",
".",
"NUCMER_DEFAULT",
",",
"filter_exe",
"=",
"pyani_config",
".",
"FILTER_DEFAULT",
",",
"maxmatch",
"=",
"False",
",",
")",
":",
"nucmer_cmdlines",
",",
"delta_filter_cmdlines",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"idx",
",",
"fname1",
"in",
"enumerate",
"(",
"filenames",
"[",
":",
"-",
"1",
"]",
")",
":",
"for",
"fname2",
"in",
"filenames",
"[",
"idx",
"+",
"1",
":",
"]",
":",
"ncmd",
",",
"dcmd",
"=",
"construct_nucmer_cmdline",
"(",
"fname1",
",",
"fname2",
",",
"outdir",
",",
"nucmer_exe",
",",
"filter_exe",
",",
"maxmatch",
")",
"nucmer_cmdlines",
".",
"append",
"(",
"ncmd",
")",
"delta_filter_cmdlines",
".",
"append",
"(",
"dcmd",
")",
"return",
"(",
"nucmer_cmdlines",
",",
"delta_filter_cmdlines",
")"
] | 38.387097 | 17.741935 |
def general_eq(a, b, attributes):
"""Return whether two objects are equal up to the given attributes.
If an attribute is called ``'phi'``, it is compared up to |PRECISION|.
If an attribute is called ``'mechanism'`` or ``'purview'``, it is
compared using set equality. All other attributes are compared with
:func:`numpy_aware_eq`.
"""
try:
for attr in attributes:
_a, _b = getattr(a, attr), getattr(b, attr)
if attr in ['phi', 'alpha']:
if not utils.eq(_a, _b):
return False
elif attr in ['mechanism', 'purview']:
if _a is None or _b is None:
if _a != _b:
return False
elif not set(_a) == set(_b):
return False
else:
if not numpy_aware_eq(_a, _b):
return False
return True
except AttributeError:
return False | [
"def",
"general_eq",
"(",
"a",
",",
"b",
",",
"attributes",
")",
":",
"try",
":",
"for",
"attr",
"in",
"attributes",
":",
"_a",
",",
"_b",
"=",
"getattr",
"(",
"a",
",",
"attr",
")",
",",
"getattr",
"(",
"b",
",",
"attr",
")",
"if",
"attr",
"in",
"[",
"'phi'",
",",
"'alpha'",
"]",
":",
"if",
"not",
"utils",
".",
"eq",
"(",
"_a",
",",
"_b",
")",
":",
"return",
"False",
"elif",
"attr",
"in",
"[",
"'mechanism'",
",",
"'purview'",
"]",
":",
"if",
"_a",
"is",
"None",
"or",
"_b",
"is",
"None",
":",
"if",
"_a",
"!=",
"_b",
":",
"return",
"False",
"elif",
"not",
"set",
"(",
"_a",
")",
"==",
"set",
"(",
"_b",
")",
":",
"return",
"False",
"else",
":",
"if",
"not",
"numpy_aware_eq",
"(",
"_a",
",",
"_b",
")",
":",
"return",
"False",
"return",
"True",
"except",
"AttributeError",
":",
"return",
"False"
] | 36.807692 | 13.423077 |
def memoize(f):
""" Memoization decorator for a function taking one or more arguments. """
class memodict(dict):
def __getitem__(self, *key):
return dict.__getitem__(self, key)
def __missing__(self, key):
ret = self[key] = f(*key)
return ret
return memodict().__getitem__ | [
"def",
"memoize",
"(",
"f",
")",
":",
"class",
"memodict",
"(",
"dict",
")",
":",
"def",
"__getitem__",
"(",
"self",
",",
"*",
"key",
")",
":",
"return",
"dict",
".",
"__getitem__",
"(",
"self",
",",
"key",
")",
"def",
"__missing__",
"(",
"self",
",",
"key",
")",
":",
"ret",
"=",
"self",
"[",
"key",
"]",
"=",
"f",
"(",
"*",
"key",
")",
"return",
"ret",
"return",
"memodict",
"(",
")",
".",
"__getitem__"
] | 27.25 | 16.916667 |
def extract(self, *args):
"""
Extract a specific variable
"""
self.time = np.loadtxt(self.abspath,
skiprows=self._attributes['data_idx']+1,
unpack=True, usecols=(0,))
for variable_idx in args:
data = np.loadtxt(self.abspath,
skiprows=self._attributes['data_idx']+1,
unpack=True,
usecols=(variable_idx,))
with open(self.abspath) as fobj:
for idx, line in enumerate(fobj):
if idx == 1 + variable_idx+self._attributes['CATALOG']:
try:
self.data[variable_idx] = data[:len(self.time)]
except TypeError:
self.data[variable_idx] = data.base
self.label[variable_idx] = line.replace("\'",
'').replace("\n",
"")
break | [
"def",
"extract",
"(",
"self",
",",
"*",
"args",
")",
":",
"self",
".",
"time",
"=",
"np",
".",
"loadtxt",
"(",
"self",
".",
"abspath",
",",
"skiprows",
"=",
"self",
".",
"_attributes",
"[",
"'data_idx'",
"]",
"+",
"1",
",",
"unpack",
"=",
"True",
",",
"usecols",
"=",
"(",
"0",
",",
")",
")",
"for",
"variable_idx",
"in",
"args",
":",
"data",
"=",
"np",
".",
"loadtxt",
"(",
"self",
".",
"abspath",
",",
"skiprows",
"=",
"self",
".",
"_attributes",
"[",
"'data_idx'",
"]",
"+",
"1",
",",
"unpack",
"=",
"True",
",",
"usecols",
"=",
"(",
"variable_idx",
",",
")",
")",
"with",
"open",
"(",
"self",
".",
"abspath",
")",
"as",
"fobj",
":",
"for",
"idx",
",",
"line",
"in",
"enumerate",
"(",
"fobj",
")",
":",
"if",
"idx",
"==",
"1",
"+",
"variable_idx",
"+",
"self",
".",
"_attributes",
"[",
"'CATALOG'",
"]",
":",
"try",
":",
"self",
".",
"data",
"[",
"variable_idx",
"]",
"=",
"data",
"[",
":",
"len",
"(",
"self",
".",
"time",
")",
"]",
"except",
"TypeError",
":",
"self",
".",
"data",
"[",
"variable_idx",
"]",
"=",
"data",
".",
"base",
"self",
".",
"label",
"[",
"variable_idx",
"]",
"=",
"line",
".",
"replace",
"(",
"\"\\'\"",
",",
"''",
")",
".",
"replace",
"(",
"\"\\n\"",
",",
"\"\"",
")",
"break"
] | 50.043478 | 16.478261 |
def lsm_var_to_grid(self, out_grid_file, lsm_data_var, gssha_convert_var, time_step=0, ascii_format='grass'):
"""This function takes array data and writes out a GSSHA ascii grid.
Parameters:
out_grid_file(str): Location of ASCII file to generate.
lsm_data_var(str or list): This is the variable name for precipitation in the LSM files.
gssha_convert_var(str): This is the name of the variable used in GRIDtoGSSHA to convert data with.
time_step(Optional[int, datetime]): Time step in file to export data from. Default is the initial time step.
ascii_format(Optional[str]): Default is 'grass' for GRASS ASCII. If you want Arc ASCII, use 'arc'.
GRIDtoGSSHA Example:
.. code:: python
from gsshapy.grid import GRIDtoGSSHA
# STEP 1: Initialize class
g2g = GRIDtoGSSHA(gssha_project_folder='/path/to/gssha_project',
gssha_project_file_name='gssha_project.prj',
lsm_input_folder_path='/path/to/wrf-data',
lsm_search_card='*.nc',
lsm_lat_var='XLAT',
lsm_lon_var='XLONG',
lsm_time_var='Times',
lsm_lat_dim='south_north',
lsm_lon_dim='west_east',
lsm_time_dim='Time',
)
# STEP 2: Generate init snow grid (from LSM)
# NOTE: Card is INIT_SWE_DEPTH
g2g.lsm_var_to_grid(out_grid_file="E:/GSSHA/swe_grid.asc",
lsm_data_var='SWE_inst',
gssha_convert_var='swe')
"""
self._load_converted_gssha_data_from_lsm(gssha_convert_var, lsm_data_var, 'grid', time_step)
gssha_data_var_name = self.netcdf_attributes[gssha_convert_var]['gssha_name']
self.data = self.data.lsm.to_projection(gssha_data_var_name,
projection=self.gssha_grid.projection)
self._resample_data(gssha_data_var_name)
arr_grid = ArrayGrid(in_array=self.data[gssha_data_var_name].values,
wkt_projection=self.data.lsm.projection.ExportToWkt(),
geotransform=self.data.lsm.geotransform)
if ascii_format.strip().lower() == 'grass':
arr_grid.to_grass_ascii(out_grid_file)
elif ascii_format.strip().lower() == 'arc':
arr_grid.to_arc_ascii(out_grid_file)
else:
raise ValueError("Invalid argument for 'ascii_format'. Only 'grass' or 'arc' allowed.") | [
"def",
"lsm_var_to_grid",
"(",
"self",
",",
"out_grid_file",
",",
"lsm_data_var",
",",
"gssha_convert_var",
",",
"time_step",
"=",
"0",
",",
"ascii_format",
"=",
"'grass'",
")",
":",
"self",
".",
"_load_converted_gssha_data_from_lsm",
"(",
"gssha_convert_var",
",",
"lsm_data_var",
",",
"'grid'",
",",
"time_step",
")",
"gssha_data_var_name",
"=",
"self",
".",
"netcdf_attributes",
"[",
"gssha_convert_var",
"]",
"[",
"'gssha_name'",
"]",
"self",
".",
"data",
"=",
"self",
".",
"data",
".",
"lsm",
".",
"to_projection",
"(",
"gssha_data_var_name",
",",
"projection",
"=",
"self",
".",
"gssha_grid",
".",
"projection",
")",
"self",
".",
"_resample_data",
"(",
"gssha_data_var_name",
")",
"arr_grid",
"=",
"ArrayGrid",
"(",
"in_array",
"=",
"self",
".",
"data",
"[",
"gssha_data_var_name",
"]",
".",
"values",
",",
"wkt_projection",
"=",
"self",
".",
"data",
".",
"lsm",
".",
"projection",
".",
"ExportToWkt",
"(",
")",
",",
"geotransform",
"=",
"self",
".",
"data",
".",
"lsm",
".",
"geotransform",
")",
"if",
"ascii_format",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"==",
"'grass'",
":",
"arr_grid",
".",
"to_grass_ascii",
"(",
"out_grid_file",
")",
"elif",
"ascii_format",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"==",
"'arc'",
":",
"arr_grid",
".",
"to_arc_ascii",
"(",
"out_grid_file",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid argument for 'ascii_format'. Only 'grass' or 'arc' allowed.\"",
")"
] | 51.596154 | 28.653846 |
def _get_merge_rules(properties, path=None):
"""
Yields merge rules as key-value pairs, in which the first element is a JSON path as a tuple, and the second element
is a list of merge properties whose values are `true`.
"""
if path is None:
path = ()
for key, value in properties.items():
new_path = path + (key,)
types = _get_types(value)
# `omitWhenMerged` supersedes all other rules.
# See http://standard.open-contracting.org/1.1-dev/en/schema/merging/#omit-when-merged
if value.get('omitWhenMerged') or value.get('mergeStrategy') == 'ocdsOmit':
yield (new_path, {'omitWhenMerged'})
# `wholeListMerge` supersedes any nested rules.
# See http://standard.open-contracting.org/1.1-dev/en/schema/merging/#whole-list-merge
elif 'array' in types and (value.get('wholeListMerge') or value.get('mergeStrategy') == 'ocdsVersion'):
yield (new_path, {'wholeListMerge'})
elif 'object' in types and 'properties' in value:
yield from _get_merge_rules(value['properties'], path=new_path)
elif 'array' in types and 'items' in value:
item_types = _get_types(value['items'])
# See http://standard.open-contracting.org/1.1-dev/en/schema/merging/#objects
if any(item_type != 'object' for item_type in item_types):
yield (new_path, {'wholeListMerge'})
elif 'object' in item_types and 'properties' in value['items']:
# See http://standard.open-contracting.org/1.1-dev/en/schema/merging/#whole-list-merge
if 'id' not in value['items']['properties']:
yield (new_path, {'wholeListMerge'})
else:
yield from _get_merge_rules(value['items']['properties'], path=new_path) | [
"def",
"_get_merge_rules",
"(",
"properties",
",",
"path",
"=",
"None",
")",
":",
"if",
"path",
"is",
"None",
":",
"path",
"=",
"(",
")",
"for",
"key",
",",
"value",
"in",
"properties",
".",
"items",
"(",
")",
":",
"new_path",
"=",
"path",
"+",
"(",
"key",
",",
")",
"types",
"=",
"_get_types",
"(",
"value",
")",
"# `omitWhenMerged` supersedes all other rules.",
"# See http://standard.open-contracting.org/1.1-dev/en/schema/merging/#omit-when-merged",
"if",
"value",
".",
"get",
"(",
"'omitWhenMerged'",
")",
"or",
"value",
".",
"get",
"(",
"'mergeStrategy'",
")",
"==",
"'ocdsOmit'",
":",
"yield",
"(",
"new_path",
",",
"{",
"'omitWhenMerged'",
"}",
")",
"# `wholeListMerge` supersedes any nested rules.",
"# See http://standard.open-contracting.org/1.1-dev/en/schema/merging/#whole-list-merge",
"elif",
"'array'",
"in",
"types",
"and",
"(",
"value",
".",
"get",
"(",
"'wholeListMerge'",
")",
"or",
"value",
".",
"get",
"(",
"'mergeStrategy'",
")",
"==",
"'ocdsVersion'",
")",
":",
"yield",
"(",
"new_path",
",",
"{",
"'wholeListMerge'",
"}",
")",
"elif",
"'object'",
"in",
"types",
"and",
"'properties'",
"in",
"value",
":",
"yield",
"from",
"_get_merge_rules",
"(",
"value",
"[",
"'properties'",
"]",
",",
"path",
"=",
"new_path",
")",
"elif",
"'array'",
"in",
"types",
"and",
"'items'",
"in",
"value",
":",
"item_types",
"=",
"_get_types",
"(",
"value",
"[",
"'items'",
"]",
")",
"# See http://standard.open-contracting.org/1.1-dev/en/schema/merging/#objects",
"if",
"any",
"(",
"item_type",
"!=",
"'object'",
"for",
"item_type",
"in",
"item_types",
")",
":",
"yield",
"(",
"new_path",
",",
"{",
"'wholeListMerge'",
"}",
")",
"elif",
"'object'",
"in",
"item_types",
"and",
"'properties'",
"in",
"value",
"[",
"'items'",
"]",
":",
"# See http://standard.open-contracting.org/1.1-dev/en/schema/merging/#whole-list-merge",
"if",
"'id'",
"not",
"in",
"value",
"[",
"'items'",
"]",
"[",
"'properties'",
"]",
":",
"yield",
"(",
"new_path",
",",
"{",
"'wholeListMerge'",
"}",
")",
"else",
":",
"yield",
"from",
"_get_merge_rules",
"(",
"value",
"[",
"'items'",
"]",
"[",
"'properties'",
"]",
",",
"path",
"=",
"new_path",
")"
] | 55.030303 | 26.545455 |
def density_2d(self, x, y, Rs, rho0, r_trunc, center_x=0, center_y=0):
"""
projected two dimenstional NFW profile (kappa*Sigma_crit)
:param R: radius of interest
:type R: float/numpy array
:param Rs: scale radius
:type Rs: float
:param rho0: density normalization (characteristic density)
:type rho0: float
:param r200: radius of (sub)halo
:type r200: float>0
:return: Epsilon(R) projected density at radius R
"""
x_ = x - center_x
y_ = y - center_y
R = np.sqrt(x_ ** 2 + y_ ** 2)
x = R * Rs ** -1
tau = float(r_trunc) * Rs ** -1
Fx = self._F(x, tau)
return 2 * rho0 * Rs * Fx | [
"def",
"density_2d",
"(",
"self",
",",
"x",
",",
"y",
",",
"Rs",
",",
"rho0",
",",
"r_trunc",
",",
"center_x",
"=",
"0",
",",
"center_y",
"=",
"0",
")",
":",
"x_",
"=",
"x",
"-",
"center_x",
"y_",
"=",
"y",
"-",
"center_y",
"R",
"=",
"np",
".",
"sqrt",
"(",
"x_",
"**",
"2",
"+",
"y_",
"**",
"2",
")",
"x",
"=",
"R",
"*",
"Rs",
"**",
"-",
"1",
"tau",
"=",
"float",
"(",
"r_trunc",
")",
"*",
"Rs",
"**",
"-",
"1",
"Fx",
"=",
"self",
".",
"_F",
"(",
"x",
",",
"tau",
")",
"return",
"2",
"*",
"rho0",
"*",
"Rs",
"*",
"Fx"
] | 33.761905 | 12.904762 |
def cache_size(self, new_value):
'''
Set the cache size used to reduce the number of database
access operations.
'''
if type(new_value) == int and 0 < new_value:
if self._lemma_cache is not None:
self._lemma_cache = repoze.lru.LRUCache(new_value)
self._synset_cache = repoze.lru.LRUCache(new_value) | [
"def",
"cache_size",
"(",
"self",
",",
"new_value",
")",
":",
"if",
"type",
"(",
"new_value",
")",
"==",
"int",
"and",
"0",
"<",
"new_value",
":",
"if",
"self",
".",
"_lemma_cache",
"is",
"not",
"None",
":",
"self",
".",
"_lemma_cache",
"=",
"repoze",
".",
"lru",
".",
"LRUCache",
"(",
"new_value",
")",
"self",
".",
"_synset_cache",
"=",
"repoze",
".",
"lru",
".",
"LRUCache",
"(",
"new_value",
")"
] | 41.666667 | 19.444444 |
def reset(self):
"""
Resets the value of config item to its default value.
"""
old_value = self._value
old_raw_str_value = self.raw_str_value
self._value = not_set
self.raw_str_value = not_set
new_value = self._value
if old_value is not_set:
# Nothing to report
return
if self.section:
self.section.dispatch_event(
self.section.hooks.item_value_changed,
item=self,
old_value=old_value,
new_value=new_value,
old_raw_str_value=old_raw_str_value,
new_raw_str_value=self.raw_str_value,
) | [
"def",
"reset",
"(",
"self",
")",
":",
"old_value",
"=",
"self",
".",
"_value",
"old_raw_str_value",
"=",
"self",
".",
"raw_str_value",
"self",
".",
"_value",
"=",
"not_set",
"self",
".",
"raw_str_value",
"=",
"not_set",
"new_value",
"=",
"self",
".",
"_value",
"if",
"old_value",
"is",
"not_set",
":",
"# Nothing to report",
"return",
"if",
"self",
".",
"section",
":",
"self",
".",
"section",
".",
"dispatch_event",
"(",
"self",
".",
"section",
".",
"hooks",
".",
"item_value_changed",
",",
"item",
"=",
"self",
",",
"old_value",
"=",
"old_value",
",",
"new_value",
"=",
"new_value",
",",
"old_raw_str_value",
"=",
"old_raw_str_value",
",",
"new_raw_str_value",
"=",
"self",
".",
"raw_str_value",
",",
")"
] | 27.48 | 15.48 |
def get_structdmtypes_for_python_typeorobject(typeorobj):
"""
Return structchar, dmtype for the python (or numpy)
type or object typeorobj.
For more complex types we only return the dm type
"""
# not isinstance is probably a bit more lenient than 'is'
# ie isinstance(x,str) is nicer than type(x) is str.
# hence we use isinstance when available
if isinstance(typeorobj, type):
comparer = lambda test: test is typeorobj
else:
comparer = lambda test: isinstance(typeorobj, test)
if comparer(int) and not -2**31 < typeorobj < 2**31 - 1:
return 'q', 11
for key, name, sc, types in dm_simple_names:
for t in types:
if comparer(t):
return sc, key
if comparer(str):
return None, get_dmtype_for_name('array') # treat all strings as arrays!
elif comparer(unicode_type):
return None, get_dmtype_for_name('array') # treat all strings as arrays!
elif comparer(array.array):
return None, get_dmtype_for_name('array')
elif comparer(tuple):
return None, get_dmtype_for_name('struct')
elif comparer(structarray):
return None, get_dmtype_for_name('array')
logging.warn("No appropriate DMType found for %s, %s", typeorobj, type(typeorobj))
return None | [
"def",
"get_structdmtypes_for_python_typeorobject",
"(",
"typeorobj",
")",
":",
"# not isinstance is probably a bit more lenient than 'is'",
"# ie isinstance(x,str) is nicer than type(x) is str.",
"# hence we use isinstance when available",
"if",
"isinstance",
"(",
"typeorobj",
",",
"type",
")",
":",
"comparer",
"=",
"lambda",
"test",
":",
"test",
"is",
"typeorobj",
"else",
":",
"comparer",
"=",
"lambda",
"test",
":",
"isinstance",
"(",
"typeorobj",
",",
"test",
")",
"if",
"comparer",
"(",
"int",
")",
"and",
"not",
"-",
"2",
"**",
"31",
"<",
"typeorobj",
"<",
"2",
"**",
"31",
"-",
"1",
":",
"return",
"'q'",
",",
"11",
"for",
"key",
",",
"name",
",",
"sc",
",",
"types",
"in",
"dm_simple_names",
":",
"for",
"t",
"in",
"types",
":",
"if",
"comparer",
"(",
"t",
")",
":",
"return",
"sc",
",",
"key",
"if",
"comparer",
"(",
"str",
")",
":",
"return",
"None",
",",
"get_dmtype_for_name",
"(",
"'array'",
")",
"# treat all strings as arrays!",
"elif",
"comparer",
"(",
"unicode_type",
")",
":",
"return",
"None",
",",
"get_dmtype_for_name",
"(",
"'array'",
")",
"# treat all strings as arrays!",
"elif",
"comparer",
"(",
"array",
".",
"array",
")",
":",
"return",
"None",
",",
"get_dmtype_for_name",
"(",
"'array'",
")",
"elif",
"comparer",
"(",
"tuple",
")",
":",
"return",
"None",
",",
"get_dmtype_for_name",
"(",
"'struct'",
")",
"elif",
"comparer",
"(",
"structarray",
")",
":",
"return",
"None",
",",
"get_dmtype_for_name",
"(",
"'array'",
")",
"logging",
".",
"warn",
"(",
"\"No appropriate DMType found for %s, %s\"",
",",
"typeorobj",
",",
"type",
"(",
"typeorobj",
")",
")",
"return",
"None"
] | 38.848485 | 17.212121 |
def deployed_resources(self, chalice_stage_name):
# type: (str) -> DeployedResources
"""Return resources associated with a given stage.
If a deployment to a given stage has never happened,
this method will return a value of None.
"""
# This is arguably the wrong level of abstraction.
# We might be able to move this elsewhere.
deployed_file = os.path.join(
self.project_dir, '.chalice', 'deployed',
'%s.json' % chalice_stage_name)
data = self._load_json_file(deployed_file)
if data is not None:
schema_version = data.get('schema_version', '1.0')
if schema_version != '2.0':
raise ValueError("Unsupported schema version (%s) in file: %s"
% (schema_version, deployed_file))
return DeployedResources(data)
return self._try_old_deployer_values(chalice_stage_name) | [
"def",
"deployed_resources",
"(",
"self",
",",
"chalice_stage_name",
")",
":",
"# type: (str) -> DeployedResources",
"# This is arguably the wrong level of abstraction.",
"# We might be able to move this elsewhere.",
"deployed_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"project_dir",
",",
"'.chalice'",
",",
"'deployed'",
",",
"'%s.json'",
"%",
"chalice_stage_name",
")",
"data",
"=",
"self",
".",
"_load_json_file",
"(",
"deployed_file",
")",
"if",
"data",
"is",
"not",
"None",
":",
"schema_version",
"=",
"data",
".",
"get",
"(",
"'schema_version'",
",",
"'1.0'",
")",
"if",
"schema_version",
"!=",
"'2.0'",
":",
"raise",
"ValueError",
"(",
"\"Unsupported schema version (%s) in file: %s\"",
"%",
"(",
"schema_version",
",",
"deployed_file",
")",
")",
"return",
"DeployedResources",
"(",
"data",
")",
"return",
"self",
".",
"_try_old_deployer_values",
"(",
"chalice_stage_name",
")"
] | 44.714286 | 14.380952 |
def new_edge(self, node_a, node_b, cost=1):
"""Adds a new edge from node_a to node_b that has a cost.
Returns the edge id of the new edge."""
# Verify that both nodes exist in the graph
try:
self.nodes[node_a]
except KeyError:
raise NonexistentNodeError(node_a)
try:
self.nodes[node_b]
except KeyError:
raise NonexistentNodeError(node_b)
# Create the new edge
edge_id = self.generate_edge_id()
edge = {'id': edge_id,
'vertices': (node_a, node_b),
'cost': cost,
'data': {}
}
self.edges[edge_id] = edge
self.nodes[node_a]['edges'].append(edge_id)
self._num_edges += 1
return edge_id | [
"def",
"new_edge",
"(",
"self",
",",
"node_a",
",",
"node_b",
",",
"cost",
"=",
"1",
")",
":",
"# Verify that both nodes exist in the graph",
"try",
":",
"self",
".",
"nodes",
"[",
"node_a",
"]",
"except",
"KeyError",
":",
"raise",
"NonexistentNodeError",
"(",
"node_a",
")",
"try",
":",
"self",
".",
"nodes",
"[",
"node_b",
"]",
"except",
"KeyError",
":",
"raise",
"NonexistentNodeError",
"(",
"node_b",
")",
"# Create the new edge",
"edge_id",
"=",
"self",
".",
"generate_edge_id",
"(",
")",
"edge",
"=",
"{",
"'id'",
":",
"edge_id",
",",
"'vertices'",
":",
"(",
"node_a",
",",
"node_b",
")",
",",
"'cost'",
":",
"cost",
",",
"'data'",
":",
"{",
"}",
"}",
"self",
".",
"edges",
"[",
"edge_id",
"]",
"=",
"edge",
"self",
".",
"nodes",
"[",
"node_a",
"]",
"[",
"'edges'",
"]",
".",
"append",
"(",
"edge_id",
")",
"self",
".",
"_num_edges",
"+=",
"1",
"return",
"edge_id"
] | 26.689655 | 17.37931 |
def set_attribute_mapping(resource_attr_a, resource_attr_b, **kwargs):
"""
Define one resource attribute from one network as being the same as
that from another network.
"""
user_id = kwargs.get('user_id')
ra_1 = get_resource_attribute(resource_attr_a)
ra_2 = get_resource_attribute(resource_attr_b)
mapping = ResourceAttrMap(resource_attr_id_a = resource_attr_a,
resource_attr_id_b = resource_attr_b,
network_a_id = ra_1.get_network().id,
network_b_id = ra_2.get_network().id )
db.DBSession.add(mapping)
db.DBSession.flush()
return mapping | [
"def",
"set_attribute_mapping",
"(",
"resource_attr_a",
",",
"resource_attr_b",
",",
"*",
"*",
"kwargs",
")",
":",
"user_id",
"=",
"kwargs",
".",
"get",
"(",
"'user_id'",
")",
"ra_1",
"=",
"get_resource_attribute",
"(",
"resource_attr_a",
")",
"ra_2",
"=",
"get_resource_attribute",
"(",
"resource_attr_b",
")",
"mapping",
"=",
"ResourceAttrMap",
"(",
"resource_attr_id_a",
"=",
"resource_attr_a",
",",
"resource_attr_id_b",
"=",
"resource_attr_b",
",",
"network_a_id",
"=",
"ra_1",
".",
"get_network",
"(",
")",
".",
"id",
",",
"network_b_id",
"=",
"ra_2",
".",
"get_network",
"(",
")",
".",
"id",
")",
"db",
".",
"DBSession",
".",
"add",
"(",
"mapping",
")",
"db",
".",
"DBSession",
".",
"flush",
"(",
")",
"return",
"mapping"
] | 35.473684 | 22.105263 |
def main(): # pylint: disable-msg=R0912,R0915
"""Main."""
parser = optparse.OptionParser()
parser.usage = textwrap.dedent("""\
%prog {--run|--install_key|--dump_config} [options]
SSH command authenticator.
Used to restrict which commands can be run via trusted SSH keys.
""")
group = optparse.OptionGroup(
parser, 'Run Mode Options',
'These options determine in which mode the authprogs '
'program runs.')
group.add_option(
'-r', '--run', dest='run', action='store_true',
help='Act as ssh command authenticator. Use this '
'when calling from authorized_keys.')
group.add_option(
'--dump_config', dest='dump_config',
action='store_true',
help='Dump configuration (python format) '
'to standard out and exit.')
group.add_option(
'--install_key', dest='install_key',
help='Install the named ssh public key file to '
'authorized_keys.', metavar='FILE')
parser.add_option_group(group)
group = optparse.OptionGroup(parser, 'Other Options')
group.add_option(
'--keyname', dest='keyname',
help='Name for this key, used when matching '
'config blocks.')
group.add_option(
'--configfile', dest='configfile',
help='Path to authprogs configuration file. '
'Defaults to ~/.ssh/authprogs.yaml',
metavar='FILE')
group.add_option(
'--configdir', dest='configdir',
help='Path to authprogs configuration directory. '
'Defaults to ~/.ssh/authprogs.d',
metavar='DIR')
group.add_option('--logfile', dest='logfile',
help='Write logging info to this file. '
'Defaults to no logging.',
metavar='FILE')
group.add_option('--debug', dest='debug', action='store_true',
help='Write additional debugging information '
'to --logfile')
group.add_option('--authorized_keys', dest='authorized_keys',
default=os.path.expanduser('~/.ssh/authorized_keys'),
help='Location of authorized_keys file for '
'--install_key. Defaults to ~/.ssh/authorized_keys',
metavar='FILE')
parser.add_option_group(group)
opts, args = parser.parse_args()
if args:
sys.exit('authprogs does not accept commandline arguments.')
if not opts.configfile:
cfg = os.path.expanduser('~/.ssh/authprogs.yaml')
if os.path.isfile(cfg):
opts.configfile = cfg
if not opts.configdir:
cfg = os.path.expanduser('~/.ssh/authprogs.d')
if os.path.isdir(cfg):
opts.configdir = cfg
if opts.debug and not opts.logfile:
parser.error('--debug requires use of --logfile')
ap = None
try:
ap = AuthProgs(logfile=opts.logfile, # pylint: disable-msg=C0103
configfile=opts.configfile,
configdir=opts.configdir,
debug=opts.debug,
keyname=opts.keyname)
if opts.dump_config:
ap.dump_config()
sys.exit(0)
elif opts.install_key:
try:
ap.install_key(opts.install_key, opts.authorized_keys)
sys.stderr.write('Key installed successfully.\n')
sys.exit(0)
except InstallError as err:
sys.stderr.write('Key install failed: %s' % err)
sys.exit(1)
elif opts.run:
ap.exec_command()
sys.exit('authprogs command returned - should '
'never happen.')
else:
parser.error('Not sure what to do. Consider --help')
except SSHEnvironmentError as err:
ap.log('SSHEnvironmentError "%s"\n%s\n' % (
err, traceback.format_exc()))
sys.exit('authprogs: %s' % err)
except ConfigError as err:
ap.log('ConfigError "%s"\n%s\n' % (
err, traceback.format_exc()))
sys.exit('authprogs: %s' % err)
except CommandRejected as err:
sys.exit('authprogs: %s' % err)
except Exception as err:
if ap:
ap.log('Unexpected exception: %s\n%s\n' % (
err, traceback.format_exc()))
else:
sys.stderr.write('Unexpected exception: %s\n%s\n' % (
err, traceback.format_exc()))
sys.exit('authprogs experienced an unexpected exception.') | [
"def",
"main",
"(",
")",
":",
"# pylint: disable-msg=R0912,R0915",
"parser",
"=",
"optparse",
".",
"OptionParser",
"(",
")",
"parser",
".",
"usage",
"=",
"textwrap",
".",
"dedent",
"(",
"\"\"\"\\\n %prog {--run|--install_key|--dump_config} [options]\n\n SSH command authenticator.\n\n Used to restrict which commands can be run via trusted SSH keys.\n \"\"\"",
")",
"group",
"=",
"optparse",
".",
"OptionGroup",
"(",
"parser",
",",
"'Run Mode Options'",
",",
"'These options determine in which mode the authprogs '",
"'program runs.'",
")",
"group",
".",
"add_option",
"(",
"'-r'",
",",
"'--run'",
",",
"dest",
"=",
"'run'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Act as ssh command authenticator. Use this '",
"'when calling from authorized_keys.'",
")",
"group",
".",
"add_option",
"(",
"'--dump_config'",
",",
"dest",
"=",
"'dump_config'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Dump configuration (python format) '",
"'to standard out and exit.'",
")",
"group",
".",
"add_option",
"(",
"'--install_key'",
",",
"dest",
"=",
"'install_key'",
",",
"help",
"=",
"'Install the named ssh public key file to '",
"'authorized_keys.'",
",",
"metavar",
"=",
"'FILE'",
")",
"parser",
".",
"add_option_group",
"(",
"group",
")",
"group",
"=",
"optparse",
".",
"OptionGroup",
"(",
"parser",
",",
"'Other Options'",
")",
"group",
".",
"add_option",
"(",
"'--keyname'",
",",
"dest",
"=",
"'keyname'",
",",
"help",
"=",
"'Name for this key, used when matching '",
"'config blocks.'",
")",
"group",
".",
"add_option",
"(",
"'--configfile'",
",",
"dest",
"=",
"'configfile'",
",",
"help",
"=",
"'Path to authprogs configuration file. '",
"'Defaults to ~/.ssh/authprogs.yaml'",
",",
"metavar",
"=",
"'FILE'",
")",
"group",
".",
"add_option",
"(",
"'--configdir'",
",",
"dest",
"=",
"'configdir'",
",",
"help",
"=",
"'Path to authprogs configuration directory. '",
"'Defaults to ~/.ssh/authprogs.d'",
",",
"metavar",
"=",
"'DIR'",
")",
"group",
".",
"add_option",
"(",
"'--logfile'",
",",
"dest",
"=",
"'logfile'",
",",
"help",
"=",
"'Write logging info to this file. '",
"'Defaults to no logging.'",
",",
"metavar",
"=",
"'FILE'",
")",
"group",
".",
"add_option",
"(",
"'--debug'",
",",
"dest",
"=",
"'debug'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Write additional debugging information '",
"'to --logfile'",
")",
"group",
".",
"add_option",
"(",
"'--authorized_keys'",
",",
"dest",
"=",
"'authorized_keys'",
",",
"default",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~/.ssh/authorized_keys'",
")",
",",
"help",
"=",
"'Location of authorized_keys file for '",
"'--install_key. Defaults to ~/.ssh/authorized_keys'",
",",
"metavar",
"=",
"'FILE'",
")",
"parser",
".",
"add_option_group",
"(",
"group",
")",
"opts",
",",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"if",
"args",
":",
"sys",
".",
"exit",
"(",
"'authprogs does not accept commandline arguments.'",
")",
"if",
"not",
"opts",
".",
"configfile",
":",
"cfg",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~/.ssh/authprogs.yaml'",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"cfg",
")",
":",
"opts",
".",
"configfile",
"=",
"cfg",
"if",
"not",
"opts",
".",
"configdir",
":",
"cfg",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~/.ssh/authprogs.d'",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"cfg",
")",
":",
"opts",
".",
"configdir",
"=",
"cfg",
"if",
"opts",
".",
"debug",
"and",
"not",
"opts",
".",
"logfile",
":",
"parser",
".",
"error",
"(",
"'--debug requires use of --logfile'",
")",
"ap",
"=",
"None",
"try",
":",
"ap",
"=",
"AuthProgs",
"(",
"logfile",
"=",
"opts",
".",
"logfile",
",",
"# pylint: disable-msg=C0103",
"configfile",
"=",
"opts",
".",
"configfile",
",",
"configdir",
"=",
"opts",
".",
"configdir",
",",
"debug",
"=",
"opts",
".",
"debug",
",",
"keyname",
"=",
"opts",
".",
"keyname",
")",
"if",
"opts",
".",
"dump_config",
":",
"ap",
".",
"dump_config",
"(",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"elif",
"opts",
".",
"install_key",
":",
"try",
":",
"ap",
".",
"install_key",
"(",
"opts",
".",
"install_key",
",",
"opts",
".",
"authorized_keys",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"'Key installed successfully.\\n'",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"except",
"InstallError",
"as",
"err",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"'Key install failed: %s'",
"%",
"err",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"elif",
"opts",
".",
"run",
":",
"ap",
".",
"exec_command",
"(",
")",
"sys",
".",
"exit",
"(",
"'authprogs command returned - should '",
"'never happen.'",
")",
"else",
":",
"parser",
".",
"error",
"(",
"'Not sure what to do. Consider --help'",
")",
"except",
"SSHEnvironmentError",
"as",
"err",
":",
"ap",
".",
"log",
"(",
"'SSHEnvironmentError \"%s\"\\n%s\\n'",
"%",
"(",
"err",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
")",
"sys",
".",
"exit",
"(",
"'authprogs: %s'",
"%",
"err",
")",
"except",
"ConfigError",
"as",
"err",
":",
"ap",
".",
"log",
"(",
"'ConfigError \"%s\"\\n%s\\n'",
"%",
"(",
"err",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
")",
"sys",
".",
"exit",
"(",
"'authprogs: %s'",
"%",
"err",
")",
"except",
"CommandRejected",
"as",
"err",
":",
"sys",
".",
"exit",
"(",
"'authprogs: %s'",
"%",
"err",
")",
"except",
"Exception",
"as",
"err",
":",
"if",
"ap",
":",
"ap",
".",
"log",
"(",
"'Unexpected exception: %s\\n%s\\n'",
"%",
"(",
"err",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
")",
"else",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"'Unexpected exception: %s\\n%s\\n'",
"%",
"(",
"err",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
")",
"sys",
".",
"exit",
"(",
"'authprogs experienced an unexpected exception.'",
")"
] | 36.694215 | 15.85124 |
def get_pipeline(self, project, pipeline_id, revision=None):
"""GetPipeline.
[Preview API]
:param str project: Project ID or project name
:param int pipeline_id:
:param int revision:
:rtype: :class:`<Pipeline> <azure.devops.v5_1.pipelines.models.Pipeline>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if pipeline_id is not None:
route_values['pipelineId'] = self._serialize.url('pipeline_id', pipeline_id, 'int')
query_parameters = {}
if revision is not None:
query_parameters['revision'] = self._serialize.query('revision', revision, 'int')
response = self._send(http_method='GET',
location_id='28e1305e-2afe-47bf-abaf-cbb0e6a91988',
version='5.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('Pipeline', response) | [
"def",
"get_pipeline",
"(",
"self",
",",
"project",
",",
"pipeline_id",
",",
"revision",
"=",
"None",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"project",
"is",
"not",
"None",
":",
"route_values",
"[",
"'project'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'project'",
",",
"project",
",",
"'str'",
")",
"if",
"pipeline_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'pipelineId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'pipeline_id'",
",",
"pipeline_id",
",",
"'int'",
")",
"query_parameters",
"=",
"{",
"}",
"if",
"revision",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'revision'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'revision'",
",",
"revision",
",",
"'int'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'GET'",
",",
"location_id",
"=",
"'28e1305e-2afe-47bf-abaf-cbb0e6a91988'",
",",
"version",
"=",
"'5.1-preview.1'",
",",
"route_values",
"=",
"route_values",
",",
"query_parameters",
"=",
"query_parameters",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'Pipeline'",
",",
"response",
")"
] | 49.545455 | 19.636364 |
def set_log_level(level):
"""
Sets the log level.
Lower log levels log more.
if level is 8, nothing is logged. If level is 0, everything is logged.
"""
from .._connect import main as _glconnect
unity = _glconnect.get_unity()
return unity.set_log_level(level) | [
"def",
"set_log_level",
"(",
"level",
")",
":",
"from",
".",
".",
"_connect",
"import",
"main",
"as",
"_glconnect",
"unity",
"=",
"_glconnect",
".",
"get_unity",
"(",
")",
"return",
"unity",
".",
"set_log_level",
"(",
"level",
")"
] | 31.333333 | 10 |
def do_annealing_poly(start:Number, end:Number, pct:float, degree:Number)->Number:
"Helper function for `anneal_poly`."
return end + (start-end) * (1-pct)**degree | [
"def",
"do_annealing_poly",
"(",
"start",
":",
"Number",
",",
"end",
":",
"Number",
",",
"pct",
":",
"float",
",",
"degree",
":",
"Number",
")",
"->",
"Number",
":",
"return",
"end",
"+",
"(",
"start",
"-",
"end",
")",
"*",
"(",
"1",
"-",
"pct",
")",
"**",
"degree"
] | 56 | 16 |
def condensed_coords_within(pop, n):
"""Return indices into a condensed distance matrix for all
pairwise comparisons within the given population.
Parameters
----------
pop : array_like, int
Indices of samples or haplotypes within the population.
n : int
Size of the square matrix (length of first or second dimension).
Returns
-------
indices : ndarray, int
"""
return [condensed_coords(i, j, n)
for i, j in itertools.combinations(sorted(pop), 2)] | [
"def",
"condensed_coords_within",
"(",
"pop",
",",
"n",
")",
":",
"return",
"[",
"condensed_coords",
"(",
"i",
",",
"j",
",",
"n",
")",
"for",
"i",
",",
"j",
"in",
"itertools",
".",
"combinations",
"(",
"sorted",
"(",
"pop",
")",
",",
"2",
")",
"]"
] | 26.578947 | 22.421053 |
def render_table(self, **kwargs):
"""Render the data as a html table"""
# Import here to avoid lxml import
try:
from pygal.table import Table
except ImportError:
raise ImportError('You must install lxml to use render table')
return Table(self).render(**kwargs) | [
"def",
"render_table",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# Import here to avoid lxml import",
"try",
":",
"from",
"pygal",
".",
"table",
"import",
"Table",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"'You must install lxml to use render table'",
")",
"return",
"Table",
"(",
"self",
")",
".",
"render",
"(",
"*",
"*",
"kwargs",
")"
] | 39.625 | 11 |
def find_data(folder):
""" Include everything in the folder """
for (path, directories, filenames) in os.walk(folder):
for filename in filenames:
yield os.path.join('..', path, filename) | [
"def",
"find_data",
"(",
"folder",
")",
":",
"for",
"(",
"path",
",",
"directories",
",",
"filenames",
")",
"in",
"os",
".",
"walk",
"(",
"folder",
")",
":",
"for",
"filename",
"in",
"filenames",
":",
"yield",
"os",
".",
"path",
".",
"join",
"(",
"'..'",
",",
"path",
",",
"filename",
")"
] | 42 | 10.8 |
def associate(self, model):
"""
Associate the model instance to the given parent.
:type model: orator.Model
:rtype: orator.Model
"""
self._parent.set_attribute(self._foreign_key, model.get_key())
self._parent.set_attribute(self._morph_type, model.get_morph_name())
return self._parent.set_relation(
self._relation, Result(model, self, self._parent)
) | [
"def",
"associate",
"(",
"self",
",",
"model",
")",
":",
"self",
".",
"_parent",
".",
"set_attribute",
"(",
"self",
".",
"_foreign_key",
",",
"model",
".",
"get_key",
"(",
")",
")",
"self",
".",
"_parent",
".",
"set_attribute",
"(",
"self",
".",
"_morph_type",
",",
"model",
".",
"get_morph_name",
"(",
")",
")",
"return",
"self",
".",
"_parent",
".",
"set_relation",
"(",
"self",
".",
"_relation",
",",
"Result",
"(",
"model",
",",
"self",
",",
"self",
".",
"_parent",
")",
")"
] | 30.285714 | 20.571429 |
def find_cell_end(self, lines):
"""Return position of end of cell, and position
of first line after cell, and whether there was an
explicit end of cell marker"""
if self.cell_type == 'markdown':
# Empty cell "" or ''
if len(self.markdown_marker) <= 2:
if len(lines) == 1 or _BLANK_LINE.match(lines[1]):
return 0, 2, True
return 0, 1, True
# Multi-line comment with triple quote
if len(self.markdown_marker) == 3:
for i, line in enumerate(lines):
if (i > 0 or line.strip() != self.markdown_marker) and line.rstrip().endswith(self.markdown_marker):
explicit_end_of_cell_marker = line.strip() == self.markdown_marker
if explicit_end_of_cell_marker:
end_of_cell = i
else:
end_of_cell = i + 1
if len(lines) <= i + 1 or _BLANK_LINE.match(
lines[i + 1]):
return end_of_cell, i + 2, explicit_end_of_cell_marker
return end_of_cell, i + 1, explicit_end_of_cell_marker
else:
# 20 # or more
for i, line in enumerate(lines[1:], 1):
if not line.startswith(self.comment):
if _BLANK_LINE.match(line):
return i, i + 1, False
return i, i, False
elif self.cell_type == 'code':
parser = StringParser('python')
for i, line in enumerate(lines):
if parser.is_quoted():
parser.read_line(line)
continue
if self.start_of_new_markdown_cell(line):
if i > 0 and _BLANK_LINE.match(lines[i - 1]):
return i - 1, i, False
return i, i, False
parser.read_line(line)
return len(lines), len(lines), False | [
"def",
"find_cell_end",
"(",
"self",
",",
"lines",
")",
":",
"if",
"self",
".",
"cell_type",
"==",
"'markdown'",
":",
"# Empty cell \"\" or ''",
"if",
"len",
"(",
"self",
".",
"markdown_marker",
")",
"<=",
"2",
":",
"if",
"len",
"(",
"lines",
")",
"==",
"1",
"or",
"_BLANK_LINE",
".",
"match",
"(",
"lines",
"[",
"1",
"]",
")",
":",
"return",
"0",
",",
"2",
",",
"True",
"return",
"0",
",",
"1",
",",
"True",
"# Multi-line comment with triple quote",
"if",
"len",
"(",
"self",
".",
"markdown_marker",
")",
"==",
"3",
":",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"lines",
")",
":",
"if",
"(",
"i",
">",
"0",
"or",
"line",
".",
"strip",
"(",
")",
"!=",
"self",
".",
"markdown_marker",
")",
"and",
"line",
".",
"rstrip",
"(",
")",
".",
"endswith",
"(",
"self",
".",
"markdown_marker",
")",
":",
"explicit_end_of_cell_marker",
"=",
"line",
".",
"strip",
"(",
")",
"==",
"self",
".",
"markdown_marker",
"if",
"explicit_end_of_cell_marker",
":",
"end_of_cell",
"=",
"i",
"else",
":",
"end_of_cell",
"=",
"i",
"+",
"1",
"if",
"len",
"(",
"lines",
")",
"<=",
"i",
"+",
"1",
"or",
"_BLANK_LINE",
".",
"match",
"(",
"lines",
"[",
"i",
"+",
"1",
"]",
")",
":",
"return",
"end_of_cell",
",",
"i",
"+",
"2",
",",
"explicit_end_of_cell_marker",
"return",
"end_of_cell",
",",
"i",
"+",
"1",
",",
"explicit_end_of_cell_marker",
"else",
":",
"# 20 # or more",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"lines",
"[",
"1",
":",
"]",
",",
"1",
")",
":",
"if",
"not",
"line",
".",
"startswith",
"(",
"self",
".",
"comment",
")",
":",
"if",
"_BLANK_LINE",
".",
"match",
"(",
"line",
")",
":",
"return",
"i",
",",
"i",
"+",
"1",
",",
"False",
"return",
"i",
",",
"i",
",",
"False",
"elif",
"self",
".",
"cell_type",
"==",
"'code'",
":",
"parser",
"=",
"StringParser",
"(",
"'python'",
")",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"lines",
")",
":",
"if",
"parser",
".",
"is_quoted",
"(",
")",
":",
"parser",
".",
"read_line",
"(",
"line",
")",
"continue",
"if",
"self",
".",
"start_of_new_markdown_cell",
"(",
"line",
")",
":",
"if",
"i",
">",
"0",
"and",
"_BLANK_LINE",
".",
"match",
"(",
"lines",
"[",
"i",
"-",
"1",
"]",
")",
":",
"return",
"i",
"-",
"1",
",",
"i",
",",
"False",
"return",
"i",
",",
"i",
",",
"False",
"parser",
".",
"read_line",
"(",
"line",
")",
"return",
"len",
"(",
"lines",
")",
",",
"len",
"(",
"lines",
")",
",",
"False"
] | 43.87234 | 15.93617 |
def visual_callback_2d(background, fig=None):
"""
Returns a callback than can be passed as the argument `iter_callback`
of `morphological_geodesic_active_contour` and
`morphological_chan_vese` for visualizing the evolution
of the levelsets. Only works for 2D images.
Parameters
----------
background : (M, N) array
Image to be plotted as the background of the visual evolution.
fig : matplotlib.figure.Figure
Figure where results will be drawn. If not given, a new figure
will be created.
Returns
-------
callback : Python function
A function that receives a levelset and updates the current plot
accordingly. This can be passed as the `iter_callback` argument of
`morphological_geodesic_active_contour` and
`morphological_chan_vese`.
"""
# Prepare the visual environment.
if fig is None:
fig = plt.figure()
fig.clf()
ax1 = fig.add_subplot(1, 2, 1)
ax1.imshow(background, cmap=plt.cm.gray)
ax2 = fig.add_subplot(1, 2, 2)
ax_u = ax2.imshow(np.zeros_like(background), vmin=0, vmax=1)
plt.pause(0.001)
def callback(levelset):
if ax1.collections:
del ax1.collections[0]
ax1.contour(levelset, [0.5], colors='r')
ax_u.set_data(levelset)
fig.canvas.draw()
plt.pause(0.001)
return callback | [
"def",
"visual_callback_2d",
"(",
"background",
",",
"fig",
"=",
"None",
")",
":",
"# Prepare the visual environment.",
"if",
"fig",
"is",
"None",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
")",
"fig",
".",
"clf",
"(",
")",
"ax1",
"=",
"fig",
".",
"add_subplot",
"(",
"1",
",",
"2",
",",
"1",
")",
"ax1",
".",
"imshow",
"(",
"background",
",",
"cmap",
"=",
"plt",
".",
"cm",
".",
"gray",
")",
"ax2",
"=",
"fig",
".",
"add_subplot",
"(",
"1",
",",
"2",
",",
"2",
")",
"ax_u",
"=",
"ax2",
".",
"imshow",
"(",
"np",
".",
"zeros_like",
"(",
"background",
")",
",",
"vmin",
"=",
"0",
",",
"vmax",
"=",
"1",
")",
"plt",
".",
"pause",
"(",
"0.001",
")",
"def",
"callback",
"(",
"levelset",
")",
":",
"if",
"ax1",
".",
"collections",
":",
"del",
"ax1",
".",
"collections",
"[",
"0",
"]",
"ax1",
".",
"contour",
"(",
"levelset",
",",
"[",
"0.5",
"]",
",",
"colors",
"=",
"'r'",
")",
"ax_u",
".",
"set_data",
"(",
"levelset",
")",
"fig",
".",
"canvas",
".",
"draw",
"(",
")",
"plt",
".",
"pause",
"(",
"0.001",
")",
"return",
"callback"
] | 29.913043 | 19.391304 |
def exit_actor():
"""Intentionally exit the current actor.
This function is used to disconnect an actor and exit the worker.
Raises:
Exception: An exception is raised if this is a driver or this
worker is not an actor.
"""
worker = ray.worker.global_worker
if worker.mode == ray.WORKER_MODE and not worker.actor_id.is_nil():
# Disconnect the worker from the raylet. The point of
# this is so that when the worker kills itself below, the
# raylet won't push an error message to the driver.
worker.raylet_client.disconnect()
ray.disconnect()
# Disconnect global state from GCS.
ray.global_state.disconnect()
sys.exit(0)
assert False, "This process should have terminated."
else:
raise Exception("exit_actor called on a non-actor worker.") | [
"def",
"exit_actor",
"(",
")",
":",
"worker",
"=",
"ray",
".",
"worker",
".",
"global_worker",
"if",
"worker",
".",
"mode",
"==",
"ray",
".",
"WORKER_MODE",
"and",
"not",
"worker",
".",
"actor_id",
".",
"is_nil",
"(",
")",
":",
"# Disconnect the worker from the raylet. The point of",
"# this is so that when the worker kills itself below, the",
"# raylet won't push an error message to the driver.",
"worker",
".",
"raylet_client",
".",
"disconnect",
"(",
")",
"ray",
".",
"disconnect",
"(",
")",
"# Disconnect global state from GCS.",
"ray",
".",
"global_state",
".",
"disconnect",
"(",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"assert",
"False",
",",
"\"This process should have terminated.\"",
"else",
":",
"raise",
"Exception",
"(",
"\"exit_actor called on a non-actor worker.\"",
")"
] | 38.409091 | 18.909091 |
def create_new_state_from_state_with_type(source_state, target_state_class):
"""The function duplicates/transforms a state to a new state type. If the source state type and the new state
type both are ContainerStates the new state will have not transitions to force the user to explicitly re-order
the logical flow according the paradigm of the new state type.
:param source_state: previous/original state that is to transform into a new state type (target_state_class)
:param target_state_class: the final state class type
:return:
"""
current_state_is_container = isinstance(source_state, ContainerState)
new_state_is_container = issubclass(target_state_class, ContainerState)
if current_state_is_container and new_state_is_container: # TRANSFORM from CONTAINER- TO CONTAINER-STATE
# by default all transitions are left out if the new and original state are container states
# -> because switch from Barrier, Preemptive or Hierarchy has always different rules
state_transitions = {}
state_start_state_id = None
logger.info("Type change from %s to %s" % (type(source_state).__name__, target_state_class.__name__))
# decider state is removed because it is unique for BarrierConcurrencyState
if isinstance(source_state, BarrierConcurrencyState):
source_state.remove_state(UNIQUE_DECIDER_STATE_ID, force=True)
assert UNIQUE_DECIDER_STATE_ID not in source_state.states
# separate state-elements from source state
data_flows = dict(source_state.data_flows)
source_state.data_flows = {}
input_data_ports = dict(source_state.input_data_ports)
output_data_ports = dict(source_state.output_data_ports)
scoped_variables = dict(source_state.scoped_variables)
income = source_state.income
outcomes = dict(source_state.outcomes)
source_state.input_data_ports = {}
source_state.output_data_ports = {}
source_state.scoped_variables = {}
source_state.transitions = {} # before remove of outcomes related transitions should be gone
source_state.income = Income()
source_state.outcomes = {}
states = dict(source_state.states)
# TODO check why next line can not be performed
# source_state.states = {}
new_state = target_state_class(name=source_state.name, state_id=source_state.state_id,
input_data_ports=input_data_ports,
output_data_ports=output_data_ports,
scoped_variables=scoped_variables,
income=income,
outcomes=outcomes,
transitions=state_transitions,
data_flows=data_flows,
states=states,
start_state_id=state_start_state_id)
else: # TRANSFORM from EXECUTION- TO CONTAINER-STATE or FROM CONTAINER- TO EXECUTION-STATE
# in case the new state is an execution state remove of child states (for observable notifications)
if current_state_is_container and issubclass(target_state_class, ExecutionState):
if isinstance(source_state, BarrierConcurrencyState):
source_state.remove_state(UNIQUE_DECIDER_STATE_ID, force=True)
assert UNIQUE_DECIDER_STATE_ID not in source_state.states
for state_id in list(source_state.states.keys()):
source_state.remove_state(state_id)
# separate state-elements from source state
input_data_ports = dict(source_state.input_data_ports)
output_data_ports = dict(source_state.output_data_ports)
income = source_state.income
outcomes = dict(source_state.outcomes)
source_state.input_data_ports = {}
source_state.output_data_ports = {}
source_state.income = Income()
source_state.outcomes = {}
new_state = target_state_class(name=source_state.name, state_id=source_state.state_id,
input_data_ports=input_data_ports,
output_data_ports=output_data_ports,
income=income, outcomes=outcomes)
if source_state.description is not None and len(source_state.description) > 0:
new_state.description = source_state.description
new_state.semantic_data = Vividict(source_state.semantic_data)
return new_state | [
"def",
"create_new_state_from_state_with_type",
"(",
"source_state",
",",
"target_state_class",
")",
":",
"current_state_is_container",
"=",
"isinstance",
"(",
"source_state",
",",
"ContainerState",
")",
"new_state_is_container",
"=",
"issubclass",
"(",
"target_state_class",
",",
"ContainerState",
")",
"if",
"current_state_is_container",
"and",
"new_state_is_container",
":",
"# TRANSFORM from CONTAINER- TO CONTAINER-STATE",
"# by default all transitions are left out if the new and original state are container states",
"# -> because switch from Barrier, Preemptive or Hierarchy has always different rules",
"state_transitions",
"=",
"{",
"}",
"state_start_state_id",
"=",
"None",
"logger",
".",
"info",
"(",
"\"Type change from %s to %s\"",
"%",
"(",
"type",
"(",
"source_state",
")",
".",
"__name__",
",",
"target_state_class",
".",
"__name__",
")",
")",
"# decider state is removed because it is unique for BarrierConcurrencyState",
"if",
"isinstance",
"(",
"source_state",
",",
"BarrierConcurrencyState",
")",
":",
"source_state",
".",
"remove_state",
"(",
"UNIQUE_DECIDER_STATE_ID",
",",
"force",
"=",
"True",
")",
"assert",
"UNIQUE_DECIDER_STATE_ID",
"not",
"in",
"source_state",
".",
"states",
"# separate state-elements from source state",
"data_flows",
"=",
"dict",
"(",
"source_state",
".",
"data_flows",
")",
"source_state",
".",
"data_flows",
"=",
"{",
"}",
"input_data_ports",
"=",
"dict",
"(",
"source_state",
".",
"input_data_ports",
")",
"output_data_ports",
"=",
"dict",
"(",
"source_state",
".",
"output_data_ports",
")",
"scoped_variables",
"=",
"dict",
"(",
"source_state",
".",
"scoped_variables",
")",
"income",
"=",
"source_state",
".",
"income",
"outcomes",
"=",
"dict",
"(",
"source_state",
".",
"outcomes",
")",
"source_state",
".",
"input_data_ports",
"=",
"{",
"}",
"source_state",
".",
"output_data_ports",
"=",
"{",
"}",
"source_state",
".",
"scoped_variables",
"=",
"{",
"}",
"source_state",
".",
"transitions",
"=",
"{",
"}",
"# before remove of outcomes related transitions should be gone",
"source_state",
".",
"income",
"=",
"Income",
"(",
")",
"source_state",
".",
"outcomes",
"=",
"{",
"}",
"states",
"=",
"dict",
"(",
"source_state",
".",
"states",
")",
"# TODO check why next line can not be performed",
"# source_state.states = {}",
"new_state",
"=",
"target_state_class",
"(",
"name",
"=",
"source_state",
".",
"name",
",",
"state_id",
"=",
"source_state",
".",
"state_id",
",",
"input_data_ports",
"=",
"input_data_ports",
",",
"output_data_ports",
"=",
"output_data_ports",
",",
"scoped_variables",
"=",
"scoped_variables",
",",
"income",
"=",
"income",
",",
"outcomes",
"=",
"outcomes",
",",
"transitions",
"=",
"state_transitions",
",",
"data_flows",
"=",
"data_flows",
",",
"states",
"=",
"states",
",",
"start_state_id",
"=",
"state_start_state_id",
")",
"else",
":",
"# TRANSFORM from EXECUTION- TO CONTAINER-STATE or FROM CONTAINER- TO EXECUTION-STATE",
"# in case the new state is an execution state remove of child states (for observable notifications)",
"if",
"current_state_is_container",
"and",
"issubclass",
"(",
"target_state_class",
",",
"ExecutionState",
")",
":",
"if",
"isinstance",
"(",
"source_state",
",",
"BarrierConcurrencyState",
")",
":",
"source_state",
".",
"remove_state",
"(",
"UNIQUE_DECIDER_STATE_ID",
",",
"force",
"=",
"True",
")",
"assert",
"UNIQUE_DECIDER_STATE_ID",
"not",
"in",
"source_state",
".",
"states",
"for",
"state_id",
"in",
"list",
"(",
"source_state",
".",
"states",
".",
"keys",
"(",
")",
")",
":",
"source_state",
".",
"remove_state",
"(",
"state_id",
")",
"# separate state-elements from source state",
"input_data_ports",
"=",
"dict",
"(",
"source_state",
".",
"input_data_ports",
")",
"output_data_ports",
"=",
"dict",
"(",
"source_state",
".",
"output_data_ports",
")",
"income",
"=",
"source_state",
".",
"income",
"outcomes",
"=",
"dict",
"(",
"source_state",
".",
"outcomes",
")",
"source_state",
".",
"input_data_ports",
"=",
"{",
"}",
"source_state",
".",
"output_data_ports",
"=",
"{",
"}",
"source_state",
".",
"income",
"=",
"Income",
"(",
")",
"source_state",
".",
"outcomes",
"=",
"{",
"}",
"new_state",
"=",
"target_state_class",
"(",
"name",
"=",
"source_state",
".",
"name",
",",
"state_id",
"=",
"source_state",
".",
"state_id",
",",
"input_data_ports",
"=",
"input_data_ports",
",",
"output_data_ports",
"=",
"output_data_ports",
",",
"income",
"=",
"income",
",",
"outcomes",
"=",
"outcomes",
")",
"if",
"source_state",
".",
"description",
"is",
"not",
"None",
"and",
"len",
"(",
"source_state",
".",
"description",
")",
">",
"0",
":",
"new_state",
".",
"description",
"=",
"source_state",
".",
"description",
"new_state",
".",
"semantic_data",
"=",
"Vividict",
"(",
"source_state",
".",
"semantic_data",
")",
"return",
"new_state"
] | 54.297619 | 27.559524 |
def merge_split_adjustments_with_overwrites(
self,
pre,
post,
overwrites,
requested_split_adjusted_columns
):
"""
Merge split adjustments with the dict containing overwrites.
Parameters
----------
pre : dict[str -> dict[int -> list]]
The adjustments that occur before the split-adjusted-asof-date.
post : dict[str -> dict[int -> list]]
The adjustments that occur after the split-adjusted-asof-date.
overwrites : dict[str -> dict[int -> list]]
The overwrites across all time. Adjustments will be merged into
this dictionary.
requested_split_adjusted_columns : list of str
List of names of split adjusted columns that are being requested.
"""
for column_name in requested_split_adjusted_columns:
# We can do a merge here because the timestamps in 'pre' and
# 'post' are guaranteed to not overlap.
if pre:
# Either empty or contains all columns.
for ts in pre[column_name]:
add_new_adjustments(
overwrites,
pre[column_name][ts],
column_name,
ts
)
if post:
# Either empty or contains all columns.
for ts in post[column_name]:
add_new_adjustments(
overwrites,
post[column_name][ts],
column_name,
ts
) | [
"def",
"merge_split_adjustments_with_overwrites",
"(",
"self",
",",
"pre",
",",
"post",
",",
"overwrites",
",",
"requested_split_adjusted_columns",
")",
":",
"for",
"column_name",
"in",
"requested_split_adjusted_columns",
":",
"# We can do a merge here because the timestamps in 'pre' and",
"# 'post' are guaranteed to not overlap.",
"if",
"pre",
":",
"# Either empty or contains all columns.",
"for",
"ts",
"in",
"pre",
"[",
"column_name",
"]",
":",
"add_new_adjustments",
"(",
"overwrites",
",",
"pre",
"[",
"column_name",
"]",
"[",
"ts",
"]",
",",
"column_name",
",",
"ts",
")",
"if",
"post",
":",
"# Either empty or contains all columns.",
"for",
"ts",
"in",
"post",
"[",
"column_name",
"]",
":",
"add_new_adjustments",
"(",
"overwrites",
",",
"post",
"[",
"column_name",
"]",
"[",
"ts",
"]",
",",
"column_name",
",",
"ts",
")"
] | 37.72093 | 15.72093 |
def infer_dm(self, m, s, ds):
"""Infer probable output from input x, y
"""
OptimizedInverseModel.infer_dm(self, ds)
if len(self.fmodel.dataset) == 0:
return [[0.0]*self.dim_out]
else:
_, index = self.fmodel.dataset.nn_dims(m, np.hstack((s, ds)), range(len(m)), range(self.dim_x, self.dim_x + self.dim_y), k=1)
guesses = [self.fmodel.dataset.get_dims(index[0], dims=range(len(m), self.dim_x))]
result = []
for g in guesses:
res = scipy.optimize.minimize(lambda dm:self._error_dm(m, dm, s), g,
args = (),
method = self.algo,
options = self.conf
)
d = self._error_dm(m, res.x, s)
result.append((d, res.x))
return [xi for fi, xi in sorted(result)][0] | [
"def",
"infer_dm",
"(",
"self",
",",
"m",
",",
"s",
",",
"ds",
")",
":",
"OptimizedInverseModel",
".",
"infer_dm",
"(",
"self",
",",
"ds",
")",
"if",
"len",
"(",
"self",
".",
"fmodel",
".",
"dataset",
")",
"==",
"0",
":",
"return",
"[",
"[",
"0.0",
"]",
"*",
"self",
".",
"dim_out",
"]",
"else",
":",
"_",
",",
"index",
"=",
"self",
".",
"fmodel",
".",
"dataset",
".",
"nn_dims",
"(",
"m",
",",
"np",
".",
"hstack",
"(",
"(",
"s",
",",
"ds",
")",
")",
",",
"range",
"(",
"len",
"(",
"m",
")",
")",
",",
"range",
"(",
"self",
".",
"dim_x",
",",
"self",
".",
"dim_x",
"+",
"self",
".",
"dim_y",
")",
",",
"k",
"=",
"1",
")",
"guesses",
"=",
"[",
"self",
".",
"fmodel",
".",
"dataset",
".",
"get_dims",
"(",
"index",
"[",
"0",
"]",
",",
"dims",
"=",
"range",
"(",
"len",
"(",
"m",
")",
",",
"self",
".",
"dim_x",
")",
")",
"]",
"result",
"=",
"[",
"]",
"for",
"g",
"in",
"guesses",
":",
"res",
"=",
"scipy",
".",
"optimize",
".",
"minimize",
"(",
"lambda",
"dm",
":",
"self",
".",
"_error_dm",
"(",
"m",
",",
"dm",
",",
"s",
")",
",",
"g",
",",
"args",
"=",
"(",
")",
",",
"method",
"=",
"self",
".",
"algo",
",",
"options",
"=",
"self",
".",
"conf",
")",
"d",
"=",
"self",
".",
"_error_dm",
"(",
"m",
",",
"res",
".",
"x",
",",
"s",
")",
"result",
".",
"append",
"(",
"(",
"d",
",",
"res",
".",
"x",
")",
")",
"return",
"[",
"xi",
"for",
"fi",
",",
"xi",
"in",
"sorted",
"(",
"result",
")",
"]",
"[",
"0",
"]"
] | 44.304348 | 22.173913 |
def stop(self):
"""Stop subtasks and let run() finish."""
self._stop_event.set()
if self.select_greenlet is not None:
self.select_greenlet.kill()
self.select_greenlet.get()
gevent.sleep() | [
"def",
"stop",
"(",
"self",
")",
":",
"self",
".",
"_stop_event",
".",
"set",
"(",
")",
"if",
"self",
".",
"select_greenlet",
"is",
"not",
"None",
":",
"self",
".",
"select_greenlet",
".",
"kill",
"(",
")",
"self",
".",
"select_greenlet",
".",
"get",
"(",
")",
"gevent",
".",
"sleep",
"(",
")"
] | 34.428571 | 8 |
def unstack(self, column_names, new_column_name=None):
"""
Concatenate values from one or two columns into one column, grouping by
all other columns. The resulting column could be of type list, array or
dictionary. If ``column_names`` is a numeric column, the result will be of
array.array type. If ``column_names`` is a non-numeric column, the new column
will be of list type. If ``column_names`` is a list of two columns, the new
column will be of dict type where the keys are taken from the first
column in the list.
Parameters
----------
column_names : str | [str, str]
The column(s) that is(are) to be concatenated.
If str, then collapsed column type is either array or list.
If [str, str], then collapsed column type is dict
new_column_name : str, optional
New column name. If not given, a name is generated automatically.
Returns
-------
out : SFrame
A new SFrame containing the grouped columns as well as the new
column.
See Also
--------
stack : The inverse of unstack.
groupby : ``unstack`` is a special version of ``groupby`` that uses the
:mod:`~turicreate.aggregate.CONCAT` aggregator
Notes
-----
- There is no guarantee the resulting SFrame maintains the same order as
the original SFrame.
- Missing values are maintained during unstack.
- When unstacking into a dictionary, if there is more than one instance
of a given key for a particular group, an arbitrary value is selected.
Examples
--------
>>> sf = turicreate.SFrame({'count':[4, 2, 1, 1, 2, None],
... 'topic':['cat', 'cat', 'dog', 'elephant', 'elephant', 'fish'],
... 'word':['a', 'c', 'c', 'a', 'b', None]})
>>> sf.unstack(column_names=['word', 'count'], new_column_name='words')
+----------+------------------+
| topic | words |
+----------+------------------+
| elephant | {'a': 1, 'b': 2} |
| dog | {'c': 1} |
| cat | {'a': 4, 'c': 2} |
| fish | None |
+----------+------------------+
[4 rows x 2 columns]
>>> sf = turicreate.SFrame({'friend': [2, 3, 4, 5, 6, 4, 5, 2, 3],
... 'user': [1, 1, 1, 2, 2, 2, 3, 4, 4]})
>>> sf.unstack('friend', new_column_name='new name')
+------+-----------+
| user | new name |
+------+-----------+
| 3 | [5] |
| 1 | [2, 3, 4] |
| 2 | [6, 4, 5] |
| 4 | [2, 3] |
+------+-----------+
[4 rows x 2 columns]
"""
if (type(column_names) != str and len(column_names) != 2):
raise TypeError("'column_names' parameter has to be either a string or a list of two strings.")
with cython_context():
if type(column_names) == str:
key_columns = [i for i in self.column_names() if i != column_names]
if new_column_name is not None:
return self.groupby(key_columns, {new_column_name : aggregate.CONCAT(column_names)})
else:
return self.groupby(key_columns, aggregate.CONCAT(column_names))
elif len(column_names) == 2:
key_columns = [i for i in self.column_names() if i not in column_names]
if new_column_name is not None:
return self.groupby(key_columns, {new_column_name: aggregate.CONCAT(column_names[0], column_names[1])})
else:
return self.groupby(key_columns, aggregate.CONCAT(column_names[0], column_names[1])) | [
"def",
"unstack",
"(",
"self",
",",
"column_names",
",",
"new_column_name",
"=",
"None",
")",
":",
"if",
"(",
"type",
"(",
"column_names",
")",
"!=",
"str",
"and",
"len",
"(",
"column_names",
")",
"!=",
"2",
")",
":",
"raise",
"TypeError",
"(",
"\"'column_names' parameter has to be either a string or a list of two strings.\"",
")",
"with",
"cython_context",
"(",
")",
":",
"if",
"type",
"(",
"column_names",
")",
"==",
"str",
":",
"key_columns",
"=",
"[",
"i",
"for",
"i",
"in",
"self",
".",
"column_names",
"(",
")",
"if",
"i",
"!=",
"column_names",
"]",
"if",
"new_column_name",
"is",
"not",
"None",
":",
"return",
"self",
".",
"groupby",
"(",
"key_columns",
",",
"{",
"new_column_name",
":",
"aggregate",
".",
"CONCAT",
"(",
"column_names",
")",
"}",
")",
"else",
":",
"return",
"self",
".",
"groupby",
"(",
"key_columns",
",",
"aggregate",
".",
"CONCAT",
"(",
"column_names",
")",
")",
"elif",
"len",
"(",
"column_names",
")",
"==",
"2",
":",
"key_columns",
"=",
"[",
"i",
"for",
"i",
"in",
"self",
".",
"column_names",
"(",
")",
"if",
"i",
"not",
"in",
"column_names",
"]",
"if",
"new_column_name",
"is",
"not",
"None",
":",
"return",
"self",
".",
"groupby",
"(",
"key_columns",
",",
"{",
"new_column_name",
":",
"aggregate",
".",
"CONCAT",
"(",
"column_names",
"[",
"0",
"]",
",",
"column_names",
"[",
"1",
"]",
")",
"}",
")",
"else",
":",
"return",
"self",
".",
"groupby",
"(",
"key_columns",
",",
"aggregate",
".",
"CONCAT",
"(",
"column_names",
"[",
"0",
"]",
",",
"column_names",
"[",
"1",
"]",
")",
")"
] | 43.363636 | 25.090909 |
def registerAddon(cls, name, addon, force=False):
"""
Registers the inputted addon to the class.
:param name | <str>
addon | <variant>
"""
prop = '_{0}__addons'.format(cls.__name__)
cmds = getattr(cls, prop, {})
if name in cmds and not force:
raise errors.AddonAlreadyExists(cls, name, addon)
cmds[name] = addon
try:
if issubclass(addon, cls):
setattr(addon, '_{0}__addonName'.format(addon.__name__), name)
except StandardError:
pass
setattr(cls, prop, cmds) | [
"def",
"registerAddon",
"(",
"cls",
",",
"name",
",",
"addon",
",",
"force",
"=",
"False",
")",
":",
"prop",
"=",
"'_{0}__addons'",
".",
"format",
"(",
"cls",
".",
"__name__",
")",
"cmds",
"=",
"getattr",
"(",
"cls",
",",
"prop",
",",
"{",
"}",
")",
"if",
"name",
"in",
"cmds",
"and",
"not",
"force",
":",
"raise",
"errors",
".",
"AddonAlreadyExists",
"(",
"cls",
",",
"name",
",",
"addon",
")",
"cmds",
"[",
"name",
"]",
"=",
"addon",
"try",
":",
"if",
"issubclass",
"(",
"addon",
",",
"cls",
")",
":",
"setattr",
"(",
"addon",
",",
"'_{0}__addonName'",
".",
"format",
"(",
"addon",
".",
"__name__",
")",
",",
"name",
")",
"except",
"StandardError",
":",
"pass",
"setattr",
"(",
"cls",
",",
"prop",
",",
"cmds",
")"
] | 29.52381 | 16.095238 |
def draw_geoscale(ax, minx=0, maxx=175):
"""
Draw geological epoch on million year ago (mya) scale.
"""
a, b = .1, .6 # Correspond to 200mya and 0mya
def cv(x): return b - (x - b) / (maxx - minx) * (b - a)
ax.plot((a, b), (.5, .5), "k-")
tick = .015
for mya in xrange(maxx - 25, 0, -25):
p = cv(mya)
ax.plot((p, p), (.5, .5 - tick), "k-")
ax.text(p, .5 - 2.5 * tick, str(mya), ha="center", va="center")
ax.text((a + b) / 2, .5 - 5 * tick, "Time before present (million years)",
ha="center", va="center")
# Source:
# http://www.weston.org/schools/ms/biologyweb/evolution/handouts/GSAchron09.jpg
Geo = (("Neogene", 2.6, 23.0, "#fee400"),
("Paleogene", 23.0, 65.5, "#ff9a65"),
("Cretaceous", 65.5, 145.5, "#80ff40"),
("Jurassic", 145.5, 201.6, "#33fff3"))
h = .05
for era, start, end, color in Geo:
start, end = cv(start), cv(end)
end = max(a, end)
p = Rectangle((end, .5 + tick / 2), abs(start - end),
h, lw=1, ec="w", fc=color)
ax.text((start + end) / 2, .5 + (tick + h) / 2, era,
ha="center", va="center", size=9)
ax.add_patch(p) | [
"def",
"draw_geoscale",
"(",
"ax",
",",
"minx",
"=",
"0",
",",
"maxx",
"=",
"175",
")",
":",
"a",
",",
"b",
"=",
".1",
",",
".6",
"# Correspond to 200mya and 0mya",
"def",
"cv",
"(",
"x",
")",
":",
"return",
"b",
"-",
"(",
"x",
"-",
"b",
")",
"/",
"(",
"maxx",
"-",
"minx",
")",
"*",
"(",
"b",
"-",
"a",
")",
"ax",
".",
"plot",
"(",
"(",
"a",
",",
"b",
")",
",",
"(",
".5",
",",
".5",
")",
",",
"\"k-\"",
")",
"tick",
"=",
".015",
"for",
"mya",
"in",
"xrange",
"(",
"maxx",
"-",
"25",
",",
"0",
",",
"-",
"25",
")",
":",
"p",
"=",
"cv",
"(",
"mya",
")",
"ax",
".",
"plot",
"(",
"(",
"p",
",",
"p",
")",
",",
"(",
".5",
",",
".5",
"-",
"tick",
")",
",",
"\"k-\"",
")",
"ax",
".",
"text",
"(",
"p",
",",
".5",
"-",
"2.5",
"*",
"tick",
",",
"str",
"(",
"mya",
")",
",",
"ha",
"=",
"\"center\"",
",",
"va",
"=",
"\"center\"",
")",
"ax",
".",
"text",
"(",
"(",
"a",
"+",
"b",
")",
"/",
"2",
",",
".5",
"-",
"5",
"*",
"tick",
",",
"\"Time before present (million years)\"",
",",
"ha",
"=",
"\"center\"",
",",
"va",
"=",
"\"center\"",
")",
"# Source:",
"# http://www.weston.org/schools/ms/biologyweb/evolution/handouts/GSAchron09.jpg",
"Geo",
"=",
"(",
"(",
"\"Neogene\"",
",",
"2.6",
",",
"23.0",
",",
"\"#fee400\"",
")",
",",
"(",
"\"Paleogene\"",
",",
"23.0",
",",
"65.5",
",",
"\"#ff9a65\"",
")",
",",
"(",
"\"Cretaceous\"",
",",
"65.5",
",",
"145.5",
",",
"\"#80ff40\"",
")",
",",
"(",
"\"Jurassic\"",
",",
"145.5",
",",
"201.6",
",",
"\"#33fff3\"",
")",
")",
"h",
"=",
".05",
"for",
"era",
",",
"start",
",",
"end",
",",
"color",
"in",
"Geo",
":",
"start",
",",
"end",
"=",
"cv",
"(",
"start",
")",
",",
"cv",
"(",
"end",
")",
"end",
"=",
"max",
"(",
"a",
",",
"end",
")",
"p",
"=",
"Rectangle",
"(",
"(",
"end",
",",
".5",
"+",
"tick",
"/",
"2",
")",
",",
"abs",
"(",
"start",
"-",
"end",
")",
",",
"h",
",",
"lw",
"=",
"1",
",",
"ec",
"=",
"\"w\"",
",",
"fc",
"=",
"color",
")",
"ax",
".",
"text",
"(",
"(",
"start",
"+",
"end",
")",
"/",
"2",
",",
".5",
"+",
"(",
"tick",
"+",
"h",
")",
"/",
"2",
",",
"era",
",",
"ha",
"=",
"\"center\"",
",",
"va",
"=",
"\"center\"",
",",
"size",
"=",
"9",
")",
"ax",
".",
"add_patch",
"(",
"p",
")"
] | 40.166667 | 14.7 |
def create_factories(fs_provider, task_problem_types, hook_manager=None, course_class=Course, task_class=Task):
"""
Shorthand for creating Factories
:param fs_provider: A FileSystemProvider leading to the courses
:param hook_manager: an Hook Manager instance. If None, a new Hook Manager is created
:param course_class:
:param task_class:
:return: a tuple with two objects: the first being of type CourseFactory, the second of type TaskFactory
"""
if hook_manager is None:
hook_manager = HookManager()
task_factory = TaskFactory(fs_provider, hook_manager, task_problem_types, task_class)
return CourseFactory(fs_provider, task_factory, hook_manager, course_class), task_factory | [
"def",
"create_factories",
"(",
"fs_provider",
",",
"task_problem_types",
",",
"hook_manager",
"=",
"None",
",",
"course_class",
"=",
"Course",
",",
"task_class",
"=",
"Task",
")",
":",
"if",
"hook_manager",
"is",
"None",
":",
"hook_manager",
"=",
"HookManager",
"(",
")",
"task_factory",
"=",
"TaskFactory",
"(",
"fs_provider",
",",
"hook_manager",
",",
"task_problem_types",
",",
"task_class",
")",
"return",
"CourseFactory",
"(",
"fs_provider",
",",
"task_factory",
",",
"hook_manager",
",",
"course_class",
")",
",",
"task_factory"
] | 51.214286 | 29.357143 |
def nfa_to_dot(nfa: dict, name: str, path: str = './'):
""" Generates a DOT file and a relative SVG image in **path**
folder of the input NFA using graphviz library.
:param dict nfa: input NFA;
:param str name: string with the name of the output file;
:param str path: path where to save the DOT/SVG files (default:
working directory).
"""
g = graphviz.Digraph(format='svg')
fakes = []
for i in range(len(nfa['initial_states'])):
fakes.append('fake' + str(i))
g.node('fake' + str(i), style='invisible')
for state in nfa['states']:
if state in nfa['initial_states']:
if state in nfa['accepting_states']:
g.node(str(state), root='true',
shape='doublecircle')
else:
g.node(str(state), root='true')
elif state in nfa['accepting_states']:
g.node(str(state), shape='doublecircle')
else:
g.node(str(state))
for initial_state in nfa['initial_states']:
g.edge(fakes.pop(), str(initial_state), style='bold')
for transition in nfa['transitions']:
for destination in nfa['transitions'][transition]:
g.edge(str(transition[0]), str(destination),
label=transition[1])
g.render(filename=os.path.join(path, name + '.dot')) | [
"def",
"nfa_to_dot",
"(",
"nfa",
":",
"dict",
",",
"name",
":",
"str",
",",
"path",
":",
"str",
"=",
"'./'",
")",
":",
"g",
"=",
"graphviz",
".",
"Digraph",
"(",
"format",
"=",
"'svg'",
")",
"fakes",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"nfa",
"[",
"'initial_states'",
"]",
")",
")",
":",
"fakes",
".",
"append",
"(",
"'fake'",
"+",
"str",
"(",
"i",
")",
")",
"g",
".",
"node",
"(",
"'fake'",
"+",
"str",
"(",
"i",
")",
",",
"style",
"=",
"'invisible'",
")",
"for",
"state",
"in",
"nfa",
"[",
"'states'",
"]",
":",
"if",
"state",
"in",
"nfa",
"[",
"'initial_states'",
"]",
":",
"if",
"state",
"in",
"nfa",
"[",
"'accepting_states'",
"]",
":",
"g",
".",
"node",
"(",
"str",
"(",
"state",
")",
",",
"root",
"=",
"'true'",
",",
"shape",
"=",
"'doublecircle'",
")",
"else",
":",
"g",
".",
"node",
"(",
"str",
"(",
"state",
")",
",",
"root",
"=",
"'true'",
")",
"elif",
"state",
"in",
"nfa",
"[",
"'accepting_states'",
"]",
":",
"g",
".",
"node",
"(",
"str",
"(",
"state",
")",
",",
"shape",
"=",
"'doublecircle'",
")",
"else",
":",
"g",
".",
"node",
"(",
"str",
"(",
"state",
")",
")",
"for",
"initial_state",
"in",
"nfa",
"[",
"'initial_states'",
"]",
":",
"g",
".",
"edge",
"(",
"fakes",
".",
"pop",
"(",
")",
",",
"str",
"(",
"initial_state",
")",
",",
"style",
"=",
"'bold'",
")",
"for",
"transition",
"in",
"nfa",
"[",
"'transitions'",
"]",
":",
"for",
"destination",
"in",
"nfa",
"[",
"'transitions'",
"]",
"[",
"transition",
"]",
":",
"g",
".",
"edge",
"(",
"str",
"(",
"transition",
"[",
"0",
"]",
")",
",",
"str",
"(",
"destination",
")",
",",
"label",
"=",
"transition",
"[",
"1",
"]",
")",
"g",
".",
"render",
"(",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"name",
"+",
"'.dot'",
")",
")"
] | 37.166667 | 14.611111 |
def set_orders(self, object_pks):
"""
Perform a mass update of sort_orders across the full queryset.
Accepts a list, object_pks, of the intended order for the objects.
Works as follows:
- Compile a list of all sort orders in the queryset. Leave out anything that
isn't in the object_pks list - this deals with pagination and any
inconsistencies.
- Get the maximum among all model object sort orders. Update the queryset to add
it to all the existing sort order values. This lifts them 'out of the way' of
unique_together clashes when setting the intended sort orders.
- Set the sort order on each object. Use only sort_order values that the objects
had before calling this method, so they get rearranged in place.
Performs O(n) queries.
"""
objects_to_sort = self.filter(pk__in=object_pks)
max_value = self.model.objects.all().aggregate(
models.Max('sort_order')
)['sort_order__max']
# Call list() on the values right away, so they don't get affected by the
# update() later (since values_list() is lazy).
orders = list(objects_to_sort.values_list('sort_order', flat=True))
# Check there are no unrecognised entries in the object_pks list. If so,
# throw an error. We only have to check that they're the same length because
# orders is built using only entries in object_pks, and all the pks are unique,
# so if their lengths are the same, the elements must match up exactly.
if len(orders) != len(object_pks):
pks = set(objects_to_sort.values_list('pk', flat=True))
message = 'The following object_pks are not in this queryset: {}'.format(
[pk for pk in object_pks if pk not in pks]
)
raise TypeError(message)
with transaction.atomic():
objects_to_sort.update(sort_order=models.F('sort_order') + max_value)
for pk, order in zip(object_pks, orders):
# Use update() to save a query per item and dodge the insertion sort
# code in save().
self.filter(pk=pk).update(sort_order=order)
# Return the operated-on queryset for convenience.
return objects_to_sort | [
"def",
"set_orders",
"(",
"self",
",",
"object_pks",
")",
":",
"objects_to_sort",
"=",
"self",
".",
"filter",
"(",
"pk__in",
"=",
"object_pks",
")",
"max_value",
"=",
"self",
".",
"model",
".",
"objects",
".",
"all",
"(",
")",
".",
"aggregate",
"(",
"models",
".",
"Max",
"(",
"'sort_order'",
")",
")",
"[",
"'sort_order__max'",
"]",
"# Call list() on the values right away, so they don't get affected by the",
"# update() later (since values_list() is lazy).",
"orders",
"=",
"list",
"(",
"objects_to_sort",
".",
"values_list",
"(",
"'sort_order'",
",",
"flat",
"=",
"True",
")",
")",
"# Check there are no unrecognised entries in the object_pks list. If so,",
"# throw an error. We only have to check that they're the same length because",
"# orders is built using only entries in object_pks, and all the pks are unique,",
"# so if their lengths are the same, the elements must match up exactly.",
"if",
"len",
"(",
"orders",
")",
"!=",
"len",
"(",
"object_pks",
")",
":",
"pks",
"=",
"set",
"(",
"objects_to_sort",
".",
"values_list",
"(",
"'pk'",
",",
"flat",
"=",
"True",
")",
")",
"message",
"=",
"'The following object_pks are not in this queryset: {}'",
".",
"format",
"(",
"[",
"pk",
"for",
"pk",
"in",
"object_pks",
"if",
"pk",
"not",
"in",
"pks",
"]",
")",
"raise",
"TypeError",
"(",
"message",
")",
"with",
"transaction",
".",
"atomic",
"(",
")",
":",
"objects_to_sort",
".",
"update",
"(",
"sort_order",
"=",
"models",
".",
"F",
"(",
"'sort_order'",
")",
"+",
"max_value",
")",
"for",
"pk",
",",
"order",
"in",
"zip",
"(",
"object_pks",
",",
"orders",
")",
":",
"# Use update() to save a query per item and dodge the insertion sort",
"# code in save().",
"self",
".",
"filter",
"(",
"pk",
"=",
"pk",
")",
".",
"update",
"(",
"sort_order",
"=",
"order",
")",
"# Return the operated-on queryset for convenience.",
"return",
"objects_to_sort"
] | 51.044444 | 26.377778 |
def _process_thread(self, client):
"""Process a single client.
Args:
client: GRR client object to act on.
"""
file_list = self.files
if not file_list:
return
print('Filefinder to collect {0:d} items'.format(len(file_list)))
flow_action = flows_pb2.FileFinderAction(
action_type=flows_pb2.FileFinderAction.DOWNLOAD)
flow_args = flows_pb2.FileFinderArgs(
paths=file_list,
action=flow_action,)
flow_id = self._launch_flow(client, 'FileFinder', flow_args)
self._await_flow(client, flow_id)
collected_flow_data = self._download_files(client, flow_id)
if collected_flow_data:
print('{0!s}: Downloaded: {1:s}'.format(flow_id, collected_flow_data))
fqdn = client.data.os_info.fqdn.lower()
self.state.output.append((fqdn, collected_flow_data)) | [
"def",
"_process_thread",
"(",
"self",
",",
"client",
")",
":",
"file_list",
"=",
"self",
".",
"files",
"if",
"not",
"file_list",
":",
"return",
"print",
"(",
"'Filefinder to collect {0:d} items'",
".",
"format",
"(",
"len",
"(",
"file_list",
")",
")",
")",
"flow_action",
"=",
"flows_pb2",
".",
"FileFinderAction",
"(",
"action_type",
"=",
"flows_pb2",
".",
"FileFinderAction",
".",
"DOWNLOAD",
")",
"flow_args",
"=",
"flows_pb2",
".",
"FileFinderArgs",
"(",
"paths",
"=",
"file_list",
",",
"action",
"=",
"flow_action",
",",
")",
"flow_id",
"=",
"self",
".",
"_launch_flow",
"(",
"client",
",",
"'FileFinder'",
",",
"flow_args",
")",
"self",
".",
"_await_flow",
"(",
"client",
",",
"flow_id",
")",
"collected_flow_data",
"=",
"self",
".",
"_download_files",
"(",
"client",
",",
"flow_id",
")",
"if",
"collected_flow_data",
":",
"print",
"(",
"'{0!s}: Downloaded: {1:s}'",
".",
"format",
"(",
"flow_id",
",",
"collected_flow_data",
")",
")",
"fqdn",
"=",
"client",
".",
"data",
".",
"os_info",
".",
"fqdn",
".",
"lower",
"(",
")",
"self",
".",
"state",
".",
"output",
".",
"append",
"(",
"(",
"fqdn",
",",
"collected_flow_data",
")",
")"
] | 35.478261 | 16.608696 |
def _OpenFileObject(self, path_spec):
"""Opens the file-like object defined by path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
pysmraw.handle: a file-like object or None.
Raises:
PathSpecError: if the path specification is invalid.
"""
if not path_spec.HasParent():
raise errors.PathSpecError(
'Unsupported path specification without parent.')
parent_path_spec = path_spec.parent
file_system = resolver.Resolver.OpenFileSystem(
parent_path_spec, resolver_context=self._resolver_context)
# Note that we cannot use pysmraw's glob function since it does not
# handle the file system abstraction dfvfs provides.
segment_file_path_specs = raw.RawGlobPathSpec(file_system, path_spec)
if not segment_file_path_specs:
return None
if parent_path_spec.IsSystemLevel():
# Typically the file-like object cache should have room for 127 items.
self._resolver_context.SetMaximumNumberOfFileObjects(
len(segment_file_path_specs) + 127)
file_objects = []
for segment_file_path_spec in segment_file_path_specs:
file_object = resolver.Resolver.OpenFileObject(
segment_file_path_spec, resolver_context=self._resolver_context)
file_objects.append(file_object)
raw_handle = pysmraw.handle()
raw_handle.open_file_objects(file_objects)
return raw_handle | [
"def",
"_OpenFileObject",
"(",
"self",
",",
"path_spec",
")",
":",
"if",
"not",
"path_spec",
".",
"HasParent",
"(",
")",
":",
"raise",
"errors",
".",
"PathSpecError",
"(",
"'Unsupported path specification without parent.'",
")",
"parent_path_spec",
"=",
"path_spec",
".",
"parent",
"file_system",
"=",
"resolver",
".",
"Resolver",
".",
"OpenFileSystem",
"(",
"parent_path_spec",
",",
"resolver_context",
"=",
"self",
".",
"_resolver_context",
")",
"# Note that we cannot use pysmraw's glob function since it does not",
"# handle the file system abstraction dfvfs provides.",
"segment_file_path_specs",
"=",
"raw",
".",
"RawGlobPathSpec",
"(",
"file_system",
",",
"path_spec",
")",
"if",
"not",
"segment_file_path_specs",
":",
"return",
"None",
"if",
"parent_path_spec",
".",
"IsSystemLevel",
"(",
")",
":",
"# Typically the file-like object cache should have room for 127 items.",
"self",
".",
"_resolver_context",
".",
"SetMaximumNumberOfFileObjects",
"(",
"len",
"(",
"segment_file_path_specs",
")",
"+",
"127",
")",
"file_objects",
"=",
"[",
"]",
"for",
"segment_file_path_spec",
"in",
"segment_file_path_specs",
":",
"file_object",
"=",
"resolver",
".",
"Resolver",
".",
"OpenFileObject",
"(",
"segment_file_path_spec",
",",
"resolver_context",
"=",
"self",
".",
"_resolver_context",
")",
"file_objects",
".",
"append",
"(",
"file_object",
")",
"raw_handle",
"=",
"pysmraw",
".",
"handle",
"(",
")",
"raw_handle",
".",
"open_file_objects",
"(",
"file_objects",
")",
"return",
"raw_handle"
] | 33.926829 | 20.536585 |
def dict_matches_params_deep(params_dct, dct):
"""
Filters deeply by comparing dct to filter_dct's value at each depth. Whenever a mismatch occurs the whole
thing returns false
:param params_dct: dict matching any portion of dct. E.g. filter_dct = {foo: {bar: 1}} would allow
{foo: {bar: 1, car: 2}} to pass, {foo: {bar: 2}} would fail, {goo: ...} would fail
:param dct: Dict for deep processing
:return: True if all pass else false
"""
def recurse_if_param_exists(params, key, value):
"""
If a param[key] exists, recurse. Otherwise return True since there is no param to contest value
:param params:
:param key:
:param value:
:return:
"""
return dict_matches_params_deep(
prop(key, params),
value
) if has(key, params) else True
def recurse_if_array_param_exists(params, index, value):
"""
If a param[key] exists, recurse. Otherwise return True since there is no param to contest value
:param params:
:param index:
:param value:
:return:
"""
return dict_matches_params_deep(
params[index],
value
) if isinstance((list, tuple), params_dct) and index < length(params_dct) else True
if isinstance(dict, dct):
# Filter out keys and then recurse on each value
return all_pass_dict(
# Recurse on each value if there is a corresponding filter_dct[key]. If not we pass
lambda key, value: recurse_if_param_exists(params_dct, key, value),
# We shallow merge, giving dct priority with (hopefully) unmatchable values
merge(map_with_obj(lambda k, v: 1 / (-e * pi), params_dct), dct)
)
if isinstance((list, tuple), dct):
if isinstance((list, tuple), params_dct) and length(dct) < length(params_dct):
# if there are more param items then dct items fail
return False
# run map_deep on each value
return all(map(
lambda ivalue: recurse_if_array_param_exists(params_dct, *ivalue),
enumerate(dct)
))
# scalar. Not that anything not truthy, False, None, 0, are considered equal
return params_dct == dct | [
"def",
"dict_matches_params_deep",
"(",
"params_dct",
",",
"dct",
")",
":",
"def",
"recurse_if_param_exists",
"(",
"params",
",",
"key",
",",
"value",
")",
":",
"\"\"\"\n If a param[key] exists, recurse. Otherwise return True since there is no param to contest value\n :param params:\n :param key:\n :param value:\n :return:\n \"\"\"",
"return",
"dict_matches_params_deep",
"(",
"prop",
"(",
"key",
",",
"params",
")",
",",
"value",
")",
"if",
"has",
"(",
"key",
",",
"params",
")",
"else",
"True",
"def",
"recurse_if_array_param_exists",
"(",
"params",
",",
"index",
",",
"value",
")",
":",
"\"\"\"\n If a param[key] exists, recurse. Otherwise return True since there is no param to contest value\n :param params:\n :param index:\n :param value:\n :return:\n \"\"\"",
"return",
"dict_matches_params_deep",
"(",
"params",
"[",
"index",
"]",
",",
"value",
")",
"if",
"isinstance",
"(",
"(",
"list",
",",
"tuple",
")",
",",
"params_dct",
")",
"and",
"index",
"<",
"length",
"(",
"params_dct",
")",
"else",
"True",
"if",
"isinstance",
"(",
"dict",
",",
"dct",
")",
":",
"# Filter out keys and then recurse on each value",
"return",
"all_pass_dict",
"(",
"# Recurse on each value if there is a corresponding filter_dct[key]. If not we pass",
"lambda",
"key",
",",
"value",
":",
"recurse_if_param_exists",
"(",
"params_dct",
",",
"key",
",",
"value",
")",
",",
"# We shallow merge, giving dct priority with (hopefully) unmatchable values",
"merge",
"(",
"map_with_obj",
"(",
"lambda",
"k",
",",
"v",
":",
"1",
"/",
"(",
"-",
"e",
"*",
"pi",
")",
",",
"params_dct",
")",
",",
"dct",
")",
")",
"if",
"isinstance",
"(",
"(",
"list",
",",
"tuple",
")",
",",
"dct",
")",
":",
"if",
"isinstance",
"(",
"(",
"list",
",",
"tuple",
")",
",",
"params_dct",
")",
"and",
"length",
"(",
"dct",
")",
"<",
"length",
"(",
"params_dct",
")",
":",
"# if there are more param items then dct items fail",
"return",
"False",
"# run map_deep on each value",
"return",
"all",
"(",
"map",
"(",
"lambda",
"ivalue",
":",
"recurse_if_array_param_exists",
"(",
"params_dct",
",",
"*",
"ivalue",
")",
",",
"enumerate",
"(",
"dct",
")",
")",
")",
"# scalar. Not that anything not truthy, False, None, 0, are considered equal",
"return",
"params_dct",
"==",
"dct"
] | 40 | 23.178571 |
def _format_operation(operation, parameters=None):
"""Formats parameters in operation in way BigQuery expects.
:type: str
:param operation: A Google BigQuery query string.
:type: Mapping[str, Any] or Sequence[Any]
:param parameters: Optional parameter values.
:rtype: str
:returns: A formatted query string.
:raises: :class:`~google.cloud.bigquery.dbapi.ProgrammingError`
if a parameter used in the operation is not found in the
``parameters`` argument.
"""
if parameters is None:
return operation
if isinstance(parameters, collections_abc.Mapping):
return _format_operation_dict(operation, parameters)
return _format_operation_list(operation, parameters) | [
"def",
"_format_operation",
"(",
"operation",
",",
"parameters",
"=",
"None",
")",
":",
"if",
"parameters",
"is",
"None",
":",
"return",
"operation",
"if",
"isinstance",
"(",
"parameters",
",",
"collections_abc",
".",
"Mapping",
")",
":",
"return",
"_format_operation_dict",
"(",
"operation",
",",
"parameters",
")",
"return",
"_format_operation_list",
"(",
"operation",
",",
"parameters",
")"
] | 32.681818 | 19.5 |
def htmIndex(ra,dec,htm_level=3):
"""Compute htm index of htm_level at position ra,dec"""
import re
if os.uname()[0] == "Linux": javabin = '/opt/java2/bin/java '
htm_level = htm_level
verc_htm_cmd = javabin+'-classpath /usr/cadc/misc/htm/htmIndex.jar edu.jhu.htm.app.lookup %s %s %s' % (htm_level, ra, dec)
for result in os.popen( verc_htm_cmd ).readlines():
result = result[:-1]
if re.search("ID/Name cc", result):
(void, coord ) = result.split("=")
(void, junk, htm_index) = coord.split(" ")
return htm_index | [
"def",
"htmIndex",
"(",
"ra",
",",
"dec",
",",
"htm_level",
"=",
"3",
")",
":",
"import",
"re",
"if",
"os",
".",
"uname",
"(",
")",
"[",
"0",
"]",
"==",
"\"Linux\"",
":",
"javabin",
"=",
"'/opt/java2/bin/java '",
"htm_level",
"=",
"htm_level",
"verc_htm_cmd",
"=",
"javabin",
"+",
"'-classpath /usr/cadc/misc/htm/htmIndex.jar edu.jhu.htm.app.lookup %s %s %s'",
"%",
"(",
"htm_level",
",",
"ra",
",",
"dec",
")",
"for",
"result",
"in",
"os",
".",
"popen",
"(",
"verc_htm_cmd",
")",
".",
"readlines",
"(",
")",
":",
"result",
"=",
"result",
"[",
":",
"-",
"1",
"]",
"if",
"re",
".",
"search",
"(",
"\"ID/Name cc\"",
",",
"result",
")",
":",
"(",
"void",
",",
"coord",
")",
"=",
"result",
".",
"split",
"(",
"\"=\"",
")",
"(",
"void",
",",
"junk",
",",
"htm_index",
")",
"=",
"coord",
".",
"split",
"(",
"\" \"",
")",
"return",
"htm_index"
] | 44.230769 | 20.153846 |
def main():
"""
The main() function handles command line arguments and maneuvers the cover
image generation.
"""
# Helper function.
def _draw_and_save(title, subtitle, author, filename):
"""
Draw a cover and write it to a file. Note that only PNG is supported.
"""
cover_image = draw(title, subtitle, author)
if filename == "-":
assert not "Implement."
else:
_, ext = os.path.splitext(os.path.basename(filename))
if ext.upper() == ".PNG":
try:
with open(filename, "wb") as f:
cover_image.save(f)
except FileNotFoundError:
print("Error opening target file " + filename)
return 1
else:
print("Unsupported image file format '" + ext + "', use PNG")
return 1
return 0
# Set up and parse the command line arguments passed to the program.
usage = "Python implementation of the 10PRINT Cover image generator."
parser = argparse.ArgumentParser(usage=usage)
parser.add_argument("-t", "--title", dest="title", help="Book title")
parser.add_argument("-s", "--subtitle", dest="subtitle", help="Book subtitle", default="")
parser.add_argument("-a", "--author", dest="author", help="Author(s) of the book")
parser.add_argument("-o", "--cover", dest="outfile", help="Filename of the cover image in PNG format")
parser.add_argument("-j", "--json-covers", dest="json_covers", help="JSON file containing cover information")
args = parser.parse_args()
# A JSON file is given as command line parameter; ignore the other ones.
# Read the file line by line and use the given information to generate the
# book covers. The file contains lines of JSON maps of the format
#
# {"authors": "..", "identifier": "..", "subtitle": null, "title": "..",
# "identifier_type": "Gutenberg ID", "filename": ".."}
if args.json_covers:
try:
with open(args.json_covers, "r") as f:
for line in f:
data = json.loads(line)
print("Generating cover for " + data["identifier"])
status = _draw_and_save(
data["title"],
data["subtitle"],
data["authors"],
data["filename"]
)
if status:
print("Error generating book cover image, skipping")
return 0
except ValueError:
print("Error reading from JSON file, exiting")
except FileNotFoundError:
print("JSON cover file does not exist: " + args.json_covers)
# Generate only a single cover based on the given command line arguments.
else:
if not args.title or not args.author:
print("Missing --title or --author argument, exiting")
elif not args.outfile:
print("No outfile specified, exiting")
else:
return _draw_and_save(args.title, args.subtitle, args.author, args.outfile)
return 1 | [
"def",
"main",
"(",
")",
":",
"# Helper function.",
"def",
"_draw_and_save",
"(",
"title",
",",
"subtitle",
",",
"author",
",",
"filename",
")",
":",
"\"\"\"\n Draw a cover and write it to a file. Note that only PNG is supported.\n \"\"\"",
"cover_image",
"=",
"draw",
"(",
"title",
",",
"subtitle",
",",
"author",
")",
"if",
"filename",
"==",
"\"-\"",
":",
"assert",
"not",
"\"Implement.\"",
"else",
":",
"_",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"filename",
")",
")",
"if",
"ext",
".",
"upper",
"(",
")",
"==",
"\".PNG\"",
":",
"try",
":",
"with",
"open",
"(",
"filename",
",",
"\"wb\"",
")",
"as",
"f",
":",
"cover_image",
".",
"save",
"(",
"f",
")",
"except",
"FileNotFoundError",
":",
"print",
"(",
"\"Error opening target file \"",
"+",
"filename",
")",
"return",
"1",
"else",
":",
"print",
"(",
"\"Unsupported image file format '\"",
"+",
"ext",
"+",
"\"', use PNG\"",
")",
"return",
"1",
"return",
"0",
"# Set up and parse the command line arguments passed to the program.",
"usage",
"=",
"\"Python implementation of the 10PRINT Cover image generator.\"",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"usage",
"=",
"usage",
")",
"parser",
".",
"add_argument",
"(",
"\"-t\"",
",",
"\"--title\"",
",",
"dest",
"=",
"\"title\"",
",",
"help",
"=",
"\"Book title\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-s\"",
",",
"\"--subtitle\"",
",",
"dest",
"=",
"\"subtitle\"",
",",
"help",
"=",
"\"Book subtitle\"",
",",
"default",
"=",
"\"\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-a\"",
",",
"\"--author\"",
",",
"dest",
"=",
"\"author\"",
",",
"help",
"=",
"\"Author(s) of the book\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-o\"",
",",
"\"--cover\"",
",",
"dest",
"=",
"\"outfile\"",
",",
"help",
"=",
"\"Filename of the cover image in PNG format\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-j\"",
",",
"\"--json-covers\"",
",",
"dest",
"=",
"\"json_covers\"",
",",
"help",
"=",
"\"JSON file containing cover information\"",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"# A JSON file is given as command line parameter; ignore the other ones.",
"# Read the file line by line and use the given information to generate the",
"# book covers. The file contains lines of JSON maps of the format",
"#",
"# {\"authors\": \"..\", \"identifier\": \"..\", \"subtitle\": null, \"title\": \"..\",",
"# \"identifier_type\": \"Gutenberg ID\", \"filename\": \"..\"}",
"if",
"args",
".",
"json_covers",
":",
"try",
":",
"with",
"open",
"(",
"args",
".",
"json_covers",
",",
"\"r\"",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"data",
"=",
"json",
".",
"loads",
"(",
"line",
")",
"print",
"(",
"\"Generating cover for \"",
"+",
"data",
"[",
"\"identifier\"",
"]",
")",
"status",
"=",
"_draw_and_save",
"(",
"data",
"[",
"\"title\"",
"]",
",",
"data",
"[",
"\"subtitle\"",
"]",
",",
"data",
"[",
"\"authors\"",
"]",
",",
"data",
"[",
"\"filename\"",
"]",
")",
"if",
"status",
":",
"print",
"(",
"\"Error generating book cover image, skipping\"",
")",
"return",
"0",
"except",
"ValueError",
":",
"print",
"(",
"\"Error reading from JSON file, exiting\"",
")",
"except",
"FileNotFoundError",
":",
"print",
"(",
"\"JSON cover file does not exist: \"",
"+",
"args",
".",
"json_covers",
")",
"# Generate only a single cover based on the given command line arguments.",
"else",
":",
"if",
"not",
"args",
".",
"title",
"or",
"not",
"args",
".",
"author",
":",
"print",
"(",
"\"Missing --title or --author argument, exiting\"",
")",
"elif",
"not",
"args",
".",
"outfile",
":",
"print",
"(",
"\"No outfile specified, exiting\"",
")",
"else",
":",
"return",
"_draw_and_save",
"(",
"args",
".",
"title",
",",
"args",
".",
"subtitle",
",",
"args",
".",
"author",
",",
"args",
".",
"outfile",
")",
"return",
"1"
] | 42.945205 | 22.068493 |
def alphanumeric(text):
"""Make an ultra-safe, ASCII version a string.
For instance for use as a filename.
\w matches any alphanumeric character and the underscore."""
return "".join([c for c in text if re.match(r'\w', c)]) | [
"def",
"alphanumeric",
"(",
"text",
")",
":",
"return",
"\"\"",
".",
"join",
"(",
"[",
"c",
"for",
"c",
"in",
"text",
"if",
"re",
".",
"match",
"(",
"r'\\w'",
",",
"c",
")",
"]",
")"
] | 47 | 7.4 |
def transcribe(self, text, punctuation=True):
"""
Parameters:
:param text: str: The text to be transcribed
:param punctuation: bool: Retain punctuation
This module attempts to reconstruct the approximate phonology
of Old English.
The algorithm first tries the substitutions defined in
IPA_rules and IPA.
The following exceptions are considered:
- Geminants are pronounced as long consonants/vowels
- [v ð z] are allophones of the fricatives /f θ s/ between vowels
- [ŋ] is an allophone of /n/ occurring before /k/ and /ɡ/
- [ɣ] is an allophone of /g/ after a vowel or liquid
- /l r/ were velarized when geminated or before a consonant
- [ç, x] are allophones of /h/ when occuring in the coda of a syllable and
preceded by front and back vowels respectively
Examples:
>>> Transcriber().transcribe('Fæder ūre þū þe eeart on heofonum,', punctuation = True)
'[fæder uːre θuː θe eːɑrˠt on heovonum,]'
>>> Transcriber().transcribe('Hwæt! wē Gār-Dena in ġēar-dagum', punctuation = False)
'[ʍæt weː gɑːrdenɑ in jæːɑrdɑgum]'
"""
if not punctuation:
text = re.sub(r"[\.\";\,\:\[\]\(\)!&?‘]", "", text)
text = text.lower()
text = re.sub(r'rr', 'rˠ', text)
text = re.sub(r'(\w)\1', r'\1ː', text)
text = re.sub(r'(?<=[iīæǣeē])h', 'ç', text)
text = re.sub(r'(?<=[aāoōuū])h', 'x', text)
text = re.sub(r'r(?=[bcdðfgġhlmnprstwƿxþ])', 'rˠ', text)
text = re.sub(r'l(?=[bcdðfgġhlmnprstwƿxþ])', 'ɫ', text)
text = re.sub(r'(?<=[aæeiouyǣāēīūōȳ])f(?=[aæeiouyǣāēīūōȳ])', 'v', text)
text = re.sub(r'(?<=[aæeiouyǣāēīūōȳ])þ(?=[aæeiouyǣāēīūōȳ])', 'ð', text)
text = re.sub(r'(?<=[aæeiouyǣāēīūōȳ])s(?=[aæeiouyǣāēīūōȳ])', 'z', text)
for w, val in zip(IPA_rules.keys(), IPA_rules.values()):
text = text.replace(w, val)
for w, val in zip(IPA.keys(), IPA.values()):
text = text.replace(w, val)
return '[' + text.replace('-', '') + ']' | [
"def",
"transcribe",
"(",
"self",
",",
"text",
",",
"punctuation",
"=",
"True",
")",
":",
"if",
"not",
"punctuation",
":",
"text",
"=",
"re",
".",
"sub",
"(",
"r\"[\\.\\\";\\,\\:\\[\\]\\(\\)!&?‘]\", ",
"\"",
", ",
"t",
"xt)",
"",
"text",
"=",
"text",
".",
"lower",
"(",
")",
"text",
"=",
"re",
".",
"sub",
"(",
"r'rr'",
",",
"'rˠ',",
" ",
"ext)",
"",
"text",
"=",
"re",
".",
"sub",
"(",
"r'(\\w)\\1'",
",",
"r'\\1ː',",
" ",
"ext)",
"",
"text",
"=",
"re",
".",
"sub",
"(",
"r'(?<=[iīæǣeē])h', 'ç",
"'",
" tex",
"t",
"",
"",
"text",
"=",
"re",
".",
"sub",
"(",
"r'(?<=[aāoōuū])h', '",
"x",
", t",
"e",
"t)",
"",
"text",
"=",
"re",
".",
"sub",
"(",
"r'r(?=[bcdðfgġhlmnprstwƿxþ])', 'r",
"ˠ",
", tex",
"t",
"",
"",
"text",
"=",
"re",
".",
"sub",
"(",
"r'l(?=[bcdðfgġhlmnprstwƿxþ])', 'ɫ",
"'",
" tex",
"t",
"",
"",
"text",
"=",
"re",
".",
"sub",
"(",
"r'(?<=[aæeiouyǣāēīūōȳ])f(?=[aæeiouyǣāēīūōȳ])', 'v', text)",
"",
"",
"",
"",
"",
"text",
"=",
"re",
".",
"sub",
"(",
"r'(?<=[aæeiouyǣāēīūōȳ])þ(?=[aæeiouyǣāēīūōȳ])', 'ð', text)",
"",
"",
"",
"",
"",
"text",
"=",
"re",
".",
"sub",
"(",
"r'(?<=[aæeiouyǣāēīūōȳ])s(?=[aæeiouyǣāēīūōȳ])', 'z', text)",
"",
"",
"",
"",
"",
"for",
"w",
",",
"val",
"in",
"zip",
"(",
"IPA_rules",
".",
"keys",
"(",
")",
",",
"IPA_rules",
".",
"values",
"(",
")",
")",
":",
"text",
"=",
"text",
".",
"replace",
"(",
"w",
",",
"val",
")",
"for",
"w",
",",
"val",
"in",
"zip",
"(",
"IPA",
".",
"keys",
"(",
")",
",",
"IPA",
".",
"values",
"(",
")",
")",
":",
"text",
"=",
"text",
".",
"replace",
"(",
"w",
",",
"val",
")",
"return",
"'['",
"+",
"text",
".",
"replace",
"(",
"'-'",
",",
"''",
")",
"+",
"']'"
] | 38.381818 | 25 |
def fromFile(cls, filepath):
"""
Creates a proxy instance from the inputted registry file.
:param filepath | <str>
:return <PluginProxy> || None
"""
xdata = ElementTree.parse(nstr(filepath))
xroot = xdata.getroot()
# collect variable information
name = xroot.get('name')
ver = float(xroot.get('version', '1.0'))
if not name:
name = os.path.basename(filepath).split('.')
if name == '__init__':
name = os.path.normpath(filepath).split(os.path.sep)[-2]
name = projex.text.pretty(name)
icon = xroot.get('icon', './icon.png')
ximport = xroot.find('import')
if ximport is not None:
importpath = ximport.get('path', './__init__.py')
else:
importpath = './__init__.py'
params = {'description': '', 'author': '', 'email': '', 'url': ''}
for param, default in params.items():
xdata = xroot.find(param)
if xdata is not None:
params[param] = xdata.text
# generate the proxy information
proxy = PluginProxy(cls, name, ver)
proxy.setImportPath(importpath)
proxy.setDescription(params['description'])
proxy.setAuthor(params['author'])
proxy.setEmail(params['email'])
proxy.setUrl(params['url'])
proxy.setFilepath(filepath)
return proxy | [
"def",
"fromFile",
"(",
"cls",
",",
"filepath",
")",
":",
"xdata",
"=",
"ElementTree",
".",
"parse",
"(",
"nstr",
"(",
"filepath",
")",
")",
"xroot",
"=",
"xdata",
".",
"getroot",
"(",
")",
"# collect variable information",
"name",
"=",
"xroot",
".",
"get",
"(",
"'name'",
")",
"ver",
"=",
"float",
"(",
"xroot",
".",
"get",
"(",
"'version'",
",",
"'1.0'",
")",
")",
"if",
"not",
"name",
":",
"name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"filepath",
")",
".",
"split",
"(",
"'.'",
")",
"if",
"name",
"==",
"'__init__'",
":",
"name",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"filepath",
")",
".",
"split",
"(",
"os",
".",
"path",
".",
"sep",
")",
"[",
"-",
"2",
"]",
"name",
"=",
"projex",
".",
"text",
".",
"pretty",
"(",
"name",
")",
"icon",
"=",
"xroot",
".",
"get",
"(",
"'icon'",
",",
"'./icon.png'",
")",
"ximport",
"=",
"xroot",
".",
"find",
"(",
"'import'",
")",
"if",
"ximport",
"is",
"not",
"None",
":",
"importpath",
"=",
"ximport",
".",
"get",
"(",
"'path'",
",",
"'./__init__.py'",
")",
"else",
":",
"importpath",
"=",
"'./__init__.py'",
"params",
"=",
"{",
"'description'",
":",
"''",
",",
"'author'",
":",
"''",
",",
"'email'",
":",
"''",
",",
"'url'",
":",
"''",
"}",
"for",
"param",
",",
"default",
"in",
"params",
".",
"items",
"(",
")",
":",
"xdata",
"=",
"xroot",
".",
"find",
"(",
"param",
")",
"if",
"xdata",
"is",
"not",
"None",
":",
"params",
"[",
"param",
"]",
"=",
"xdata",
".",
"text",
"# generate the proxy information",
"proxy",
"=",
"PluginProxy",
"(",
"cls",
",",
"name",
",",
"ver",
")",
"proxy",
".",
"setImportPath",
"(",
"importpath",
")",
"proxy",
".",
"setDescription",
"(",
"params",
"[",
"'description'",
"]",
")",
"proxy",
".",
"setAuthor",
"(",
"params",
"[",
"'author'",
"]",
")",
"proxy",
".",
"setEmail",
"(",
"params",
"[",
"'email'",
"]",
")",
"proxy",
".",
"setUrl",
"(",
"params",
"[",
"'url'",
"]",
")",
"proxy",
".",
"setFilepath",
"(",
"filepath",
")",
"return",
"proxy"
] | 31.866667 | 14.711111 |
def _sparse_blockify(tuples, dtype=None):
""" return an array of blocks that potentially have different dtypes (and
are sparse)
"""
new_blocks = []
for i, names, array in tuples:
array = _maybe_to_sparse(array)
block = make_block(array, placement=[i])
new_blocks.append(block)
return new_blocks | [
"def",
"_sparse_blockify",
"(",
"tuples",
",",
"dtype",
"=",
"None",
")",
":",
"new_blocks",
"=",
"[",
"]",
"for",
"i",
",",
"names",
",",
"array",
"in",
"tuples",
":",
"array",
"=",
"_maybe_to_sparse",
"(",
"array",
")",
"block",
"=",
"make_block",
"(",
"array",
",",
"placement",
"=",
"[",
"i",
"]",
")",
"new_blocks",
".",
"append",
"(",
"block",
")",
"return",
"new_blocks"
] | 27.75 | 14.083333 |
def save(self, update_site=False, *args, **kwargs):
"""
Set the site to the current site when the record is first
created, or the ``update_site`` argument is explicitly set
to ``True``.
"""
if update_site or (self.id is None and self.site_id is None):
self.site_id = current_site_id()
super(SiteRelated, self).save(*args, **kwargs) | [
"def",
"save",
"(",
"self",
",",
"update_site",
"=",
"False",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"update_site",
"or",
"(",
"self",
".",
"id",
"is",
"None",
"and",
"self",
".",
"site_id",
"is",
"None",
")",
":",
"self",
".",
"site_id",
"=",
"current_site_id",
"(",
")",
"super",
"(",
"SiteRelated",
",",
"self",
")",
".",
"save",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 43.444444 | 14.333333 |
def __insert_action(self, revision):
"""
Handle the insert action type.
Creates new document to be created in this collection.
This allows you to stage a creation of an object
:param dict revision: The revision dictionary
"""
revision["patch"]["_id"] = ObjectId(revision.get("master_id"))
insert_response = yield self.collection.insert(revision.get("patch"))
if not isinstance(insert_response, str):
raise DocumentRevisionInsertFailed() | [
"def",
"__insert_action",
"(",
"self",
",",
"revision",
")",
":",
"revision",
"[",
"\"patch\"",
"]",
"[",
"\"_id\"",
"]",
"=",
"ObjectId",
"(",
"revision",
".",
"get",
"(",
"\"master_id\"",
")",
")",
"insert_response",
"=",
"yield",
"self",
".",
"collection",
".",
"insert",
"(",
"revision",
".",
"get",
"(",
"\"patch\"",
")",
")",
"if",
"not",
"isinstance",
"(",
"insert_response",
",",
"str",
")",
":",
"raise",
"DocumentRevisionInsertFailed",
"(",
")"
] | 30 | 22.352941 |
def poisson_random_measure(t, rate, rate_max):
"""A function that returns the arrival time of the next arrival for
a Poisson random measure.
Parameters
----------
t : float
The start time from which to simulate the next arrival time.
rate : function
The *intensity function* for the measure, where ``rate(t)`` is
the expected arrival rate at time ``t``.
rate_max : float
The maximum value of the ``rate`` function.
Returns
-------
out : float
The time of the next arrival.
Notes
-----
This function returns the time of the next arrival, where the
distribution of the number of arrivals between times :math:`t` and
:math:`t+s` is Poisson with mean
.. math::
\int_{t}^{t+s} dx \, r(x)
where :math:`r(t)` is the supplied ``rate`` function. This function
can only simulate processes that have bounded intensity functions.
See chapter 6 of [3]_ for more on the mathematics behind Poisson
random measures; the book's publisher, Springer, has that chapter
available online for free at (`pdf`_\).
A Poisson random measure is sometimes called a non-homogeneous
Poisson process. A Poisson process is a special type of Poisson
random measure.
.. _pdf: http://www.springer.com/cda/content/document/\
cda_downloaddocument/9780387878584-c1.pdf
Examples
--------
Suppose you wanted to model the arrival process as a Poisson
random measure with rate function :math:`r(t) = 2 + \sin( 2\pi t)`.
Then you could do so as follows:
>>> import queueing_tool as qt
>>> import numpy as np
>>> np.random.seed(10)
>>> rate = lambda t: 2 + np.sin(2 * np.pi * t)
>>> arr_f = lambda t: qt.poisson_random_measure(t, rate, 3)
>>> arr_f(1) # doctest: +ELLIPSIS
1.491...
References
----------
.. [3] Cinlar, Erhan. *Probability and stochastics*. Graduate Texts in\
Mathematics. Vol. 261. Springer, New York, 2011.\
:doi:`10.1007/978-0-387-87859-1`
"""
scale = 1.0 / rate_max
t = t + exponential(scale)
while rate_max * uniform() > rate(t):
t = t + exponential(scale)
return t | [
"def",
"poisson_random_measure",
"(",
"t",
",",
"rate",
",",
"rate_max",
")",
":",
"scale",
"=",
"1.0",
"/",
"rate_max",
"t",
"=",
"t",
"+",
"exponential",
"(",
"scale",
")",
"while",
"rate_max",
"*",
"uniform",
"(",
")",
">",
"rate",
"(",
"t",
")",
":",
"t",
"=",
"t",
"+",
"exponential",
"(",
"scale",
")",
"return",
"t"
] | 32.253731 | 22.522388 |
def p_review_comment_1(self, p):
"""review_comment : REVIEW_COMMENT TEXT"""
try:
if six.PY2:
value = p[2].decode(encoding='utf-8')
else:
value = p[2]
self.builder.add_review_comment(self.document, value)
except CardinalityError:
self.more_than_one_error('ReviewComment', p.lineno(1))
except OrderError:
self.order_error('ReviewComment', 'Reviewer', p.lineno(1)) | [
"def",
"p_review_comment_1",
"(",
"self",
",",
"p",
")",
":",
"try",
":",
"if",
"six",
".",
"PY2",
":",
"value",
"=",
"p",
"[",
"2",
"]",
".",
"decode",
"(",
"encoding",
"=",
"'utf-8'",
")",
"else",
":",
"value",
"=",
"p",
"[",
"2",
"]",
"self",
".",
"builder",
".",
"add_review_comment",
"(",
"self",
".",
"document",
",",
"value",
")",
"except",
"CardinalityError",
":",
"self",
".",
"more_than_one_error",
"(",
"'ReviewComment'",
",",
"p",
".",
"lineno",
"(",
"1",
")",
")",
"except",
"OrderError",
":",
"self",
".",
"order_error",
"(",
"'ReviewComment'",
",",
"'Reviewer'",
",",
"p",
".",
"lineno",
"(",
"1",
")",
")"
] | 39.5 | 17 |
def get_activities(self, before=None, after=None, limit=None):
"""
Get activities for authenticated user sorted by newest first.
http://strava.github.io/api/v3/activities/
:param before: Result will start with activities whose start date is
before specified date. (UTC)
:type before: datetime.datetime or str or None
:param after: Result will start with activities whose start date is after
specified value. (UTC)
:type after: datetime.datetime or str or None
:param limit: How many maximum activities to return.
:type limit: int or None
:return: An iterator of :class:`stravalib.model.Activity` objects.
:rtype: :class:`BatchedResultsIterator`
"""
if before:
before = self._utc_datetime_to_epoch(before)
if after:
after = self._utc_datetime_to_epoch(after)
params = dict(before=before, after=after)
result_fetcher = functools.partial(self.protocol.get,
'/athlete/activities',
**params)
return BatchedResultsIterator(entity=model.Activity,
bind_client=self,
result_fetcher=result_fetcher,
limit=limit) | [
"def",
"get_activities",
"(",
"self",
",",
"before",
"=",
"None",
",",
"after",
"=",
"None",
",",
"limit",
"=",
"None",
")",
":",
"if",
"before",
":",
"before",
"=",
"self",
".",
"_utc_datetime_to_epoch",
"(",
"before",
")",
"if",
"after",
":",
"after",
"=",
"self",
".",
"_utc_datetime_to_epoch",
"(",
"after",
")",
"params",
"=",
"dict",
"(",
"before",
"=",
"before",
",",
"after",
"=",
"after",
")",
"result_fetcher",
"=",
"functools",
".",
"partial",
"(",
"self",
".",
"protocol",
".",
"get",
",",
"'/athlete/activities'",
",",
"*",
"*",
"params",
")",
"return",
"BatchedResultsIterator",
"(",
"entity",
"=",
"model",
".",
"Activity",
",",
"bind_client",
"=",
"self",
",",
"result_fetcher",
"=",
"result_fetcher",
",",
"limit",
"=",
"limit",
")"
] | 37.297297 | 23.351351 |
def GetPlatformRestrictions(campaign_feed):
"""Get the Platform Restrictions for a given Campaign Feed.
Args:
campaign_feed: the Campaign Feed we are retrieving Platform Restrictons for.
Returns:
The Platform Restrictions for the given feed.
"""
platform_restrictions = None
if campaign_feed['matchingFunction']['operator'] == 'AND':
for argument in campaign_feed['matchingFunction']['lhsOperand']:
# Check if matchingFunction is EQUALS(CONTEXT.DEVICE, 'Mobile')
if argument['value']['operator'] == 'EQUALS':
request_context_operand = argument['value']['lhsOperand'][0]
if (request_context_operand and
request_context_operand == 'DEVICE_PLATFORM'):
# This needs to be capitalized for ExtensionSettingPlatform.
platform_restrictions = argument['value']['rhsOperand'][0].upper()
return platform_restrictions | [
"def",
"GetPlatformRestrictions",
"(",
"campaign_feed",
")",
":",
"platform_restrictions",
"=",
"None",
"if",
"campaign_feed",
"[",
"'matchingFunction'",
"]",
"[",
"'operator'",
"]",
"==",
"'AND'",
":",
"for",
"argument",
"in",
"campaign_feed",
"[",
"'matchingFunction'",
"]",
"[",
"'lhsOperand'",
"]",
":",
"# Check if matchingFunction is EQUALS(CONTEXT.DEVICE, 'Mobile')",
"if",
"argument",
"[",
"'value'",
"]",
"[",
"'operator'",
"]",
"==",
"'EQUALS'",
":",
"request_context_operand",
"=",
"argument",
"[",
"'value'",
"]",
"[",
"'lhsOperand'",
"]",
"[",
"0",
"]",
"if",
"(",
"request_context_operand",
"and",
"request_context_operand",
"==",
"'DEVICE_PLATFORM'",
")",
":",
"# This needs to be capitalized for ExtensionSettingPlatform.",
"platform_restrictions",
"=",
"argument",
"[",
"'value'",
"]",
"[",
"'rhsOperand'",
"]",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"return",
"platform_restrictions"
] | 38.347826 | 23.652174 |
def sample_conditional(self, y, t=None, size=None):
"""
Sample from the conditional (predictive) distribution
Note: this method scales as ``O(M^3)`` for large ``M``, where
``M == len(t)``.
Args:
y (array[n]): The observations at coordinates ``x`` from
:func:`GP.compute`.
t (Optional[array[ntest]]): The independent coordinates where the
prediction should be made. If this is omitted the coordinates
will be assumed to be ``x`` from :func:`GP.compute` and an
efficient method will be used to compute the prediction.
size (Optional[int]): The number of samples to draw.
Returns:
array[n] or array[size, n]: The samples from the conditional
distribution over datasets.
"""
mu, cov = self.predict(y, t, return_cov=True)
return np.random.multivariate_normal(mu, cov, size=size) | [
"def",
"sample_conditional",
"(",
"self",
",",
"y",
",",
"t",
"=",
"None",
",",
"size",
"=",
"None",
")",
":",
"mu",
",",
"cov",
"=",
"self",
".",
"predict",
"(",
"y",
",",
"t",
",",
"return_cov",
"=",
"True",
")",
"return",
"np",
".",
"random",
".",
"multivariate_normal",
"(",
"mu",
",",
"cov",
",",
"size",
"=",
"size",
")"
] | 41.347826 | 24.130435 |
def results( self ):
"""Return a list of all the results currently available. This
excludes pending results. Results are returned as a single flat
list, so any repetition structure is lost.
:returns: a list of results"""
rs = []
for k in self._results.keys():
# filter out pending job ids, which can be anything except dicts
ars = [ res for res in self._results[k] if isinstance(res, dict) ]
rs.extend(ars)
return rs | [
"def",
"results",
"(",
"self",
")",
":",
"rs",
"=",
"[",
"]",
"for",
"k",
"in",
"self",
".",
"_results",
".",
"keys",
"(",
")",
":",
"# filter out pending job ids, which can be anything except dicts",
"ars",
"=",
"[",
"res",
"for",
"res",
"in",
"self",
".",
"_results",
"[",
"k",
"]",
"if",
"isinstance",
"(",
"res",
",",
"dict",
")",
"]",
"rs",
".",
"extend",
"(",
"ars",
")",
"return",
"rs"
] | 41.5 | 19.916667 |
def parseArgv():
"""
Command line option parser.
"""
parser = OptionParser()
parser.usage = r""" cat <TEXT> | %prog [--unit <UNIT>] [--output <SA_FILE>]
Create the suffix array of TEXT with the processing UNIT and optionally store it in SA_FILE for subsequent use.
UNIT may be set to 'byte', 'character' (given an encoding with the --encoding option) or 'word', which is the default.
"""
parser.add_option("-i", "--input",
action="store", type="string", dest="input",
default=False,
help="Path of the file containing the input text. When '-' is given, read the standard input (default). If the path ends with '.gz', reads the decompressed file.")
parser.add_option("-o", "--output",
action="store", type="string", dest="output",
default=False,
help="Store the suffix array of the input to the file OUTPUT. When '-' is given, writes to the standard output. If the filename ends with '.gz', the suffix array will be stored compressed.")
parser.add_option("", "--load",
action="store", type="string", dest="SAFile",
default=False,
help="Load a suffix array from SAFILE, this option and --input are mutually exclusive.")
parser.add_option("-u", "--unit",
action="store", type="string", dest="unit",
default=DEFAULT_UNIT_STR,
help="Processing unit used for the creation of the suffix array." + \
'Possible values are "byte", "character" and "word". Default is "%s".' % DEFAULT_UNIT_STR + \
"This option is ignored when the suffix array is loaded from SAFILE." + \
'For characters, the input is decoded according to the encoding set via the option --encoding.')
parser.add_option("-e", "--encoding",
action="store", type="string", dest="encoding",
default=DEFAULT_ENCODING,
help="Encoding of the input. This information is required only when processing characters. Default is '%s'." % DEFAULT_ENCODING)
parser.add_option("-p", "--print",
action="store_true", dest="printSA",
default=False,
help="Prints the suffix array in a human readable format to the standard error output.")
parser.add_option("", "--verbose",
action="store_true", dest="verbose",
default=False,
help="Prints more information.")
parser.add_option("", "--no-lcps",
action="store_true", dest="noLCPs",
default=False,
help="Switch off the computation of LCPs. By doing so, the find functions are unusable.")
(options, args) = parser.parse_args(_argv)
strings = args[1:]
return (options, strings) | [
"def",
"parseArgv",
"(",
")",
":",
"parser",
"=",
"OptionParser",
"(",
")",
"parser",
".",
"usage",
"=",
"r\"\"\" cat <TEXT> | %prog [--unit <UNIT>] [--output <SA_FILE>]\n\nCreate the suffix array of TEXT with the processing UNIT and optionally store it in SA_FILE for subsequent use.\nUNIT may be set to 'byte', 'character' (given an encoding with the --encoding option) or 'word', which is the default.\n\"\"\"",
"parser",
".",
"add_option",
"(",
"\"-i\"",
",",
"\"--input\"",
",",
"action",
"=",
"\"store\"",
",",
"type",
"=",
"\"string\"",
",",
"dest",
"=",
"\"input\"",
",",
"default",
"=",
"False",
",",
"help",
"=",
"\"Path of the file containing the input text. When '-' is given, read the standard input (default). If the path ends with '.gz', reads the decompressed file.\"",
")",
"parser",
".",
"add_option",
"(",
"\"-o\"",
",",
"\"--output\"",
",",
"action",
"=",
"\"store\"",
",",
"type",
"=",
"\"string\"",
",",
"dest",
"=",
"\"output\"",
",",
"default",
"=",
"False",
",",
"help",
"=",
"\"Store the suffix array of the input to the file OUTPUT. When '-' is given, writes to the standard output. If the filename ends with '.gz', the suffix array will be stored compressed.\"",
")",
"parser",
".",
"add_option",
"(",
"\"\"",
",",
"\"--load\"",
",",
"action",
"=",
"\"store\"",
",",
"type",
"=",
"\"string\"",
",",
"dest",
"=",
"\"SAFile\"",
",",
"default",
"=",
"False",
",",
"help",
"=",
"\"Load a suffix array from SAFILE, this option and --input are mutually exclusive.\"",
")",
"parser",
".",
"add_option",
"(",
"\"-u\"",
",",
"\"--unit\"",
",",
"action",
"=",
"\"store\"",
",",
"type",
"=",
"\"string\"",
",",
"dest",
"=",
"\"unit\"",
",",
"default",
"=",
"DEFAULT_UNIT_STR",
",",
"help",
"=",
"\"Processing unit used for the creation of the suffix array.\"",
"+",
"'Possible values are \"byte\", \"character\" and \"word\". Default is \"%s\".'",
"%",
"DEFAULT_UNIT_STR",
"+",
"\"This option is ignored when the suffix array is loaded from SAFILE.\"",
"+",
"'For characters, the input is decoded according to the encoding set via the option --encoding.'",
")",
"parser",
".",
"add_option",
"(",
"\"-e\"",
",",
"\"--encoding\"",
",",
"action",
"=",
"\"store\"",
",",
"type",
"=",
"\"string\"",
",",
"dest",
"=",
"\"encoding\"",
",",
"default",
"=",
"DEFAULT_ENCODING",
",",
"help",
"=",
"\"Encoding of the input. This information is required only when processing characters. Default is '%s'.\"",
"%",
"DEFAULT_ENCODING",
")",
"parser",
".",
"add_option",
"(",
"\"-p\"",
",",
"\"--print\"",
",",
"action",
"=",
"\"store_true\"",
",",
"dest",
"=",
"\"printSA\"",
",",
"default",
"=",
"False",
",",
"help",
"=",
"\"Prints the suffix array in a human readable format to the standard error output.\"",
")",
"parser",
".",
"add_option",
"(",
"\"\"",
",",
"\"--verbose\"",
",",
"action",
"=",
"\"store_true\"",
",",
"dest",
"=",
"\"verbose\"",
",",
"default",
"=",
"False",
",",
"help",
"=",
"\"Prints more information.\"",
")",
"parser",
".",
"add_option",
"(",
"\"\"",
",",
"\"--no-lcps\"",
",",
"action",
"=",
"\"store_true\"",
",",
"dest",
"=",
"\"noLCPs\"",
",",
"default",
"=",
"False",
",",
"help",
"=",
"\"Switch off the computation of LCPs. By doing so, the find functions are unusable.\"",
")",
"(",
"options",
",",
"args",
")",
"=",
"parser",
".",
"parse_args",
"(",
"_argv",
")",
"strings",
"=",
"args",
"[",
"1",
":",
"]",
"return",
"(",
"options",
",",
"strings",
")"
] | 52.350877 | 31.578947 |
def equals(series1, series2, ignore_order=False, ignore_index=False, all_close=False, _return_reason=False):
'''
Get whether 2 series are equal.
``NaN`` is considered equal to ``NaN`` and `None`.
Parameters
----------
series1 : pandas.Series
Series to compare.
series2 : pandas.Series
Series to compare.
ignore_order : bool
Ignore order of values (and index).
ignore_index : bool
Ignore index values and name.
all_close : bool
If `False`, values must match exactly, if `True`, floats are compared as if
compared with `numpy.isclose`.
_return_reason : bool
Internal. If `True`, `equals` returns a tuple containing the reason, else
`equals` only returns a bool indicating equality (or equivalence
rather).
Returns
-------
bool
Whether they are equal (after ignoring according to the parameters).
Internal note: if ``_return_reason``, ``Tuple[bool, str or None]`` is
returned. The former is whether they're equal, the latter is `None` if
equal or a short explanation of why the series aren't equal, otherwise.
Notes
-----
All values (including those of indices) must be copyable and ``__eq__`` must
be such that a copy must equal its original. A value must equal itself
unless it's ``NaN``. Values needn't be orderable or hashable (however
pandas requires index values to be orderable and hashable). By consequence,
this is not an efficient function, but it is flexible.
'''
result = _equals(series1, series2, ignore_order, ignore_index, all_close)
if _return_reason:
return result
else:
return result[0] | [
"def",
"equals",
"(",
"series1",
",",
"series2",
",",
"ignore_order",
"=",
"False",
",",
"ignore_index",
"=",
"False",
",",
"all_close",
"=",
"False",
",",
"_return_reason",
"=",
"False",
")",
":",
"result",
"=",
"_equals",
"(",
"series1",
",",
"series2",
",",
"ignore_order",
",",
"ignore_index",
",",
"all_close",
")",
"if",
"_return_reason",
":",
"return",
"result",
"else",
":",
"return",
"result",
"[",
"0",
"]"
] | 36.543478 | 27.456522 |
def _serialize_default(cls, obj):
"""
:type obj: int|str|bool|float|bytes|unicode|list|dict|object
:rtype: int|str|bool|list|dict
"""
if obj is None or cls._is_primitive(obj):
return obj
elif cls._is_bytes(obj):
return obj.decode()
elif type(obj) == list:
return cls._serialize_list(obj)
else:
dict_ = cls._get_obj_raw(obj)
return cls._serialize_dict(dict_) | [
"def",
"_serialize_default",
"(",
"cls",
",",
"obj",
")",
":",
"if",
"obj",
"is",
"None",
"or",
"cls",
".",
"_is_primitive",
"(",
"obj",
")",
":",
"return",
"obj",
"elif",
"cls",
".",
"_is_bytes",
"(",
"obj",
")",
":",
"return",
"obj",
".",
"decode",
"(",
")",
"elif",
"type",
"(",
"obj",
")",
"==",
"list",
":",
"return",
"cls",
".",
"_serialize_list",
"(",
"obj",
")",
"else",
":",
"dict_",
"=",
"cls",
".",
"_get_obj_raw",
"(",
"obj",
")",
"return",
"cls",
".",
"_serialize_dict",
"(",
"dict_",
")"
] | 27.529412 | 14.470588 |
def to_csv(self):
"""
Renders a legend as a CSV string.
No arguments.
Returns:
str: The legend as a CSV.
"""
# We can't delegate this to Decor because we need to know the superset
# of all Decor properties. There may be lots of blanks.
header = []
component_header = []
for row in self:
for j in row.__dict__.keys():
if j == '_colour':
j = 'colour'
header.append(j)
for k in row.component.__dict__.keys():
component_header.append(k)
header = set(header)
component_header = set(component_header)
header.remove('component')
header_row = ''
if 'colour' in header:
header_row += 'colour,'
header.remove('colour')
has_colour = True
for item in header:
header_row += item + ','
for item in component_header:
header_row += 'component ' + item + ','
# Now we have a header row! Phew.
# Next we'll go back over the legend and collect everything.
result = header_row.strip(',') + '\n'
for row in self:
if has_colour:
result += row.__dict__.get('_colour', '') + ','
for item in header:
result += str(row.__dict__.get(item, '')) + ','
for item in component_header:
result += str(row.component.__dict__.get(item, '')) + ','
result += '\n'
return result | [
"def",
"to_csv",
"(",
"self",
")",
":",
"# We can't delegate this to Decor because we need to know the superset",
"# of all Decor properties. There may be lots of blanks.",
"header",
"=",
"[",
"]",
"component_header",
"=",
"[",
"]",
"for",
"row",
"in",
"self",
":",
"for",
"j",
"in",
"row",
".",
"__dict__",
".",
"keys",
"(",
")",
":",
"if",
"j",
"==",
"'_colour'",
":",
"j",
"=",
"'colour'",
"header",
".",
"append",
"(",
"j",
")",
"for",
"k",
"in",
"row",
".",
"component",
".",
"__dict__",
".",
"keys",
"(",
")",
":",
"component_header",
".",
"append",
"(",
"k",
")",
"header",
"=",
"set",
"(",
"header",
")",
"component_header",
"=",
"set",
"(",
"component_header",
")",
"header",
".",
"remove",
"(",
"'component'",
")",
"header_row",
"=",
"''",
"if",
"'colour'",
"in",
"header",
":",
"header_row",
"+=",
"'colour,'",
"header",
".",
"remove",
"(",
"'colour'",
")",
"has_colour",
"=",
"True",
"for",
"item",
"in",
"header",
":",
"header_row",
"+=",
"item",
"+",
"','",
"for",
"item",
"in",
"component_header",
":",
"header_row",
"+=",
"'component '",
"+",
"item",
"+",
"','",
"# Now we have a header row! Phew.",
"# Next we'll go back over the legend and collect everything.",
"result",
"=",
"header_row",
".",
"strip",
"(",
"','",
")",
"+",
"'\\n'",
"for",
"row",
"in",
"self",
":",
"if",
"has_colour",
":",
"result",
"+=",
"row",
".",
"__dict__",
".",
"get",
"(",
"'_colour'",
",",
"''",
")",
"+",
"','",
"for",
"item",
"in",
"header",
":",
"result",
"+=",
"str",
"(",
"row",
".",
"__dict__",
".",
"get",
"(",
"item",
",",
"''",
")",
")",
"+",
"','",
"for",
"item",
"in",
"component_header",
":",
"result",
"+=",
"str",
"(",
"row",
".",
"component",
".",
"__dict__",
".",
"get",
"(",
"item",
",",
"''",
")",
")",
"+",
"','",
"result",
"+=",
"'\\n'",
"return",
"result"
] | 33.347826 | 14.478261 |
def un(source, wrapper=list, error_bad_lines=True):
"""Parse a text stream to TSV
If the source is a string, it is converted to a line-iterable stream. If
it is a file handle or other object, we assume that we can iterate over
the lines in it.
The result is a generator, and what it contains depends on whether the
second argument is set and what it is set to.
If the second argument is set to list, the default, then each element of
the result is a list of strings. If it is set to a class generated with
namedtuple(), then each element is an instance of this class, or None if
there were too many or too few fields.
Although newline separated input is preferred, carriage-return-newline is
accepted on every platform.
Since there is no definite order to the fields of a dict, there is no
consistent way to format dicts for output. To avoid the asymmetry of a
type that can be read but not written, plain dictionary parsing is
omitted.
"""
if isinstance(source, six.string_types):
source = six.StringIO(source)
# Prepare source lines for reading
rows = parse_lines(source)
# Get columns
if is_namedtuple(wrapper):
columns = wrapper._fields
wrapper = wrapper._make
else:
columns = next(rows, None)
if columns is not None:
i, columns = columns
yield wrapper(columns)
# Get values
for i, values in rows:
if check_line_consistency(columns, values, i, error_bad_lines):
yield wrapper(values) | [
"def",
"un",
"(",
"source",
",",
"wrapper",
"=",
"list",
",",
"error_bad_lines",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"source",
",",
"six",
".",
"string_types",
")",
":",
"source",
"=",
"six",
".",
"StringIO",
"(",
"source",
")",
"# Prepare source lines for reading",
"rows",
"=",
"parse_lines",
"(",
"source",
")",
"# Get columns",
"if",
"is_namedtuple",
"(",
"wrapper",
")",
":",
"columns",
"=",
"wrapper",
".",
"_fields",
"wrapper",
"=",
"wrapper",
".",
"_make",
"else",
":",
"columns",
"=",
"next",
"(",
"rows",
",",
"None",
")",
"if",
"columns",
"is",
"not",
"None",
":",
"i",
",",
"columns",
"=",
"columns",
"yield",
"wrapper",
"(",
"columns",
")",
"# Get values",
"for",
"i",
",",
"values",
"in",
"rows",
":",
"if",
"check_line_consistency",
"(",
"columns",
",",
"values",
",",
"i",
",",
"error_bad_lines",
")",
":",
"yield",
"wrapper",
"(",
"values",
")"
] | 35.744186 | 22.069767 |
def transform_to_data_coordinates(obj, xdata, ydata):
"""The coordinates might not be in data coordinates, but could be sometimes in axes
coordinates. For example, the matplotlib command
axes.axvline(2)
will have the y coordinates set to 0 and 1, not to the limits. Therefore, a
two-stage transform has to be applied:
1. first transforming to display coordinates, then
2. from display to data.
"""
if obj.axes is not None and obj.get_transform() != obj.axes.transData:
points = numpy.array([xdata, ydata]).T
transform = matplotlib.transforms.composite_transform_factory(
obj.get_transform(), obj.axes.transData.inverted()
)
return transform.transform(points).T
return xdata, ydata | [
"def",
"transform_to_data_coordinates",
"(",
"obj",
",",
"xdata",
",",
"ydata",
")",
":",
"if",
"obj",
".",
"axes",
"is",
"not",
"None",
"and",
"obj",
".",
"get_transform",
"(",
")",
"!=",
"obj",
".",
"axes",
".",
"transData",
":",
"points",
"=",
"numpy",
".",
"array",
"(",
"[",
"xdata",
",",
"ydata",
"]",
")",
".",
"T",
"transform",
"=",
"matplotlib",
".",
"transforms",
".",
"composite_transform_factory",
"(",
"obj",
".",
"get_transform",
"(",
")",
",",
"obj",
".",
"axes",
".",
"transData",
".",
"inverted",
"(",
")",
")",
"return",
"transform",
".",
"transform",
"(",
"points",
")",
".",
"T",
"return",
"xdata",
",",
"ydata"
] | 47.1875 | 15.9375 |
def get_ec_index(self, ec_handle):
'''Get the index of the execution context with the given handle.
@param ec_handle The handle of the execution context to look for.
@type ec_handle str
@return The index into the owned + participated arrays, suitable for
use in methods such as @ref activate_in_ec, or -1 if the EC was not
found.
@raises NoECWithHandleError
'''
with self._mutex:
for ii, ec in enumerate(self.owned_ecs):
if ec.handle == ec_handle:
return ii
for ii, ec in enumerate(self.participating_ecs):
if ec.handle == ec_handle:
return ii + len(self.owned_ecs)
raise exceptions.NoECWithHandleError | [
"def",
"get_ec_index",
"(",
"self",
",",
"ec_handle",
")",
":",
"with",
"self",
".",
"_mutex",
":",
"for",
"ii",
",",
"ec",
"in",
"enumerate",
"(",
"self",
".",
"owned_ecs",
")",
":",
"if",
"ec",
".",
"handle",
"==",
"ec_handle",
":",
"return",
"ii",
"for",
"ii",
",",
"ec",
"in",
"enumerate",
"(",
"self",
".",
"participating_ecs",
")",
":",
"if",
"ec",
".",
"handle",
"==",
"ec_handle",
":",
"return",
"ii",
"+",
"len",
"(",
"self",
".",
"owned_ecs",
")",
"raise",
"exceptions",
".",
"NoECWithHandleError"
] | 40.315789 | 19.789474 |
def write(self, buf, url):
"""Write buffer to storage at a given url"""
(store_name, path) = self._split_url(url)
adapter = self._create_adapter(store_name)
with adapter.open(path, 'wb') as f:
f.write(buf.encode()) | [
"def",
"write",
"(",
"self",
",",
"buf",
",",
"url",
")",
":",
"(",
"store_name",
",",
"path",
")",
"=",
"self",
".",
"_split_url",
"(",
"url",
")",
"adapter",
"=",
"self",
".",
"_create_adapter",
"(",
"store_name",
")",
"with",
"adapter",
".",
"open",
"(",
"path",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"buf",
".",
"encode",
"(",
")",
")"
] | 42.166667 | 7.166667 |
def fit(self, X, y=None, input_type='data'):
"""
Fit the model from data in X.
Parameters
----------
input_type : string, one of: 'data', 'distance' or 'affinity'.
The values of input data X. (default = 'data')
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples in the number of samples
and n_features is the number of features.
If self.input_type is distance, or affinity:
X : array-like, shape (n_samples, n_samples),
Interpret X as precomputed distance or adjacency graph
computed from samples.
Returns
-------
self : object
Returns the instance itself.
"""
X = self._validate_input(X, input_type)
self.fit_geometry(X, input_type)
random_state = check_random_state(self.random_state)
self.embedding_, self.eigenvalues_, self.eigenvectors_ = spectral_embedding(self.geom_,
n_components = self.n_components,
eigen_solver = self.eigen_solver,
random_state = random_state,
drop_first = self.drop_first,
diffusion_maps = self.diffusion_maps,
diffusion_time = self.diffusion_time,
solver_kwds = self.solver_kwds)
self.affinity_matrix_ = self.geom_.affinity_matrix
self.laplacian_matrix_ = self.geom_.laplacian_matrix
self.laplacian_matrix_type_ = self.geom_.laplacian_method
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
",",
"input_type",
"=",
"'data'",
")",
":",
"X",
"=",
"self",
".",
"_validate_input",
"(",
"X",
",",
"input_type",
")",
"self",
".",
"fit_geometry",
"(",
"X",
",",
"input_type",
")",
"random_state",
"=",
"check_random_state",
"(",
"self",
".",
"random_state",
")",
"self",
".",
"embedding_",
",",
"self",
".",
"eigenvalues_",
",",
"self",
".",
"eigenvectors_",
"=",
"spectral_embedding",
"(",
"self",
".",
"geom_",
",",
"n_components",
"=",
"self",
".",
"n_components",
",",
"eigen_solver",
"=",
"self",
".",
"eigen_solver",
",",
"random_state",
"=",
"random_state",
",",
"drop_first",
"=",
"self",
".",
"drop_first",
",",
"diffusion_maps",
"=",
"self",
".",
"diffusion_maps",
",",
"diffusion_time",
"=",
"self",
".",
"diffusion_time",
",",
"solver_kwds",
"=",
"self",
".",
"solver_kwds",
")",
"self",
".",
"affinity_matrix_",
"=",
"self",
".",
"geom_",
".",
"affinity_matrix",
"self",
".",
"laplacian_matrix_",
"=",
"self",
".",
"geom_",
".",
"laplacian_matrix",
"self",
".",
"laplacian_matrix_type_",
"=",
"self",
".",
"geom_",
".",
"laplacian_method",
"return",
"self"
] | 45.473684 | 22.947368 |
def write(self, data, debug_info=None):
"""
Write data to YHSM device.
"""
self.num_write_bytes += len(data)
if self.debug:
if not debug_info:
debug_info = str(len(data))
sys.stderr.write("%s: WRITE %s:\n%s\n" %(
self.__class__.__name__,
debug_info,
pyhsm.util.hexdump(data)
))
return self.ser.write(data) | [
"def",
"write",
"(",
"self",
",",
"data",
",",
"debug_info",
"=",
"None",
")",
":",
"self",
".",
"num_write_bytes",
"+=",
"len",
"(",
"data",
")",
"if",
"self",
".",
"debug",
":",
"if",
"not",
"debug_info",
":",
"debug_info",
"=",
"str",
"(",
"len",
"(",
"data",
")",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"%s: WRITE %s:\\n%s\\n\"",
"%",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"debug_info",
",",
"pyhsm",
".",
"util",
".",
"hexdump",
"(",
"data",
")",
")",
")",
"return",
"self",
".",
"ser",
".",
"write",
"(",
"data",
")"
] | 32.857143 | 6.571429 |
def get_filelikeobject(filename: str = None,
                       blob: bytes = None) -> BinaryIO:
    """
    Open a file-like object from exactly one of two sources.
    Guard the use of this function with ``with``.
    Args:
        filename: path to open in binary-read mode
        blob: in-memory ``bytes`` object to wrap
    Returns:
        a :class:`BinaryIO` object
    Raises:
        ValueError: if neither or both sources are given
    """
    have_name = bool(filename)
    have_blob = bool(blob)
    if not (have_name or have_blob):
        raise ValueError("no filename and no blob")
    if have_name and have_blob:
        raise ValueError("specify either filename or blob")
    if have_name:
        return open(filename, 'rb')
    return io.BytesIO(blob)
"def",
"get_filelikeobject",
"(",
"filename",
":",
"str",
"=",
"None",
",",
"blob",
":",
"bytes",
"=",
"None",
")",
"->",
"BinaryIO",
":",
"if",
"not",
"filename",
"and",
"not",
"blob",
":",
"raise",
"ValueError",
"(",
"\"no filename and no blob\"",
")",
"if",
"filename",
"and",
"blob",
":",
"raise",
"ValueError",
"(",
"\"specify either filename or blob\"",
")",
"if",
"filename",
":",
"return",
"open",
"(",
"filename",
",",
"'rb'",
")",
"else",
":",
"return",
"io",
".",
"BytesIO",
"(",
"blob",
")"
] | 27.863636 | 17.045455 |
def load_pyproject_toml(
    use_pep517, # type: Optional[bool]
    pyproject_toml, # type: str
    setup_py, # type: str
    req_name # type: str
):
    # type: (...) -> Optional[Tuple[List[str], str, List[str]]]
    """Load the pyproject.toml file.
    Parameters:
        use_pep517 - Has the user requested PEP 517 processing? None
            means the user hasn't explicitly specified.
        pyproject_toml - Location of the project's pyproject.toml file
        setup_py - Location of the project's setup.py file
        req_name - The name of the requirement we're processing (for
            error reporting)
    Returns:
        None if we should use the legacy code path, otherwise a tuple
        (
            requirements from pyproject.toml,
            name of PEP 517 backend,
            requirements we should check are installed after setting
                up the build environment
        )
    """
    # Probe the filesystem once for both marker files.
    has_pyproject = os.path.isfile(pyproject_toml)
    has_setup = os.path.isfile(setup_py)
    if has_pyproject:
        with io.open(pyproject_toml, encoding="utf-8") as f:
            pp_toml = pytoml.load(f)
        # build_system is the parsed [build-system] table, or None if absent.
        build_system = pp_toml.get("build-system")
    else:
        build_system = None
    # The following cases must use PEP 517
    # We check for use_pep517 being non-None and falsey because that means
    # the user explicitly requested --no-use-pep517. The value 0 as
    # opposed to False can occur when the value is provided via an
    # environment variable or config file option (due to the quirk of
    # strtobool() returning an integer in pip's configuration code).
    if has_pyproject and not has_setup:
        # No setup.py: legacy processing is impossible, so PEP 517 is forced.
        if use_pep517 is not None and not use_pep517:
            raise InstallationError(
                "Disabling PEP 517 processing is invalid: "
                "project does not have a setup.py"
            )
        use_pep517 = True
    elif build_system and "build-backend" in build_system:
        # An explicit build backend also forces PEP 517 processing.
        if use_pep517 is not None and not use_pep517:
            raise InstallationError(
                "Disabling PEP 517 processing is invalid: "
                "project specifies a build backend of {} "
                "in pyproject.toml".format(
                    build_system["build-backend"]
                )
            )
        use_pep517 = True
    # If we haven't worked out whether to use PEP 517 yet,
    # and the user hasn't explicitly stated a preference,
    # we do so if the project has a pyproject.toml file.
    elif use_pep517 is None:
        use_pep517 = has_pyproject
    # At this point, we know whether we're going to use PEP 517.
    assert use_pep517 is not None
    # If we're using the legacy code path, there is nothing further
    # for us to do here.
    if not use_pep517:
        return None
    if build_system is None:
        # Either the user has a pyproject.toml with no build-system
        # section, or the user has no pyproject.toml, but has opted in
        # explicitly via --use-pep517.
        # In the absence of any explicit backend specification, we
        # assume the setuptools backend that most closely emulates the
        # traditional direct setup.py execution, and require wheel and
        # a version of setuptools that supports that backend.
        build_system = {
            "requires": ["setuptools>=40.8.0", "wheel"],
            "build-backend": "setuptools.build_meta:__legacy__",
        }
    # If we're using PEP 517, we have build system information (either
    # from pyproject.toml, or defaulted by the code above).
    # Note that at this point, we do not know if the user has actually
    # specified a backend, though.
    assert build_system is not None
    # Ensure that the build-system section in pyproject.toml conforms
    # to PEP 518.
    error_template = (
        "{package} has a pyproject.toml file that does not comply "
        "with PEP 518: {reason}"
    )
    # Specifying the build-system table but not the requires key is invalid
    if "requires" not in build_system:
        raise InstallationError(
            error_template.format(package=req_name, reason=(
                "it has a 'build-system' table but not "
                "'build-system.requires' which is mandatory in the table"
            ))
        )
    # Error out if requires is not a list of strings
    requires = build_system["requires"]
    if not _is_list_of_str(requires):
        raise InstallationError(error_template.format(
            package=req_name,
            reason="'build-system.requires' is not a list of strings.",
        ))
    backend = build_system.get("build-backend")
    check = [] # type: List[str]
    if backend is None:
        # If the user didn't specify a backend, we assume they want to use
        # the setuptools backend. But we can't be sure they have included
        # a version of setuptools which supplies the backend, or wheel
        # (which is needed by the backend) in their requirements. So we
        # make a note to check that those requirements are present once
        # we have set up the environment.
        # This is quite a lot of work to check for a very specific case. But
        # the problem is, that case is potentially quite common - projects that
        # adopted PEP 518 early for the ability to specify requirements to
        # execute setup.py, but never considered needing to mention the build
        # tools themselves. The original PEP 518 code had a similar check (but
        # implemented in a different way).
        backend = "setuptools.build_meta:__legacy__"
        check = ["setuptools>=40.8.0", "wheel"]
    return (requires, backend, check)
"def",
"load_pyproject_toml",
"(",
"use_pep517",
",",
"# type: Optional[bool]",
"pyproject_toml",
",",
"# type: str",
"setup_py",
",",
"# type: str",
"req_name",
"# type: str",
")",
":",
"# type: (...) -> Optional[Tuple[List[str], str, List[str]]]",
"has_pyproject",
"=",
"os",
".",
"path",
".",
"isfile",
"(",
"pyproject_toml",
")",
"has_setup",
"=",
"os",
".",
"path",
".",
"isfile",
"(",
"setup_py",
")",
"if",
"has_pyproject",
":",
"with",
"io",
".",
"open",
"(",
"pyproject_toml",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"f",
":",
"pp_toml",
"=",
"pytoml",
".",
"load",
"(",
"f",
")",
"build_system",
"=",
"pp_toml",
".",
"get",
"(",
"\"build-system\"",
")",
"else",
":",
"build_system",
"=",
"None",
"# The following cases must use PEP 517",
"# We check for use_pep517 being non-None and falsey because that means",
"# the user explicitly requested --no-use-pep517. The value 0 as",
"# opposed to False can occur when the value is provided via an",
"# environment variable or config file option (due to the quirk of",
"# strtobool() returning an integer in pip's configuration code).",
"if",
"has_pyproject",
"and",
"not",
"has_setup",
":",
"if",
"use_pep517",
"is",
"not",
"None",
"and",
"not",
"use_pep517",
":",
"raise",
"InstallationError",
"(",
"\"Disabling PEP 517 processing is invalid: \"",
"\"project does not have a setup.py\"",
")",
"use_pep517",
"=",
"True",
"elif",
"build_system",
"and",
"\"build-backend\"",
"in",
"build_system",
":",
"if",
"use_pep517",
"is",
"not",
"None",
"and",
"not",
"use_pep517",
":",
"raise",
"InstallationError",
"(",
"\"Disabling PEP 517 processing is invalid: \"",
"\"project specifies a build backend of {} \"",
"\"in pyproject.toml\"",
".",
"format",
"(",
"build_system",
"[",
"\"build-backend\"",
"]",
")",
")",
"use_pep517",
"=",
"True",
"# If we haven't worked out whether to use PEP 517 yet,",
"# and the user hasn't explicitly stated a preference,",
"# we do so if the project has a pyproject.toml file.",
"elif",
"use_pep517",
"is",
"None",
":",
"use_pep517",
"=",
"has_pyproject",
"# At this point, we know whether we're going to use PEP 517.",
"assert",
"use_pep517",
"is",
"not",
"None",
"# If we're using the legacy code path, there is nothing further",
"# for us to do here.",
"if",
"not",
"use_pep517",
":",
"return",
"None",
"if",
"build_system",
"is",
"None",
":",
"# Either the user has a pyproject.toml with no build-system",
"# section, or the user has no pyproject.toml, but has opted in",
"# explicitly via --use-pep517.",
"# In the absence of any explicit backend specification, we",
"# assume the setuptools backend that most closely emulates the",
"# traditional direct setup.py execution, and require wheel and",
"# a version of setuptools that supports that backend.",
"build_system",
"=",
"{",
"\"requires\"",
":",
"[",
"\"setuptools>=40.8.0\"",
",",
"\"wheel\"",
"]",
",",
"\"build-backend\"",
":",
"\"setuptools.build_meta:__legacy__\"",
",",
"}",
"# If we're using PEP 517, we have build system information (either",
"# from pyproject.toml, or defaulted by the code above).",
"# Note that at this point, we do not know if the user has actually",
"# specified a backend, though.",
"assert",
"build_system",
"is",
"not",
"None",
"# Ensure that the build-system section in pyproject.toml conforms",
"# to PEP 518.",
"error_template",
"=",
"(",
"\"{package} has a pyproject.toml file that does not comply \"",
"\"with PEP 518: {reason}\"",
")",
"# Specifying the build-system table but not the requires key is invalid",
"if",
"\"requires\"",
"not",
"in",
"build_system",
":",
"raise",
"InstallationError",
"(",
"error_template",
".",
"format",
"(",
"package",
"=",
"req_name",
",",
"reason",
"=",
"(",
"\"it has a 'build-system' table but not \"",
"\"'build-system.requires' which is mandatory in the table\"",
")",
")",
")",
"# Error out if requires is not a list of strings",
"requires",
"=",
"build_system",
"[",
"\"requires\"",
"]",
"if",
"not",
"_is_list_of_str",
"(",
"requires",
")",
":",
"raise",
"InstallationError",
"(",
"error_template",
".",
"format",
"(",
"package",
"=",
"req_name",
",",
"reason",
"=",
"\"'build-system.requires' is not a list of strings.\"",
",",
")",
")",
"backend",
"=",
"build_system",
".",
"get",
"(",
"\"build-backend\"",
")",
"check",
"=",
"[",
"]",
"# type: List[str]",
"if",
"backend",
"is",
"None",
":",
"# If the user didn't specify a backend, we assume they want to use",
"# the setuptools backend. But we can't be sure they have included",
"# a version of setuptools which supplies the backend, or wheel",
"# (which is needed by the backend) in their requirements. So we",
"# make a note to check that those requirements are present once",
"# we have set up the environment.",
"# This is quite a lot of work to check for a very specific case. But",
"# the problem is, that case is potentially quite common - projects that",
"# adopted PEP 518 early for the ability to specify requirements to",
"# execute setup.py, but never considered needing to mention the build",
"# tools themselves. The original PEP 518 code had a similar check (but",
"# implemented in a different way).",
"backend",
"=",
"\"setuptools.build_meta:__legacy__\"",
"check",
"=",
"[",
"\"setuptools>=40.8.0\"",
",",
"\"wheel\"",
"]",
"return",
"(",
"requires",
",",
"backend",
",",
"check",
")"
] | 40.620438 | 20.948905 |
def from_client(cls, client):
    """
    Constructs a configuration object from an existing client instance. If the client has already been created with
    a configuration object, returns that instance.
    :param client: Client object to derive the configuration from.
    :type client: docker.client.Client
    :return: ClientConfiguration
    """
    # Sentinel distinguishes "attribute absent" from "attribute set to None".
    _missing = object()
    existing = getattr(client, 'client_configuration', _missing)
    if existing is not _missing:
        return existing
    kwargs = {'client': client}
    # Copy over every recognised constructor argument the client exposes.
    kwargs.update(
        (name, getattr(client, name))
        for name in cls.init_kwargs
        if hasattr(client, name)
    )
    if hasattr(client, 'api_version'):
        kwargs['version'] = client.api_version
    return cls(**kwargs)
"def",
"from_client",
"(",
"cls",
",",
"client",
")",
":",
"if",
"hasattr",
"(",
"client",
",",
"'client_configuration'",
")",
":",
"return",
"client",
".",
"client_configuration",
"kwargs",
"=",
"{",
"'client'",
":",
"client",
"}",
"for",
"attr",
"in",
"cls",
".",
"init_kwargs",
":",
"if",
"hasattr",
"(",
"client",
",",
"attr",
")",
":",
"kwargs",
"[",
"attr",
"]",
"=",
"getattr",
"(",
"client",
",",
"attr",
")",
"if",
"hasattr",
"(",
"client",
",",
"'api_version'",
")",
":",
"kwargs",
"[",
"'version'",
"]",
"=",
"client",
".",
"api_version",
"return",
"cls",
"(",
"*",
"*",
"kwargs",
")"
] | 41.611111 | 13.611111 |
def set_contributor_details(self, contdetails):
    """ Sets 'contributor_details' parameter used to enhance the \
    contributors element of the status response to include \
    the screen_name of the contributor. By default only \
    the user_id of the contributor is included
    :param contdetails: Boolean triggering the usage of the parameter
    :raises: TwitterSearchException
    """
    if not isinstance(contdetails, bool):
        raise TwitterSearchException(1008)
    # The API expects the literal strings 'true'/'false', not Python bools.
    flag = 'true' if contdetails else 'false'
    self.arguments.update({'contributor_details': flag})
"def",
"set_contributor_details",
"(",
"self",
",",
"contdetails",
")",
":",
"if",
"not",
"isinstance",
"(",
"contdetails",
",",
"bool",
")",
":",
"raise",
"TwitterSearchException",
"(",
"1008",
")",
"self",
".",
"arguments",
".",
"update",
"(",
"{",
"'contributor_details'",
":",
"'true'",
"if",
"contdetails",
"else",
"'false'",
"}",
")"
] | 46.8 | 17.533333 |
def map(self, key, value):
    """Track the image with the smallest L2 distance to the target image.

    Args:
        key: Image name
        value: Image as jpeg byte data

    NOTE(review): despite the original docstring, this mapper yields
    nothing; it only accumulates the running minimum on ``self``
    (``min_dist``/``min_key``/``min_value``) — presumably emitted later
    in a close()/reduce step. Confirm against the surrounding job.
    """
    try:
        image = imfeat.resize_image(
            imfeat.image_fromstring(
                value, {'type': 'numpy', 'mode': 'bgr', 'dtype': 'uint8'}),
            100, 100)
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception while still treating
        # any decode/resize failure as a data error.
        hadoopy.counter('DATA_ERRORS', 'ImageLoadError')
        return
    # Distance metric: squared L2 distance between the two images.
    diff = image - self.target_image
    dist = np.sum(diff * diff)
    # Keep only the closest image seen so far.
    if dist < self.min_dist:
        self.min_dist = dist
        self.min_key = key
        self.min_value = value
"def",
"map",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"try",
":",
"image",
"=",
"imfeat",
".",
"resize_image",
"(",
"imfeat",
".",
"image_fromstring",
"(",
"value",
",",
"{",
"'type'",
":",
"'numpy'",
",",
"'mode'",
":",
"'bgr'",
",",
"'dtype'",
":",
"'uint8'",
"}",
")",
",",
"100",
",",
"100",
")",
"except",
":",
"hadoopy",
".",
"counter",
"(",
"'DATA_ERRORS'",
",",
"'ImageLoadError'",
")",
"return",
"# Distance metric",
"diff",
"=",
"image",
"-",
"self",
".",
"target_image",
"dist",
"=",
"np",
".",
"sum",
"(",
"diff",
"*",
"diff",
")",
"# Output",
"if",
"dist",
"<",
"self",
".",
"min_dist",
":",
"self",
".",
"min_dist",
"=",
"dist",
"self",
".",
"min_key",
"=",
"key",
"self",
".",
"min_value",
"=",
"value"
] | 29.148148 | 17.666667 |
def send_mail(template_name, context, from_email, recipient_list,
              fail_silently=False, auth_user=None, auth_password=None,
              connection=None, **kwargs):
    """
    Render a Django template and send it as a single email message.
    Works like the standard :func:`django.core.mail.send_mail`, except
    that the first two arguments are ``template_name`` and ``context``
    instead of ``subject`` and ``body``; static ``subject``/``body``
    content may still be supplied via keyword arguments.
    Arguments
    ---------
    template_name : str
        Name of the template used to build the message.
    context : dict
        Context used to render the template.
    from_email : str
        Sender address.
    recipient_list : list
        Recipient addresses.
    Keyword Arguments
    -----------------
    fail_silently : bool
        When False, sending errors raise :exc:`smtplib.SMTPException`.
    auth_user : str
        Optional SMTP username (falls back to ``EMAIL_HOST_USER``).
    auth_password : str
        Optional SMTP password (falls back to ``EMAIL_HOST_PASSWORD``).
    connection : EmailBackend
        Optional email backend; a default backend is created if omitted.
    clean : bool
        Passed through to ``EmailMessage.send``; defaults to True.
    Returns
    -------
    int
        Number of successfully delivered messages (0 or 1).
    """
    if not connection:
        connection = mail.get_connection(username=auth_user,
                                         password=auth_password,
                                         fail_silently=fail_silently)
    clean = kwargs.pop('clean', True)
    message = EmailMessage(template_name, context, from_email,
                           recipient_list, connection=connection, **kwargs)
    return message.send(clean=clean)
"def",
"send_mail",
"(",
"template_name",
",",
"context",
",",
"from_email",
",",
"recipient_list",
",",
"fail_silently",
"=",
"False",
",",
"auth_user",
"=",
"None",
",",
"auth_password",
"=",
"None",
",",
"connection",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"connection",
"=",
"connection",
"or",
"mail",
".",
"get_connection",
"(",
"username",
"=",
"auth_user",
",",
"password",
"=",
"auth_password",
",",
"fail_silently",
"=",
"fail_silently",
")",
"clean",
"=",
"kwargs",
".",
"pop",
"(",
"'clean'",
",",
"True",
")",
"return",
"EmailMessage",
"(",
"template_name",
",",
"context",
",",
"from_email",
",",
"recipient_list",
",",
"connection",
"=",
"connection",
",",
"*",
"*",
"kwargs",
")",
".",
"send",
"(",
"clean",
"=",
"clean",
")"
] | 34.234568 | 25.123457 |
def isTransiting(self):
    """ Checks the the istransiting tag to see if the planet transits. Note that this only works as of catalogue
    version ee12343381ae4106fd2db908e25ffc537a2ee98c (11th March 2014) where the istransiting tag was implemented
    """
    try:
        flag = self.params['istransiting']
    except KeyError:
        # Catalogue entries predating the tag are treated as non-transiting.
        return False
    return flag == '1'
"def",
"isTransiting",
"(",
"self",
")",
":",
"try",
":",
"isTransiting",
"=",
"self",
".",
"params",
"[",
"'istransiting'",
"]",
"except",
"KeyError",
":",
"return",
"False",
"if",
"isTransiting",
"==",
"'1'",
":",
"return",
"True",
"else",
":",
"return",
"False"
] | 36.384615 | 21.384615 |
async def connect(self) -> Connection:
    """
    Get or create a connection (lazily, cached on ``self._conn``).
    """
    if self._conn is None:
        # Gather the connection parameters once, then open the connection.
        conn_kwargs = dict(host=self._host,
                           port=self._port,
                           db=self._db,
                           auth_key=self._auth_key,
                           user=self._user,
                           password=self._password,
                           timeout=self._timeout,
                           ssl=self._ssl,
                           io_loop=self._loop)
        self._conn = await r.connect(**conn_kwargs)
    return self._conn
"async",
"def",
"connect",
"(",
"self",
")",
"->",
"Connection",
":",
"if",
"self",
".",
"_conn",
"is",
"None",
":",
"self",
".",
"_conn",
"=",
"await",
"r",
".",
"connect",
"(",
"host",
"=",
"self",
".",
"_host",
",",
"port",
"=",
"self",
".",
"_port",
",",
"db",
"=",
"self",
".",
"_db",
",",
"auth_key",
"=",
"self",
".",
"_auth_key",
",",
"user",
"=",
"self",
".",
"_user",
",",
"password",
"=",
"self",
".",
"_password",
",",
"timeout",
"=",
"self",
".",
"_timeout",
",",
"ssl",
"=",
"self",
".",
"_ssl",
",",
"io_loop",
"=",
"self",
".",
"_loop",
")",
"return",
"self",
".",
"_conn"
] | 45.466667 | 13.6 |
def regional_maximum(image, mask = None, structure=None, ties_are_ok=False):
    '''Return a binary mask containing only points that are regional maxima
    image - image to be transformed
    mask - mask of relevant pixels
    structure - binary structure giving the neighborhood and connectivity
                in which to search for maxima. Default is 8-connected.
    ties_are_ok - if this is true, then adjacent points of the same magnitude
                  are rated as maxima.
    Find locations for which all neighbors as defined by the structure have
    lower values. The algorithm selects only one of a set of adjacent locations
    with identical values, first using a distance transform to find the
    innermost location, then, among equals, selected randomly.
    A location cannot be a local maximum if it is touching the edge or a
    masked pixel.
    '''
    global eight_connect
    if not ties_are_ok:
        #
        # Get an an initial pass with the ties.
        #
        result = regional_maximum(image, mask, structure, True)
        if not np.any(result):
            return result
        distance = scind.distance_transform_edt(result)
        #
        # Rank-order the distances and then add a randomizing factor
        # to break ties for pixels equidistant from the background.
        # Pick the best value within a contiguous region
        #
        labels, label_count = scind.label(result, eight_connect)
        # Fixed seed keeps the random tie-breaking deterministic across runs.
        np.random.seed(0)
        ro_distance = rank_order(distance)[0].astype(float)
        # np.product was removed in NumPy 2.0; np.prod is the supported name.
        count = np.prod(ro_distance.shape)
        ro_distance.flat += (np.random.permutation(count).astype(float) /
                             float(count))
        positions = scind.maximum_position(ro_distance, labels,
                                           np.arange(label_count)+1)
        positions = np.array(positions, np.uint32)
        result = np.zeros(image.shape, bool)
        if positions.ndim == 1:
            result[positions[0],positions[1]] = True
        else:
            result[positions[:,0],positions[:,1]] = True
        return result
    result = np.ones(image.shape,bool)
    if structure is None:
        structure = scind.generate_binary_structure(image.ndim, image.ndim)
    #
    # The edges of the image are losers because they are touching undefined
    # points. Construct a big mask that represents the edges.
    #
    big_mask = np.zeros(np.array(image.shape) + np.array(structure.shape), bool)
    structure_half_shape = np.array(structure.shape)//2
    big_mask[structure_half_shape[0]:structure_half_shape[0]+image.shape[0],
             structure_half_shape[1]:structure_half_shape[1]+image.shape[1]]=\
             mask if mask is not None else True
    for i in range(structure.shape[0]):
        off_i = i-structure_half_shape[0]
        for j in range(structure.shape[1]):
            if i == structure_half_shape[0] and j == structure_half_shape[1]:
                continue
            off_j = j-structure_half_shape[1]
            if structure[i,j]:
                result = np.logical_and(result, big_mask[i:i+image.shape[0],
                                                         j:j+image.shape[1]])
                #
                # Get the boundaries of the source image and the offset
                # image so we can make similarly shaped, but offset slices
                #
                src_i_min = max(0,-off_i)
                src_i_max = min(image.shape[0], image.shape[0]-off_i)
                off_i_min = max(0,off_i)
                off_i_max = min(image.shape[0], image.shape[0]+off_i)
                src_j_min = max(0,-off_j)
                src_j_max = min(image.shape[1], image.shape[1]-off_j)
                off_j_min = max(0,off_j)
                off_j_max = min(image.shape[1], image.shape[1]+off_j)
                # A pixel strictly smaller than this neighbor loses.
                min_mask = (image[src_i_min:src_i_max,
                                  src_j_min:src_j_max] <
                            image[off_i_min:off_i_max,
                                  off_j_min:off_j_max])
                result[src_i_min:src_i_max,
                       src_j_min:src_j_max][min_mask] = False
    return result
"def",
"regional_maximum",
"(",
"image",
",",
"mask",
"=",
"None",
",",
"structure",
"=",
"None",
",",
"ties_are_ok",
"=",
"False",
")",
":",
"global",
"eight_connect",
"if",
"not",
"ties_are_ok",
":",
"#",
"# Get an an initial pass with the ties.",
"#",
"result",
"=",
"regional_maximum",
"(",
"image",
",",
"mask",
",",
"structure",
",",
"True",
")",
"if",
"not",
"np",
".",
"any",
"(",
"result",
")",
":",
"return",
"result",
"distance",
"=",
"scind",
".",
"distance_transform_edt",
"(",
"result",
")",
"#",
"# Rank-order the distances and then add a randomizing factor",
"# to break ties for pixels equidistant from the background.",
"# Pick the best value within a contiguous region",
"#",
"labels",
",",
"label_count",
"=",
"scind",
".",
"label",
"(",
"result",
",",
"eight_connect",
")",
"np",
".",
"random",
".",
"seed",
"(",
"0",
")",
"ro_distance",
"=",
"rank_order",
"(",
"distance",
")",
"[",
"0",
"]",
".",
"astype",
"(",
"float",
")",
"count",
"=",
"np",
".",
"product",
"(",
"ro_distance",
".",
"shape",
")",
"ro_distance",
".",
"flat",
"+=",
"(",
"np",
".",
"random",
".",
"permutation",
"(",
"count",
")",
".",
"astype",
"(",
"float",
")",
"/",
"float",
"(",
"count",
")",
")",
"positions",
"=",
"scind",
".",
"maximum_position",
"(",
"ro_distance",
",",
"labels",
",",
"np",
".",
"arange",
"(",
"label_count",
")",
"+",
"1",
")",
"positions",
"=",
"np",
".",
"array",
"(",
"positions",
",",
"np",
".",
"uint32",
")",
"result",
"=",
"np",
".",
"zeros",
"(",
"image",
".",
"shape",
",",
"bool",
")",
"if",
"positions",
".",
"ndim",
"==",
"1",
":",
"result",
"[",
"positions",
"[",
"0",
"]",
",",
"positions",
"[",
"1",
"]",
"]",
"=",
"True",
"else",
":",
"result",
"[",
"positions",
"[",
":",
",",
"0",
"]",
",",
"positions",
"[",
":",
",",
"1",
"]",
"]",
"=",
"True",
"return",
"result",
"result",
"=",
"np",
".",
"ones",
"(",
"image",
".",
"shape",
",",
"bool",
")",
"if",
"structure",
"is",
"None",
":",
"structure",
"=",
"scind",
".",
"generate_binary_structure",
"(",
"image",
".",
"ndim",
",",
"image",
".",
"ndim",
")",
"#",
"# The edges of the image are losers because they are touching undefined",
"# points. Construct a big mask that represents the edges.",
"#",
"big_mask",
"=",
"np",
".",
"zeros",
"(",
"np",
".",
"array",
"(",
"image",
".",
"shape",
")",
"+",
"np",
".",
"array",
"(",
"structure",
".",
"shape",
")",
",",
"bool",
")",
"structure_half_shape",
"=",
"np",
".",
"array",
"(",
"structure",
".",
"shape",
")",
"//",
"2",
"big_mask",
"[",
"structure_half_shape",
"[",
"0",
"]",
":",
"structure_half_shape",
"[",
"0",
"]",
"+",
"image",
".",
"shape",
"[",
"0",
"]",
",",
"structure_half_shape",
"[",
"1",
"]",
":",
"structure_half_shape",
"[",
"1",
"]",
"+",
"image",
".",
"shape",
"[",
"1",
"]",
"]",
"=",
"mask",
"if",
"mask",
"is",
"not",
"None",
"else",
"True",
"for",
"i",
"in",
"range",
"(",
"structure",
".",
"shape",
"[",
"0",
"]",
")",
":",
"off_i",
"=",
"i",
"-",
"structure_half_shape",
"[",
"0",
"]",
"for",
"j",
"in",
"range",
"(",
"structure",
".",
"shape",
"[",
"1",
"]",
")",
":",
"if",
"i",
"==",
"structure_half_shape",
"[",
"0",
"]",
"and",
"j",
"==",
"structure_half_shape",
"[",
"1",
"]",
":",
"continue",
"off_j",
"=",
"j",
"-",
"structure_half_shape",
"[",
"1",
"]",
"if",
"structure",
"[",
"i",
",",
"j",
"]",
":",
"result",
"=",
"np",
".",
"logical_and",
"(",
"result",
",",
"big_mask",
"[",
"i",
":",
"i",
"+",
"image",
".",
"shape",
"[",
"0",
"]",
",",
"j",
":",
"j",
"+",
"image",
".",
"shape",
"[",
"1",
"]",
"]",
")",
"#",
"# Get the boundaries of the source image and the offset",
"# image so we can make similarly shaped, but offset slices",
"#",
"src_i_min",
"=",
"max",
"(",
"0",
",",
"-",
"off_i",
")",
"src_i_max",
"=",
"min",
"(",
"image",
".",
"shape",
"[",
"0",
"]",
",",
"image",
".",
"shape",
"[",
"0",
"]",
"-",
"off_i",
")",
"off_i_min",
"=",
"max",
"(",
"0",
",",
"off_i",
")",
"off_i_max",
"=",
"min",
"(",
"image",
".",
"shape",
"[",
"0",
"]",
",",
"image",
".",
"shape",
"[",
"0",
"]",
"+",
"off_i",
")",
"src_j_min",
"=",
"max",
"(",
"0",
",",
"-",
"off_j",
")",
"src_j_max",
"=",
"min",
"(",
"image",
".",
"shape",
"[",
"1",
"]",
",",
"image",
".",
"shape",
"[",
"1",
"]",
"-",
"off_j",
")",
"off_j_min",
"=",
"max",
"(",
"0",
",",
"off_j",
")",
"off_j_max",
"=",
"min",
"(",
"image",
".",
"shape",
"[",
"1",
"]",
",",
"image",
".",
"shape",
"[",
"1",
"]",
"+",
"off_j",
")",
"min_mask",
"=",
"(",
"image",
"[",
"src_i_min",
":",
"src_i_max",
",",
"src_j_min",
":",
"src_j_max",
"]",
"<",
"image",
"[",
"off_i_min",
":",
"off_i_max",
",",
"off_j_min",
":",
"off_j_max",
"]",
")",
"result",
"[",
"src_i_min",
":",
"src_i_max",
",",
"src_j_min",
":",
"src_j_max",
"]",
"[",
"min_mask",
"]",
"=",
"False",
"return",
"result"
] | 47.218391 | 21.057471 |
def update(self, *args, **kw):
    '''
    Update the dictionary with items and names::
        (items, names, **kw)
        (dict, names, **kw)
        (MIDict, names, **kw)
    Optional positional argument ``names`` is only allowed when ``self.indices``
    is empty (no indices are set yet).
    :raises ValueError: if ``names`` is passed while indices are already set,
        or if the update items' index count does not match the existing one.
    '''
    if len(args) > 1 and self.indices:
        # Bug fix: the two adjacent string literals previously concatenated
        # without a space ("...when theindex names...").
        raise ValueError('Only one positional argument is allowed when the '
                         'index names are already set.')
    if not self.indices:  # empty; init again
        _MI_init(self, *args, **kw)
        return
    d = MIMapping(*args, **kw)
    if not d.indices:
        # Nothing to merge in.
        return
    names = force_list(self.indices.keys())
    if len(d.indices) != len(names):
        raise ValueError('Length of update items (%s) does not match '
                         'length of original items (%s)' %
                         (len(d.indices), len(names)))
    for key in d:
        # use __setitem__() to handle duplicate
        self[key] = d[key]
"def",
"update",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"if",
"len",
"(",
"args",
")",
">",
"1",
"and",
"self",
".",
"indices",
":",
"raise",
"ValueError",
"(",
"'Only one positional argument is allowed when the'",
"'index names are already set.'",
")",
"if",
"not",
"self",
".",
"indices",
":",
"# empty; init again",
"_MI_init",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"return",
"d",
"=",
"MIMapping",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"if",
"not",
"d",
".",
"indices",
":",
"return",
"names",
"=",
"force_list",
"(",
"self",
".",
"indices",
".",
"keys",
"(",
")",
")",
"if",
"len",
"(",
"d",
".",
"indices",
")",
"!=",
"len",
"(",
"names",
")",
":",
"raise",
"ValueError",
"(",
"'Length of update items (%s) does not match '",
"'length of original items (%s)'",
"%",
"(",
"len",
"(",
"d",
".",
"indices",
")",
",",
"len",
"(",
"names",
")",
")",
")",
"for",
"key",
"in",
"d",
":",
"# use __setitem__() to handle duplicate",
"self",
"[",
"key",
"]",
"=",
"d",
"[",
"key",
"]"
] | 32.515152 | 20.818182 |
def read_config(cls, configparser):
    """Read configuration file options.

    Looks up the ``prefixes`` option in the section named after ``cls``
    and returns it as ``{'prefixes': [...]}`` with each entry stripped
    and lower-cased; an absent option yields an empty list.
    """
    section = cls.__name__
    option = "prefixes"
    if configparser.has_option(section, option):
        raw = configparser.get(section, option)
        prefixes = [part.strip().lower() for part in raw.split(",")]
    else:
        prefixes = []
    return {option: prefixes}
"def",
"read_config",
"(",
"cls",
",",
"configparser",
")",
":",
"config",
"=",
"dict",
"(",
")",
"section",
"=",
"cls",
".",
"__name__",
"option",
"=",
"\"prefixes\"",
"if",
"configparser",
".",
"has_option",
"(",
"section",
",",
"option",
")",
":",
"value",
"=",
"configparser",
".",
"get",
"(",
"section",
",",
"option",
")",
"names",
"=",
"[",
"x",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"for",
"x",
"in",
"value",
".",
"split",
"(",
"\",\"",
")",
"]",
"else",
":",
"names",
"=",
"[",
"]",
"config",
"[",
"option",
"]",
"=",
"names",
"return",
"config"
] | 34.75 | 14.083333 |
def render(self, template, **kwargs):
    """Renders the template

    :param template: The template to render.
        The template is actually a file, usually generated by
        :class:`rtcclient.template.Templater.getTemplate` and possibly
        modified by the user. ``description`` and ``title`` keywords are
        mandatory; other fields depend on the workitem type and can be
        listed with :class:`rtcclient.template.Templater.listFields`.
    :param kwargs: keyword values used to fill the template
    :return: the :class:`string` object
    :rtype: string
    """
    try:
        loaded = self.environment.get_template(template)
        return loaded.render(**kwargs)
    except AttributeError:
        # A non-string template name makes the environment lookup fail.
        err_msg = "Invalid value for 'template'"
        self.log.error(err_msg)
        raise exception.BadValue(err_msg)
"def",
"render",
"(",
"self",
",",
"template",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"temp",
"=",
"self",
".",
"environment",
".",
"get_template",
"(",
"template",
")",
"return",
"temp",
".",
"render",
"(",
"*",
"*",
"kwargs",
")",
"except",
"AttributeError",
":",
"err_msg",
"=",
"\"Invalid value for 'template'\"",
"self",
".",
"log",
".",
"error",
"(",
"err_msg",
")",
"raise",
"exception",
".",
"BadValue",
"(",
"err_msg",
")"
] | 37.512821 | 18.282051 |
def get_branch(self, i):
    """Gets a branch associated with leaf i.

    Traces the tree from leaf i up to the root, recording at each level
    the (left, right) sibling pair of hashes along the path.
    :param i: the leaf identifying the branch to retrieve
    :return: a MerkleBranch with one row per tree level
    """
    branch = MerkleBranch(self.order)
    # Convert the leaf index to its position in the flat node array.
    node = i + 2 ** self.order - 1
    for level in range(self.order):
        if self.is_left(node):
            pair = (self.nodes[node], self.nodes[node + 1])
        else:
            pair = (self.nodes[node - 1], self.nodes[node])
        branch.set_row(level, pair)
        node = MerkleTree.get_parent(node)
    return branch
"def",
"get_branch",
"(",
"self",
",",
"i",
")",
":",
"branch",
"=",
"MerkleBranch",
"(",
"self",
".",
"order",
")",
"j",
"=",
"i",
"+",
"2",
"**",
"self",
".",
"order",
"-",
"1",
"for",
"k",
"in",
"range",
"(",
"0",
",",
"self",
".",
"order",
")",
":",
"if",
"(",
"self",
".",
"is_left",
"(",
"j",
")",
")",
":",
"branch",
".",
"set_row",
"(",
"k",
",",
"(",
"self",
".",
"nodes",
"[",
"j",
"]",
",",
"self",
".",
"nodes",
"[",
"j",
"+",
"1",
"]",
")",
")",
"else",
":",
"branch",
".",
"set_row",
"(",
"k",
",",
"(",
"self",
".",
"nodes",
"[",
"j",
"-",
"1",
"]",
",",
"self",
".",
"nodes",
"[",
"j",
"]",
")",
")",
"j",
"=",
"MerkleTree",
".",
"get_parent",
"(",
"j",
")",
"return",
"branch"
] | 37.888889 | 18.944444 |
def writeObject(self, obj, is_proxy=False):
    """
    Writes an object to the stream.

    Already-seen objects are written as references; otherwise the class
    definition is written (inline or by reference) followed by the
    object's attributes according to the class encoding
    (external / static / dynamic).

    NOTE(review): uses ``dict.iteritems`` and native ``str`` markers, so
    this code path appears Python 2 specific -- confirm before porting.
    """
    if self.use_proxies and not is_proxy:
        # Convert to a proxy first; writeProxy re-enters with is_proxy=True.
        self.writeProxy(obj)
        return
    self.stream.write(TYPE_OBJECT)
    ref = self.context.getObjectReference(obj)
    if ref != -1:
        # Object already serialised: emit a reference (low bit clear).
        self._writeInteger(ref << 1)
        return
    self.context.addObject(obj)
    # object is not referenced, serialise it
    kls = obj.__class__
    definition = self.context.getClass(kls)
    alias = None
    class_ref = False # if the class definition is a reference
    if definition:
        class_ref = True
        alias = definition.alias
    else:
        alias = self.context.getClassAlias(kls)
        definition = ClassDefinition(alias)
        self.context.addClass(definition, alias.klass)
    if class_ref:
        self.stream.write(definition.reference)
    else:
        # Inline class definition: pack attribute count, encoding and
        # reference bits into a single integer header.
        ref = 0
        if definition.encoding != ObjectEncoding.EXTERNAL:
            ref += definition.attr_len << 4
        final_reference = encode_int(ref | definition.encoding << 2 |
                                     REFERENCE_BIT << 1 | REFERENCE_BIT)
        self.stream.write(final_reference)
        definition.reference = encode_int(
            definition.reference << 2 | REFERENCE_BIT)
        if alias.anonymous:
            self.stream.write('\x01')
        else:
            self.serialiseString(alias.alias)
        # work out what the final reference for the class will be.
        # this is okay because the next time an object of the same
        # class is encoded, class_ref will be True and never get here
        # again.
    if alias.external:
        # Externalizable classes serialise their own body.
        obj.__writeamf__(DataOutput(self))
        return
    attrs = alias.getEncodableAttributes(obj, codec=self)
    if alias.static_attrs:
        if not class_ref:
            # First occurrence of this class: write the static attr names.
            [self.serialiseString(attr) for attr in alias.static_attrs]
        for attr in alias.static_attrs:
            value = attrs.pop(attr)
            self.writeElement(value)
    if definition.encoding == ObjectEncoding.STATIC:
        return
    if definition.encoding == ObjectEncoding.DYNAMIC:
        if attrs:
            for attr, value in attrs.iteritems():
                if type(attr) in python.int_types:
                    attr = str(attr)
                self.serialiseString(attr)
                self.writeElement(value)
        # '\x01' terminates the dynamic attribute section.
        self.stream.write('\x01')
"def",
"writeObject",
"(",
"self",
",",
"obj",
",",
"is_proxy",
"=",
"False",
")",
":",
"if",
"self",
".",
"use_proxies",
"and",
"not",
"is_proxy",
":",
"self",
".",
"writeProxy",
"(",
"obj",
")",
"return",
"self",
".",
"stream",
".",
"write",
"(",
"TYPE_OBJECT",
")",
"ref",
"=",
"self",
".",
"context",
".",
"getObjectReference",
"(",
"obj",
")",
"if",
"ref",
"!=",
"-",
"1",
":",
"self",
".",
"_writeInteger",
"(",
"ref",
"<<",
"1",
")",
"return",
"self",
".",
"context",
".",
"addObject",
"(",
"obj",
")",
"# object is not referenced, serialise it",
"kls",
"=",
"obj",
".",
"__class__",
"definition",
"=",
"self",
".",
"context",
".",
"getClass",
"(",
"kls",
")",
"alias",
"=",
"None",
"class_ref",
"=",
"False",
"# if the class definition is a reference",
"if",
"definition",
":",
"class_ref",
"=",
"True",
"alias",
"=",
"definition",
".",
"alias",
"else",
":",
"alias",
"=",
"self",
".",
"context",
".",
"getClassAlias",
"(",
"kls",
")",
"definition",
"=",
"ClassDefinition",
"(",
"alias",
")",
"self",
".",
"context",
".",
"addClass",
"(",
"definition",
",",
"alias",
".",
"klass",
")",
"if",
"class_ref",
":",
"self",
".",
"stream",
".",
"write",
"(",
"definition",
".",
"reference",
")",
"else",
":",
"ref",
"=",
"0",
"if",
"definition",
".",
"encoding",
"!=",
"ObjectEncoding",
".",
"EXTERNAL",
":",
"ref",
"+=",
"definition",
".",
"attr_len",
"<<",
"4",
"final_reference",
"=",
"encode_int",
"(",
"ref",
"|",
"definition",
".",
"encoding",
"<<",
"2",
"|",
"REFERENCE_BIT",
"<<",
"1",
"|",
"REFERENCE_BIT",
")",
"self",
".",
"stream",
".",
"write",
"(",
"final_reference",
")",
"definition",
".",
"reference",
"=",
"encode_int",
"(",
"definition",
".",
"reference",
"<<",
"2",
"|",
"REFERENCE_BIT",
")",
"if",
"alias",
".",
"anonymous",
":",
"self",
".",
"stream",
".",
"write",
"(",
"'\\x01'",
")",
"else",
":",
"self",
".",
"serialiseString",
"(",
"alias",
".",
"alias",
")",
"# work out what the final reference for the class will be.",
"# this is okay because the next time an object of the same",
"# class is encoded, class_ref will be True and never get here",
"# again.",
"if",
"alias",
".",
"external",
":",
"obj",
".",
"__writeamf__",
"(",
"DataOutput",
"(",
"self",
")",
")",
"return",
"attrs",
"=",
"alias",
".",
"getEncodableAttributes",
"(",
"obj",
",",
"codec",
"=",
"self",
")",
"if",
"alias",
".",
"static_attrs",
":",
"if",
"not",
"class_ref",
":",
"[",
"self",
".",
"serialiseString",
"(",
"attr",
")",
"for",
"attr",
"in",
"alias",
".",
"static_attrs",
"]",
"for",
"attr",
"in",
"alias",
".",
"static_attrs",
":",
"value",
"=",
"attrs",
".",
"pop",
"(",
"attr",
")",
"self",
".",
"writeElement",
"(",
"value",
")",
"if",
"definition",
".",
"encoding",
"==",
"ObjectEncoding",
".",
"STATIC",
":",
"return",
"if",
"definition",
".",
"encoding",
"==",
"ObjectEncoding",
".",
"DYNAMIC",
":",
"if",
"attrs",
":",
"for",
"attr",
",",
"value",
"in",
"attrs",
".",
"iteritems",
"(",
")",
":",
"if",
"type",
"(",
"attr",
")",
"in",
"python",
".",
"int_types",
":",
"attr",
"=",
"str",
"(",
"attr",
")",
"self",
".",
"serialiseString",
"(",
"attr",
")",
"self",
".",
"writeElement",
"(",
"value",
")",
"self",
".",
"stream",
".",
"write",
"(",
"'\\x01'",
")"
] | 28.477778 | 20.9 |
def _other_to_lon(func):
    """Decorator for Longitude operators: casts the second operand to
    Longitude before applying the wrapped operator."""
    def func_other_to_lon(obj, other):
        casted = _maybe_cast_to_lon(other)
        return func(obj, casted)
    return func_other_to_lon
"def",
"_other_to_lon",
"(",
"func",
")",
":",
"def",
"func_other_to_lon",
"(",
"obj",
",",
"other",
")",
":",
"return",
"func",
"(",
"obj",
",",
"_maybe_cast_to_lon",
"(",
"other",
")",
")",
"return",
"func_other_to_lon"
] | 42.4 | 8.2 |
def get_top_war_clans(self, country_key='', **params: keys):
    """Get a list of top clans by war

    location_id: Optional[str] = ''
        A location ID or '' (global).
        See https://github.com/RoyaleAPI/cr-api-data/blob/master/json/regions.json
        for a list of acceptable location IDs
    \*\*keys: Optional[list] = None
        Filter which keys should be included in the response
    \*\*exclude: Optional[list] = None
        Filter which keys should be excluded from the response
    \*\*max: Optional[int] = None
        Limit the number of items returned in the response
    \*\*page: Optional[int] = None
        Works with max, the zero-based page of the items
    \*\*timeout: Optional[int] = None
        Custom timeout that overwrites Client.timeout
    """
    endpoint = '%s/war/%s' % (self.api.TOP, country_key)
    return self._get_model(endpoint, PartialClan, **params)
"def",
"get_top_war_clans",
"(",
"self",
",",
"country_key",
"=",
"''",
",",
"*",
"*",
"params",
":",
"keys",
")",
":",
"url",
"=",
"self",
".",
"api",
".",
"TOP",
"+",
"'/war/'",
"+",
"str",
"(",
"country_key",
")",
"return",
"self",
".",
"_get_model",
"(",
"url",
",",
"PartialClan",
",",
"*",
"*",
"params",
")"
] | 42.521739 | 13.304348 |
def set_font_size(self, size):
    """Convenience method for just changing font size.

    Delegates to ``self.font._set_size`` only when the requested size
    differs from the current one, avoiding a redundant update.

    :param size: the new font size to apply
    """
    # Original used `if ==: pass / else:`; a direct != guard is clearer
    # and behaviorally identical.
    if self.font.font_size != size:
        self.font._set_size(size)
"def",
"set_font_size",
"(",
"self",
",",
"size",
")",
":",
"if",
"self",
".",
"font",
".",
"font_size",
"==",
"size",
":",
"pass",
"else",
":",
"self",
".",
"font",
".",
"_set_size",
"(",
"size",
")"
] | 33.5 | 10.166667 |
def _expectation(p, mean, none, kern, feat, nghp=None):
    """
    Compute the expectation:
        expectation[n] = <x_n K_{x_n, Z}>_p(x_n)
        - K_{.,} :: Linear kernel
    (or the equivalent for MarkovGaussian)

    :return: NxDxM
    """
    cross_cov = expectation(p, (kern, feat), mean)
    return tf.matrix_transpose(cross_cov)
"def",
"_expectation",
"(",
"p",
",",
"mean",
",",
"none",
",",
"kern",
",",
"feat",
",",
"nghp",
"=",
"None",
")",
":",
"return",
"tf",
".",
"matrix_transpose",
"(",
"expectation",
"(",
"p",
",",
"(",
"kern",
",",
"feat",
")",
",",
"mean",
")",
")"
] | 29.8 | 12.6 |
def set_items(self, items):
    """Defer the refresh so the view is updated only once, after all
    items have been added."""
    self._pending_view_refreshes = self._pending_view_refreshes + 1
    timed_call(self._pending_timeout, self._refresh_layout)
"def",
"set_items",
"(",
"self",
",",
"items",
")",
":",
"self",
".",
"_pending_view_refreshes",
"+=",
"1",
"timed_call",
"(",
"self",
".",
"_pending_timeout",
",",
"self",
".",
"_refresh_layout",
")"
] | 34.428571 | 12.714286 |
def _datapaths(self):
"""Returns a simple key-value map for easy access to data paths"""
paths = { }
try:
data = self._config['data']
for k in data:
paths[k] = data[k]['path']
except KeyError as e:
raise AitConfigMissing(e.message)
except Exception as e:
raise AitConfigError('Error reading data paths: %s' % e)
return paths | [
"def",
"_datapaths",
"(",
"self",
")",
":",
"paths",
"=",
"{",
"}",
"try",
":",
"data",
"=",
"self",
".",
"_config",
"[",
"'data'",
"]",
"for",
"k",
"in",
"data",
":",
"paths",
"[",
"k",
"]",
"=",
"data",
"[",
"k",
"]",
"[",
"'path'",
"]",
"except",
"KeyError",
"as",
"e",
":",
"raise",
"AitConfigMissing",
"(",
"e",
".",
"message",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"AitConfigError",
"(",
"'Error reading data paths: %s'",
"%",
"e",
")",
"return",
"paths"
] | 32.692308 | 15.307692 |
def read_cell_array(fd, endian, header):
    """Read a cell array.

    Reads ``header['dims'][0] * header['dims'][1]`` sub-variables from the
    stream in row-major order and returns an array with rows of the cell
    array.  A single-row cell array is squeezed to a flat array.

    :param fd: file-like object positioned at the cell array's data
    :param endian: endianness flag forwarded to the variable readers
    :param header: parsed variable header containing the ``dims`` sizes
    """
    array = [list() for i in range(header['dims'][0])]
    for row in range(header['dims'][0]):
        for col in range(header['dims'][1]):
            # read the matrix header and array
            vheader, next_pos, fd_var = read_var_header(fd, endian)
            varray = read_var_array(fd_var, endian, vheader)
            array[row].append(varray)
            # move on to next field
            fd.seek(next_pos)
    # pack and return the array
    if header['dims'][0] == 1:
        return squeeze(array[0])
    return squeeze(array)
"def",
"read_cell_array",
"(",
"fd",
",",
"endian",
",",
"header",
")",
":",
"array",
"=",
"[",
"list",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"header",
"[",
"'dims'",
"]",
"[",
"0",
"]",
")",
"]",
"for",
"row",
"in",
"range",
"(",
"header",
"[",
"'dims'",
"]",
"[",
"0",
"]",
")",
":",
"for",
"col",
"in",
"range",
"(",
"header",
"[",
"'dims'",
"]",
"[",
"1",
"]",
")",
":",
"# read the matrix header and array",
"vheader",
",",
"next_pos",
",",
"fd_var",
"=",
"read_var_header",
"(",
"fd",
",",
"endian",
")",
"varray",
"=",
"read_var_array",
"(",
"fd_var",
",",
"endian",
",",
"vheader",
")",
"array",
"[",
"row",
"]",
".",
"append",
"(",
"varray",
")",
"# move on to next field",
"fd",
".",
"seek",
"(",
"next_pos",
")",
"# pack and return the array",
"if",
"header",
"[",
"'dims'",
"]",
"[",
"0",
"]",
"==",
"1",
":",
"return",
"squeeze",
"(",
"array",
"[",
"0",
"]",
")",
"return",
"squeeze",
"(",
"array",
")"
] | 38.294118 | 8.294118 |
def notify_task(self, task_id, **kwargs):
    """Notify PNC about a BPM task event.

    Accepts polymorphic JSON {\"eventType\": \"string\"} based on the
    \"eventType\" field.

    The request is synchronous by default.  Pass a ``callback`` function
    in ``kwargs`` to make it asynchronous and receive the request thread:

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.notify_task(task_id, callback=callback_function)

    :param callback function: callback for asynchronous requests (optional)
    :param int task_id: BPM task ID (required)
    :return: None, or the request thread when called asynchronously
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.notify_task_with_http_info(task_id, **kwargs)
    (data) = self.notify_task_with_http_info(task_id, **kwargs)
    return data
"def",
"notify_task",
"(",
"self",
",",
"task_id",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'callback'",
")",
":",
"return",
"self",
".",
"notify_task_with_http_info",
"(",
"task_id",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"notify_task_with_http_info",
"(",
"task_id",
",",
"*",
"*",
"kwargs",
")",
"return",
"data"
] | 43.28 | 19.84 |
def sodium_pad(s, blocksize):
    """
    Pad the input bytearray ``s`` to a multiple of ``blocksize``
    using the ISO/IEC 7816-4 algorithm

    :param s: input bytes string
    :type s: bytes
    :param blocksize: block size the padded output is a multiple of
    :type blocksize: int
    :return: padded string
    :rtype: bytes
    :raises exc.TypeError: if ``s`` or ``blocksize`` has the wrong type
    :raises exc.ValueError: if ``blocksize`` is not positive
    :raises exc.CryptoError: if the underlying libsodium call fails
    """
    ensure(isinstance(s, bytes),
           raising=exc.TypeError)
    ensure(isinstance(blocksize, integer_types),
           raising=exc.TypeError)
    if blocksize <= 0:
        raise exc.ValueError
    s_len = len(s)
    # Worst case the padding adds one full extra block.
    m_len = s_len + blocksize
    buf = ffi.new("unsigned char []", m_len)
    p_len = ffi.new("size_t []", 1)  # receives the actual padded length
    ffi.memmove(buf, s, s_len)
    rc = lib.sodium_pad(p_len, buf, s_len, blocksize, m_len)
    ensure(rc == 0, "Padding failure", raising=exc.CryptoError)
    # Copy out only the padded prefix reported by libsodium.
    return ffi.buffer(buf, p_len[0])[:]
"def",
"sodium_pad",
"(",
"s",
",",
"blocksize",
")",
":",
"ensure",
"(",
"isinstance",
"(",
"s",
",",
"bytes",
")",
",",
"raising",
"=",
"exc",
".",
"TypeError",
")",
"ensure",
"(",
"isinstance",
"(",
"blocksize",
",",
"integer_types",
")",
",",
"raising",
"=",
"exc",
".",
"TypeError",
")",
"if",
"blocksize",
"<=",
"0",
":",
"raise",
"exc",
".",
"ValueError",
"s_len",
"=",
"len",
"(",
"s",
")",
"m_len",
"=",
"s_len",
"+",
"blocksize",
"buf",
"=",
"ffi",
".",
"new",
"(",
"\"unsigned char []\"",
",",
"m_len",
")",
"p_len",
"=",
"ffi",
".",
"new",
"(",
"\"size_t []\"",
",",
"1",
")",
"ffi",
".",
"memmove",
"(",
"buf",
",",
"s",
",",
"s_len",
")",
"rc",
"=",
"lib",
".",
"sodium_pad",
"(",
"p_len",
",",
"buf",
",",
"s_len",
",",
"blocksize",
",",
"m_len",
")",
"ensure",
"(",
"rc",
"==",
"0",
",",
"\"Padding failure\"",
",",
"raising",
"=",
"exc",
".",
"CryptoError",
")",
"return",
"ffi",
".",
"buffer",
"(",
"buf",
",",
"p_len",
"[",
"0",
"]",
")",
"[",
":",
"]"
] | 30.653846 | 12.884615 |
def set_exc_info(self, exc_info):
    """Sets the exception information of a ``Future.``

    Preserves tracebacks on Python 2.

    .. versionadded:: 4.0
    """
    self._exc_info = exc_info
    self._log_traceback = True
    if not _GC_CYCLE_FINALIZERS:
        # Without GC-cycle finalizers, fall back to a helper object that
        # logs the traceback if it is never consumed.
        self._tb_logger = _TracebackLogger(exc_info)
    try:
        self._set_done()
    finally:
        # Activate the logger after all callbacks have had a
        # chance to call result() or exception().
        if self._log_traceback and self._tb_logger is not None:
            self._tb_logger.activate()
    # NOTE(review): this trailing re-assignment looks redundant unless a
    # callback run by _set_done() mutated _exc_info -- confirm before
    # removing it.
    self._exc_info = exc_info
"def",
"set_exc_info",
"(",
"self",
",",
"exc_info",
")",
":",
"self",
".",
"_exc_info",
"=",
"exc_info",
"self",
".",
"_log_traceback",
"=",
"True",
"if",
"not",
"_GC_CYCLE_FINALIZERS",
":",
"self",
".",
"_tb_logger",
"=",
"_TracebackLogger",
"(",
"exc_info",
")",
"try",
":",
"self",
".",
"_set_done",
"(",
")",
"finally",
":",
"# Activate the logger after all callbacks have had a",
"# chance to call result() or exception().",
"if",
"self",
".",
"_log_traceback",
"and",
"self",
".",
"_tb_logger",
"is",
"not",
"None",
":",
"self",
".",
"_tb_logger",
".",
"activate",
"(",
")",
"self",
".",
"_exc_info",
"=",
"exc_info"
] | 32.3 | 15.45 |
def smart_content_type_for_model(model):
    """Return the Django ContentType for ``model``.

    Unlike Django's standard behaviour (which returns the parent
    ContentType for proxy models), a proxy model's own ContentType is
    returned here.

    :raises ImportError: when Django is not installed
    """
    try:
        # noinspection PyPackageRequirements,PyUnresolvedReferences
        from django.contrib.contenttypes.models import ContentType
    except ImportError:
        print("Django is required but cannot be imported.")
        raise
    if not model._meta.proxy:
        return ContentType.objects.get_for_model(model)
    return ContentType.objects.get(
        app_label=model._meta.app_label,
        model=model._meta.object_name.lower())
"def",
"smart_content_type_for_model",
"(",
"model",
")",
":",
"try",
":",
"# noinspection PyPackageRequirements,PyUnresolvedReferences",
"from",
"django",
".",
"contrib",
".",
"contenttypes",
".",
"models",
"import",
"ContentType",
"except",
"ImportError",
":",
"print",
"(",
"\"Django is required but cannot be imported.\"",
")",
"raise",
"if",
"model",
".",
"_meta",
".",
"proxy",
":",
"return",
"ContentType",
".",
"objects",
".",
"get",
"(",
"app_label",
"=",
"model",
".",
"_meta",
".",
"app_label",
",",
"model",
"=",
"model",
".",
"_meta",
".",
"object_name",
".",
"lower",
"(",
")",
")",
"else",
":",
"return",
"ContentType",
".",
"objects",
".",
"get_for_model",
"(",
"model",
")"
] | 43.5 | 25.944444 |
def lambda_handler(event, context):
    '''Demonstrates a simple HTTP endpoint using API Gateway. You have full
    access to the request and response payload, including headers and
    status code.

    TableName provided by template.yaml.

    To scan a DynamoDB table, make a GET request with optional query string
    parameter.  To put, update, or delete an item, make a POST, PUT, or
    DELETE request respectively, passing in the payload to the DynamoDB API
    as a JSON body.
    '''
    print("Received event: " + json.dumps(event, indent=2))
    # Dispatch table: HTTP method -> DynamoDB call on the configured table.
    operations = {
        'DELETE': lambda dynamo, x: dynamo.delete_item(TableName=table_name, **x),
        'GET': lambda dynamo, x: dynamo.scan(TableName=table_name, **x) if x else dynamo.scan(TableName=table_name),
        'POST': lambda dynamo, x: dynamo.put_item(TableName=table_name, **x),
        'PUT': lambda dynamo, x: dynamo.update_item(TableName=table_name, **x),
    }
    method = event['httpMethod']
    if method not in operations:
        return respond(ValueError('Unsupported method "{}"'.format(method)))
    if method == 'GET':
        payload = event['queryStringParameters']
    else:
        payload = json.loads(event['body'])
    return respond(None, operations[method](dynamo, payload))
"def",
"lambda_handler",
"(",
"event",
",",
"context",
")",
":",
"print",
"(",
"\"Received event: \"",
"+",
"json",
".",
"dumps",
"(",
"event",
",",
"indent",
"=",
"2",
")",
")",
"operations",
"=",
"{",
"'DELETE'",
":",
"lambda",
"dynamo",
",",
"x",
":",
"dynamo",
".",
"delete_item",
"(",
"TableName",
"=",
"table_name",
",",
"*",
"*",
"x",
")",
",",
"'GET'",
":",
"lambda",
"dynamo",
",",
"x",
":",
"dynamo",
".",
"scan",
"(",
"TableName",
"=",
"table_name",
",",
"*",
"*",
"x",
")",
"if",
"x",
"else",
"dynamo",
".",
"scan",
"(",
"TableName",
"=",
"table_name",
")",
",",
"'POST'",
":",
"lambda",
"dynamo",
",",
"x",
":",
"dynamo",
".",
"put_item",
"(",
"TableName",
"=",
"table_name",
",",
"*",
"*",
"x",
")",
",",
"'PUT'",
":",
"lambda",
"dynamo",
",",
"x",
":",
"dynamo",
".",
"update_item",
"(",
"TableName",
"=",
"table_name",
",",
"*",
"*",
"x",
")",
",",
"}",
"operation",
"=",
"event",
"[",
"'httpMethod'",
"]",
"if",
"operation",
"in",
"operations",
":",
"payload",
"=",
"event",
"[",
"'queryStringParameters'",
"]",
"if",
"operation",
"==",
"'GET'",
"else",
"json",
".",
"loads",
"(",
"event",
"[",
"'body'",
"]",
")",
"return",
"respond",
"(",
"None",
",",
"operations",
"[",
"operation",
"]",
"(",
"dynamo",
",",
"payload",
")",
")",
"else",
":",
"return",
"respond",
"(",
"ValueError",
"(",
"'Unsupported method \"{}\"'",
".",
"format",
"(",
"operation",
")",
")",
")"
] | 47 | 32.153846 |
def add_single_feature_methods(cls):
    """Custom decorator intended for :class:`~vision.helpers.VisionHelpers`.

    Adds a `{feature}` method to *cls* for every feature defined on the
    Feature enum, then returns *cls*.
    """
    # Only meaningful when building the GAPIC subclass with enums attached.
    if not hasattr(cls, "enums"):
        return cls
    for feature in cls.enums.Feature.Type:
        # Skip the falsy placeholder feature.
        if feature.name == "TYPE_UNSPECIFIED":
            continue
        method = _create_single_feature_method(feature)
        # Qualify the name and substitute the module into the docstring.
        # This is safe: once the method exists, future calls take the
        # hasattr short-circuit on already-built classes.
        method.__qualname__ = "{cls}.{name}".format(
            cls=cls.__name__, name=method.__name__
        )
        method.__doc__ = method.__doc__.format(module=cls.__module__)
        setattr(cls, method.__name__, method)
    return cls
"def",
"add_single_feature_methods",
"(",
"cls",
")",
":",
"# Sanity check: This only makes sense if we are building the GAPIC",
"# subclass and have enums already attached.",
"if",
"not",
"hasattr",
"(",
"cls",
",",
"\"enums\"",
")",
":",
"return",
"cls",
"# Add each single-feature method to the class.",
"for",
"feature",
"in",
"cls",
".",
"enums",
".",
"Feature",
".",
"Type",
":",
"# Sanity check: Do not make a method for the falsy feature.",
"if",
"feature",
".",
"name",
"==",
"\"TYPE_UNSPECIFIED\"",
":",
"continue",
"# Assign the appropriate metadata to the function.",
"detect",
"=",
"_create_single_feature_method",
"(",
"feature",
")",
"# Assign a qualified name to the function, and perform module",
"# replacement on the docstring.",
"detect",
".",
"__qualname__",
"=",
"\"{cls}.{name}\"",
".",
"format",
"(",
"cls",
"=",
"cls",
".",
"__name__",
",",
"name",
"=",
"detect",
".",
"__name__",
")",
"detect",
".",
"__doc__",
"=",
"detect",
".",
"__doc__",
".",
"format",
"(",
"module",
"=",
"cls",
".",
"__module__",
")",
"# Place the function on the class being created.",
"setattr",
"(",
"cls",
",",
"detect",
".",
"__name__",
",",
"detect",
")",
"# Done; return the class.",
"return",
"cls"
] | 35.96875 | 19 |
def convert_to_xml(cls, value):
    """Serialize a signed angle float (e.g. -42.42 degrees) as an XML
    string of positive 1/60000-degree increments."""
    sixtieths = int(round(value * cls.DEGREE_INCREMENTS))
    # Modulo folds negative and >360-degree angles into [0, THREE_SIXTY).
    normalized = sixtieths % cls.THREE_SIXTY
    return str(normalized)
"def",
"convert_to_xml",
"(",
"cls",
",",
"value",
")",
":",
"# modulo normalizes negative and >360 degree values",
"rot",
"=",
"int",
"(",
"round",
"(",
"value",
"*",
"cls",
".",
"DEGREE_INCREMENTS",
")",
")",
"%",
"cls",
".",
"THREE_SIXTY",
"return",
"str",
"(",
"rot",
")"
] | 39.5 | 14 |
def from_prefix(cls, container, prefix):
    """Build a SUBDIR-typed instance for *prefix* within *container*.

    :raises errors.NoObjectException: if ``prefix`` is None
    """
    if prefix is None:
        raise errors.NoObjectException
    subdir_type = cls.type_cls.SUBDIR
    return cls(container, name=prefix.name, obj_type=subdir_type)
"def",
"from_prefix",
"(",
"cls",
",",
"container",
",",
"prefix",
")",
":",
"if",
"prefix",
"is",
"None",
":",
"raise",
"errors",
".",
"NoObjectException",
"return",
"cls",
"(",
"container",
",",
"name",
"=",
"prefix",
".",
"name",
",",
"obj_type",
"=",
"cls",
".",
"type_cls",
".",
"SUBDIR",
")"
] | 32.625 | 9.875 |
def get_token(self):
    """Get an authentication token.

    Returns a cached token when one is available; otherwise KMS-encrypts
    a JSON payload holding the token's validity window (``not_before`` /
    ``not_after``), base64-encodes the ciphertext, caches it and returns
    it.

    :raises ServiceConnectionError: when AWS cannot be reached
    :raises TokenGenerationError: when token creation fails for any
        other reason
    """
    # Generate string formatted timestamps for not_before and not_after,
    # for the lifetime specified in minutes.
    now = datetime.datetime.utcnow()
    # Start the not_before time x minutes in the past, to avoid clock skew
    # issues.
    _not_before = now - datetime.timedelta(minutes=TOKEN_SKEW)
    not_before = _not_before.strftime(TIME_FORMAT)
    # Set the not_after time in the future, by the lifetime, but ensure the
    # skew we applied to not_before is taken into account.
    _not_after = now + datetime.timedelta(
        minutes=self.token_lifetime - TOKEN_SKEW
    )
    not_after = _not_after.strftime(TIME_FORMAT)
    # Generate a json string for the encryption payload contents.
    payload = json.dumps({
        'not_before': not_before,
        'not_after': not_after
    })
    token = self._get_cached_token()
    if token:
        return token
    # Generate a base64 encoded KMS encrypted token to use for
    # authentication. We encrypt the token lifetime information as the
    # payload for verification in Confidant.
    try:
        token = self.kms_client.encrypt(
            KeyId=self.auth_key,
            Plaintext=payload,
            EncryptionContext=self.auth_context
        )['CiphertextBlob']
        token = base64.b64encode(ensure_bytes(token))
    except (ConnectionError, EndpointConnectionError) as e:
        logging.exception('Failure connecting to AWS: {}'.format(str(e)))
        raise ServiceConnectionError()
    except Exception:
        logging.exception('Failed to create auth token.')
        raise TokenGenerationError()
    self._cache_token(token, not_after)
    return token
"def",
"get_token",
"(",
"self",
")",
":",
"# Generate string formatted timestamps for not_before and not_after,",
"# for the lifetime specified in minutes.",
"now",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"# Start the not_before time x minutes in the past, to avoid clock skew",
"# issues.",
"_not_before",
"=",
"now",
"-",
"datetime",
".",
"timedelta",
"(",
"minutes",
"=",
"TOKEN_SKEW",
")",
"not_before",
"=",
"_not_before",
".",
"strftime",
"(",
"TIME_FORMAT",
")",
"# Set the not_after time in the future, by the lifetime, but ensure the",
"# skew we applied to not_before is taken into account.",
"_not_after",
"=",
"now",
"+",
"datetime",
".",
"timedelta",
"(",
"minutes",
"=",
"self",
".",
"token_lifetime",
"-",
"TOKEN_SKEW",
")",
"not_after",
"=",
"_not_after",
".",
"strftime",
"(",
"TIME_FORMAT",
")",
"# Generate a json string for the encryption payload contents.",
"payload",
"=",
"json",
".",
"dumps",
"(",
"{",
"'not_before'",
":",
"not_before",
",",
"'not_after'",
":",
"not_after",
"}",
")",
"token",
"=",
"self",
".",
"_get_cached_token",
"(",
")",
"if",
"token",
":",
"return",
"token",
"# Generate a base64 encoded KMS encrypted token to use for",
"# authentication. We encrypt the token lifetime information as the",
"# payload for verification in Confidant.",
"try",
":",
"token",
"=",
"self",
".",
"kms_client",
".",
"encrypt",
"(",
"KeyId",
"=",
"self",
".",
"auth_key",
",",
"Plaintext",
"=",
"payload",
",",
"EncryptionContext",
"=",
"self",
".",
"auth_context",
")",
"[",
"'CiphertextBlob'",
"]",
"token",
"=",
"base64",
".",
"b64encode",
"(",
"ensure_bytes",
"(",
"token",
")",
")",
"except",
"(",
"ConnectionError",
",",
"EndpointConnectionError",
")",
"as",
"e",
":",
"logging",
".",
"exception",
"(",
"'Failure connecting to AWS: {}'",
".",
"format",
"(",
"str",
"(",
"e",
")",
")",
")",
"raise",
"ServiceConnectionError",
"(",
")",
"except",
"Exception",
":",
"logging",
".",
"exception",
"(",
"'Failed to create auth token.'",
")",
"raise",
"TokenGenerationError",
"(",
")",
"self",
".",
"_cache_token",
"(",
"token",
",",
"not_after",
")",
"return",
"token"
] | 44.536585 | 16.390244 |
def set_config_for_routing_entity(
        self,
        routing_entity: Union[web.Resource, web.StaticResource,
                              web.ResourceRoute],
        config):
    """Record CORS configuration for a resource or one of its routes.

    A resource may carry at most one default configuration, and each of
    its routes at most one per-method configuration.  Configuring a
    route implicitly configures its resource first (with the same
    *config* as the resource default).

    :param routing_entity: the resource or route to configure
    :param config: the CORS options to record for that entity
    :raises ValueError: if CORS is already configured for the entity, or
        ``routing_entity`` has an unsupported type
    """
    if isinstance(routing_entity, (web.Resource, web.StaticResource)):
        resource = routing_entity
        # Add resource configuration or fail if it's already added.
        if resource in self._resource_config:
            raise ValueError(
                "CORS is already configured for {!r} resource.".format(
                    resource))
        self._resource_config[resource] = _ResourceConfig(
            default_config=config)
    elif isinstance(routing_entity, web.ResourceRoute):
        route = routing_entity
        # Add resource's route configuration or fail if it's already added.
        if route.resource not in self._resource_config:
            self.set_config_for_routing_entity(route.resource, config)
        if route.resource not in self._resource_config:
            raise ValueError(
                "Can't setup CORS for {!r} request, "
                "CORS must be enabled for route's resource first.".format(
                    route))
        resource_config = self._resource_config[route.resource]
        if route.method in resource_config.method_config:
            raise ValueError(
                "Can't setup CORS for {!r} route: CORS already "
                "configured on resource {!r} for {} method".format(
                    route, route.resource, route.method))
        resource_config.method_config[route.method] = config
    else:
        raise ValueError(
            "Resource or ResourceRoute expected, got {!r}".format(
                routing_entity))
"def",
"set_config_for_routing_entity",
"(",
"self",
",",
"routing_entity",
":",
"Union",
"[",
"web",
".",
"Resource",
",",
"web",
".",
"StaticResource",
",",
"web",
".",
"ResourceRoute",
"]",
",",
"config",
")",
":",
"if",
"isinstance",
"(",
"routing_entity",
",",
"(",
"web",
".",
"Resource",
",",
"web",
".",
"StaticResource",
")",
")",
":",
"resource",
"=",
"routing_entity",
"# Add resource configuration or fail if it's already added.",
"if",
"resource",
"in",
"self",
".",
"_resource_config",
":",
"raise",
"ValueError",
"(",
"\"CORS is already configured for {!r} resource.\"",
".",
"format",
"(",
"resource",
")",
")",
"self",
".",
"_resource_config",
"[",
"resource",
"]",
"=",
"_ResourceConfig",
"(",
"default_config",
"=",
"config",
")",
"elif",
"isinstance",
"(",
"routing_entity",
",",
"web",
".",
"ResourceRoute",
")",
":",
"route",
"=",
"routing_entity",
"# Add resource's route configuration or fail if it's already added.",
"if",
"route",
".",
"resource",
"not",
"in",
"self",
".",
"_resource_config",
":",
"self",
".",
"set_config_for_routing_entity",
"(",
"route",
".",
"resource",
",",
"config",
")",
"if",
"route",
".",
"resource",
"not",
"in",
"self",
".",
"_resource_config",
":",
"raise",
"ValueError",
"(",
"\"Can't setup CORS for {!r} request, \"",
"\"CORS must be enabled for route's resource first.\"",
".",
"format",
"(",
"route",
")",
")",
"resource_config",
"=",
"self",
".",
"_resource_config",
"[",
"route",
".",
"resource",
"]",
"if",
"route",
".",
"method",
"in",
"resource_config",
".",
"method_config",
":",
"raise",
"ValueError",
"(",
"\"Can't setup CORS for {!r} route: CORS already \"",
"\"configured on resource {!r} for {} method\"",
".",
"format",
"(",
"route",
",",
"route",
".",
"resource",
",",
"route",
".",
"method",
")",
")",
"resource_config",
".",
"method_config",
"[",
"route",
".",
"method",
"]",
"=",
"config",
"else",
":",
"raise",
"ValueError",
"(",
"\"Resource or ResourceRoute expected, got {!r}\"",
".",
"format",
"(",
"routing_entity",
")",
")"
] | 40.478261 | 23.391304 |
def _add_document(self, doc_id, conn=None, nosave=False, score=1.0, payload=None,
replace=False, partial=False, language=None, **fields):
"""
Internal add_document used for both batch and single doc indexing
"""
if conn is None:
conn = self.redis
if partial:
replace = True
args = [self.ADD_CMD, self.index_name, doc_id, score]
if nosave:
args.append('NOSAVE')
if payload is not None:
args.append('PAYLOAD')
args.append(payload)
if replace:
args.append('REPLACE')
if partial:
args.append('PARTIAL')
if language:
args += ['LANGUAGE', language]
args.append('FIELDS')
args += list(itertools.chain(*fields.items()))
return conn.execute_command(*args) | [
"def",
"_add_document",
"(",
"self",
",",
"doc_id",
",",
"conn",
"=",
"None",
",",
"nosave",
"=",
"False",
",",
"score",
"=",
"1.0",
",",
"payload",
"=",
"None",
",",
"replace",
"=",
"False",
",",
"partial",
"=",
"False",
",",
"language",
"=",
"None",
",",
"*",
"*",
"fields",
")",
":",
"if",
"conn",
"is",
"None",
":",
"conn",
"=",
"self",
".",
"redis",
"if",
"partial",
":",
"replace",
"=",
"True",
"args",
"=",
"[",
"self",
".",
"ADD_CMD",
",",
"self",
".",
"index_name",
",",
"doc_id",
",",
"score",
"]",
"if",
"nosave",
":",
"args",
".",
"append",
"(",
"'NOSAVE'",
")",
"if",
"payload",
"is",
"not",
"None",
":",
"args",
".",
"append",
"(",
"'PAYLOAD'",
")",
"args",
".",
"append",
"(",
"payload",
")",
"if",
"replace",
":",
"args",
".",
"append",
"(",
"'REPLACE'",
")",
"if",
"partial",
":",
"args",
".",
"append",
"(",
"'PARTIAL'",
")",
"if",
"language",
":",
"args",
"+=",
"[",
"'LANGUAGE'",
",",
"language",
"]",
"args",
".",
"append",
"(",
"'FIELDS'",
")",
"args",
"+=",
"list",
"(",
"itertools",
".",
"chain",
"(",
"*",
"fields",
".",
"items",
"(",
")",
")",
")",
"return",
"conn",
".",
"execute_command",
"(",
"*",
"args",
")"
] | 33.192308 | 16.230769 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.