text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
async def get_cred_def_id(self):
    """
    Get the ledger ID of the credential definition.

    Example:
        source_id = 'foobar123'
        schema_name = 'Schema Name'
        payment_handle = 0
        credential_def = await CredentialDef.create(source_id, schema_name, schema_id, payment_handle)
        assert await credential_def.get_cred_def_id() == '2hoqvcwupRTUNkXn6ArYzs:3:CL:2471'

    :return: ID string
    """
    # Callback signature: (command_handle, error_code, cred_def_id) -> None.
    cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_char_p))
    c_handle = c_uint32(self.handle)
    # do_call resolves when the vcx library invokes the callback with the ID bytes.
    cred_def_id = await do_call('vcx_credentialdef_get_cred_def_id', c_handle, cb)
    return cred_def_id.decode()
"async",
"def",
"get_cred_def_id",
"(",
"self",
")",
":",
"cb",
"=",
"create_cb",
"(",
"CFUNCTYPE",
"(",
"None",
",",
"c_uint32",
",",
"c_uint32",
",",
"c_char_p",
")",
")",
"c_handle",
"=",
"c_uint32",
"(",
"self",
".",
"handle",
")",
"cred_def_id",
"="... | 40.3125 | 18.8125 |
def cgnr_prolongation_smoothing(A, T, B, BtBinv, Sparsity_Pattern, maxiter,
                                tol, weighting='local', Cpt_params=None):
    """Use CGNR to smooth T by solving A T = 0, subject to nullspace and sparsity constraints.

    Parameters
    ----------
    A : csr_matrix, bsr_matrix
        SPD sparse NxN matrix
        Should be at least nonsymmetric or indefinite
    T : bsr_matrix
        Tentative prolongator, a NxM sparse matrix (M < N).
        This is initial guess for the equation A T = 0.
        Assumed that T B_c = B_f
    B : array
        Near-nullspace modes for coarse grid, i.e., B_c.
        Has shape (M,k) where k is the number of coarse candidate vectors.
    BtBinv : array
        3 dimensional array such that,
        BtBinv[i] = pinv(B_i.H Bi), and B_i is B restricted
        to the neighborhood (in the matrix graph) of dof of i.
    Sparsity_Pattern : csr_matrix, bsr_matrix
        Sparse NxM matrix
        This is the sparsity pattern constraint to enforce on the
        eventual prolongator
    maxiter : int
        maximum number of iterations
    tol : float
        residual tolerance for A T = 0
    weighting : string
        'block', 'diagonal' or 'local' construction of the diagonal
        preconditioning
        IGNORED here, only 'diagonal' preconditioning is used.
    Cpt_params : tuple
        Tuple of the form (bool, dict). If the Cpt_params[0] = False, then
        the standard SA prolongation smoothing is carried out. If True, then
        dict must be a dictionary of parameters containing, (1) P_I: P_I.T is
        the injection matrix for the Cpts, (2) I_F: an identity matrix
        for only the F-points (i.e. I, but with zero rows and columns for
        C-points) and I_C: the C-point analogue to I_F.

    Returns
    -------
    T : bsr_matrix
        Smoothed prolongator using CGNR to solve A T = 0,
        subject to the constraints, T B_c = B_f, and T has no nonzero
        outside of the sparsity pattern in Sparsity_Pattern.

    See Also
    --------
    The principal calling routine,
    pyamg.aggregation.smooth.energy_prolongation_smoother
    """
    # For non-SPD system, apply CG on Normal Equations with Diagonal
    # Preconditioning (requires transpose)
    Ah = A.H
    Ah.sort_indices()

    # Preallocate AP with the fixed sparsity pattern so the incomplete
    # matrix product can be written into it in place each iteration.
    uones = np.zeros(Sparsity_Pattern.data.shape, dtype=T.dtype)
    AP = sparse.bsr_matrix((uones, Sparsity_Pattern.indices,
                            Sparsity_Pattern.indptr),
                           shape=(Sparsity_Pattern.shape))

    # D for A.H*A
    Dinv = get_diagonal(A, norm_eq=1, inv=True)

    # Calculate initial residual
    # Equivalent to R = -Ah*(A*T); R = R.multiply(Sparsity_Pattern)
    # with the added constraint that R has an explicit 0 wherever
    # R is 0 and Sparsity_Pattern is not
    uones = np.zeros(Sparsity_Pattern.data.shape, dtype=T.dtype)
    R = sparse.bsr_matrix((uones, Sparsity_Pattern.indices,
                           Sparsity_Pattern.indptr),
                          shape=(Sparsity_Pattern.shape))
    AT = -1.0*A*T
    R.data[:] = 0.0
    pyamg.amg_core.incomplete_mat_mult_bsr(Ah.indptr, Ah.indices,
                                           np.ravel(Ah.data),
                                           AT.indptr, AT.indices,
                                           np.ravel(AT.data),
                                           R.indptr, R.indices,
                                           np.ravel(R.data),
                                           int(T.shape[0]/T.blocksize[0]),
                                           int(T.shape[1]/T.blocksize[1]),
                                           Ah.blocksize[0], Ah.blocksize[1],
                                           T.blocksize[1])

    # Enforce R*B = 0
    Satisfy_Constraints(R, B, BtBinv)

    if R.nnz == 0:
        print("Error in sa_energy_min(..). Initial R no nonzeros on a level. \
              Returning tentative prolongator\n")
        return T

    # Calculate Frobenius norm of the residual
    # NOTE(review): nnz is used as a cheap stand-in for the Frobenius norm
    # (the exact computation is kept in the trailing comment).
    resid = R.nnz  # np.sqrt((R.data.conjugate()*R.data).sum())
    # print "Energy Minimization of Prolongator \
    # --- Iteration 0 --- r = " + str(resid)

    i = 0
    while i < maxiter and resid > tol:
        # vect = np.ravel((A*T).data)
        # print "Iteration " + str(i) + " \
        #   Energy = %1.3e"%np.sqrt( (vect.conjugate()*vect).sum() )

        # Apply diagonal preconditioner
        Z = scale_rows(R, Dinv)

        # Frobenius innerproduct of (R,Z) = sum(rk.*zk)
        newsum = (R.conjugate().multiply(Z)).sum()
        if newsum < tol:
            # met tolerance, so halt
            break

        # P is the search direction, not the prolongator, which is T.
        if(i == 0):
            P = Z
            oldsum = newsum
        else:
            beta = newsum/oldsum
            P = Z + beta*P
            oldsum = newsum

        # Calculate new direction
        # Equivalent to:  AP = Ah*(A*P);    AP = AP.multiply(Sparsity_Pattern)
        # with the added constraint that explicit zeros are in AP wherever
        # AP = 0 and Sparsity_Pattern does not
        AP_temp = A*P
        AP.data[:] = 0.0
        pyamg.amg_core.incomplete_mat_mult_bsr(Ah.indptr, Ah.indices,
                                               np.ravel(Ah.data),
                                               AP_temp.indptr, AP_temp.indices,
                                               np.ravel(AP_temp.data),
                                               AP.indptr, AP.indices,
                                               np.ravel(AP.data),
                                               int(T.shape[0]/T.blocksize[0]),
                                               int(T.shape[1]/T.blocksize[1]),
                                               Ah.blocksize[0],
                                               Ah.blocksize[1], T.blocksize[1])
        del AP_temp

        # Enforce AP*B = 0
        Satisfy_Constraints(AP, B, BtBinv)

        # Frobenius inner-product of (P, AP)
        alpha = newsum/(P.conjugate().multiply(AP)).sum()

        # Update the prolongator, T
        T = T + alpha*P

        # Ensure identity at C-pts
        # NOTE(review): Cpt_params defaults to None but is indexed here;
        # callers are expected to always pass the (bool, dict) tuple.
        if Cpt_params[0]:
            T = Cpt_params[1]['I_F']*T + Cpt_params[1]['P_I']

        # Update residual
        R = R - alpha*AP

        i += 1

        # Calculate Frobenius norm of the residual
        resid = R.nnz  # np.sqrt((R.data.conjugate()*R.data).sum())
        # print "Energy Minimization of Prolongator \
        # --- Iteration " + str(i) + " --- r = " + str(resid)

    # vect = np.ravel((A*T).data)
    # print "Final Iteration " + str(i) + " \
    #   Energy = %1.3e"%np.sqrt( (vect.conjugate()*vect).sum() )

    return T
"def",
"cgnr_prolongation_smoothing",
"(",
"A",
",",
"T",
",",
"B",
",",
"BtBinv",
",",
"Sparsity_Pattern",
",",
"maxiter",
",",
"tol",
",",
"weighting",
"=",
"'local'",
",",
"Cpt_params",
"=",
"None",
")",
":",
"# For non-SPD system, apply CG on Normal Equations ... | 38.412791 | 22.325581 |
def transport_connected(self):
    """Handle transport connection.

    If this side initiated the connection and no output has been
    started yet, send the stream head.
    """
    with self.lock:
        if self.initiator and self._output_state is None:
            self._initiate()
"def",
"transport_connected",
"(",
"self",
")",
":",
"with",
"self",
".",
"lock",
":",
"if",
"self",
".",
"initiator",
":",
"if",
"self",
".",
"_output_state",
"is",
"None",
":",
"self",
".",
"_initiate",
"(",
")"
] | 30 | 9.888889 |
def usage_palette(parser):
    """Print usage text followed by the names of all known palettes."""
    parser.print_usage()
    print('')
    print('available palettes:')
    for palette_name in sorted(PALETTE):
        print(' %-12s' % palette_name)
    return 0
"def",
"usage_palette",
"(",
"parser",
")",
":",
"parser",
".",
"print_usage",
"(",
")",
"print",
"(",
"''",
")",
"print",
"(",
"'available palettes:'",
")",
"for",
"palette",
"in",
"sorted",
"(",
"PALETTE",
")",
":",
"print",
"(",
"' %-12s'",
"%",
"(... | 25 | 15.444444 |
def _store_documentation(self, path, html, overwrite, quiet):
    """
    Stores all documents on the file system.

    Target location is **path**. File name is the lowercase name of the document + .rst
    (or .html when **html** is set).

    :param path: target directory; relative paths are made absolute.
    :param html: render each document to HTML via docutils instead of raw rST.
    :param overwrite: continue even when the target directory is not empty.
    :param quiet: skip the interactive confirmation prompt.
    """
    echo("Storing groundwork application documents\n")
    echo("Application: %s" % self.app.name)
    echo("Number of documents: %s\n" % len(self.app.documents.get()))
    if not os.path.isabs(path):
        path = os.path.abspath(path)
    if not os.path.isdir(path):
        echo("Path %s is not a directory!" % path)
        sys.exit(1)
    # NOTE(review): this branch looks unreachable -- a non-existent path
    # already fails the isdir() check above; confirm before removing.
    if not os.path.exists(path):
        echo("Path %s does not exist" % path)
        sys.exit(1)
    # Walks the entire tree: a file anywhere below `path` counts as "not empty".
    for dirpath, dirnames, files in os.walk(path):
        if files:
            echo("Path %s is not empty!\n" % path)
            if not overwrite:
                sys.exit(1)
    documents = []
    for key, document in self.app.documents.get().items():
        file_extension = ".html" if html else ".rst"
        # lowers the name, removes all whitespaces and adds the file extension
        file_name_parts = key.lower().split()
        file_name = "".join(file_name_parts)
        file_name += file_extension
        documents.append((file_name, document))
    echo("Going to write to following files:")
    for document in documents:
        echo(" %s" % document[0])
    echo("\nTarget directory: %s" % path)
    # Interactive confirmation loop; `quiet` bypasses it entirely.
    answer = None
    while answer not in ["N", "Y"] and not quiet:
        answer = prompt("Shall we go on? [Y]es, [N]o: ").upper()
        if answer == "N":
            sys.exit(0)
    for document in documents:
        try:
            with open(os.path.join(path, document[0]), "w") as doc_file:
                # Render the document's Jinja template with app/plugin context.
                doc_rendered = Environment().from_string(document[1].content).render(app=self.app,
                                                                                     plugin=document[1].plugin)
                if html:
                    output = publish_parts(doc_rendered, writer_name="html")['whole']
                else:
                    output = doc_rendered
                doc_file.write(output)
        except Exception as e:
            echo("%s error occurred: %s" % (document[0], e))
        else:
            echo("%s stored." % document[0])
"def",
"_store_documentation",
"(",
"self",
",",
"path",
",",
"html",
",",
"overwrite",
",",
"quiet",
")",
":",
"echo",
"(",
"\"Storing groundwork application documents\\n\"",
")",
"echo",
"(",
"\"Application: %s\"",
"%",
"self",
".",
"app",
".",
"name",
")",
... | 36.769231 | 21.261538 |
def load_plugins(self, plugin_class_name):
    """
    load all available plugins

    :param plugin_class_name: str, name of plugin class (e.g. 'PreBuildPlugin')
    :return: dict, bindings for plugins of the plugin_class_name class,
        keyed by each plugin class's `key` attribute
    """
    # imp.findmodule('atomic_reactor') doesn't work
    # NOTE(review): `imp` is deprecated in Python 3 (importlib is the
    # replacement); this code predates that migration.
    plugins_dir = os.path.join(os.path.dirname(__file__), 'plugins')
    logger.debug("loading plugins from dir '%s'", plugins_dir)
    files = [os.path.join(plugins_dir, f)
             for f in os.listdir(plugins_dir)
             if f.endswith(".py")]
    if self.plugin_files:
        logger.debug("loading additional plugins from files '%s'", self.plugin_files)
        files += self.plugin_files
    # Resolve the requested base class object by name from this module's globals.
    plugin_class = globals()[plugin_class_name]
    plugin_classes = {}
    for f in files:
        module_name = os.path.basename(f).rsplit('.', 1)[0]
        # Do not reload plugins
        if module_name in sys.modules:
            f_module = sys.modules[module_name]
        else:
            try:
                logger.debug("load file '%s'", f)
                f_module = imp.load_source(module_name, f)
            except (IOError, OSError, ImportError, SyntaxError) as ex:
                # A broken plugin file must not abort loading the rest.
                logger.warning("can't load module '%s': %r", f, ex)
                continue
        for name in dir(f_module):
            binding = getattr(f_module, name, None)
            try:
                # if you try to compare binding and PostBuildPlugin, python won't match them
                # if you call this script directly b/c:
                #  ! <class 'plugins.plugin_rpmqa.PostBuildRPMqaPlugin'> <= <class
                # '__main__.PostBuildPlugin'>
                # but
                #  <class 'plugins.plugin_rpmqa.PostBuildRPMqaPlugin'> <= <class
                # 'atomic_reactor.plugin.PostBuildPlugin'>
                is_sub = issubclass(binding, plugin_class)
            except TypeError:
                # Non-class attributes (functions, constants) land here.
                is_sub = False
            # Register concrete subclasses only; skip the base class itself
            # (compared by __name__, per the direct-execution caveat above).
            if binding and is_sub and plugin_class.__name__ != binding.__name__:
                plugin_classes[binding.key] = binding
    return plugin_classes
"def",
"load_plugins",
"(",
"self",
",",
"plugin_class_name",
")",
":",
"# imp.findmodule('atomic_reactor') doesn't work",
"plugins_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"'plugins'",
")",
... | 48.347826 | 18.826087 |
def _getSmallestDifference(inputList, targetVal):
'''
Returns the value in inputList that is closest to targetVal
Iteratively splits the dataset in two, so it should be pretty fast
'''
targetList = inputList[:]
retVal = None
while True:
# If we're down to one value, stop iterating
if len(targetList) == 1:
retVal = targetList[0]
break
halfPoint = int(len(targetList) / 2.0) - 1
a = targetList[halfPoint]
b = targetList[halfPoint + 1]
leftDiff = abs(targetVal - a)
rightDiff = abs(targetVal - b)
# If the distance is 0, stop iterating, the targetVal is present
# in the inputList
if leftDiff == 0 or rightDiff == 0:
retVal = targetVal
break
# Look at left half or right half
if leftDiff < rightDiff:
targetList = targetList[:halfPoint + 1]
else:
targetList = targetList[halfPoint + 1:]
return retVal | [
"def",
"_getSmallestDifference",
"(",
"inputList",
",",
"targetVal",
")",
":",
"targetList",
"=",
"inputList",
"[",
":",
"]",
"retVal",
"=",
"None",
"while",
"True",
":",
"# If we're down to one value, stop iterating",
"if",
"len",
"(",
"targetList",
")",
"==",
... | 30.818182 | 17.787879 |
def get_top_tracks(self, limit=None, cacheable=True):
    """Return the most played tracks as a sequence of TopItem objects."""
    params = {"limit": limit} if limit else {}
    doc = _Request(self, "chart.getTopTracks", params).execute(cacheable)
    top_items = []
    for track_node in doc.getElementsByTagName("track"):
        track_title = _extract(track_node, "name")
        track_artist = _extract(track_node, "name", 1)
        found_track = Track(track_artist, track_title, self)
        play_count = _number(_extract(track_node, "playcount"))
        top_items.append(TopItem(found_track, play_count))
    return top_items
"def",
"get_top_tracks",
"(",
"self",
",",
"limit",
"=",
"None",
",",
"cacheable",
"=",
"True",
")",
":",
"params",
"=",
"{",
"}",
"if",
"limit",
":",
"params",
"[",
"\"limit\"",
"]",
"=",
"limit",
"doc",
"=",
"_Request",
"(",
"self",
",",
"\"chart.g... | 33.555556 | 19.777778 |
def set_precision(cls, precision):
    """Configure how many decimal places percentages are reported with."""
    assert 0 <= precision < 10
    cls._precision = precision
    # Smallest step representable at this precision; values within one
    # step of 0 or 100 count as "near" those endpoints.
    step = 1.0 / 10 ** precision
    cls._near0 = step
    cls._near100 = 100.0 - step
"def",
"set_precision",
"(",
"cls",
",",
"precision",
")",
":",
"assert",
"0",
"<=",
"precision",
"<",
"10",
"cls",
".",
"_precision",
"=",
"precision",
"cls",
".",
"_near0",
"=",
"1.0",
"/",
"10",
"**",
"precision",
"cls",
".",
"_near100",
"=",
"100.0... | 42.833333 | 3.166667 |
def run(self):
    """Compile libfaketime and install the shared library with the package.

    Platform handling:
      * Linux: build ``libfaketime.so.1`` (plus the MT variant) via ``make``.
      * macOS: build ``.dylib``s via ``Makefile.OSX``; on 10.12 (Sierra) a
        patched source file is swapped in first.
      * anything else: print a warning and skip the build entirely.
    """
    if sys.platform == "linux" or sys.platform == "linux2":
        libname = 'libfaketime.so.1'
        libnamemt = 'libfaketimeMT.so.1'
    elif sys.platform == "darwin":
        libname = 'libfaketime.1.dylib'
        libnamemt = 'libfaketimeMT.1.dylib'
    else:
        sys.stderr.write("WARNING : libfaketime does not support platform {}\n".format(sys.platform))
        sys.stderr.flush()
        return
    faketime_lib = join('faketime', libname)
    faketime_lib_mt = join('faketime', libnamemt)
    self.my_outputs = []
    setup_py_directory = dirname(realpath(__file__))
    faketime_directory = join(setup_py_directory, "faketime")
    os.chdir(faketime_directory)
    if sys.platform == "linux" or sys.platform == "linux2":
        subprocess.check_call(['make',])
    else:
        os.chdir(setup_py_directory)
        # Sierra needs a patched libfaketime.c before the OSX makefile runs.
        if "10.12" in subprocess.check_output(["sw_vers", "-productVersion"]).decode('utf8'):
            self.copy_file(
                join('faketime', "libfaketime.c.sierra"),
                join('faketime', "libfaketime.c")
            )
        os.chdir(faketime_directory)
        subprocess.check_call(['make', '-f', 'Makefile.OSX'])
    os.chdir(setup_py_directory)
    dest = join(self.install_purelib, dirname(faketime_lib))
    dest_mt = join(self.install_purelib, dirname(faketime_lib_mt))
    try:
        os.makedirs(dest)
    except OSError as e:
        # errno 17 == EEXIST: an already-existing destination dir is fine.
        if e.errno != 17:
            raise
    self.copy_file(faketime_lib, dest)
    if exists(faketime_lib_mt):
        self.copy_file(faketime_lib_mt, dest_mt)
    self.my_outputs.append(join(dest, libname))
    install.run(self)
"def",
"run",
"(",
"self",
")",
":",
"if",
"sys",
".",
"platform",
"==",
"\"linux\"",
"or",
"sys",
".",
"platform",
"==",
"\"linux2\"",
":",
"libname",
"=",
"'libfaketime.so.1'",
"libnamemt",
"=",
"'libfaketimeMT.so.1'",
"elif",
"sys",
".",
"platform",
"==",... | 36.591837 | 19.040816 |
def fetch_album(self, album_id, terr=KKBOXTerritory.TAIWAN):
    '''
    Fetch a single album by its ID.

    :param album_id: the album ID.
    :type album_id: str
    :param terr: the current territory.
    :return: API response.
    :rtype: dict

    See `https://docs-en.kkbox.codes/v1.1/reference#albums-album_id`.
    '''
    base_url = 'https://api.kkbox.com/v1.1/albums/%s' % album_id
    full_url = base_url + '?' + url_parse.urlencode({'territory': terr})
    return self.http._post_data(full_url, None,
                                self.http._headers_with_access_token())
"def",
"fetch_album",
"(",
"self",
",",
"album_id",
",",
"terr",
"=",
"KKBOXTerritory",
".",
"TAIWAN",
")",
":",
"url",
"=",
"'https://api.kkbox.com/v1.1/albums/%s'",
"%",
"album_id",
"url",
"+=",
"'?'",
"+",
"url_parse",
".",
"urlencode",
"(",
"{",
"'territor... | 37.333333 | 22.133333 |
def getBucketInfo(self, buckets):
    """See the function description in base.py"""
    empty_encoding = numpy.zeros(self.n)
    return [EncoderResult(value=0, scalar=0, encoding=empty_encoding)]
"def",
"getBucketInfo",
"(",
"self",
",",
"buckets",
")",
":",
"return",
"[",
"EncoderResult",
"(",
"value",
"=",
"0",
",",
"scalar",
"=",
"0",
",",
"encoding",
"=",
"numpy",
".",
"zeros",
"(",
"self",
".",
"n",
")",
")",
"]"
] | 52.333333 | 14 |
def create_instance(self, nova, image_name, instance_name, flavor):
    """Create the specified instance and wait for it to become ACTIVE.

    :param nova: authenticated nova client.
    :param image_name: name of the Glance image to boot from.
    :param instance_name: name to give the new server.
    :param flavor: name of the flavor to boot with.
    :return: the server object once ACTIVE, or None if it did not reach
        ACTIVE within ~3 minutes (59 polls x 3 s).
    """
    self.log.debug('Creating instance '
                   '({}|{}|{})'.format(instance_name, image_name, flavor))
    image = nova.glance.find_image(image_name)
    flavor = nova.flavors.find(name=flavor)
    instance = nova.servers.create(name=instance_name, image=image,
                                   flavor=flavor)
    # Poll the server status every 3 seconds, up to 60 checks total.
    count = 1
    status = instance.status
    while status != 'ACTIVE' and count < 60:
        time.sleep(3)
        instance = nova.servers.get(instance.id)
        status = instance.status
        self.log.debug('instance status: {}'.format(status))
        count += 1
    if status != 'ACTIVE':
        self.log.error('instance creation timed out')
        return None
    return instance
"def",
"create_instance",
"(",
"self",
",",
"nova",
",",
"image_name",
",",
"instance_name",
",",
"flavor",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'Creating instance '",
"'({}|{}|{})'",
".",
"format",
"(",
"instance_name",
",",
"image_name",
",",
... | 38.347826 | 18.347826 |
def sasutil(self) -> 'SASutil':
    """
    Create a SASutil object for running various utility analytics.

    See the sasutil.py module.

    :return: sasutil object
    """
    # Lazily load the supporting SAS macros exactly once per session.
    if not self._loaded_macros:
        self._loadmacros()
        self._loaded_macros = True
    return SASutil(self)
"def",
"sasutil",
"(",
"self",
")",
"->",
"'SASutil'",
":",
"if",
"not",
"self",
".",
"_loaded_macros",
":",
"self",
".",
"_loadmacros",
"(",
")",
"self",
".",
"_loaded_macros",
"=",
"True",
"return",
"SASutil",
"(",
"self",
")"
] | 28.166667 | 15.166667 |
def verify_signature(self, addr):
    """
    Check whether this object's signature was produced by the given address.
    """
    reencoded_addr = virtualchain.address_reencode(addr)
    return verify(reencoded_addr, self.get_plaintext_to_sign(), self.sig)
"def",
"verify_signature",
"(",
"self",
",",
"addr",
")",
":",
"return",
"verify",
"(",
"virtualchain",
".",
"address_reencode",
"(",
"addr",
")",
",",
"self",
".",
"get_plaintext_to_sign",
"(",
")",
",",
"self",
".",
"sig",
")"
] | 44 | 18.4 |
def _ordered_keys(dict_):
"""
:param dict_: dict of OrderedDict to be processed
:return: list of str of keys in the original order
or in alphabetical order
"""
return isinstance(dict_, OrderedDict) and dict_.keys() or \
dict_ and sorted(dict_.keys()) or [] | [
"def",
"_ordered_keys",
"(",
"dict_",
")",
":",
"return",
"isinstance",
"(",
"dict_",
",",
"OrderedDict",
")",
"and",
"dict_",
".",
"keys",
"(",
")",
"or",
"dict_",
"and",
"sorted",
"(",
"dict_",
".",
"keys",
"(",
")",
")",
"or",
"[",
"]"
] | 39.5 | 11.5 |
def closenessScores(self, expValues, actValues, fractional=True):
    """
    See the function description in base.py
    """
    # Work in log space; non-positive values clamp to the scaled minimum.
    def _toLogSpace(raw):
        if raw > 0:
            return math.log10(raw)
        return self.minScaledValue

    expected = _toLogSpace(expValues[0])
    actual = _toLogSpace(actValues[0])
    delta = abs(expected - actual)

    if fractional:
        # Percent error over the scaled range, capped at 100%.
        fracError = min(1.0, delta / (self.maxScaledValue - self.minScaledValue))
        result = 1.0 - fracError
    else:
        result = delta

    return numpy.array([result])
"def",
"closenessScores",
"(",
"self",
",",
"expValues",
",",
"actValues",
",",
"fractional",
"=",
"True",
")",
":",
"# Compute the percent error in log space",
"if",
"expValues",
"[",
"0",
"]",
">",
"0",
":",
"expValue",
"=",
"math",
".",
"log10",
"(",
"exp... | 27.862069 | 16.275862 |
def connect_mysql(host, port, user, password, database):
    """Open a connection to MySQL.

    NOTE(review): the previous docstring claimed "with retries", but no
    retry logic exists here; callers must implement retries themselves.

    :param host: MySQL server hostname or IP.
    :param port: MySQL server port.
    :param user: user name to authenticate as.
    :param password: password for the user.
    :param database: database (schema) name to select.
    :return: an open pymysql connection object.
    """
    return pymysql.connect(
        host=host, port=port,
        user=user, passwd=password,
        db=database
    )
"def",
"connect_mysql",
"(",
"host",
",",
"port",
",",
"user",
",",
"password",
",",
"database",
")",
":",
"return",
"pymysql",
".",
"connect",
"(",
"host",
"=",
"host",
",",
"port",
"=",
"port",
",",
"user",
"=",
"user",
",",
"passwd",
"=",
"passwor... | 30.142857 | 14.428571 |
def _generatePermEncoderStr(options, encoderDict):
  """ Generate the string that defines the permutations to apply for a given
  encoder.

  Parameters:
  -----------------------------------------------------------------------
  options: experiment params
  encoderDict: the encoder dict, which gets placed into the description.py

  For example, if the encoderDict contains:
    'consumption': {
        'clipInput': True,
        'fieldname': u'consumption',
        'n': 100,
        'name': u'consumption',
        'type': 'AdaptiveScalarEncoder',
        'w': 21},

  The return string will contain:
    "PermuteEncoder(fieldName='consumption',
                    encoderClass='AdaptiveScalarEncoder',
                    w=21,
                    n=PermuteInt(28, 521),
                    clipInput=True)"
  """
  # NOTE(review): this function targets Python 2 (`basestring` below).
  permStr = ""

  # If it's the encoder for the classifier input, then it's always present so
  # put it in as a dict in the permutations.py file instead of a
  # PermuteEncoder().
  if encoderDict.get('classifierOnly', False):
    permStr = "dict("
    for key, value in encoderDict.items():
      if key == "name":
        continue
      if key == 'n' and encoderDict['type'] != 'SDRCategoryEncoder':
        # Permute n over a range anchored just above w.
        permStr += "n=PermuteInt(%d, %d), " % (encoderDict["w"] + 7,
                                               encoderDict["w"] + 500)
      else:
        # Strings get quoted; everything else uses its repr-like form.
        if issubclass(type(value), basestring):
          permStr += "%s='%s', " % (key, value)
        else:
          permStr += "%s=%s, " % (key, value)
    permStr += ")"

  else:
    # Scalar encoders
    if encoderDict["type"] in ["ScalarSpaceEncoder", "AdaptiveScalarEncoder",
                               "ScalarEncoder", "LogEncoder"]:
      permStr = "PermuteEncoder("
      for key, value in encoderDict.items():
        # Rename dict keys to the PermuteEncoder constructor's argument names.
        if key == "fieldname":
          key = "fieldName"
        elif key == "type":
          key = "encoderClass"
        elif key == "name":
          continue
        if key == "n":
          permStr += "n=PermuteInt(%d, %d), " % (encoderDict["w"] + 1,
                                                 encoderDict["w"] + 500)
        elif key == "runDelta":
          if value and not "space" in encoderDict:
            permStr += "space=PermuteChoices([%s,%s]), " \
                % (_quoteAndEscape("delta"), _quoteAndEscape("absolute"))
          encoderDict.pop("runDelta")
        else:
          if issubclass(type(value), basestring):
            permStr += "%s='%s', " % (key, value)
          else:
            permStr += "%s=%s, " % (key, value)
      permStr += ")"

    # Category encoder
    elif encoderDict["type"] in ["SDRCategoryEncoder"]:
      permStr = "PermuteEncoder("
      for key, value in encoderDict.items():
        if key == "fieldname":
          key = "fieldName"
        elif key == "type":
          key = "encoderClass"
        elif key == "name":
          continue
        if issubclass(type(value), basestring):
          permStr += "%s='%s', " % (key, value)
        else:
          permStr += "%s=%s, " % (key, value)
      permStr += ")"

    # Datetime encoder
    elif encoderDict["type"] in ["DateEncoder"]:
      permStr = "PermuteEncoder("
      for key, value in encoderDict.items():
        if key == "fieldname":
          key = "fieldName"
        elif key == "type":
          continue
        elif key == "name":
          continue
        # Each date sub-encoder gets its own class suffix and radius range.
        if key == "timeOfDay":
          permStr += "encoderClass='%s.timeOfDay', " % (encoderDict["type"])
          permStr += "radius=PermuteFloat(0.5, 12), "
          permStr += "w=%d, " % (value[0])
        elif key == "dayOfWeek":
          permStr += "encoderClass='%s.dayOfWeek', " % (encoderDict["type"])
          permStr += "radius=PermuteFloat(1, 6), "
          permStr += "w=%d, " % (value[0])
        elif key == "weekend":
          permStr += "encoderClass='%s.weekend', " % (encoderDict["type"])
          permStr += "radius=PermuteChoices([1]), "
          permStr += "w=%d, " % (value)
        else:
          if issubclass(type(value), basestring):
            permStr += "%s='%s', " % (key, value)
          else:
            permStr += "%s=%s, " % (key, value)
      permStr += ")"

    else:
      raise RuntimeError("Unsupported encoder type '%s'" % \
                         (encoderDict["type"]))

  return permStr
"def",
"_generatePermEncoderStr",
"(",
"options",
",",
"encoderDict",
")",
":",
"permStr",
"=",
"\"\"",
"# If it's the encoder for the classifier input, then it's always present so",
"# put it in as a dict in the permutations.py file instead of a",
"# PermuteEncoder().",
"if",
"encoder... | 32.135338 | 19.165414 |
def animation(self, animation):
    """Set the spinner's animation style and refresh the rendered text.

    Parameters
    ----------
    animation: str
        Name of the animation the spinner should use.
    """
    self._animation = animation
    # Re-render the frames from the original text under the new animation.
    original_text = self._text['original']
    self._text = self._get_text(original_text)
"def",
"animation",
"(",
"self",
",",
"animation",
")",
":",
"self",
".",
"_animation",
"=",
"animation",
"self",
".",
"_text",
"=",
"self",
".",
"_get_text",
"(",
"self",
".",
"_text",
"[",
"'original'",
"]",
")"
] | 31.444444 | 11.444444 |
def load(self, size):
    """Evaluate and return up to *size* characters of this file's contents.

    :param size: maximum number of characters to read.
    :return: the evaluated Python expression, or None when the path
        does not exist or is not a regular file.
    """
    if self.exists() and self.isfile():
        # Use a context manager so the handle is closed deterministically
        # (the original leaked the open file object).
        with open(self) as handle:
            # NOTE(review): eval() executes arbitrary code from the file;
            # consider ast.literal_eval if contents are plain literals.
            return eval(handle.read(size))
"def",
"load",
"(",
"self",
",",
"size",
")",
":",
"if",
"self",
".",
"exists",
"(",
")",
"and",
"self",
".",
"isfile",
"(",
")",
":",
"return",
"eval",
"(",
"open",
"(",
"self",
")",
".",
"read",
"(",
"size",
")",
")"
] | 39.5 | 7 |
def add_field(self, field):
    """
    Add a field to this table, skipping duplicates by name.

    :param field: This can be a string of a field name, a dict of
        {'alias': field}, or a ``Field`` instance
    :type field: str or dict or Field
    :return: the added ``Field`` instance, or None when a field with the
        same name is already present
    """
    new_field = FieldFactory(
        field,
    )
    new_field.set_table(self)

    # Bail out early if a field with this name was already added.
    new_name = new_field.get_name()
    if any(existing.get_name() == new_name for existing in self.fields):
        return None

    self.before_add_field(new_field)
    new_field.before_add()

    if new_field.ignore is False:
        self.fields.append(new_field)

    return new_field
"def",
"add_field",
"(",
"self",
",",
"field",
")",
":",
"field",
"=",
"FieldFactory",
"(",
"field",
",",
")",
"field",
".",
"set_table",
"(",
"self",
")",
"# make sure field is not already added",
"field_name",
"=",
"field",
".",
"get_name",
"(",
")",
"for"... | 26.538462 | 16.923077 |
def _on_process_finished(self):
    """
    Report process termination in the console and emit `process_finished`.
    """
    proc = self._process
    exit_code = proc.exitCode()
    # Abnormal termination (crash/kill) is reported as exit code 139.
    if proc.exitStatus() != proc.NormalExit:
        exit_code = 139
    self._formatter.append_message(
        '\x1b[0m\nProcess finished with exit code %d' % exit_code,
        output_format=OutputFormat.CustomFormat)
    self.setReadOnly(True)
    self.process_finished.emit()
"def",
"_on_process_finished",
"(",
"self",
")",
":",
"exit_code",
"=",
"self",
".",
"_process",
".",
"exitCode",
"(",
")",
"if",
"self",
".",
"_process",
".",
"exitStatus",
"(",
")",
"!=",
"self",
".",
"_process",
".",
"NormalExit",
":",
"exit_code",
"=... | 46 | 17.818182 |
def convert_pkt_to_json(pkg):
    """ convert_pkt_to_json

    Inspired by:
    https://gist.githubusercontent.com/cr0hn/1b0c2e672cd0721d3a07/raw/9144676ceb12dbd545e6dce366822bbedde8de2c/pkg_to_json.py

    This function convert a Scapy packet to JSON

    :param pkg: A kamene package
    :type pkg: objects

    :return: A JSON data
    :rtype: dict()
    """
    results = defaultdict(dict)
    try:
        for index in range(0, len(pkg)):
            layer = pkg[index]
            # Get layer name
            layer_tmp_name = str(layer.__dict__["aliastypes"][0])
            layer_start_pos = layer_tmp_name.rfind(".") + 1
            layer_name = layer_tmp_name[layer_start_pos:-2].lower()
            # Get the layer info
            tmp_t = {}
            for default_x, y in layer.__dict__["default_fields"].items():
                x = "default_{}".format(default_x)
                if DEBUG_PACKETS:
                    log.info("default: key={} val={}".format(x, y))
                try:
                    # Prefer a hex dump of the raw bytes when available.
                    tmp_t["hex_default_{}".format(default_x)] = y.hex()
                except Exception:
                    # http://python3porting.com/differences.html#long
                    # Non-primitive values are recursed into; primitives
                    # are stored as-is under the default_* key.
                    if y and not isinstance(y, (str,
                                                int,
                                                int,
                                                float,
                                                list,
                                                dict)):
                        if x in tmp_t:
                            tmp_t[x].update(convert_pkt_to_json(y))
                        else:
                            tmp_t[x] = y
                    else:
                        tmp_t[x] = y
            # end of fields
            results[layer_name] = tmp_t
            try:
                tmp_t = {}
                for fields_x, y in layer.__dict__["fields"].items():
                    if DEBUG_PACKETS:
                        # NOTE(review): `x` here is stale -- it still holds
                        # the value from the default_fields loop (or a prior
                        # else-branch); it is not set to the current field.
                        log.info("fields: key={} val={}".format(x, y))
                    if fields_x == "qd":
                        if y:
                            # NOTE(review): convert_pkt_to_json returns a
                            # dict; json.loads on a dict raises TypeError --
                            # verify this branch against real DNS traffic.
                            tmp_t["fields_qd"] = json.loads(
                                convert_pkt_to_json(y))
                    elif fields_x == "ar":
                        if y:
                            tmp_t["fields_ar"] = json.loads(
                                convert_pkt_to_json(y))
                    elif fields_x == "an":
                        if y:
                            tmp_t["fields_an"] = json.loads(
                                convert_pkt_to_json(y))
                    elif fields_x == "arcount":
                        if y:
                            tmp_t["fields_arcount"] = json.loads(
                                convert_pkt_to_json(y))
                    elif fields_x == "ns":
                        if y:
                            """
                            'ns': <DNSRR rrname='ubuntu.com.'
                            type=SOA rclass=IN ttl=1345
                            rdata=b'\x03ns1\tcanonical
                            \xc0\x19\nhostmaster\xc02xHl\x8e
                            \x00\x00*0\x00\x00\x0e\x10\x00
                            \t:\x80\x00\x00\x0e\x10' |>,
                            """
                            tmp_t["fields_ns"] = str(y)
                    # NOTE(review): every `tmp_t[x] = y` below writes under
                    # the stale `x` key (see note above), not under a
                    # fields_* key for the current field -- likely a bug.
                    elif fields_x == "proto":
                        if y:
                            tmp_t[x] = y
                    elif fields_x == "flags":
                        if y:
                            tmp_t[x] = y
                    elif fields_x == "ack":
                        if y:
                            tmp_t[x] = y
                    elif fields_x == "id":
                        if y:
                            tmp_t[x] = y
                    elif fields_x == "window":
                        if y:
                            tmp_t[x] = y
                    elif fields_x == "dataofs":
                        if y:
                            tmp_t[x] = y
                    elif fields_x == "frag":
                        if y:
                            tmp_t[x] = y
                    elif fields_x == "reserved":
                        if y:
                            tmp_t[x] = y
                    elif fields_x == "ttl":
                        if y:
                            tmp_t[x] = y
                    elif fields_x == "chksum":
                        if y:
                            tmp_t[x] = y
                    elif fields_x == "options":
                        if y:
                            # Copy only the known TCP option keys.
                            cur_d = {}
                            try:
                                test = dict(y)
                                if "EOL" in test:
                                    cur_d["EOL"] = test["EOL"]
                                if "NOP" in test:
                                    cur_d["NOP"] = test["NOP"]
                                if "MSS" in test:
                                    cur_d["MSS"] = test["MSS"]
                                if "WScale" in test:
                                    cur_d["WScale"] = test["WScale"]
                                if "SAckOK" in test:
                                    cur_d["SAckOK"] = \
                                        test["SAckOK"].decode("utf-8")
                                if "SAck" in test:
                                    cur_d["SAck"] = test["SAck"]
                                if "Timestamp" in test:
                                    if test["Timestamp"]:
                                        cur_d["Timestamp"] = \
                                            test["Timestamp"][0]
                                if "AltChkSum" in test:
                                    cur_d["AltChkSum"] = test["AltChkSum"]
                                if "AltChkSumOpt" in test:
                                    cur_d["AltChkSumOpt"] = \
                                        test["AltChkSumOpt"]
                                if "Mood" in test:
                                    cur_d["Mood"] = test["Mood"]
                                if "Experiment" in test:
                                    cur_d["Experiment"] = test["Experiment"]
                            except Exception as exct:
                                log.error(("1 Failed parsing "
                                           "{}={} ex={}")
                                          .format(x,
                                                  y,
                                                  exct))
                                cur_d = str(y)
                            # end of parsing cur_d
                            tmp_t["fields_{}".format(fields_x)] = cur_d
                    elif fields_x == "urgptr":
                        if y:
                            cur_d = {}
                            try:
                                for f in y:
                                    cur_f = "{}_{}".format(fields_x,
                                                           f)
                                    try:
                                        cur_d[cur_f] = y.decode("utf-8")
                                    except Exception:
                                        cur_d["hex_" + cur_f] = y[f].hex()
                            except Exception as exct:
                                log.error(("2 Failed parsing "
                                           "{}={} ex={}")
                                          .format(x,
                                                  y,
                                                  exct))
                                cur_d = y
                            # end of parsing cur_d
                            tmp_t["fields_{}".format(fields_x)] = cur_d
                    else:
                        # Generic field: here `x` IS refreshed before use.
                        x = "{}".format(fields_x)
                        try:
                            hex_key = "hex_field_{}".format(fields_x)
                            if fields_x == "load":
                                try:
                                    tmp_t["load"] = y.decode("utf-8")
                                except Exception:
                                    tmp_t[hex_key] = y.hex()
                            else:
                                tmp_t[hex_key] = y.hex()
                        except Exception:
                            # http://python3porting.com/differences.html#long
                            if y and not isinstance(y, (str,
                                                        int,
                                                        int,
                                                        float,
                                                        list,
                                                        dict)):
                                if x in tmp_t:
                                    tmp_t[x].update(convert_pkt_to_json(y))
                                else:
                                    tmp_t[x] = y
                            else:
                                tmp_t[x] = y
                    # end of special handling:
                    # qd
                    results[layer_name] = tmp_t
            except KeyError:
                # No custom fields
                pass
    except Exception:
        # Package finish -> do nothing
        pass
    if "padding" in results:
        try:
            if "load" in results["padding"]:
                results["padding"]["load"] = \
                    results["padding"]["load"].encode("utf-8").hex()
        except Exception:
            log.error(("failed parsing padding={}")
                      .format(results["padding"]))
    # end of fixing padding
    if "raw" in results:
        try:
            if "load" in results["raw"]:
                results["raw"]["load"] = \
                    results["raw"]["load"].encode("utf-8").hex()
        except Exception:
            log.error(("failed parsing raw={}")
                      .format(results["raw"]))
    # end of fixing raw
    if DEBUG_PACKETS:
        log.debug("")
        log.debug("pre json serialization:")
        log.debug(results)
        log.debug("post json.dumps:")
        log.debug(ppj(results))
        log.debug("")
    else:
        log.info(ppj(results))
    return results
"def",
"convert_pkt_to_json",
"(",
"pkg",
")",
":",
"results",
"=",
"defaultdict",
"(",
"dict",
")",
"try",
":",
"for",
"index",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"pkg",
")",
")",
":",
"layer",
"=",
"pkg",
"[",
"index",
"]",
"# Get layer name... | 41.112903 | 16.185484 |
def async_call(self, fn, *args, **kwargs):
    """Schedule `fn` to be called by the event loop soon.

    This is thread-safe and is the only supported way for code off the
    main thread to interact with nvim api objects.  It may also be used
    inside a synchronous event handler, just before it returns, to defer
    work that shouldn't block neovim.
    """
    # Capture where the call was requested, for use in error reports.
    requested_at = ''.join(format_stack(None, 5)[:-1])

    def run_callback():
        try:
            fn(*args, **kwargs)
        except Exception as err:
            details = ("error caught while executing async callback:\n"
                       "{!r}\n{}\n \nthe call was requested at\n{}"
                       .format(err, format_exc_skip(1), requested_at))
            self._err_cb(details)
            raise

    self._session.threadsafe_call(run_callback)
"def",
"async_call",
"(",
"self",
",",
"fn",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"call_point",
"=",
"''",
".",
"join",
"(",
"format_stack",
"(",
"None",
",",
"5",
")",
"[",
":",
"-",
"1",
"]",
")",
"def",
"handler",
"(",
")",
... | 39.681818 | 18.318182 |
def _set_nameserver_fc4s(self, v, load=False):
    """
    Setter method for nameserver_fc4s, mapped from YANG variable /brocade_nameserver_rpc/get_nameserver_detail/output/show_nameserver/nameserver_fc4s (nameserver-fc4s-type)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_nameserver_fc4s is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_nameserver_fc4s() directly.

    YANG Description: Indicates the Fibre Channel FC4
    services supported by the device.
    """
    # YANG union values may arrive wrapped; unwrap to the underlying
    # type before attempting coercion below.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Coerce the value into the generated restricted string type
      # (pattern 'none|FCP|IPFC', length 0..16); raises on mismatch.
      t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'none|FCP|IPFC', 'length': [u'0..16']}), is_leaf=True, yang_name="nameserver-fc4s", rest_name="nameserver-fc4s", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'FC4s'}}, namespace='urn:brocade.com:mgmt:brocade-nameserver', defining_module='brocade-nameserver', yang_type='nameserver-fc4s-type', is_config=True)
    except (TypeError, ValueError):
      # Re-raise with the structured error payload that pyangbind
      # generated code conventionally uses.
      raise ValueError({
        'error-string': """nameserver_fc4s must be of a type compatible with nameserver-fc4s-type""",
        'defined-type': "brocade-nameserver:nameserver-fc4s-type",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'none|FCP|IPFC', 'length': [u'0..16']}), is_leaf=True, yang_name="nameserver-fc4s", rest_name="nameserver-fc4s", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'FC4s'}}, namespace='urn:brocade.com:mgmt:brocade-nameserver', defining_module='brocade-nameserver', yang_type='nameserver-fc4s-type', is_config=True)""",
      })
    self.__nameserver_fc4s = t
    # Notify the parent object of the change when change tracking is enabled.
    if hasattr(self, '_set'):
      self._set()
"def",
"_set_nameserver_fc4s",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
... | 78.44 | 38.96 |
def start(self):
    """
    Start this router.

    At least the IOS image must be set before starting it.
    """
    # Sensors and power-supply values must be pushed *after* the router
    # is running, otherwise they are not taken into account (Dynamips bug?).
    yield from Router.start(self)

    default_sensors = [22, 22, 22, 22]
    default_power_supplies = [1, 1]
    if self._sensors != default_sensors:
        yield from self.set_sensors(self._sensors)
    if self._power_supplies != default_power_supplies:
        yield from self.set_power_supplies(self._power_supplies)
"def",
"start",
"(",
"self",
")",
":",
"# trick: we must send sensors and power supplies info after starting the router",
"# otherwise they are not taken into account (Dynamips bug?)",
"yield",
"from",
"Router",
".",
"start",
"(",
"self",
")",
"if",
"self",
".",
"_sensors",
"... | 40.538462 | 17.307692 |
def server_by_name(name, profile=None, **kwargs):
    '''
    Look up a single server by its name and return its information.

    name
        Server Name

    CLI Example:

    .. code-block:: bash

        salt '*' nova.server_by_name myserver profile=openstack
    '''
    connection = _auth(profile, **kwargs)
    return connection.server_by_name(name)
"def",
"server_by_name",
"(",
"name",
",",
"profile",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"conn",
"=",
"_auth",
"(",
"profile",
",",
"*",
"*",
"kwargs",
")",
"return",
"conn",
".",
"server_by_name",
"(",
"name",
")"
] | 20.066667 | 24.2 |
def list_dirnames_in_directory(self, dirname):
    """List all names of directories that exist at the root of this
    bucket directory.

    Note that *directories* don't exist in S3; rather directories are
    inferred from path names.

    Parameters
    ----------
    dirname : `str`
        Directory name in the bucket relative to ``bucket_root``.

    Returns
    -------
    dirnames : `list`
        List of directory names (`str`), relative to ``bucket_root/``,
        that exist at the root of ``dirname``.
    """
    prefix = self._create_prefix(dirname)
    # Collect into a set from the start: the loop finds the root
    # directory of *every* object under the prefix, so duplicates are
    # the common case (previously a list was built and deduped at the end).
    dirnames = set()
    for obj in self._bucket.objects.filter(Prefix=prefix):
        # Directory name of each object under this path prefix.
        # (Use a new local; the original shadowed the `dirname` parameter.)
        obj_dirname = os.path.dirname(obj.key)
        # obj_dirname is empty if the object happens to be the directory
        # redirect object for the prefix directory (directory redirect
        # objects are named after directories and have metadata that
        # tells Fastly to redirect the browser to the index.html
        # contained in the directory).
        if obj_dirname == '':
            obj_dirname = obj.key + '/'
        # Strip out the path prefix from the directory name.
        rel_dirname = os.path.relpath(obj_dirname, start=prefix)
        # If there's only one part then this directory is at the root
        # relative to the prefix. We want this.
        dir_parts = rel_dirname.split('/')
        if len(dir_parts) == 1:
            dirnames.add(dir_parts[0])
    # Remove posix-like relative directory names that can appear
    # in the bucket listing.
    dirnames.discard('.')
    dirnames.discard('..')
    return list(dirnames)
"def",
"list_dirnames_in_directory",
"(",
"self",
",",
"dirname",
")",
":",
"prefix",
"=",
"self",
".",
"_create_prefix",
"(",
"dirname",
")",
"dirnames",
"=",
"[",
"]",
"for",
"obj",
"in",
"self",
".",
"_bucket",
".",
"objects",
".",
"filter",
"(",
"Pre... | 38.5 | 20.423077 |
def find_segments(stops, shape):
    """Find corresponding shape points for a list of stops and create shape break points.

    Parameters
    ----------
    stops: stop-sequence (list)
        List of stop points
    shape: list of shape points
        shape-sequence of shape points

    Returns
    -------
    break_points: list[int]
        stops[i] corresponds to shape[break_points[i]]. This list can
        be used to partition the shape points into segments between
        one stop and the next.
    badness: float
        Lower indicates better fit to the shape. This is the sum of
        distances (in meters) between every each stop and its closest
        shape point. This is not needed in normal use, but in the
        cases where you must determine the best-fitting shape for a
        stop-sequence, use this.
    """
    if not shape:
        return [], 0
    break_points = []
    last_i = 0
    cumul_d = 0
    badness = 0
    d_last_stop = float('inf')
    lstlat, lstlon = None, None
    break_shape_points = []
    for stop in stops:
        stlat, stlon = stop['lat'], stop['lon']
        best_d = float('inf')
        # print stop
        # Early bail-out: the accumulated fit error is already far too
        # large for this shape to be a plausible match.
        if badness > 500 and badness > 30 * len(break_points):
            return [], badness
        # Scan shape points forward from the previous stop's match:
        # matched shape indices must be non-decreasing along the stops.
        for i in range(last_i, len(shape)):
            d = wgs84_distance(stlat, stlon, shape[i]['lat'], shape[i]['lon'])
            if lstlat:
                d_last_stop = wgs84_distance(lstlat, lstlon, shape[i]['lat'], shape[i]['lon'])
            # If we are getting closer to next stop, record this as
            # the best stop so far.continue
            if d < best_d:
                best_d = d
                best_i = i
                # print best_d, i, last_i, len(shape)
            cumul_d += d
            # We have to be very careful about our stop condition.
            # This is trial and error, basically.
            # Keep scanning while we are still approaching the stop, the
            # current point is too far away, or we have not looked far
            # enough past the current best candidate.
            if (d_last_stop < d) or (d > 500) or (i < best_i + 100):
                continue
            # We have decided our best stop, stop looking and continue
            # the outer loop.
            else:
                badness += best_d
                break_points.append(best_i)
                last_i = best_i
                lstlat, lstlon = stlat, stlon
                break_shape_points.append(shape[best_i])
                break
        else:
            # Executed if we did *not* break the inner loop
            # (the scan reached the end of the shape): accept the best
            # candidate seen so far.
            badness += best_d
            break_points.append(best_i)
            last_i = best_i
            lstlat, lstlon = stlat, stlon
            break_shape_points.append(shape[best_i])
            pass
    # print "Badness:", badness
    # print_coords(stops, 'stop')
    # print_coords(shape, 'shape')
    # print_coords(break_shape_points, 'break')
    return break_points, badness
"def",
"find_segments",
"(",
"stops",
",",
"shape",
")",
":",
"if",
"not",
"shape",
":",
"return",
"[",
"]",
",",
"0",
"break_points",
"=",
"[",
"]",
"last_i",
"=",
"0",
"cumul_d",
"=",
"0",
"badness",
"=",
"0",
"d_last_stop",
"=",
"float",
"(",
"'... | 36.453333 | 16.826667 |
def _ResolvePath(self, path, expand_variables=True):
    """Resolves a Windows path in file system specific format.

    This function will check if the individual path segments exists within
    the file system. For this it will prefer the first case sensitive match
    above a case insensitive match. If no match was found None is returned.

    Args:
      path (str): Windows path to resolve.
      expand_variables (Optional[bool]): True if path variables should be
          expanded or not.

    Returns:
      tuple[str, PathSpec]: location and matching path specification or
          (None, None) if not available.
    """
    # Allow for paths that start with an environment variable e.g.
    # %SystemRoot%\file.txt
    if path.startswith('%'):
      path_segment, _, _ = path.partition(self._PATH_SEPARATOR)
      if not self._PATH_EXPANSION_VARIABLE.match(path_segment):
        path = None
    else:
      path = self._PathStripPrefix(path)
    if path is None:
      return None, None
    if path_spec_factory.Factory.IsSystemLevelTypeIndicator(
        self._file_system.type_indicator):
      # System-level path specs start the walk at the mount point.
      file_entry = self._file_system.GetFileEntryByPathSpec(self._mount_point)
      expanded_path_segments = self._file_system.SplitPath(
          self._mount_point.location)
    else:
      file_entry = self._file_system.GetRootFileEntry()
      expanded_path_segments = []
    number_of_expanded_path_segments = 0
    search_path_segments = path.split(self._PATH_SEPARATOR)
    while search_path_segments:
      path_segment = search_path_segments.pop(0)
      if file_entry is None:
        return None, None
      # Ignore empty path segments or path segments containing a single dot.
      if not path_segment or path_segment == '.':
        continue
      if path_segment == '..':
        # Only allow to traverse back up to the mount point.
        # NOTE(review): pop(0) removes the *first* expanded segment; for
        # '..' one might expect the last — confirm against upstream.
        if number_of_expanded_path_segments > 0:
          _ = expanded_path_segments.pop(0)
          number_of_expanded_path_segments -= 1
          file_entry = file_entry.GetParentFileEntry()
        continue
      if (expand_variables and
          self._PATH_EXPANSION_VARIABLE.match(path_segment)):
        # Replace %VAR% with its value; [1:-1] strips the percent signs.
        path_segment = self._environment_variables.get(
            path_segment[1:-1].upper(), path_segment)
        if self._PATH_SEPARATOR in path_segment:
          # The expanded path segment itself can consist of multiple
          # path segments, hence we need to split it and prepend it to
          # the search path segments list.
          path_segments = path_segment.split(self._PATH_SEPARATOR)
          path_segments.extend(search_path_segments)
          search_path_segments = path_segments
          path_segment = search_path_segments.pop(0)
      sub_file_entry = file_entry.GetSubFileEntryByName(
          path_segment, case_sensitive=False)
      if sub_file_entry is None:
        return None, None
      expanded_path_segments.append(sub_file_entry.name)
      number_of_expanded_path_segments += 1
      file_entry = sub_file_entry
    location = self._file_system.JoinPath(expanded_path_segments)
    return location, file_entry.path_spec
"def",
"_ResolvePath",
"(",
"self",
",",
"path",
",",
"expand_variables",
"=",
"True",
")",
":",
"# Allow for paths that start with an environment variable e.g.",
"# %SystemRoot%\\file.txt",
"if",
"path",
".",
"startswith",
"(",
"'%'",
")",
":",
"path_segment",
",",
"... | 37.292683 | 19.914634 |
def _parse_attribute_details_file(self, prop=ATTRIBUTES):
    """ Concatenates a list of Attribute Details data structures parsed from a remote file """
    # The remote file URL may be stored in one of two XML locations,
    # starting at contentInfo/MD_FeatureCatalogueDescription/featureCatalogueCitation:
    #    ATTRIBUTE: href
    #    ELEMENT TEXT: CI_Citation/.../CI_Contact/onlineResource/CI_OnlineResource/linkage
    self._attr_details_file_url = parse_property(
        self._xml_tree, None, self._data_map, '_attributes_file'
    )
    if not self._attr_details_file_url:
        return None

    try:
        remote_tree = get_remote_element(self._attr_details_file_url)
    except Exception:
        # Unreachable or unparseable file: clear the URL and give up.
        self._attr_details_file_url = None
        return None

    xpath_map = self._data_structures[ATTRIBUTES]
    xpath_root = self._get_xroot_for(prop)
    return parse_complex_list(remote_tree, xpath_root, xpath_map, prop)
"def",
"_parse_attribute_details_file",
"(",
"self",
",",
"prop",
"=",
"ATTRIBUTES",
")",
":",
"# Parse content from remote file URL, which may be stored in one of two places:",
"# Starting at: contentInfo/MD_FeatureCatalogueDescription/featureCatalogueCitation",
"# ATTRIBUTE: href",
... | 42.083333 | 26.333333 |
def apply_with(self, _, val, ctx):
    """ constructor

    example val:
    {
        # header values used in multipart/form-data according to RFC2388
        'header': {
            'Content-Type': 'text/plain',

            # according to RFC2388, available values are '7bit', '8bit', 'binary'
            'Content-Transfer-Encoding': 'binary'
        },
        filename: 'a.txt',
        data: None (or any file-like object)
    }

    :param val: dict containing file info.
    """
    self.header = val.get('header', {})
    self.data = val.get('data', None)
    self.filename = val.get('filename', '')

    # A file part must carry either a file name or an actual file object.
    # Use `is None` (PEP 8): `== None` can be fooled by file-like objects
    # that override __eq__.
    if self.data is None and self.filename == '':
        raise ValidationError('should have file name or file object, not: {0}, {1}'.format(
            self.data, self.filename
        ))
"def",
"apply_with",
"(",
"self",
",",
"_",
",",
"val",
",",
"ctx",
")",
":",
"self",
".",
"header",
"=",
"val",
".",
"get",
"(",
"'header'",
",",
"{",
"}",
")",
"self",
".",
"data",
"=",
"val",
".",
"get",
"(",
"'data'",
",",
"None",
")",
"s... | 35 | 18.52 |
def _get_tough_method(self, method):
        """Return a "tough" version of a connection class method.

        The tough version checks whether the connection is bad (lost)
        and automatically and transparently tries to reset the connection
        if this is the case (for instance, the database has been restarted).
        """
        def tough_method(*args, **kwargs):
            transaction = self._transaction
            if not transaction:
                # Outside a transaction it is safe to proactively reset
                # a connection that looks bad or over-used.
                try:  # check whether connection status is bad
                    if not self._con.db.status:
                        raise AttributeError
                    if self._maxusage:  # or connection used too often
                        if self._usage >= self._maxusage:
                            raise AttributeError
                except Exception:
                    self.reset()  # then reset the connection
            try:
                result = method(*args, **kwargs)  # try connection method
            except Exception:  # error in query
                if transaction:  # inside a transaction
                    # Do not retry: the transaction state is lost anyway.
                    self._transaction = False
                    raise  # propagate the error
                elif self._con.db.status:  # if it was not a connection problem
                    raise  # then propagate the error
                else:  # otherwise
                    self.reset()  # reset the connection
                    result = method(*args, **kwargs)  # and try one more time
            self._usage += 1
            return result
        return tough_method
"def",
"_get_tough_method",
"(",
"self",
",",
"method",
")",
":",
"def",
"tough_method",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"transaction",
"=",
"self",
".",
"_transaction",
"if",
"not",
"transaction",
":",
"try",
":",
"# check whether con... | 46.393939 | 16.818182 |
def load_classifier():
    """Load the trained intent classifier from its pickle file.

    Returns
    -------
    tuple
        ``(tfidf_model, clf, target_names)`` as stored by the trainer.
    """
    path = os.path.join(l.TOPDIR, 'clf.pickle')
    # Pickle data is binary: open with 'rb' (text mode 'r' fails on
    # Python 3 and can corrupt reads on Windows). Use a context manager
    # so the file handle is always closed.
    with open(path, 'rb') as handle:
        obj = pickle.load(handle)
    return obj['tfidf_model'], obj['clf'], obj['target_names']
"def",
"load_classifier",
"(",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"l",
".",
"TOPDIR",
",",
"'clf.pickle'",
")",
"obj",
"=",
"pickle",
".",
"load",
"(",
"open",
"(",
"path",
",",
"'r'",
")",
")",
"return",
"obj",
"[",
"'tf... | 34.5 | 14.833333 |
def is_terminal(self, symbol: str) -> bool:
    """
    Called on nodes of a logical form tree, which are either non-terminal
    symbols that can be expanded or terminal symbols that must be leaf
    nodes. Returns ``True`` if the given symbol is a terminal symbol.
    """
    # 'lambda' gets special treatment because it behaves weirdly in
    # action sequences.
    if 'lambda' in symbol:
        return True
    return symbol in self.global_name_mapping or symbol in self.local_name_mapping
"def",
"is_terminal",
"(",
"self",
",",
"symbol",
":",
"str",
")",
"->",
"bool",
":",
"# We special-case 'lambda' here because it behaves weirdly in action sequences.",
"return",
"(",
"symbol",
"in",
"self",
".",
"global_name_mapping",
"or",
"symbol",
"in",
"self",
".... | 53.9 | 20.7 |
def namespaced_function(function, global_dict, defaults=None, preserve_context=False):
    '''
    Clone ``function`` so that it executes under a different globals()
    namespace scope.

    preserve_context:
        Keep the context taken from the original namespace and extend it
        with the globals() taken from the new targeted namespace.
    '''
    if defaults is None:
        defaults = function.__defaults__

    if preserve_context:
        merged = function.__globals__.copy()
        merged.update(global_dict)
        global_dict = merged

    clone = types.FunctionType(
        function.__code__,
        global_dict,
        name=function.__name__,
        argdefs=defaults,
        closure=function.__closure__,
    )
    # Carry over any attributes set on the original function object.
    clone.__dict__.update(function.__dict__)
    return clone
"def",
"namespaced_function",
"(",
"function",
",",
"global_dict",
",",
"defaults",
"=",
"None",
",",
"preserve_context",
"=",
"False",
")",
":",
"if",
"defaults",
"is",
"None",
":",
"defaults",
"=",
"function",
".",
"__defaults__",
"if",
"preserve_context",
"... | 34.16 | 18.64 |
def run_vcs_tool(path, action):
    """If path is a valid VCS repository, run the corresponding VCS tool

    Supported VCS actions: 'commit', 'browse'

    Return False if the VCS tool is not installed"""
    # NOTE(review): despite the docstring, a missing tool raises
    # ActionToolNotFound rather than returning False — confirm intended.
    info = get_vcs_info(get_vcs_root(path))
    tools = info['actions'][action]
    # Try each candidate tool in order of preference; the first installed
    # one wins and the function exits from inside the loop.
    for tool, args in tools:
        if programs.find_program(tool):
            if not running_under_pytest():
                programs.run_program(tool, args, cwd=path)
            else:
                # Under pytest only report that a tool was found,
                # without launching an external program.
                return True
            return
    # for/else: runs only when *no* tool in the list was found installed.
    else:
        cmdnames = [name for name, args in tools]
        raise ActionToolNotFound(info['name'], action, cmdnames)
"def",
"run_vcs_tool",
"(",
"path",
",",
"action",
")",
":",
"info",
"=",
"get_vcs_info",
"(",
"get_vcs_root",
"(",
"path",
")",
")",
"tools",
"=",
"info",
"[",
"'actions'",
"]",
"[",
"action",
"]",
"for",
"tool",
",",
"args",
"in",
"tools",
":",
"if... | 40.25 | 10.875 |
def motif3funct_wei(W):
    '''
    Functional motifs are subsets of connection patterns embedded within
    anatomical motifs. Motif frequency is the frequency of occurrence of
    motifs around a node. Motif intensity and coherence are weighted
    generalizations of motif frequency.

    Parameters
    ----------
    W : NxN np.ndarray
        weighted directed connection matrix (all weights between 0 and 1)

    Returns
    -------
    I : 13xN np.ndarray
        motif intensity matrix
    Q : 13xN np.ndarray
        motif coherence matrix
    F : 13xN np.ndarray
        motif frequency matrix

    Notes
    -----
    Average intensity and coherence are given by I./F and Q./F.
    '''
    from scipy import io
    import os
    # Load the canonical 3-node motif templates (m3), their motif-class
    # ids (id3) and motif sizes (n3) from the bundled .mat file.
    fname = os.path.join(os.path.dirname(__file__), motiflib)
    mot = io.loadmat(fname)
    m3 = mot['m3']
    id3 = mot['id3'].squeeze()
    n3 = mot['n3'].squeeze()
    n = len(W)
    I = np.zeros((13, n))  # intensity
    Q = np.zeros((13, n))  # coherence
    F = np.zeros((13, n))  # frequency
    A = binarize(W, copy=True)  # create binary adjmat
    As = np.logical_or(A, A.T)  # symmetrized adjmat
    # Enumerate every unordered node triple (u, v1, v2) with u < v1, v2
    # that is connected in the symmetrized graph.
    for u in range(n - 2):
        # v1: neighbors of u (>u)
        V1 = np.append(np.zeros((u,), dtype=int), As[u, u + 1:n + 1])
        for v1 in np.where(V1)[0]:
            # v2: neighbors of v1 (>u)
            V2 = np.append(np.zeros((u,), dtype=int), As[v1, u + 1:n + 1])
            V2[V1] = 0  # not already in V1
            # and all neighbors of u (>v1)
            V2 = np.logical_or(
                np.append(np.zeros((v1,)), As[u, v1 + 1:n + 1]), V2)
            for v2 in np.where(V2)[0]:
                # Binary connection pattern of the triple, matched
                # against all motif templates at once.
                a = np.array((A[v1, u], A[v2, u], A[u, v1],
                              A[v2, v1], A[u, v2], A[v1, v2]))
                ix = (np.dot(m3, a) == n3)
                m = np.sum(ix)
                w = np.array((W[v1, u], W[v2, u], W[u, v1],
                              W[v2, v1], W[u, v2], W[v1, v2]))
                M = m3[ix, :] * np.tile(w, (m, 1))
                id = id3[ix] - 1
                l = n3[ix]
                x = np.sum(M, axis=1) / l  # arithmetic mean
                M[M == 0] = 1  # enable geometric mean
                i = np.prod(M, axis=1)**(1 / l)  # intensity
                q = i / x  # coherence
                # unique motif occurrences
                idu, jx = np.unique(id, return_index=True)
                jx = np.append((0,), jx + 1)
                mu = len(idu)  # number of unique motifs
                i2, q2, f2 = np.zeros((3, mu))
                for h in range(mu):
                    i2[h] = np.sum(i[jx[h] + 1:jx[h + 1] + 1])
                    q2[h] = np.sum(q[jx[h] + 1:jx[h + 1] + 1])
                    f2[h] = jx[h + 1] - jx[h]
                # then add to cumulative count
                I[idu, u] += i2
                I[idu, v1] += i2
                I[idu, v2] += i2
                Q[idu, u] += q2
                Q[idu, v1] += q2
                Q[idu, v2] += q2
                F[idu, u] += f2
                F[idu, v1] += f2
                F[idu, v2] += f2
    return I, Q, F
"def",
"motif3funct_wei",
"(",
"W",
")",
":",
"from",
"scipy",
"import",
"io",
"import",
"os",
"fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"motiflib",
")",
"mot",
"=",
"io",
".",
... | 33.01087 | 18.728261 |
def get_absorbing_atom_symbol_index(absorbing_atom, structure):
    """
    Return the absorbing atom symbol and its site index in the given structure.

    Args:
        absorbing_atom (str/int): symbol or site index
        structure (Structure)

    Returns:
        str, int: symbol and site index
    """
    # The two accepted input kinds (symbol vs site index) are disjoint
    # types, so the branch order does not matter.
    if isinstance(absorbing_atom, int):
        return str(structure[absorbing_atom].specie), absorbing_atom
    if isinstance(absorbing_atom, str):
        return absorbing_atom, structure.indices_from_symbol(absorbing_atom)[0]
    raise ValueError("absorbing_atom must be either specie symbol or site index")
"def",
"get_absorbing_atom_symbol_index",
"(",
"absorbing_atom",
",",
"structure",
")",
":",
"if",
"isinstance",
"(",
"absorbing_atom",
",",
"str",
")",
":",
"return",
"absorbing_atom",
",",
"structure",
".",
"indices_from_symbol",
"(",
"absorbing_atom",
")",
"[",
... | 36.294118 | 21.705882 |
def _coerceSingleRepetition(self, dataSet):
    """
    Build a fresh liveform from our parameters and delegate coercion of
    the given data set to it.
    """
    # LiveForm._coerced contains logic we want to reuse, so construct a
    # throwaway form (with a no-op callable) rather than coercing by hand.
    throwaway = LiveForm(lambda **k: None, self.parameters, self.name)
    return throwaway.fromInputs(dataSet)
"def",
"_coerceSingleRepetition",
"(",
"self",
",",
"dataSet",
")",
":",
"# make a liveform because there is some logic in _coerced",
"form",
"=",
"LiveForm",
"(",
"lambda",
"*",
"*",
"k",
":",
"None",
",",
"self",
".",
"parameters",
",",
"self",
".",
"name",
")... | 41.375 | 15.125 |
def alias_exists(aliases, indices=None, hosts=None, profile=None):
    '''
    Return a boolean indicating whether given alias exists

    indices
        Single or multiple indices separated by comma, use _all to perform the operation on all indices.
    aliases
        Alias names separated by comma

    CLI example::

        salt myminion elasticsearch.alias_exists None testindex
    '''
    client = _get_instance(hosts, profile)
    try:
        return client.indices.exists_alias(name=aliases, index=indices)
    except elasticsearch.exceptions.NotFoundError:
        # A missing index or alias simply means the alias does not exist.
        return False
    except elasticsearch.TransportError as e:
        raise CommandExecutionError("Cannot get alias {0} in index {1}, server returned code {2} with message {3}".format(aliases, indices, e.status_code, e.error))
"def",
"alias_exists",
"(",
"aliases",
",",
"indices",
"=",
"None",
",",
"hosts",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"es",
"=",
"_get_instance",
"(",
"hosts",
",",
"profile",
")",
"try",
":",
"return",
"es",
".",
"indices",
".",
"exi... | 38.7 | 31 |
def spkpos(targ, et, ref, abcorr, obs):
    """
    Return the position of a target body relative to an observing
    body, optionally corrected for light time (planetary aberration)
    and stellar aberration.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkpos_c.html

    :param targ: Target body name.
    :type targ: str
    :param et: Observer epoch.
    :type et: Union[float,Iterable[float]]
    :param ref: Reference frame of output position vector.
    :type ref: str
    :param abcorr: Aberration correction flag.
    :type abcorr: str
    :param obs: Observing body name.
    :type obs: str
    :return:
            Position of target,
            One way light time between observer and target.
    :rtype: tuple
    """
    # Convert the Python strings to C char pointers for the CSPICE call.
    targ = stypes.stringToCharP(targ)
    ref = stypes.stringToCharP(ref)
    abcorr = stypes.stringToCharP(abcorr)
    obs = stypes.stringToCharP(obs)
    # Output buffers reused across calls: a 3-vector and a double.
    ptarg = stypes.emptyDoubleVector(3)
    lt = ctypes.c_double()
    if hasattr(et, "__iter__"):
        # Vectorized path: one CSPICE call per epoch, results collected
        # into parallel lists.
        ptargs = []
        lts = []
        for t in et:
            libspice.spkpos_c(targ, t, ref, abcorr, obs, ptarg, ctypes.byref(lt))
            checkForSpiceError(None)
            ptargs.append(stypes.cVectorToPython(ptarg))
            lts.append(lt.value)
        return ptargs, lts
    else:
        # Scalar epoch: single CSPICE call.
        # NOTE(review): unlike the vectorized path above, this branch does
        # not call checkForSpiceError — confirm whether error checking is
        # handled elsewhere (e.g. by a decorator) or is missing here.
        libspice.spkpos_c(targ, et, ref, abcorr, obs, ptarg, ctypes.byref(lt))
        return stypes.cVectorToPython(ptarg), lt.value
"def",
"spkpos",
"(",
"targ",
",",
"et",
",",
"ref",
",",
"abcorr",
",",
"obs",
")",
":",
"targ",
"=",
"stypes",
".",
"stringToCharP",
"(",
"targ",
")",
"ref",
"=",
"stypes",
".",
"stringToCharP",
"(",
"ref",
")",
"abcorr",
"=",
"stypes",
".",
"str... | 34.170732 | 15.97561 |
def get_default_config(self):
    """Return the default settings for this collector."""
    base = super(IPCollector, self).get_default_config()
    base.update({
        'path': 'ip',
        'allowed_names': ('InAddrErrors, InDelivers, InDiscards, '
                          'InHdrErrors, InReceives, InUnknownProtos, OutDiscards, '
                          'OutNoRoutes, OutRequests'),
    })
    return base
"def",
"get_default_config",
"(",
"self",
")",
":",
"config",
"=",
"super",
"(",
"IPCollector",
",",
"self",
")",
".",
"get_default_config",
"(",
")",
"config",
".",
"update",
"(",
"{",
"'path'",
":",
"'ip'",
",",
"'allowed_names'",
":",
"'InAddrErrors, InDe... | 37.363636 | 16.181818 |
def loadtitlefont(self):
    """Lazily load the title font on first use.

    No-op when the font has already been loaded.
    """
    # `is None` instead of `== None`: identity test is the PEP 8 idiom
    # and immune to odd __eq__ overloads. Dead debug prints removed.
    if self.titlefont is None:
        self.titlefont = imft.load_path(os.path.join(fontsdir, "courR18.pil"))
"def",
"loadtitlefont",
"(",
"self",
")",
":",
"if",
"self",
".",
"titlefont",
"==",
"None",
":",
"# print 'the bloody fonts dir is????', fontsdir",
"# print 'pero esto que hace??', os.path.join(fontsdir, \"courR18.pil\")",
"# /home/vital/Workspace/p... | 63.25 | 26.25 |
def writeObject(self, obj, output, setReferencePosition=False):
        """Serializes the given object to the output. Returns output.

        Each serialized object starts with a marker byte: the high nibble
        encodes the type and the low nibble a size (or 0b1111 when the
        size follows as a separate integer object).

        If setReferencePosition is True, will set the position the
        object was written.
        """
        def proc_variable_length(format, length):
            # Build the marker byte(s) for a variable-length type; lengths
            # that do not fit in 4 bits are appended as an integer object.
            result = b''
            if length > 0b1110:
                result += pack('!B', (format << 4) | 0b1111)
                result = self.writeObject(length, result)
            else:
                result += pack('!B', (format << 4) | length)
            return result
        def timedelta_total_seconds(td):
            # Shim for Python 2.6 compatibility, which doesn't have total_seconds.
            # Make one argument a float to ensure the right calculation.
            return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10.0**6) / 10.0**6
        if setReferencePosition:
            # Remember the byte offset this object starts at so object
            # references can be resolved later.
            self.referencePositions[obj] = len(output)
        if obj is None:
            output += pack('!B', 0b00000000)
        elif isinstance(obj, BoolWrapper):
            if obj.value is False:
                output += pack('!B', 0b00001000)
            else:
                output += pack('!B', 0b00001001)
        elif isinstance(obj, Uid):
            size = self.intSize(obj.integer)
            output += pack('!B', (0b1000 << 4) | size - 1)
            output += self.binaryInt(obj.integer)
        elif isinstance(obj, (int, long)):
            # Integer marker's low nibble stores log2 of the byte size.
            byteSize = self.intSize(obj)
            root = math.log(byteSize, 2)
            output += pack('!B', (0b0001 << 4) | int(root))
            output += self.binaryInt(obj, as_number=True)
        elif isinstance(obj, FloatWrapper):
            # just use doubles
            output += pack('!B', (0b0010 << 4) | 3)
            output += self.binaryReal(obj)
        elif isinstance(obj, datetime.datetime):
            # Dates are seconds relative to apple_reference_date, stored
            # as a big-endian double.
            try:
                timestamp = (obj - apple_reference_date).total_seconds()
            except AttributeError:
                timestamp = timedelta_total_seconds(obj - apple_reference_date)
            output += pack('!B', 0b00110011)
            output += pack('!d', float(timestamp))
        elif isinstance(obj, Data):
            output += proc_variable_length(0b0100, len(obj))
            output += obj
        elif isinstance(obj, StringWrapper):
            output += proc_variable_length(obj.encodingMarker, len(obj))
            output += obj.encodedValue
        elif isinstance(obj, bytes):
            output += proc_variable_length(0b0101, len(obj))
            output += obj
        elif isinstance(obj, HashableWrapper):
            obj = obj.value
            if isinstance(obj, (set, list, tuple)):
                if isinstance(obj, set):
                    output += proc_variable_length(0b1100, len(obj))
                else:
                    output += proc_variable_length(0b1010, len(obj))
                # Containers hold references; members not yet written are
                # serialized (recursively) after the reference table.
                objectsToWrite = []
                for objRef in sorted(obj) if isinstance(obj, set) else obj:
                    (isNew, output) = self.writeObjectReference(objRef, output)
                    if isNew:
                        objectsToWrite.append(objRef)
                for objRef in objectsToWrite:
                    output = self.writeObject(objRef, output, setReferencePosition=True)
            elif isinstance(obj, dict):
                output += proc_variable_length(0b1101, len(obj))
                keys = []
                values = []
                objectsToWrite = []
                # Dict layout: all key references first, then all value
                # references, in sorted key order.
                for key, value in sorted(iteritems(obj)):
                    keys.append(key)
                    values.append(value)
                for key in keys:
                    (isNew, output) = self.writeObjectReference(key, output)
                    if isNew:
                        objectsToWrite.append(key)
                for value in values:
                    (isNew, output) = self.writeObjectReference(value, output)
                    if isNew:
                        objectsToWrite.append(value)
                for objRef in objectsToWrite:
                    output = self.writeObject(objRef, output, setReferencePosition=True)
        return output
"def",
"writeObject",
"(",
"self",
",",
"obj",
",",
"output",
",",
"setReferencePosition",
"=",
"False",
")",
":",
"def",
"proc_variable_length",
"(",
"format",
",",
"length",
")",
":",
"result",
"=",
"b''",
"if",
"length",
">",
"0b1110",
":",
"result",
... | 45 | 15.391304 |
def display(self, content = None, **settings):
    """
    Render this widget with the given settings and print the result,
    one line at a time.
    """
    rendered = self.render(content, **settings)
    for line in rendered:
        print(line)
"def",
"display",
"(",
"self",
",",
"content",
"=",
"None",
",",
"*",
"*",
"settings",
")",
":",
"lines",
"=",
"self",
".",
"render",
"(",
"content",
",",
"*",
"*",
"settings",
")",
"for",
"l",
"in",
"lines",
":",
"print",
"(",
"l",
")"
] | 30.571429 | 9.428571 |
def LinearContrast(alpha=1, per_channel=False, name=None, deterministic=False, random_state=None):
    """Adjust contrast by scaling each pixel value to ``127 + alpha*(I_ij-127)``.

    dtype support::

        See :func:`imgaug.augmenters.contrast.adjust_contrast_linear`.

    Parameters
    ----------
    alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Multiplier to linearly pronounce (>1.0), dampen (0.0 to 1.0) or invert (<0.0) the
        difference between each pixel value and the center value, e.g. ``127`` for ``uint8``.

            * If a number, then that value will be used for all images.
            * If a tuple ``(a, b)``, then a value from the range ``[a, b]`` will be used per image.
            * If a list, then a random value will be sampled from that list per image.
            * If a StochasticParameter, then a value will be sampled per image from that parameter.

    per_channel : bool or float, optional
        Whether to use the same value for all channels (False) or to sample a new value for each
        channel (True). If this value is a float ``p``, then for ``p`` percent of all images
        `per_channel` will be treated as True, otherwise as False.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Returns
    -------
    _ContrastFuncWrapper
        Augmenter to perform contrast adjustment by linearly scaling the distance to 128.
    """
    # Normalize `alpha` into a stochastic parameter (numbers, ranges and
    # lists are all accepted).
    alpha_param = iap.handle_continuous_param(
        alpha, "alpha", value_range=None, tuple_to_uniform=True, list_to_choice=True)
    return _ContrastFuncWrapper(
        adjust_contrast_linear,
        [alpha_param],
        per_channel,
        dtypes_allowed=["uint8", "uint16", "uint32",
                        "int8", "int16", "int32",
                        "float16", "float32", "float64"],
        dtypes_disallowed=["uint64", "int64", "float96", "float128", "float256", "bool"],
        name=name if name is not None else ia.caller_name(),
        deterministic=deterministic,
        random_state=random_state
    )
"def",
"LinearContrast",
"(",
"alpha",
"=",
"1",
",",
"per_channel",
"=",
"False",
",",
"name",
"=",
"None",
",",
"deterministic",
"=",
"False",
",",
"random_state",
"=",
"None",
")",
":",
"params1d",
"=",
"[",
"iap",
".",
"handle_continuous_param",
"(",
... | 44.480769 | 30.673077 |
def prepack(self, namedstruct, skip_self=False, skip_sub=False):
'''
Run prepack
'''
if not skip_sub and hasattr(namedstruct, self.name) and hasattr(self.basetypeparser, 'fullprepack'):
self.basetypeparser.fullprepack(getattr(namedstruct, self.name))
Parser.prepack(self, namedstruct, skip_self, skip_sub) | [
"def",
"prepack",
"(",
"self",
",",
"namedstruct",
",",
"skip_self",
"=",
"False",
",",
"skip_sub",
"=",
"False",
")",
":",
"if",
"not",
"skip_sub",
"and",
"hasattr",
"(",
"namedstruct",
",",
"self",
".",
"name",
")",
"and",
"hasattr",
"(",
"self",
"."... | 50.142857 | 32.714286 |
def config_filename(filename):
"""
Obtains the first filename found that is included in one of the configuration folders.
This function returs the full path for the file.
* It is useful for files that are not config-formatted (e.g. hosts files, json, etc.)
that will be read using other mechanisms
"""
global _ETC_PATHS
if filename.startswith('/'):
_LOGGER.info("using absolute path for filename \"%s\"" % filename)
return filename
import os.path
for fpath in _ETC_PATHS:
current_path = "%s/%s" % (fpath, filename)
if os.path.isfile(current_path):
current_path = os.path.realpath(current_path)
_LOGGER.info("using path \"%s\" for filename \"%s\"" % (current_path, filename))
return current_path
_LOGGER.info("using path \"%s\" for filename \"%s\"" % (filename, filename))
return filename | [
"def",
"config_filename",
"(",
"filename",
")",
":",
"global",
"_ETC_PATHS",
"if",
"filename",
".",
"startswith",
"(",
"'/'",
")",
":",
"_LOGGER",
".",
"info",
"(",
"\"using absolute path for filename \\\"%s\\\"\"",
"%",
"filename",
")",
"return",
"filename",
"imp... | 39.565217 | 22.434783 |
def geo_name(self):
"""
Return a name of the state or county, or, for other lowever levels, the
name of the level type in the county.
:return:
"""
if self.level == 'county':
return str(self.county_name)
elif self.level == 'state':
return self.state_name
else:
if hasattr(self, 'county'):
return "{} in {}".format(self.level,str(self.county_name))
elif hasattr(self, 'state'):
return "{} in {}".format(self.level, self.state_name)
else:
return "a {}".format(self.level) | [
"def",
"geo_name",
"(",
"self",
")",
":",
"if",
"self",
".",
"level",
"==",
"'county'",
":",
"return",
"str",
"(",
"self",
".",
"county_name",
")",
"elif",
"self",
".",
"level",
"==",
"'state'",
":",
"return",
"self",
".",
"state_name",
"else",
":",
... | 28.363636 | 19.454545 |
def circular(cls, shape, pixel_scale, radius_arcsec, centre=(0., 0.), invert=False):
"""Setup a mask where unmasked pixels are within a circle of an input arc second radius and centre.
Parameters
----------
shape: (int, int)
The (y,x) shape of the mask in units of pixels.
pixel_scale: float
The arc-second to pixel conversion factor of each pixel.
radius_arcsec : float
The radius (in arc seconds) of the circle within which pixels unmasked.
centre: (float, float)
The centre of the circle used to mask pixels.
"""
mask = mask_util.mask_circular_from_shape_pixel_scale_and_radius(shape, pixel_scale, radius_arcsec,
centre)
if invert: mask = np.invert(mask)
return cls(array=mask.astype('bool'), pixel_scale=pixel_scale) | [
"def",
"circular",
"(",
"cls",
",",
"shape",
",",
"pixel_scale",
",",
"radius_arcsec",
",",
"centre",
"=",
"(",
"0.",
",",
"0.",
")",
",",
"invert",
"=",
"False",
")",
":",
"mask",
"=",
"mask_util",
".",
"mask_circular_from_shape_pixel_scale_and_radius",
"("... | 50.722222 | 23.5 |
def _jobresult(self, jobid, json=True, headers=None):
"""Poll the async job result.
To be run via in a Thread, the result is put within
the result list which is a hack.
"""
failures = 0
total_time = self.job_timeout or 2**30
remaining = timedelta(seconds=total_time)
endtime = datetime.now() + remaining
while remaining.total_seconds() > 0:
timeout = max(min(self.timeout, remaining.total_seconds()), 1)
try:
kind, params = self._prepare_request('queryAsyncJobResult',
jobid=jobid)
transform(params)
params['signature'] = self._sign(params)
req = requests.Request(self.method,
self.endpoint,
headers=headers,
**{kind: params})
prepped = req.prepare()
if self.trace:
print(prepped.method, prepped.url, file=sys.stderr)
if prepped.headers:
print(prepped.headers, "\n", file=sys.stderr)
if prepped.body:
print(prepped.body, file=sys.stderr)
else:
print(file=sys.stderr)
with requests.Session() as session:
response = session.send(prepped,
timeout=timeout,
verify=self.verify,
cert=self.cert)
j = self._response_value(response, json)
if self.trace:
print(response.status_code, response.reason,
file=sys.stderr)
headersTrace = "\n".join(
"{}: {}".format(k, v)
for k, v in response.headers.items())
print(headersTrace, "\n", file=sys.stderr)
print(response.text, "\n", file=sys.stderr)
failures = 0
if j['jobstatus'] != PENDING:
if j['jobresultcode'] or j['jobstatus'] != SUCCESS:
raise CloudStackException("Job failure",
response=response)
if 'jobresult' not in j:
raise CloudStackException("Unknown job result",
response=response)
return j['jobresult']
except CloudStackException:
raise
except Exception as e:
failures += 1
if failures > 10:
raise e
time.sleep(self.poll_interval)
remaining = endtime - datetime.now()
if response:
response.status_code = 408
raise CloudStackException("Timeout waiting for async job result",
jobid,
response=response) | [
"def",
"_jobresult",
"(",
"self",
",",
"jobid",
",",
"json",
"=",
"True",
",",
"headers",
"=",
"None",
")",
":",
"failures",
"=",
"0",
"total_time",
"=",
"self",
".",
"job_timeout",
"or",
"2",
"**",
"30",
"remaining",
"=",
"timedelta",
"(",
"seconds",
... | 38.382716 | 19.098765 |
def taskfileinfo_task_data(tfi, role):
"""Return the data for task
:param tfi: the :class:`jukeboxcore.filesys.TaskFileInfo` holds the data
:type tfi: :class:`jukeboxcore.filesys.TaskFileInfo`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the task
:rtype: depending on role
:raises: None
"""
task = tfi.task
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
return task.name | [
"def",
"taskfileinfo_task_data",
"(",
"tfi",
",",
"role",
")",
":",
"task",
"=",
"tfi",
".",
"task",
"if",
"role",
"==",
"QtCore",
".",
"Qt",
".",
"DisplayRole",
"or",
"role",
"==",
"QtCore",
".",
"Qt",
".",
"EditRole",
":",
"return",
"task",
".",
"n... | 33.142857 | 15.142857 |
def delete_answer(self, answer_id):
"""Deletes the ``Answer`` identified by the given ``Id``.
arg: answer_id (osid.id.Id): the ``Id`` of the ``Answer`` to
delete
raise: NotFound - an ``Answer`` was not found identified by the
given ``Id``
raise: NullArgument - ``answer_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.repository.AssetAdminSession.delete_asset_content_template
from dlkit.abstract_osid.id.primitives import Id as ABCId
from .objects import Answer
collection = JSONClientValidated('assessment',
collection='Item',
runtime=self._runtime)
if not isinstance(answer_id, ABCId):
raise errors.InvalidArgument('the argument is not a valid OSID Id')
item = collection.find_one({'answers._id': ObjectId(answer_id.get_identifier())})
index = 0
found = False
for i in item['answers']:
if i['_id'] == ObjectId(answer_id.get_identifier()):
answer_map = item['answers'].pop(index)
index += 1
found = True
if not found:
raise errors.OperationFailed()
Answer(
osid_object_map=answer_map,
runtime=self._runtime,
proxy=self._proxy)._delete()
collection.save(item) | [
"def",
"delete_answer",
"(",
"self",
",",
"answer_id",
")",
":",
"# Implemented from template for",
"# osid.repository.AssetAdminSession.delete_asset_content_template",
"from",
"dlkit",
".",
"abstract_osid",
".",
"id",
".",
"primitives",
"import",
"Id",
"as",
"ABCId",
"fr... | 42.315789 | 18.526316 |
def yaml_tag_constructor(loader, tag, node):
"""convert shorthand intrinsic function to full name
"""
def _f(loader, tag, node):
if tag == '!GetAtt':
return node.value.split('.')
elif type(node) == yaml.SequenceNode:
return loader.construct_sequence(node)
else:
return node.value
if tag == '!Ref':
key = 'Ref'
else:
key = 'Fn::{}'.format(tag[1:])
return {key: _f(loader, tag, node)} | [
"def",
"yaml_tag_constructor",
"(",
"loader",
",",
"tag",
",",
"node",
")",
":",
"def",
"_f",
"(",
"loader",
",",
"tag",
",",
"node",
")",
":",
"if",
"tag",
"==",
"'!GetAtt'",
":",
"return",
"node",
".",
"value",
".",
"split",
"(",
"'.'",
")",
"eli... | 27.529412 | 13.705882 |
def render_html(self, obj, context=None):
"""
Generate the 'html' attribute of an oembed resource using a template.
Sort of a corollary to the parser's render_oembed method. By default,
the current mapping will be passed in as the context.
OEmbed templates are stored in:
oembed/provider/[app_label]_[model].html
-- or --
oembed/provider/media_video.html
"""
provided_context = context or Context()
context = RequestContext(mock_request())
context.update(provided_context)
context.push()
context[self._meta.context_varname] = obj
rendered = render_to_string(self._meta.template_name, context)
context.pop()
return rendered | [
"def",
"render_html",
"(",
"self",
",",
"obj",
",",
"context",
"=",
"None",
")",
":",
"provided_context",
"=",
"context",
"or",
"Context",
"(",
")",
"context",
"=",
"RequestContext",
"(",
"mock_request",
"(",
")",
")",
"context",
".",
"update",
"(",
"pro... | 34.347826 | 17.304348 |
def set_string(self, string_options):
"""Set a series of properties using a string.
For example::
'fred=12, tile'
'[fred=12]'
"""
vo = ffi.cast('VipsObject *', self.pointer)
cstr = _to_bytes(string_options)
result = vips_lib.vips_object_set_from_string(vo, cstr)
return result == 0 | [
"def",
"set_string",
"(",
"self",
",",
"string_options",
")",
":",
"vo",
"=",
"ffi",
".",
"cast",
"(",
"'VipsObject *'",
",",
"self",
".",
"pointer",
")",
"cstr",
"=",
"_to_bytes",
"(",
"string_options",
")",
"result",
"=",
"vips_lib",
".",
"vips_object_se... | 23.466667 | 20 |
def unique_(self, col):
"""
Returns unique values in a column
"""
try:
df = self.df.drop_duplicates(subset=[col], inplace=False)
return list(df[col])
except Exception as e:
self.err(e, "Can not select unique data") | [
"def",
"unique_",
"(",
"self",
",",
"col",
")",
":",
"try",
":",
"df",
"=",
"self",
".",
"df",
".",
"drop_duplicates",
"(",
"subset",
"=",
"[",
"col",
"]",
",",
"inplace",
"=",
"False",
")",
"return",
"list",
"(",
"df",
"[",
"col",
"]",
")",
"e... | 31.333333 | 11.777778 |
def parallel_epd_lcdir(
lcdir,
externalparams,
lcfileglob=None,
timecols=None,
magcols=None,
errcols=None,
lcformat='hat-sql',
lcformatdir=None,
epdsmooth_sigclip=3.0,
epdsmooth_windowsize=21,
epdsmooth_func=smooth_magseries_savgol,
epdsmooth_extraparams=None,
nworkers=NCPUS,
maxworkertasks=1000
):
'''This applies EPD in parallel to all LCs in a directory.
Parameters
----------
lcdir : str
The light curve directory to process.
externalparams : dict or None
This is a dict that indicates which keys in the lcdict obtained from the
lcfile correspond to the required external parameters. As with timecol,
magcol, and errcol, these can be simple keys (e.g. 'rjd') or compound
keys ('magaperture1.mags'). The dict should look something like::
{'fsv':'<lcdict key>' array: S values for each observation,
'fdv':'<lcdict key>' array: D values for each observation,
'fkv':'<lcdict key>' array: K values for each observation,
'xcc':'<lcdict key>' array: x coords for each observation,
'ycc':'<lcdict key>' array: y coords for each observation,
'bgv':'<lcdict key>' array: sky background for each observation,
'bge':'<lcdict key>' array: sky background err for each observation,
'iha':'<lcdict key>' array: hour angle for each observation,
'izd':'<lcdict key>' array: zenith distance for each observation}
lcfileglob : str or None
A UNIX fileglob to use to select light curve files in `lcdir`. If this
is not None, the value provided will override the default fileglob for
your light curve format.
timecols,magcols,errcols : lists of str
The keys in the lcdict produced by your light curve reader function that
correspond to the times, mags/fluxes, and associated measurement errors
that will be used as inputs to the EPD process. If these are None, the
default values for `timecols`, `magcols`, and `errcols` for your light
curve format will be used here.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory when you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
epdsmooth_sigclip : float or int or sequence of two floats/ints or None
This specifies how to sigma-clip the input LC before fitting the EPD
function to it.
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
epdsmooth_windowsize : int
This is the number of LC points to smooth over to generate a smoothed
light curve that will be used to fit the EPD function.
epdsmooth_func : Python function
This sets the smoothing filter function to use. A Savitsky-Golay filter
is used to smooth the light curve by default. The functions that can be
used with this kwarg are listed in `varbase.trends`. If you want to use
your own function, it MUST have the following signature::
def smoothfunc(mags_array, window_size, **extraparams)
and return a numpy array of the same size as `mags_array` with the
smoothed time-series. Any extra params can be provided using the
`extraparams` dict.
epdsmooth_extraparams : dict
This is a dict of any extra filter params to supply to the smoothing
function.
nworkers : int
The number of parallel workers to launch when processing the LCs.
maxworkertasks : int
The maximum number of tasks a parallel worker will complete before it is
replaced with a new one (sometimes helps with memory-leaks).
Returns
-------
dict
Returns a dict organized by all the keys in the input `magcols` list,
containing lists of EPD pickle light curves for that `magcol`.
Notes
-----
- S -> measure of PSF sharpness (~1/sigma^2 sosmaller S = wider PSF)
- D -> measure of PSF ellipticity in xy direction
- K -> measure of PSF ellipticity in cross direction
S, D, K are related to the PSF's variance and covariance, see eqn 30-33 in
A. Pal's thesis: https://arxiv.org/abs/0906.3486
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(fileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
# find all the files matching the lcglob in lcdir
if lcfileglob is None:
lcfileglob = fileglob
lclist = sorted(glob.glob(os.path.join(lcdir, lcfileglob)))
return parallel_epd_lclist(
lclist,
externalparams,
timecols=timecols,
magcols=magcols,
errcols=errcols,
lcformat=lcformat,
epdsmooth_sigclip=epdsmooth_sigclip,
epdsmooth_windowsize=epdsmooth_windowsize,
epdsmooth_func=epdsmooth_func,
epdsmooth_extraparams=epdsmooth_extraparams,
nworkers=nworkers,
maxworkertasks=maxworkertasks
) | [
"def",
"parallel_epd_lcdir",
"(",
"lcdir",
",",
"externalparams",
",",
"lcfileglob",
"=",
"None",
",",
"timecols",
"=",
"None",
",",
"magcols",
"=",
"None",
",",
"errcols",
"=",
"None",
",",
"lcformat",
"=",
"'hat-sql'",
",",
"lcformatdir",
"=",
"None",
",... | 40.175758 | 27 |
def arraylike_to_numpy(array_like):
"""Convert a 1d array-like (e.g,. list, tensor, etc.) to an np.ndarray"""
orig_type = type(array_like)
# Convert to np.ndarray
if isinstance(array_like, np.ndarray):
pass
elif isinstance(array_like, list):
array_like = np.array(array_like)
elif issparse(array_like):
array_like = array_like.toarray()
elif isinstance(array_like, torch.Tensor):
array_like = array_like.numpy()
elif not isinstance(array_like, np.ndarray):
array_like = np.array(array_like)
else:
msg = f"Input of type {orig_type} could not be converted to 1d " "np.ndarray"
raise ValueError(msg)
# Correct shape
if (array_like.ndim > 1) and (1 in array_like.shape):
array_like = array_like.flatten()
if array_like.ndim != 1:
raise ValueError("Input could not be converted to 1d np.array")
# Convert to ints
if any(array_like % 1):
raise ValueError("Input contains at least one non-integer value.")
array_like = array_like.astype(np.dtype(int))
return array_like | [
"def",
"arraylike_to_numpy",
"(",
"array_like",
")",
":",
"orig_type",
"=",
"type",
"(",
"array_like",
")",
"# Convert to np.ndarray",
"if",
"isinstance",
"(",
"array_like",
",",
"np",
".",
"ndarray",
")",
":",
"pass",
"elif",
"isinstance",
"(",
"array_like",
... | 33.75 | 17.15625 |
def has(self, character):
'''
Get if character (or character code point) is contained by any range on
this range group.
:param character: character or unicode code point to look for
:type character: str or int
:returns: True if character is contained by any range, False otherwise
:rtype: bool
'''
if not self:
return False
character = character if isinstance(character, int) else ord(character)
last = self[-1][-1]
start, end = self[bisect.bisect_right(self, (character, last)) - 1]
return start <= character < end | [
"def",
"has",
"(",
"self",
",",
"character",
")",
":",
"if",
"not",
"self",
":",
"return",
"False",
"character",
"=",
"character",
"if",
"isinstance",
"(",
"character",
",",
"int",
")",
"else",
"ord",
"(",
"character",
")",
"last",
"=",
"self",
"[",
... | 38.5625 | 23.9375 |
def get_text_path(self):
"""
Returns the path of the directory containing text if they exist in this dataset.
"""
for res in self.dsDoc['dataResources']:
resPath = res['resPath']
resType = res['resType']
isCollection = res['isCollection']
if resType == 'text' and isCollection:
return os.path.join(self.dsHome, resPath)
# if the for loop is over and no image directory is found, then return None
raise RuntimeError('could not find learningData file the dataset') | [
"def",
"get_text_path",
"(",
"self",
")",
":",
"for",
"res",
"in",
"self",
".",
"dsDoc",
"[",
"'dataResources'",
"]",
":",
"resPath",
"=",
"res",
"[",
"'resPath'",
"]",
"resType",
"=",
"res",
"[",
"'resType'",
"]",
"isCollection",
"=",
"res",
"[",
"'is... | 43.307692 | 17.615385 |
def title(self, title=None):
"""Returns or sets (if a value is provided) the chart's title.
:param str title: If given, the chart's title will be set to this.
:rtype: ``str``"""
if title is None:
return self._title
else:
if not isinstance(title, str):
raise TypeError("title must be str, not '%s'" % str(title))
self._title = title | [
"def",
"title",
"(",
"self",
",",
"title",
"=",
"None",
")",
":",
"if",
"title",
"is",
"None",
":",
"return",
"self",
".",
"_title",
"else",
":",
"if",
"not",
"isinstance",
"(",
"title",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"title must ... | 34.5 | 18.666667 |
def tempo_account_update_customer_by_id(self, customer_id=1, data=None):
"""
Updates an Attribute. Caller must have Manage Account Permission. Attribute can be a Category or Customer.
:param customer_id: id of Customer record
:param data: format is
{
isNew:boolean
name:string
key:string
id:number
}
:return: json with parameters name, key and id.
"""
if data is None:
return """Please, set the data as { isNew:boolean
name:string
key:string
id:number }"""
url = 'rest/tempo-accounts/1/customer/{id}'.format(id=customer_id)
return self.put(url, data=data) | [
"def",
"tempo_account_update_customer_by_id",
"(",
"self",
",",
"customer_id",
"=",
"1",
",",
"data",
"=",
"None",
")",
":",
"if",
"data",
"is",
"None",
":",
"return",
"\"\"\"Please, set the data as { isNew:boolean\n name:string\... | 45 | 14.35 |
def parse_phone(phone):
"""Parses the given phone, or returns ``None`` if it's invalid."""
if isinstance(phone, int):
return str(phone)
else:
phone = re.sub(r'[+()\s-]', '', str(phone))
if phone.isdigit():
return phone | [
"def",
"parse_phone",
"(",
"phone",
")",
":",
"if",
"isinstance",
"(",
"phone",
",",
"int",
")",
":",
"return",
"str",
"(",
"phone",
")",
"else",
":",
"phone",
"=",
"re",
".",
"sub",
"(",
"r'[+()\\s-]'",
",",
"''",
",",
"str",
"(",
"phone",
")",
... | 32.375 | 14.125 |
def _asString(self, value):
"""converts the value as a string"""
if sys.version_info[0] == 3:
if isinstance(value, str):
return value
elif isinstance(value, bytes):
return value.decode('utf-8')
elif sys.version_info[0] == 2:
return value.encode('ascii') | [
"def",
"_asString",
"(",
"self",
",",
"value",
")",
":",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
"==",
"3",
":",
"if",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"return",
"value",
"elif",
"isinstance",
"(",
"value",
",",
"bytes",
"... | 37.444444 | 4.333333 |
def extract_variants(pattern):
"""Extract the pattern variants (ie. {foo,bar}baz = foobaz or barbaz)."""
v1, v2 = pattern.find('{'), pattern.find('}')
if v1 > -1 and v2 > v1:
variations = pattern[v1+1:v2].split(',')
variants = [pattern[:v1] + v + pattern[v2+1:] for v in variations]
else:
variants = [pattern]
return list(_deduplicate(variants)) | [
"def",
"extract_variants",
"(",
"pattern",
")",
":",
"v1",
",",
"v2",
"=",
"pattern",
".",
"find",
"(",
"'{'",
")",
",",
"pattern",
".",
"find",
"(",
"'}'",
")",
"if",
"v1",
">",
"-",
"1",
"and",
"v2",
">",
"v1",
":",
"variations",
"=",
"pattern"... | 42.333333 | 13.111111 |
def __get_doc_block_lines(self):
"""
Returns the start and end line of the DOcBlock of the stored routine code.
"""
line1 = None
line2 = None
i = 0
for line in self._routine_source_code_lines:
if re.match(r'\s*/\*\*', line):
line1 = i
if re.match(r'\s*\*/', line):
line2 = i
if self._is_start_of_stored_routine(line):
break
i += 1
return line1, line2 | [
"def",
"__get_doc_block_lines",
"(",
"self",
")",
":",
"line1",
"=",
"None",
"line2",
"=",
"None",
"i",
"=",
"0",
"for",
"line",
"in",
"self",
".",
"_routine_source_code_lines",
":",
"if",
"re",
".",
"match",
"(",
"r'\\s*/\\*\\*'",
",",
"line",
")",
":",... | 23.571429 | 20.52381 |
def _from_dict(cls, _dict):
"""Initialize a Tables object from a json dictionary."""
args = {}
if 'location' in _dict:
args['location'] = Location._from_dict(_dict.get('location'))
if 'text' in _dict:
args['text'] = _dict.get('text')
if 'section_title' in _dict:
args['section_title'] = SectionTitle._from_dict(
_dict.get('section_title'))
if 'table_headers' in _dict:
args['table_headers'] = [
TableHeaders._from_dict(x) for x in (_dict.get('table_headers'))
]
if 'row_headers' in _dict:
args['row_headers'] = [
RowHeaders._from_dict(x) for x in (_dict.get('row_headers'))
]
if 'column_headers' in _dict:
args['column_headers'] = [
ColumnHeaders._from_dict(x)
for x in (_dict.get('column_headers'))
]
if 'key_value_pairs' in _dict:
args['key_value_pairs'] = [
KeyValuePair._from_dict(x)
for x in (_dict.get('key_value_pairs'))
]
if 'body_cells' in _dict:
args['body_cells'] = [
BodyCells._from_dict(x) for x in (_dict.get('body_cells'))
]
return cls(**args) | [
"def",
"_from_dict",
"(",
"cls",
",",
"_dict",
")",
":",
"args",
"=",
"{",
"}",
"if",
"'location'",
"in",
"_dict",
":",
"args",
"[",
"'location'",
"]",
"=",
"Location",
".",
"_from_dict",
"(",
"_dict",
".",
"get",
"(",
"'location'",
")",
")",
"if",
... | 39.333333 | 13.757576 |
def export_to_pem(self, private_key=False, password=False):
"""Exports keys to a data buffer suitable to be stored as a PEM file.
Either the public or the private key can be exported to a PEM file.
For private keys the PKCS#8 format is used. If a password is provided
the best encryption method available as determined by the cryptography
module is used to wrap the key.
:param private_key: Whether the private key should be exported.
Defaults to `False` which means the public key is exported by default.
:param password(bytes): A password for wrapping the private key.
Defaults to False which will cause the operation to fail. To avoid
encryption the user must explicitly pass None, otherwise the user
needs to provide a password in a bytes buffer.
"""
e = serialization.Encoding.PEM
if private_key:
if not self.has_private:
raise InvalidJWKType("No private key available")
f = serialization.PrivateFormat.PKCS8
if password is None:
a = serialization.NoEncryption()
elif isinstance(password, bytes):
a = serialization.BestAvailableEncryption(password)
elif password is False:
raise ValueError("The password must be None or a bytes string")
else:
raise TypeError("The password string must be bytes")
return self._get_private_key().private_bytes(
encoding=e, format=f, encryption_algorithm=a)
else:
if not self.has_public:
raise InvalidJWKType("No public key available")
f = serialization.PublicFormat.SubjectPublicKeyInfo
return self._get_public_key().public_bytes(encoding=e, format=f) | [
"def",
"export_to_pem",
"(",
"self",
",",
"private_key",
"=",
"False",
",",
"password",
"=",
"False",
")",
":",
"e",
"=",
"serialization",
".",
"Encoding",
".",
"PEM",
"if",
"private_key",
":",
"if",
"not",
"self",
".",
"has_private",
":",
"raise",
"Inva... | 53.264706 | 20.794118 |
def plot_T_dependent_property(self, Tmin=None, Tmax=None, methods=[],
pts=50, only_valid=True, order=0): # pragma: no cover
r'''Method to create a plot of the property vs temperature according to
either a specified list of methods, or user methods (if set), or all
methods. User-selectable number of points, and temperature range. If
only_valid is set,`test_method_validity` will be used to check if each
temperature in the specified range is valid, and
`test_property_validity` will be used to test the answer, and the
method is allowed to fail; only the valid points will be plotted.
Otherwise, the result will be calculated and displayed as-is. This will
not suceed if the method fails.
Parameters
----------
Tmin : float
Minimum temperature, to begin calculating the property, [K]
Tmax : float
Maximum temperature, to stop calculating the property, [K]
methods : list, optional
List of methods to consider
pts : int, optional
A list of points to calculate the property at; if Tmin to Tmax
covers a wide range of method validities, only a few points may end
up calculated for a given method so this may need to be large
only_valid : bool
If True, only plot successful methods and calculated properties,
and handle errors; if False, attempt calculation without any
checking and use methods outside their bounds
'''
# This function cannot be tested
if not has_matplotlib:
raise Exception('Optional dependency matplotlib is required for plotting')
if Tmin is None:
if self.Tmin is not None:
Tmin = self.Tmin
else:
raise Exception('Minimum temperature could not be auto-detected; please provide it')
if Tmax is None:
if self.Tmax is not None:
Tmax = self.Tmax
else:
raise Exception('Maximum temperature could not be auto-detected; please provide it')
if not methods:
if self.user_methods:
methods = self.user_methods
else:
methods = self.all_methods
Ts = np.linspace(Tmin, Tmax, pts)
if order == 0:
for method in methods:
if only_valid:
properties, Ts2 = [], []
for T in Ts:
if self.test_method_validity(T, method):
try:
p = self.calculate(T=T, method=method)
if self.test_property_validity(p):
properties.append(p)
Ts2.append(T)
except:
pass
plt.semilogy(Ts2, properties, label=method)
else:
properties = [self.calculate(T=T, method=method) for T in Ts]
plt.semilogy(Ts, properties, label=method)
plt.ylabel(self.name + ', ' + self.units)
plt.title(self.name + ' of ' + self.CASRN)
elif order > 0:
for method in methods:
if only_valid:
properties, Ts2 = [], []
for T in Ts:
if self.test_method_validity(T, method):
try:
p = self.calculate_derivative(T=T, method=method, order=order)
properties.append(p)
Ts2.append(T)
except:
pass
plt.semilogy(Ts2, properties, label=method)
else:
properties = [self.calculate_derivative(T=T, method=method, order=order) for T in Ts]
plt.semilogy(Ts, properties, label=method)
plt.ylabel(self.name + ', ' + self.units + '/K^%d derivative of order %d' % (order, order))
plt.title(self.name + ' derivative of order %d' % order + ' of ' + self.CASRN)
plt.legend(loc='best')
plt.xlabel('Temperature, K')
plt.show() | [
"def",
"plot_T_dependent_property",
"(",
"self",
",",
"Tmin",
"=",
"None",
",",
"Tmax",
"=",
"None",
",",
"methods",
"=",
"[",
"]",
",",
"pts",
"=",
"50",
",",
"only_valid",
"=",
"True",
",",
"order",
"=",
"0",
")",
":",
"# pragma: no cover",
"# This f... | 48.269663 | 21.280899 |
def _get_what_to_read_next(fp, previously_read_position, chunk_size):
"""Return information on which file pointer position to read from and how many bytes.
Args:
fp
past_read_positon (int): The file pointer position that has been read previously
chunk_size(int): ideal io chunk_size
Returns:
(int, int): The next seek position, how many bytes to read next
"""
seek_position = max(previously_read_position - chunk_size, 0)
read_size = chunk_size
# examples: say, our new_lines are potentially "\r\n", "\n", "\r"
# find a reading point where it is not "\n", rewind further if necessary
# if we have "\r\n" and we read in "\n",
# the next iteration would treat "\r" as a different new line.
# Q: why don't I just check if it is b"\n", but use a function ?
# A: so that we can potentially expand this into generic sets of separators, later on.
while seek_position > 0:
fp.seek(seek_position)
if _is_partially_read_new_line(fp.read(1)):
seek_position -= 1
read_size += 1 # as we rewind further, let's make sure we read more to compensate
else:
break
# take care of special case when we are back to the beginnin of the file
read_size = min(previously_read_position - seek_position, read_size)
return seek_position, read_size | [
"def",
"_get_what_to_read_next",
"(",
"fp",
",",
"previously_read_position",
",",
"chunk_size",
")",
":",
"seek_position",
"=",
"max",
"(",
"previously_read_position",
"-",
"chunk_size",
",",
"0",
")",
"read_size",
"=",
"chunk_size",
"# examples: say, our new_lines are ... | 43.516129 | 25.580645 |
def get_routing_tuples(cls):
'''A generator of (rule, callback) tuples.'''
for callback in cls.callbacks:
ep_name = '{}.{}'.format(cls.api.__name__, callback.__name__)
yield (Rule(cls.endpoint_path,
endpoint=ep_name,
methods=callback.swagger_ops),
callback) | [
"def",
"get_routing_tuples",
"(",
"cls",
")",
":",
"for",
"callback",
"in",
"cls",
".",
"callbacks",
":",
"ep_name",
"=",
"'{}.{}'",
".",
"format",
"(",
"cls",
".",
"api",
".",
"__name__",
",",
"callback",
".",
"__name__",
")",
"yield",
"(",
"Rule",
"(... | 44.625 | 11.125 |
def full_task(self, token_id, presented_pronunciation, pronunciation, pronunciation_probability,
              warn=True, default=True):
    """Provide the prediction of the full task.

    This function is used to predict the probability of a given pronunciation being reported for a given token.

    :param token_id: The token for which the prediction is provided
    :param presented_pronunciation: The pronunciation presented for the token; used to seed the
        default predictions when ``default`` is True
    :param pronunciation: The pronunciation for which the prediction is being made (as a list of strings
        or space separated string)
    :param pronunciation_probability: The probability of the pronunciation for the given token;
        ``None`` restores the default prediction for this pronunciation or removes the entry
    :param warn: Set to False in order to avoid warnings about 0 or 1 probabilities
    :param default: Set to False in order to avoid generating the default probabilities
    """
    # Probabilities of exactly 0.0 or 1.0 can make log-likelihoods -Infinity downstream.
    if pronunciation_probability is not None and not 0. < pronunciation_probability < 1. and warn:
        logging.warning('Setting a probability of [{}] to pronunciation [{}] for token [{}].\n '
                        'Using probabilities of 0.0 or 1.0 '
                        'may lead to likelihoods of -Infinity'.format(pronunciation_probability,
                                                                     pronunciation,
                                                                     token_id))
    # Normalise the pronunciation into its canonical space-separated string key.
    key = pronunciation
    if isinstance(key, list):
        # NOTE(review): `basestring` is Python-2-only; this module appears to target Py2.
        if not all([isinstance(phoneme, basestring) for phoneme in key]):
            raise ValueError('The pronunciation must be of type string (a sequence of space separated phonemes) '
                             'or of type list (containing phonemes of type strings).'
                             'User supplied: {}'.format(key))
        key = ' '.join(pronunciation)
    default_preds = self._full_default(presented_pronunciation) if default else {}
    # Ensure the per-token 'full' prediction dict exists, seeded with defaults.
    self['tokens'].setdefault(token_id, {}) \
        .setdefault('full', default_preds)
    if key is not None:
        if pronunciation_probability is not None:
            self['tokens'][token_id]['full'][key] = pronunciation_probability
        else:
            # No explicit probability: fall back to the default value, or drop the key.
            if key in default_preds:
                self['tokens'][token_id]['full'][key] = default_preds[key]
            else:
                # NOTE(review): pop() without a default raises KeyError when the key
                # is absent from the dict — confirm this is intended behaviour.
                self['tokens'][token_id]['full'].pop(key)
"def",
"full_task",
"(",
"self",
",",
"token_id",
",",
"presented_pronunciation",
",",
"pronunciation",
",",
"pronunciation_probability",
",",
"warn",
"=",
"True",
",",
"default",
"=",
"True",
")",
":",
"if",
"pronunciation_probability",
"is",
"not",
"None",
"an... | 52.244444 | 34.022222 |
def _notifications(self):
    """
    Get the number of unread notifications.

    Returns an int count on success, "?" when credentials are missing,
    and None (implicitly) on request failures or unexpected status codes.
    """
    if not self.username or not self.auth_token:
        # Warn the user once, then keep returning the placeholder.
        if not self.notification_warning:
            self.py3.notify_user(
                "Github module needs username and "
                "auth_token to check notifications."
            )
            self.notification_warning = True
        return "?"
    if self.notifications == "all" or not self.repo:
        url = self.url_api + "/notifications"
    else:
        url = self.url_api + "/repos/" + self.repo + "/notifications"
    # Request the maximum page size to minimise pagination round-trips.
    url += "?per_page=100"
    try:
        info = self.py3.request(url, auth=(self.username, self.auth_token))
    except (self.py3.RequestException):
        return
    if info.status_code == 200:
        links = info.headers.get("Link")
        if not links:
            # No Link header: a single page of results.
            return len(info.json())
        # Parse the last page number out of the Link header so the total
        # can be computed as full_pages * page_len + last_page_len.
        last_page = 1
        for link in links.split(","):
            if 'rel="last"' in link:
                last_url = link[link.find("<") + 1 : link.find(">")]
                parsed = urlparse.urlparse(last_url)
                last_page = int(urlparse.parse_qs(parsed.query)["page"][0])
        if last_page == 1:
            return len(info.json())
        try:
            last_page_info = self.py3.request(
                last_url, auth=(self.username, self.auth_token)
            )
        except self.py3.RequestException:
            return
        # All pages except the last are full-sized.
        return len(info.json()) * (last_page - 1) + len(last_page_info.json())
    if info.status_code == 404:
        if not self.repo_warning:
            self.py3.notify_user("Github repo cannot be found.")
            self.repo_warning = True
"def",
"_notifications",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"username",
"or",
"not",
"self",
".",
"auth_token",
":",
"if",
"not",
"self",
".",
"notification_warning",
":",
"self",
".",
"py3",
".",
"notify_user",
"(",
"\"Github module needs user... | 36.795918 | 17 |
def _filter_data(self, pattern):
'''
Removes parameters which match the pattern from the config data
'''
removed = []
filtered = []
for param in self.data:
if not param[0].startswith(pattern):
filtered.append(param)
else:
removed.append(param)
self.data = filtered
return removed | [
"def",
"_filter_data",
"(",
"self",
",",
"pattern",
")",
":",
"removed",
"=",
"[",
"]",
"filtered",
"=",
"[",
"]",
"for",
"param",
"in",
"self",
".",
"data",
":",
"if",
"not",
"param",
"[",
"0",
"]",
".",
"startswith",
"(",
"pattern",
")",
":",
"... | 29.769231 | 16.230769 |
def deprecated(message=None):
    """A decorator for deprecated functions.

    :param message: warning text; defaults to "<funcname> is deprecated".
    :return: a decorator that wraps the function so each call emits a
        DeprecationWarning before delegating to the original.
    """
    import functools

    def _decorator(func, message=message):
        if message is None:
            message = '%s is deprecated' % func.__name__

        # functools.wraps preserves __name__/__doc__ of the wrapped function;
        # without it every deprecated function reports as "newfunc".
        @functools.wraps(func)
        def newfunc(*args, **kwds):
            warnings.warn(message, DeprecationWarning, stacklevel=2)
            return func(*args, **kwds)
        return newfunc
    return _decorator
"def",
"deprecated",
"(",
"message",
"=",
"None",
")",
":",
"def",
"_decorator",
"(",
"func",
",",
"message",
"=",
"message",
")",
":",
"if",
"message",
"is",
"None",
":",
"message",
"=",
"'%s is deprecated'",
"%",
"func",
".",
"__name__",
"def",
"newfun... | 34.909091 | 14 |
def BitVecSym(
    name: str, size: int, annotations: Annotations = None
) -> z3.BitVecRef:
    """Create a new bit vector with a symbolic value.

    ``annotations`` is accepted for signature compatibility but is not
    attached to the returned symbol here.
    """
    symbol = z3.BitVec(name, size)
    return symbol
"def",
"BitVecSym",
"(",
"name",
":",
"str",
",",
"size",
":",
"int",
",",
"annotations",
":",
"Annotations",
"=",
"None",
")",
"->",
"z3",
".",
"BitVecRef",
":",
"return",
"z3",
".",
"BitVec",
"(",
"name",
",",
"size",
")"
] | 38.8 | 13.8 |
def _fit_gpu(self, Ciu_host, Cui_host, show_progress=True):
    """ specialized training on the gpu. copies inputs to/from cuda device

    :param Ciu_host: host-side item/user confidence matrix (CSR) — presumably;
        verify against caller
    :param Cui_host: host-side user/item confidence matrix (CSR)
    :param show_progress: toggle the tqdm progress bar
    Updates self.user_factors / self.item_factors in place.
    """
    if not implicit.cuda.HAS_CUDA:
        raise ValueError("No CUDA extension has been built, can't train on GPU.")
    if self.dtype == np.float64:
        # The CUDA solver only handles float32 factors.
        log.warning("Factors of dtype float64 aren't supported with gpu fitting. "
                    "Converting factors to float32")
        self.item_factors = self.item_factors.astype(np.float32)
        self.user_factors = self.user_factors.astype(np.float32)
    # Copy the confidence matrices and factor matrices to the CUDA device.
    Ciu = implicit.cuda.CuCSRMatrix(Ciu_host)
    Cui = implicit.cuda.CuCSRMatrix(Cui_host)
    X = implicit.cuda.CuDenseMatrix(self.user_factors.astype(np.float32))
    Y = implicit.cuda.CuDenseMatrix(self.item_factors.astype(np.float32))
    solver = implicit.cuda.CuLeastSquaresSolver(self.factors)
    log.debug("Running %i ALS iterations", self.iterations)
    with tqdm.tqdm(total=self.iterations, disable=not show_progress) as progress:
        for iteration in range(self.iterations):
            s = time.time()
            # Alternate least squares: solve for user factors, then item factors.
            solver.least_squares(Cui, X, Y, self.regularization, self.cg_steps)
            progress.update(.5)
            solver.least_squares(Ciu, Y, X, self.regularization, self.cg_steps)
            progress.update(.5)
            if self.fit_callback:
                self.fit_callback(iteration, time.time() - s)
            if self.calculate_training_loss:
                loss = solver.calculate_loss(Cui, X, Y, self.regularization)
                progress.set_postfix({"loss": loss})
    if self.calculate_training_loss:
        log.info("Final training loss %.4f", loss)
    # Copy trained factors back from the device into the host arrays.
    X.to_host(self.user_factors)
    Y.to_host(self.item_factors)
"def",
"_fit_gpu",
"(",
"self",
",",
"Ciu_host",
",",
"Cui_host",
",",
"show_progress",
"=",
"True",
")",
":",
"if",
"not",
"implicit",
".",
"cuda",
".",
"HAS_CUDA",
":",
"raise",
"ValueError",
"(",
"\"No CUDA extension has been built, can't train on GPU.\"",
")",... | 47.736842 | 23.263158 |
def get_case(flags):
    """Parse flags for case sensitivity settings.

    When no case flag is set, fall back to the platform default; otherwise
    the result is whether FORCECASE was requested.
    """
    if flags & CASE_FLAGS:
        return bool(flags & FORCECASE)
    return util.is_case_sensitive()
"def",
"get_case",
"(",
"flags",
")",
":",
"if",
"not",
"bool",
"(",
"flags",
"&",
"CASE_FLAGS",
")",
":",
"case_sensitive",
"=",
"util",
".",
"is_case_sensitive",
"(",
")",
"elif",
"flags",
"&",
"FORCECASE",
":",
"case_sensitive",
"=",
"True",
"else",
"... | 27.7 | 15.3 |
def decoherence_noise_with_asymmetric_ro(gates: Sequence[Gate], p00=0.975, p11=0.911):
    """Similar to :py:func:`_decoherence_noise_model`, but with asymmetric readout.

    For simplicity, we use the default values for T1, T2, gate times, et al. and only allow
    the specification of readout fidelities.
    """
    noise_model = _decoherence_noise_model(gates)
    readout_probs = np.array([[p00, 1 - p00],
                              [1 - p11, p11]])
    aprobs = {qubit: readout_probs for qubit in noise_model.assignment_probs}
    return NoiseModel(noise_model.gates, aprobs)
"def",
"decoherence_noise_with_asymmetric_ro",
"(",
"gates",
":",
"Sequence",
"[",
"Gate",
"]",
",",
"p00",
"=",
"0.975",
",",
"p11",
"=",
"0.911",
")",
":",
"noise_model",
"=",
"_decoherence_noise_model",
"(",
"gates",
")",
"aprobs",
"=",
"np",
".",
"array"... | 50.363636 | 17.272727 |
def parse_cl_args(in_args):
    """Parse input commandline arguments, handling multiple cases.

    Returns the main config file and set of kwargs.

    :param in_args: raw commandline argument list (e.g. sys.argv[1:])
    :return: dict of kwargs — either the standard run settings
        (parallel/workflow/workdir plus inputs) or, for a supplemental
        sub-command, {"args": ..., "config_file": None, <sub_cmd>: True}.
    """
    # Supplemental sub-commands, each registering its own argparse subparser.
    sub_cmds = {"upgrade": install.add_subparser,
                "runfn": runfn.add_subparser,
                "graph": graph.add_subparser,
                "version": programs.add_subparser,
                "sequencer": machine.add_subparser}
    description = "Community developed high throughput sequencing analysis."
    parser = argparse.ArgumentParser(description=description)
    sub_cmd = None
    if len(in_args) > 0 and in_args[0] in sub_cmds:
        # First token names a sub-command: delegate argument setup to it.
        subparser_help = "bcbio-nextgen supplemental commands"
        subparsers = parser.add_subparsers(help=subparser_help)
        sub_cmds[in_args[0]](subparsers)
        sub_cmd = in_args[0]
    else:
        # Standard analysis invocation.
        parser.add_argument("global_config", nargs="?",
                            help=("Global YAML configuration file specifying "
                                  "details about the system (optional, "
                                  "defaults to installed bcbio_system.yaml)"))
        parser.add_argument("fc_dir", nargs="?",
                            help=("A directory of Illumina output or fastq "
                                  "files to process (optional)"))
        # NOTE(review): the trailing comma at the end of this statement makes
        # it a one-element tuple expression; harmless but likely unintended.
        parser.add_argument("run_config", nargs="*",
                            help=("YAML file with details about samples to "
                                  "process (required, unless using Galaxy "
                                  "LIMS as input)")),
        parser.add_argument("-n", "--numcores", type=int, default=1,
                            help="Total cores to use for processing")
        parser.add_argument("-t", "--paralleltype",
                            choices=["local", "ipython"],
                            default="local", help="Approach to parallelization")
        parser.add_argument("-s", "--scheduler",
                            choices=["lsf", "sge", "torque", "slurm", "pbspro"],
                            help="Scheduler to use for ipython parallel")
        parser.add_argument("--local_controller",
                            default=False,
                            action="store_true",
                            help="run controller locally")
        parser.add_argument("-q", "--queue",
                            help=("Scheduler queue to run jobs on, for "
                                  "ipython parallel"))
        parser.add_argument("-r", "--resources",
                            help=("Cluster specific resources specifications. "
                                  "Can be specified multiple times.\n"
                                  "Supports SGE, Torque, LSF and SLURM "
                                  "parameters."), default=[], action="append")
        parser.add_argument("--timeout", default=15, type=int,
                            help=("Number of minutes before cluster startup "
                                  "times out. Defaults to 15"))
        parser.add_argument("--retries", default=0, type=int,
                            help=("Number of retries of failed tasks during "
                                  "distributed processing. Default 0 "
                                  "(no retries)"))
        parser.add_argument("-p", "--tag",
                            help="Tag name to label jobs on the cluster",
                            default="")
        parser.add_argument("-w", "--workflow",
                            help=("Run a workflow with the given commandline "
                                  "arguments"))
        parser.add_argument("--workdir", default=os.getcwd(),
                            help=("Directory to process in. Defaults to "
                                  "current working directory"))
        parser.add_argument("-v", "--version", help="Print current version",
                            action="store_true")
        # Hidden arguments passed downstream
        parser.add_argument("--only-metadata", help=argparse.SUPPRESS, action="store_true", default=False)
        parser.add_argument("--force-single", help="Treat all files as single reads",
                            action="store_true", default=False)
        parser.add_argument("--separators", help="semicolon separated list of separators that indicates paired files.",
                            default="R,_,-,.")
    args = parser.parse_args(in_args)
    if hasattr(args, "workdir") and args.workdir:
        args.workdir = utils.safe_makedir(os.path.abspath(args.workdir))
    if hasattr(args, "global_config"):
        # Standard invocation: validate arguments and build the run kwargs.
        error_msg = _sanity_check_args(args)
        if error_msg:
            parser.error(error_msg)
        kwargs = {"parallel": clargs.to_parallel(args),
                  "workflow": args.workflow,
                  "workdir": args.workdir}
        kwargs = _add_inputs_to_kwargs(args, kwargs, parser)
        error_msg = _sanity_check_kwargs(kwargs)
        if error_msg:
            parser.error(error_msg)
    else:
        # Sub-command invocation: pass the parsed args straight through.
        assert sub_cmd is not None
        kwargs = {"args": args,
                  "config_file": None,
                  sub_cmd: True}
    return kwargs
"def",
"parse_cl_args",
"(",
"in_args",
")",
":",
"sub_cmds",
"=",
"{",
"\"upgrade\"",
":",
"install",
".",
"add_subparser",
",",
"\"runfn\"",
":",
"runfn",
".",
"add_subparser",
",",
"\"graph\"",
":",
"graph",
".",
"add_subparser",
",",
"\"version\"",
":",
... | 54.244681 | 19.659574 |
def _compute_ratio(top, bot):
    """ Make a map that is the ratio of two maps.

    Entries where the denominator is not positive are set to 0.
    """
    # np.where evaluates both branches eagerly, so the previous
    # np.where(bot.data > 0, top.data / bot.data, 0.) still performed the
    # division everywhere and emitted divide-by-zero / invalid-value
    # warnings. np.divide with `where=` only divides where the mask holds.
    data = np.divide(top.data, bot.data,
                     out=np.zeros_like(top.data, dtype=float),
                     where=bot.data > 0)
    return HpxMap(data, top.hpx)
"def",
"_compute_ratio",
"(",
"top",
",",
"bot",
")",
":",
"data",
"=",
"np",
".",
"where",
"(",
"bot",
".",
"data",
">",
"0",
",",
"top",
".",
"data",
"/",
"bot",
".",
"data",
",",
"0.",
")",
"return",
"HpxMap",
"(",
"data",
",",
"top",
".",
... | 38 | 7.4 |
def _rgb_to_hsv(rgbs):
    """Convert Nx3 or Nx4 rgb to hsv.

    :param rgbs: array of rgb(a) rows; validated/normalised by
        ``_check_color_dim``, which also reports the column count.
    :return: float32 array of hsv rows, with the alpha column carried
        through unchanged when the input had 4 columns.
    """
    rgbs, n_dim = _check_color_dim(rgbs)
    hsvs = list()
    for rgb in rgbs:
        rgb = rgb[:3]  # don't use alpha here
        idx = np.argmax(rgb)
        val = rgb[idx]
        c = val - np.min(rgb)
        if c == 0:
            # Achromatic (grey): hue and saturation are zero by convention.
            hue = 0
            sat = 0
        else:
            if idx == 0:  # R == max
                hue = ((rgb[1] - rgb[2]) / c) % 6
            elif idx == 1:  # G == max
                hue = (rgb[2] - rgb[0]) / c + 2
            else:  # B == max
                hue = (rgb[0] - rgb[1]) / c + 4
            hue *= 60
            sat = c / val
        hsv = [hue, sat, val]
        hsvs.append(hsv)
    hsvs = np.array(hsvs, dtype=np.float32)
    if n_dim == 4:
        # BUG FIX: rgbs[:, 3] is 1-D and cannot be concatenated with the
        # (N, 3) hsv array along axis=1; keep the alpha column 2-D.
        hsvs = np.concatenate((hsvs, rgbs[:, 3:]), axis=1)
    return hsvs
"def",
"_rgb_to_hsv",
"(",
"rgbs",
")",
":",
"rgbs",
",",
"n_dim",
"=",
"_check_color_dim",
"(",
"rgbs",
")",
"hsvs",
"=",
"list",
"(",
")",
"for",
"rgb",
"in",
"rgbs",
":",
"rgb",
"=",
"rgb",
"[",
":",
"3",
"]",
"# don't use alpha here",
"idx",
"=",... | 29.962963 | 13.555556 |
def gen_keys(keysize=2048):
    '''
    Generate Salt minion keys and return them as PEM file strings

    keysize
        RSA key size in bits; values below 2048 are raised to 2048.

    Returns a ``(priv, pub)`` tuple of PEM strings.
    '''
    # Mandate that keys are at least 2048 in size
    if keysize < 2048:
        keysize = 2048
    tdir = tempfile.mkdtemp()
    try:
        salt.crypt.gen_keys(tdir, 'minion', keysize)
        priv_path = os.path.join(tdir, 'minion.pem')
        pub_path = os.path.join(tdir, 'minion.pub')
        with salt.utils.files.fopen(priv_path) as fp_:
            priv = salt.utils.stringutils.to_unicode(fp_.read())
        with salt.utils.files.fopen(pub_path) as fp_:
            pub = salt.utils.stringutils.to_unicode(fp_.read())
    finally:
        # Always remove the scratch directory, even if generation or
        # reading fails, so private key material is not left on disk.
        shutil.rmtree(tdir)
    return priv, pub
"def",
"gen_keys",
"(",
"keysize",
"=",
"2048",
")",
":",
"# Mandate that keys are at least 2048 in size",
"if",
"keysize",
"<",
"2048",
":",
"keysize",
"=",
"2048",
"tdir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"salt",
".",
"crypt",
".",
"gen_keys",
"(... | 35.111111 | 17.666667 |
def add_cats(self, axis, cat_data):
    '''
    Add categories to rows or columns using the cat_data array of objects.
    Each object in cat_data is a dictionary with a category title and the
    row/column names that carry it. Categories are appended to any existing
    ones, in the order they appear in the array.

    Example ``cat_data``::

            [
                {
                    "title": "First Category",
                    "cats": {
                        "true": [
                            "ROS1",
                            "AAK1"
                        ]
                    }
                },
                {
                    "title": "Second Category",
                    "cats": {
                        "something": [
                            "PDK4"
                        ]
                    }
                }
            ]

    '''
    for category_group in cat_data:
        categories.add_cats(self, axis, category_group)
"def",
"add_cats",
"(",
"self",
",",
"axis",
",",
"cat_data",
")",
":",
"for",
"inst_data",
"in",
"cat_data",
":",
"categories",
".",
"add_cats",
"(",
"self",
",",
"axis",
",",
"inst_data",
")"
] | 26.387097 | 31.483871 |
def get_weights(self):
    """
    Get weights for this layer

    :return: list of numpy arrays which represent weight and bias,
        or None (with a message) when the layer has no parameters
    """
    params = callBigDlFunc(self.bigdl_type,
                           "getWeights", self.value)
    if params is None:
        print("The layer does not have weight/bias")
        return None
    return [tensor.to_ndarray() for tensor in params]
"def",
"get_weights",
"(",
"self",
")",
":",
"tensorWeights",
"=",
"callBigDlFunc",
"(",
"self",
".",
"bigdl_type",
",",
"\"getWeights\"",
",",
"self",
".",
"value",
")",
"if",
"tensorWeights",
"is",
"not",
"None",
":",
"return",
"[",
"tensor",
".",
"to_nd... | 34.846154 | 16.384615 |
def loadfile(path, mode=None, filetype=None, **kwargs):
    """Loads the given file using the appropriate InferenceFile class.

    Parameters
    ----------
    path : str
        The filename to load.
    mode : str, optional
        Mode to open the file with ('r', 'w', 'a'); defaults to h5py.File's
        default, which is 'a'.
    filetype : str, optional
        Force the file to be loaded with the given class name. Must be
        provided when creating a new file, since the type cannot be read
        from a file that does not exist yet.

    Returns
    -------
    filetype instance
        An open file handler. The class is taken from ``filetype`` if
        given, otherwise from the filetype stored in the file itself.
    """
    if filetype is not None:
        fileclass = filetypes[filetype]
    else:
        # No explicit type: read it back from the file's attrs.
        try:
            fileclass = get_file_type(path)
        except IOError:
            # file doesn't exist, filetype must be provided
            raise IOError("The file appears not to exist. In this case, "
                          "filetype must be provided.")
    return fileclass(path, mode=mode, **kwargs)
"def",
"loadfile",
"(",
"path",
",",
"mode",
"=",
"None",
",",
"filetype",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"filetype",
"is",
"None",
":",
"# try to read the file to get its filetype",
"try",
":",
"fileclass",
"=",
"get_file_type",
"(",
... | 39.138889 | 21.833333 |
def _yield_leaves(self, url, tree):
    '''
    Yields a URL corresponding to a leaf dataset for each dataset described by the catalog.

    Datasets are filtered, in order, by the configured skip patterns, the
    before/after modification-date window, and the select patterns.

    :param str url: URL for the current catalog
    :param lxml.etree.Element tree: Current XML Tree
    '''
    for leaf in tree.findall('.//{%s}dataset[@urlPath]' % INV_NS):
        # Subset by the skips
        name = leaf.get("name")
        if any([x.match(name) for x in self.skip]):
            logger.info("Skipping dataset based on 'skips'. Name: %s" % name)
            continue
        # Subset by before and after
        date_tag = leaf.find('.//{%s}date[@type="modified"]' % INV_NS)
        if date_tag is not None:
            try:
                dt = parse(date_tag.text)
            except ValueError:
                logger.error("Skipping dataset.Wrong date string %s " % date_tag.text)
                continue
            else:
                # Treat catalog timestamps as UTC so they compare against
                # the (presumably UTC) before/after bounds.
                dt = dt.replace(tzinfo=pytz.utc)
            if self.after and dt < self.after:
                continue
            if self.before and dt > self.before:
                continue
        # Subset by the Selects defined
        gid = leaf.get('ID')
        if self.select is not None:
            if gid is not None and any([x.match(gid) for x in self.select]):
                logger.debug("Processing %s" % gid)
                yield "%s?dataset=%s" % (url, gid)
            else:
                logger.info("Ignoring dataset based on 'selects'. ID: %s" % gid)
                continue
        else:
            # No selects configured: every surviving dataset is yielded.
            logger.debug("Processing %s" % gid)
            yield "%s?dataset=%s" % (url, gid)
"def",
"_yield_leaves",
"(",
"self",
",",
"url",
",",
"tree",
")",
":",
"for",
"leaf",
"in",
"tree",
".",
"findall",
"(",
"'.//{%s}dataset[@urlPath]'",
"%",
"INV_NS",
")",
":",
"# Subset by the skips",
"name",
"=",
"leaf",
".",
"get",
"(",
"\"name\"",
")",... | 42.75 | 18.7 |
def siblings_before(self):
    """
    :return: a list of this node's siblings that occur *before* this
        node in the DOM.
    """
    preceding = []
    for child in self.adapter.get_node_children(self.parent.impl_node):
        if child == self.impl_node:
            break
        preceding.append(child)
    return self._convert_nodelist(preceding)
"def",
"siblings_before",
"(",
"self",
")",
":",
"impl_nodelist",
"=",
"self",
".",
"adapter",
".",
"get_node_children",
"(",
"self",
".",
"parent",
".",
"impl_node",
")",
"before_nodelist",
"=",
"[",
"]",
"for",
"n",
"in",
"impl_nodelist",
":",
"if",
"n",... | 35.916667 | 13.083333 |
def GetPropertyValueEx(self, propertyId: int, ignoreDefaultValue: int) -> Any:
    """
    Call IUIAutomationElement::GetCurrentPropertyValueEx.
    propertyId: int, a value in class `PropertyId`.
    ignoreDefaultValue: int, 0 or 1.
    Return Any, corresponding type according to propertyId.
    Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-getcurrentpropertyvalueex
    """
    element = self.Element
    return element.GetCurrentPropertyValueEx(propertyId, ignoreDefaultValue)
"def",
"GetPropertyValueEx",
"(",
"self",
",",
"propertyId",
":",
"int",
",",
"ignoreDefaultValue",
":",
"int",
")",
"->",
"Any",
":",
"return",
"self",
".",
"Element",
".",
"GetCurrentPropertyValueEx",
"(",
"propertyId",
",",
"ignoreDefaultValue",
")"
] | 62 | 28.444444 |
def most_recent_common_ancestor(self, *ts):
    """Find the MRCA of some tax_ids.

    Returns the MRCA of the specified tax_ids, or raises ``NoAncestor`` if
    no ancestor of the specified tax_ids could be found.
    """
    # Large argument sets go through the bulk-query path.
    res = self._large_mrca(ts) if len(ts) > 200 else self._small_mrca(ts)
    if not res:
        raise NoAncestor()
    # Unwrap the single ((value,),) row returned by the query.
    (res,), = res
    return res
"def",
"most_recent_common_ancestor",
"(",
"self",
",",
"*",
"ts",
")",
":",
"if",
"len",
"(",
"ts",
")",
">",
"200",
":",
"res",
"=",
"self",
".",
"_large_mrca",
"(",
"ts",
")",
"else",
":",
"res",
"=",
"self",
".",
"_small_mrca",
"(",
"ts",
")",
... | 28 | 17.875 |
def is_readable(path=None):
    """
    Test if the supplied filesystem path can be read

    :param path: A filesystem path (``None``/empty is treated as unreadable)
    :return: True if the path is a file that can be read. Otherwise, False
    """
    # Guard the None/empty default: os.path.isfile(None) raises TypeError.
    if not path:
        return False
    return os.path.isfile(path) and os.access(path, os.R_OK)
"def",
"is_readable",
"(",
"path",
"=",
"None",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
"and",
"os",
".",
"access",
"(",
"path",
",",
"os",
".",
"R_OK",
")",
":",
"return",
"True",
"return",
"False"
] | 32.555556 | 14.111111 |
def compute_availabilities(hdf5_file, N_columns, damping, N_processes, rows_sum):
    """Coordinates the computation and update of the availability matrix
    for Affinity Propagation clustering.

    Parameters
    ----------
    hdf5_file : string or file handle
        Specify access to the hierarchical data format used throughout all the iterations
        of message-passing between data-points involved in Affinity Propagation clustering.

    N_columns : int
        The number of samples in the data-set subjected to Affinity Propagation clustering.

    damping : float
        The damping parameter of Affinity Propagation clustering, typically set to 0.5.

    N_processes : int
        The number of subprocesses involved in the parallel computation and update of the
        matrix of availabilities.

    rows_sum : array of shape (N_columns,)
        A vector containing, for each column entry of the similarities matrix, the sum
        of its rows entries.
    """
    slice_queue = multiprocessing.JoinableQueue()
    pid_list = []
    # Spawn the worker pool; each daemon worker consumes row slices from
    # the queue and updates its share of the availability matrix in HDF5.
    for i in range(N_processes):
        worker = Availabilities_worker(hdf5_file, '/aff_prop_group',
                                       N_columns, damping, slice_queue, rows_sum)
        worker.daemon = True
        worker.start()
        pid_list.append(worker.pid)
    # Enqueue the work in 8 * N_processes chunks, then block until every
    # slice has been processed.
    for rows_slice in chunk_generator(N_columns, 8 * N_processes):
        slice_queue.put(rows_slice)
    slice_queue.join()
    slice_queue.close()
    # Workers never exit on their own (daemon loop); terminate explicitly
    # and reclaim memory used during this message-passing round.
    terminate_processes(pid_list)
    gc.collect()
"def",
"compute_availabilities",
"(",
"hdf5_file",
",",
"N_columns",
",",
"damping",
",",
"N_processes",
",",
"rows_sum",
")",
":",
"slice_queue",
"=",
"multiprocessing",
".",
"JoinableQueue",
"(",
")",
"pid_list",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"... | 35.372093 | 24.372093 |
def itemgetters(*args):
    """
    Get a handful of items from an iterable.

    Returns a function that applies ``itemgetter(*args)`` to every element
    of a sequence and collects the results in a list.
    """
    getter = itemgetter(*args)

    def apply_to_all(seq):
        return [getter(item) for item in seq]
    return apply_to_all
"def",
"itemgetters",
"(",
"*",
"args",
")",
":",
"f",
"=",
"itemgetter",
"(",
"*",
"args",
")",
"def",
"inner",
"(",
"l",
")",
":",
"return",
"[",
"f",
"(",
"x",
")",
"for",
"x",
"in",
"l",
"]",
"return",
"inner"
] | 18.846154 | 21.923077 |
def load_user_from_request(req):
    """
    Just like the Flask.login load_user_from_request

    If you need to customize the user loading from your database,
    the FlaskBitjws.get_user_by_key method is the one to modify.

    :param req: The flask request to load a user based on.
    :return: a FlaskUser on success, otherwise None.
    """
    load_jws_from_request(req)
    # Reject requests whose JWS could not be parsed or that lack an 'iat' claim.
    if not hasattr(req, 'jws_header') or req.jws_header is None or not \
        'iat' in req.jws_payload:
        current_app.logger.info("invalid jws request.")
        return None
    # Replay protection: compare this request's 'iat' against the last
    # nonce recorded for the signing key ('kid').
    ln = current_app.bitjws.get_last_nonce(current_app,
                                           req.jws_header['kid'],
                                           req.jws_payload['iat'])
    # The stored nonce is in milliseconds, hence the * 1000 before comparing.
    if (ln is None or 'iat' not in req.jws_payload or
            req.jws_payload['iat'] * 1000 <= ln):
        current_app.logger.info("invalid nonce. lastnonce: %s" % ln)
        return None
    # Look up the user record associated with the signing key.
    rawu = current_app.bitjws.get_user_by_key(current_app,
                                              req.jws_header['kid'])
    if rawu is None:
        return None
    current_app.logger.info("logging in user: %s" % rawu)
    return FlaskUser(rawu)
"def",
"load_user_from_request",
"(",
"req",
")",
":",
"load_jws_from_request",
"(",
"req",
")",
"if",
"not",
"hasattr",
"(",
"req",
",",
"'jws_header'",
")",
"or",
"req",
".",
"jws_header",
"is",
"None",
"or",
"not",
"'iat'",
"in",
"req",
".",
"jws_payloa... | 37.366667 | 20.766667 |
def get_context_data(self, **kwargs):
    """Includes the Gauge slugs and data in the context."""
    context = super(GaugesView, self).get_context_data(**kwargs)
    context.update({'gauges': get_r().gauge_slugs()})
    return context
"def",
"get_context_data",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"data",
"=",
"super",
"(",
"GaugesView",
",",
"self",
")",
".",
"get_context_data",
"(",
"*",
"*",
"kwargs",
")",
"data",
".",
"update",
"(",
"{",
"'gauges'",
":",
"get_r",
"(... | 47.6 | 12.6 |
def visit_unaryop(self, node):
    """return an astroid.UnaryOp node as string"""
    # "not" needs a trailing space; symbolic operators (-, +, ~) do not.
    operator = "not " if node.op == "not" else node.op
    return "%s%s" % (operator, self._precedence_parens(node, node.operand))
"def",
"visit_unaryop",
"(",
"self",
",",
"node",
")",
":",
"if",
"node",
".",
"op",
"==",
"\"not\"",
":",
"operator",
"=",
"\"not \"",
"else",
":",
"operator",
"=",
"node",
".",
"op",
"return",
"\"%s%s\"",
"%",
"(",
"operator",
",",
"self",
".",
"_p... | 37.571429 | 15.571429 |
def handle_pagination(self, page_num=None, page_size=None):
    """ Handle retrieving and processing the next page of results. """
    next_page = self.get_next_page(page_num=page_num, page_size=page_size)
    self._response_json = next_page
    self.update_attrs()
    # Reset the cursor to the start of the freshly fetched page.
    self.position = 0
    self.values = self.process_page()
"def",
"handle_pagination",
"(",
"self",
",",
"page_num",
"=",
"None",
",",
"page_size",
"=",
"None",
")",
":",
"self",
".",
"_response_json",
"=",
"self",
".",
"get_next_page",
"(",
"page_num",
"=",
"page_num",
",",
"page_size",
"=",
"page_size",
")",
"se... | 52.333333 | 16 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.