text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def get(vm, key='uuid'):
'''
Output the JSON object describing a VM
vm : string
vm to be targeted
key : string [uuid|alias|hostname]
value type of 'vm' parameter
CLI Example:
.. code-block:: bash
salt '*' vmadm.get 186da9ab-7392-4f55-91a5-b8f1fe770543
salt '*' vmadm.get nacl key=alias
'''
ret = {}
if key not in ['uuid', 'alias', 'hostname']:
ret['Error'] = 'Key must be either uuid, alias or hostname'
return ret
vm = lookup('{0}={1}'.format(key, vm), one=True)
if 'Error' in vm:
return vm
# vmadm get <uuid>
cmd = 'vmadm get {0}'.format(vm)
res = __salt__['cmd.run_all'](cmd)
retcode = res['retcode']
if retcode != 0:
ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode)
return ret
return salt.utils.json.loads(res['stdout']) | [
"def",
"get",
"(",
"vm",
",",
"key",
"=",
"'uuid'",
")",
":",
"ret",
"=",
"{",
"}",
"if",
"key",
"not",
"in",
"[",
"'uuid'",
",",
"'alias'",
",",
"'hostname'",
"]",
":",
"ret",
"[",
"'Error'",
"]",
"=",
"'Key must be either uuid, alias or hostname'",
"... | 27.870968 | 20 |
def parse_label(module = None, function = None, offset = None):
"""
Creates a label from a module and a function name, plus an offset.
@warning: This method only creates the label, it doesn't make sure the
label actually points to a valid memory location.
@type module: None or str
@param module: (Optional) Module name.
@type function: None, str or int
@param function: (Optional) Function name or ordinal.
@type offset: None or int
@param offset: (Optional) Offset value.
If C{function} is specified, offset from the function.
If C{function} is C{None}, offset from the module.
@rtype: str
@return:
Label representing the given function in the given module.
@raise ValueError:
The module or function name contain invalid characters.
"""
# TODO
# Invalid characters should be escaped or filtered.
# Convert ordinals to strings.
try:
function = "#0x%x" % function
except TypeError:
pass
# Validate the parameters.
if module is not None and ('!' in module or '+' in module):
raise ValueError("Invalid module name: %s" % module)
if function is not None and ('!' in function or '+' in function):
raise ValueError("Invalid function name: %s" % function)
# Parse the label.
if module:
if function:
if offset:
label = "%s!%s+0x%x" % (module, function, offset)
else:
label = "%s!%s" % (module, function)
else:
if offset:
## label = "%s+0x%x!" % (module, offset)
label = "%s!0x%x" % (module, offset)
else:
label = "%s!" % module
else:
if function:
if offset:
label = "!%s+0x%x" % (function, offset)
else:
label = "!%s" % function
else:
if offset:
label = "0x%x" % offset
else:
label = "0x0"
return label | [
"def",
"parse_label",
"(",
"module",
"=",
"None",
",",
"function",
"=",
"None",
",",
"offset",
"=",
"None",
")",
":",
"# TODO",
"# Invalid characters should be escaped or filtered.",
"# Convert ordinals to strings.",
"try",
":",
"function",
"=",
"\"#0x%x\"",
"%",
"f... | 31.985507 | 21.550725 |
def create_knowledge_base(self,
parent,
knowledge_base,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Creates a knowledge base.
Example:
>>> import dialogflow_v2beta1
>>>
>>> client = dialogflow_v2beta1.KnowledgeBasesClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize ``knowledge_base``:
>>> knowledge_base = {}
>>>
>>> response = client.create_knowledge_base(parent, knowledge_base)
Args:
parent (str): Required. The agent to create a knowledge base for.
Format: ``projects/<Project ID>/agent``.
knowledge_base (Union[dict, ~google.cloud.dialogflow_v2beta1.types.KnowledgeBase]): Required. The knowledge base to create.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2beta1.types.KnowledgeBase`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2beta1.types.KnowledgeBase` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'create_knowledge_base' not in self._inner_api_calls:
self._inner_api_calls[
'create_knowledge_base'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_knowledge_base,
default_retry=self._method_configs[
'CreateKnowledgeBase'].retry,
default_timeout=self._method_configs['CreateKnowledgeBase']
.timeout,
client_info=self._client_info,
)
request = knowledge_base_pb2.CreateKnowledgeBaseRequest(
parent=parent,
knowledge_base=knowledge_base,
)
return self._inner_api_calls['create_knowledge_base'](
request, retry=retry, timeout=timeout, metadata=metadata) | [
"def",
"create_knowledge_base",
"(",
"self",
",",
"parent",
",",
"knowledge_base",
",",
"retry",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"timeout",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
... | 47.609375 | 24.828125 |
def uint(self, length, name, value=None, align=None):
"""Add an unsigned integer to template.
`length` is given in bytes and `value` is optional. `align` can be used
to align the field to longer byte length.
Examples:
| uint | 2 | foo |
| uint | 2 | foo | 42 |
| uint | 2 | fourByteFoo | 42 | align=4 |
"""
self._add_field(UInt(length, name, value, align=align)) | [
"def",
"uint",
"(",
"self",
",",
"length",
",",
"name",
",",
"value",
"=",
"None",
",",
"align",
"=",
"None",
")",
":",
"self",
".",
"_add_field",
"(",
"UInt",
"(",
"length",
",",
"name",
",",
"value",
",",
"align",
"=",
"align",
")",
")"
] | 35.416667 | 18.25 |
def search_with_retry(self, sleeptime=3.0, retrycount=3, **params):
"""
This function performs a search given a dictionary of search(..)
parameters. It accounts for server timeouts as necessary and
will retry some number of times.
@param sleeptime: number of seconds to sleep between retries
@type sleeptime: float
@param retrycount: number of times to retry given search
@type retrycount: int
@param params: search parameters
@type params: **kwds
@rtype: list
@return: returns records in given format
"""
results = []
count = 0
while count < retrycount:
try:
results = self.search(**params)
break
except urllib2.URLError:
sys.stderr.write("Timeout while searching...Retrying\n")
time.sleep(sleeptime)
count += 1
else:
sys.stderr.write("Aborting search after %d attempts.\n" % (retrycount,))
return results | [
"def",
"search_with_retry",
"(",
"self",
",",
"sleeptime",
"=",
"3.0",
",",
"retrycount",
"=",
"3",
",",
"*",
"*",
"params",
")",
":",
"results",
"=",
"[",
"]",
"count",
"=",
"0",
"while",
"count",
"<",
"retrycount",
":",
"try",
":",
"results",
"=",
... | 33.548387 | 19.419355 |
def add_edge(self, u, v):
"""
O(log(n))
"""
# print('add_edge u, v = %r, %r' % (u, v,))
if self.graph.has_edge(u, v):
return
for node in (u, v):
if not self.graph.has_node(node):
self.graph.add_node(node)
for Fi in self.forests:
Fi.add_node(node)
# First set the level of (u, v) to 0
self.level[(u, v)] = 0
# update the adjacency lists of u and v
self.graph.add_edge(u, v)
# If u and v are in separate trees in F_0, add e to F_0
ru = self.forests[0].find_root(u)
rv = self.forests[0].find_root(v)
if ru is not rv:
# If they are in different connected compoments merge compoments
self.forests[0].add_edge(u, v) | [
"def",
"add_edge",
"(",
"self",
",",
"u",
",",
"v",
")",
":",
"# print('add_edge u, v = %r, %r' % (u, v,))",
"if",
"self",
".",
"graph",
".",
"has_edge",
"(",
"u",
",",
"v",
")",
":",
"return",
"for",
"node",
"in",
"(",
"u",
",",
"v",
")",
":",
"if",... | 36 | 9.636364 |
def get_ext(self, obj=None):
"""Return the file extension
:param obj: the fileinfo with information. If None, this will use the stored object of JB_File
:type obj: :class:`FileInfo`
:returns: the file extension
:rtype: str
:raises: None
"""
if obj is None:
obj = self._obj
return self._extel.get_ext(obj) | [
"def",
"get_ext",
"(",
"self",
",",
"obj",
"=",
"None",
")",
":",
"if",
"obj",
"is",
"None",
":",
"obj",
"=",
"self",
".",
"_obj",
"return",
"self",
".",
"_extel",
".",
"get_ext",
"(",
"obj",
")"
] | 31.5 | 16.083333 |
def interpolate_to_netcdf(self, in_lon, in_lat, out_path, date_unit="seconds since 1970-01-01T00:00",
interp_type="spline"):
"""
Calls the interpolation function and then saves the MRMS data to a netCDF file. It will also create
separate directories for each variable if they are not already available.
"""
if interp_type == "spline":
out_data = self.interpolate_grid(in_lon, in_lat)
else:
out_data = self.max_neighbor(in_lon, in_lat)
if not os.access(out_path + self.variable, os.R_OK):
try:
os.mkdir(out_path + self.variable)
except OSError:
print(out_path + self.variable + " already created")
out_file = out_path + self.variable + "/" + "{0}_{1}_{2}.nc".format(self.variable,
self.start_date.strftime("%Y%m%d-%H:%M"),
self.end_date.strftime("%Y%m%d-%H:%M"))
out_obj = Dataset(out_file, "w")
out_obj.createDimension("time", out_data.shape[0])
out_obj.createDimension("y", out_data.shape[1])
out_obj.createDimension("x", out_data.shape[2])
data_var = out_obj.createVariable(self.variable, "f4", ("time", "y", "x"), zlib=True,
fill_value=-9999.0,
least_significant_digit=3)
data_var[:] = out_data
data_var.long_name = self.variable
data_var.coordinates = "latitude longitude"
if "MESH" in self.variable or "QPE" in self.variable:
data_var.units = "mm"
elif "Reflectivity" in self.variable:
data_var.units = "dBZ"
elif "Rotation" in self.variable:
data_var.units = "s-1"
else:
data_var.units = ""
out_lon = out_obj.createVariable("longitude", "f4", ("y", "x"), zlib=True)
out_lon[:] = in_lon
out_lon.units = "degrees_east"
out_lat = out_obj.createVariable("latitude", "f4", ("y", "x"), zlib=True)
out_lat[:] = in_lat
out_lat.units = "degrees_north"
dates = out_obj.createVariable("time", "i8", ("time",), zlib=True)
dates[:] = np.round(date2num(self.all_dates.to_pydatetime(), date_unit)).astype(np.int64)
dates.long_name = "Valid date"
dates.units = date_unit
out_obj.Conventions="CF-1.6"
out_obj.close()
return | [
"def",
"interpolate_to_netcdf",
"(",
"self",
",",
"in_lon",
",",
"in_lat",
",",
"out_path",
",",
"date_unit",
"=",
"\"seconds since 1970-01-01T00:00\"",
",",
"interp_type",
"=",
"\"spline\"",
")",
":",
"if",
"interp_type",
"==",
"\"spline\"",
":",
"out_data",
"=",... | 51.510204 | 21.714286 |
def get_port_profile_status_input_port_profile_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_status = ET.Element("get_port_profile_status")
config = get_port_profile_status
input = ET.SubElement(get_port_profile_status, "input")
port_profile_name = ET.SubElement(input, "port-profile-name")
port_profile_name.text = kwargs.pop('port_profile_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"get_port_profile_status_input_port_profile_name",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"get_port_profile_status",
"=",
"ET",
".",
"Element",
"(",
"\"get_port_profile_status\"",
")",
"c... | 45.083333 | 17 |
def Uninitialize(
self,
Channel):
"""
Uninitializes one or all PCAN Channels initialized by CAN_Initialize
Remarks:
Giving the TPCANHandle value "PCAN_NONEBUS", uninitialize all initialized channels
Parameters:
Channel : A TPCANHandle representing a PCAN Channel
Returns:
A TPCANStatus error code
"""
try:
res = self.__m_dllBasic.CAN_Uninitialize(Channel)
return TPCANStatus(res)
except:
logger.error("Exception on PCANBasic.Uninitialize")
raise | [
"def",
"Uninitialize",
"(",
"self",
",",
"Channel",
")",
":",
"try",
":",
"res",
"=",
"self",
".",
"__m_dllBasic",
".",
"CAN_Uninitialize",
"(",
"Channel",
")",
"return",
"TPCANStatus",
"(",
"res",
")",
"except",
":",
"logger",
".",
"error",
"(",
"\"Exce... | 26.772727 | 24.772727 |
def pmag_angle(D1,D2): # use this
"""
finds the angle between lists of two directions D1,D2
"""
D1 = numpy.array(D1)
if len(D1.shape) > 1:
D1 = D1[:,0:2] # strip off intensity
else: D1 = D1[:2]
D2 = numpy.array(D2)
if len(D2.shape) > 1:
D2 = D2[:,0:2] # strip off intensity
else: D2 = D2[:2]
X1 = dir2cart(D1) # convert to cartesian from polar
X2 = dir2cart(D2)
angles = [] # set up a list for angles
for k in range(X1.shape[0]): # single vector
angle = numpy.arccos(numpy.dot(X1[k],X2[k]))*180./numpy.pi # take the dot product
angle = angle%360.
angles.append(angle)
return numpy.array(angles) | [
"def",
"pmag_angle",
"(",
"D1",
",",
"D2",
")",
":",
"# use this ",
"D1",
"=",
"numpy",
".",
"array",
"(",
"D1",
")",
"if",
"len",
"(",
"D1",
".",
"shape",
")",
">",
"1",
":",
"D1",
"=",
"D1",
"[",
":",
",",
"0",
":",
"2",
"]",
"# strip off i... | 34.1 | 13 |
def filter_genes(
data: AnnData,
min_counts: Optional[int] = None,
min_cells: Optional[int] = None,
max_counts: Optional[int] = None,
max_cells: Optional[int] = None,
inplace: bool = True,
copy: bool = False,
) -> Union[AnnData, None, Tuple[np.ndarray, np.ndarray]]:
"""Filter genes based on number of cells or counts.
Keep genes that have at least ``min_counts`` counts or are expressed in at
least ``min_cells`` cells or have at most ``max_counts`` counts or are expressed
in at most ``max_cells`` cells.
Only provide one of the optional parameters ``min_counts``, ``min_cells``,
``max_counts``, ``max_cells`` per call.
Parameters
----------
data
An annotated data matrix of shape `n_obs` × `n_vars`. Rows correspond
to cells and columns to genes.
min_counts
Minimum number of counts required for a gene to pass filtering.
min_cells
Minimum number of cells expressed required for a gene to pass filtering.
max_counts
Maximum number of counts required for a gene to pass filtering.
max_cells
Maximum number of cells expressed required for a gene to pass filtering.
inplace
Perform computation inplace or return result.
Returns
-------
Depending on `inplace`, returns the following arrays or directly subsets
and annotates the data matrix
gene_subset : numpy.ndarray
Boolean index mask that does filtering. `True` means that the
gene is kept. `False` means the gene is removed.
number_per_gene : numpy.ndarray
Depending on what was tresholded (`counts` or `cells`), the array stores
`n_counts` or `n_cells` per gene.
"""
if copy:
logg.warn('`copy` is deprecated, use `inplace` instead.')
n_given_options = sum(
option is not None for option in
[min_cells, min_counts, max_cells, max_counts])
if n_given_options != 1:
raise ValueError(
'Only provide one of the optional parameters `min_counts`,'
'`min_cells`, `max_counts`, `max_cells` per call.')
if isinstance(data, AnnData):
adata = data.copy() if copy else data
gene_subset, number = materialize_as_ndarray(
filter_genes(adata.X, min_cells=min_cells,
min_counts=min_counts, max_cells=max_cells,
max_counts=max_counts))
if not inplace:
return gene_subset, number
if min_cells is None and max_cells is None:
adata.var['n_counts'] = number
else:
adata.var['n_cells'] = number
adata._inplace_subset_var(gene_subset)
return adata if copy else None
X = data # proceed with processing the data matrix
min_number = min_counts if min_cells is None else min_cells
max_number = max_counts if max_cells is None else max_cells
number_per_gene = np.sum(X if min_cells is None and max_cells is None
else X > 0, axis=0)
if issparse(X):
number_per_gene = number_per_gene.A1
if min_number is not None:
gene_subset = number_per_gene >= min_number
if max_number is not None:
gene_subset = number_per_gene <= max_number
s = np.sum(~gene_subset)
if s > 0:
logg.info('filtered out {} genes that are detected'.format(s), end=' ')
if min_cells is not None or min_counts is not None:
logg.info('in less than',
str(min_cells) + ' cells'
if min_counts is None else str(min_counts) + ' counts', no_indent=True)
if max_cells is not None or max_counts is not None:
logg.info('in more than ',
str(max_cells) + ' cells'
if max_counts is None else str(max_counts) + ' counts', no_indent=True)
return gene_subset, number_per_gene | [
"def",
"filter_genes",
"(",
"data",
":",
"AnnData",
",",
"min_counts",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"min_cells",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"max_counts",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
"... | 40.063158 | 19.705263 |
def _handle_status(self, key, value):
"""Parse a status code from the attached GnuPG process.
:raises ValueError: if the status message is unknown.
"""
informational_keys = ["KEY_CONSIDERED"]
if key in ("EXPORTED"):
self.fingerprints.append(value)
elif key == "EXPORT_RES":
export_res = value.split()
for x in self.counts.keys():
self.counts[x] += int(export_res.pop(0))
elif key not in informational_keys:
raise ValueError("Unknown status message: %r" % key) | [
"def",
"_handle_status",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"informational_keys",
"=",
"[",
"\"KEY_CONSIDERED\"",
"]",
"if",
"key",
"in",
"(",
"\"EXPORTED\"",
")",
":",
"self",
".",
"fingerprints",
".",
"append",
"(",
"value",
")",
"elif",
... | 40.5 | 9.642857 |
def _read_gaf_nts(self, fin_gaf, hdr_only, allow_missing_symbol):
"""Read GAF file. Store annotation data in a list of namedtuples."""
nts = []
ver = None
hdrobj = GafHdr()
datobj = None
# pylint: disable=not-callable
ntobj_make = None
get_gafvals = None
lnum = -1
line = ''
try:
with open(fin_gaf) as ifstrm:
for lnum, line in enumerate(ifstrm, 1):
# Read data
if get_gafvals:
# print(lnum, line)
gafvals = get_gafvals(line)
if gafvals:
nts.append(ntobj_make(gafvals))
else:
datobj.ignored.append((lnum, line))
# Read header
elif datobj is None:
if line[0] == '!':
if ver is None and line[1:13] == 'gaf-version:':
ver = line[13:].strip()
hdrobj.chkaddhdr(line)
else:
self.hdr = hdrobj.get_hdr()
if hdr_only:
return nts
datobj = GafData(ver, allow_missing_symbol)
get_gafvals = datobj.get_gafvals
ntobj_make = datobj.get_ntobj()._make
except Exception as inst:
import traceback
traceback.print_exc()
sys.stderr.write("\n **FATAL-gaf: {MSG}\n\n".format(MSG=str(inst)))
sys.stderr.write("**FATAL-gaf: {FIN}[{LNUM}]:\n{L}".format(FIN=fin_gaf, L=line, LNUM=lnum))
if datobj is not None:
datobj.prt_line_detail(sys.stdout, line)
sys.exit(1)
self.datobj = datobj
return nts | [
"def",
"_read_gaf_nts",
"(",
"self",
",",
"fin_gaf",
",",
"hdr_only",
",",
"allow_missing_symbol",
")",
":",
"nts",
"=",
"[",
"]",
"ver",
"=",
"None",
"hdrobj",
"=",
"GafHdr",
"(",
")",
"datobj",
"=",
"None",
"# pylint: disable=not-callable",
"ntobj_make",
"... | 42.044444 | 15.288889 |
def rollback(using=None):
"""
This function does the rollback itself and resets the dirty flag.
"""
if using is None:
for using in tldap.backend.connections:
connection = tldap.backend.connections[using]
connection.rollback()
return
connection = tldap.backend.connections[using]
connection.rollback() | [
"def",
"rollback",
"(",
"using",
"=",
"None",
")",
":",
"if",
"using",
"is",
"None",
":",
"for",
"using",
"in",
"tldap",
".",
"backend",
".",
"connections",
":",
"connection",
"=",
"tldap",
".",
"backend",
".",
"connections",
"[",
"using",
"]",
"connec... | 32.181818 | 13.090909 |
def get_unique_groups(input_list):
"""Function to get a unique list of groups."""
out_list = []
for item in input_list:
if item not in out_list:
out_list.append(item)
return out_list | [
"def",
"get_unique_groups",
"(",
"input_list",
")",
":",
"out_list",
"=",
"[",
"]",
"for",
"item",
"in",
"input_list",
":",
"if",
"item",
"not",
"in",
"out_list",
":",
"out_list",
".",
"append",
"(",
"item",
")",
"return",
"out_list"
] | 30.285714 | 11.142857 |
def rbac_policy_list(request, **kwargs):
"""List of RBAC Policies."""
policies = neutronclient(request).list_rbac_policies(
**kwargs).get('rbac_policies')
return [RBACPolicy(p) for p in policies] | [
"def",
"rbac_policy_list",
"(",
"request",
",",
"*",
"*",
"kwargs",
")",
":",
"policies",
"=",
"neutronclient",
"(",
"request",
")",
".",
"list_rbac_policies",
"(",
"*",
"*",
"kwargs",
")",
".",
"get",
"(",
"'rbac_policies'",
")",
"return",
"[",
"RBACPolic... | 42.2 | 4.6 |
def _queue_models(self, models, context):
"""
Work an an appropriate ordering for the models.
This isn't essential, but makes the script look nicer because
more instances can be defined on their first try.
"""
model_queue = []
number_remaining_models = len(models)
# Max number of cycles allowed before we call it an infinite loop.
MAX_CYCLES = number_remaining_models
allowed_cycles = MAX_CYCLES
while number_remaining_models > 0:
previous_number_remaining_models = number_remaining_models
model = models.pop(0)
# If the model is ready to be processed, add it to the list
if check_dependencies(model, model_queue, context["__avaliable_models"]):
model_class = ModelCode(model=model, context=context, stdout=self.stdout, stderr=self.stderr, options=self.options)
model_queue.append(model_class)
# Otherwise put the model back at the end of the list
else:
models.append(model)
# Check for infinite loops.
# This means there is a cyclic foreign key structure
# That cannot be resolved by re-ordering
number_remaining_models = len(models)
if number_remaining_models == previous_number_remaining_models:
allowed_cycles -= 1
if allowed_cycles <= 0:
# Add the remaining models, but do not remove them from the model list
missing_models = [ModelCode(model=m, context=context, stdout=self.stdout, stderr=self.stderr, options=self.options) for m in models]
model_queue += missing_models
# Replace the models with the model class objects
# (sure, this is a little bit of hackery)
models[:] = missing_models
break
else:
allowed_cycles = MAX_CYCLES
return model_queue | [
"def",
"_queue_models",
"(",
"self",
",",
"models",
",",
"context",
")",
":",
"model_queue",
"=",
"[",
"]",
"number_remaining_models",
"=",
"len",
"(",
"models",
")",
"# Max number of cycles allowed before we call it an infinite loop.",
"MAX_CYCLES",
"=",
"number_remain... | 45.318182 | 22.727273 |
def aStockQoutation(self,code):
'''
订阅一只股票的实时行情数据,接收推送
:param code: 股票代码
:return:
'''
#设置监听-->订阅-->调用接口
# 分时
self.quote_ctx.set_handler(RTDataTest())
self.quote_ctx.subscribe(code, SubType.RT_DATA)
ret_code_rt_data, ret_data_rt_data = self.quote_ctx.get_rt_data(code)
# 逐笔
self.quote_ctx.set_handler(TickerTest())
self.quote_ctx.subscribe(code, SubType.TICKER)
ret_code_rt_ticker, ret_data_rt_ticker = self.quote_ctx.get_rt_ticker(code)
# 报价
self.quote_ctx.set_handler(StockQuoteTest())
self.quote_ctx.subscribe(code, SubType.QUOTE)
ret_code_stock_quote, ret_data_stock_quote = self.quote_ctx.get_stock_quote([code])
# 实时K线
self.quote_ctx.set_handler(CurKlineTest())
kTypes = [SubType.K_1M, SubType.K_5M, SubType.K_15M, SubType.K_30M, SubType.K_60M, SubType.K_DAY,
SubType.K_WEEK, SubType.K_MON]
auTypes = [AuType.NONE, AuType.QFQ, AuType.HFQ]
num = 10
ret_code_cur_kline = RET_OK
for kType in kTypes:
self.quote_ctx.subscribe(code, kType)
for auType in auTypes:
ret_code_cur_kline_temp, ret_data_cur_kline = self.quote_ctx.get_cur_kline(code, num, kType, auType)
if ret_code_cur_kline_temp is RET_ERROR:
ret_code_cur_kline = RET_ERROR
# 摆盘
self.quote_ctx.set_handler(OrderBookTest())
self.quote_ctx.subscribe(code, SubType.ORDER_BOOK)
ret_code_order_book, ret_data_order_book = self.quote_ctx.get_order_book(code)
# 经纪队列
self.quote_ctx.set_handler(BrokerTest())
self.quote_ctx.subscribe(code, SubType.BROKER)
ret_code_broker_queue, bid_frame_table, ask_frame_table = self.quote_ctx.get_broker_queue(code)
return ret_code_rt_data+ret_code_rt_ticker+ret_code_stock_quote+ret_code_cur_kline+ret_code_order_book+ret_code_broker_queue | [
"def",
"aStockQoutation",
"(",
"self",
",",
"code",
")",
":",
"#设置监听-->订阅-->调用接口",
"# 分时",
"self",
".",
"quote_ctx",
".",
"set_handler",
"(",
"RTDataTest",
"(",
")",
")",
"self",
".",
"quote_ctx",
".",
"subscribe",
"(",
"code",
",",
"SubType",
".",
"RT_DAT... | 44.454545 | 25.636364 |
def InitFromGrrMessage(self, message):
"""Init from GrrMessage rdfvalue."""
if message.source:
self.client_id = message.source.Basename()
self.payload_type = compatibility.GetName(message.payload.__class__)
self.payload = message.payload
self.timestamp = message.age
return self | [
"def",
"InitFromGrrMessage",
"(",
"self",
",",
"message",
")",
":",
"if",
"message",
".",
"source",
":",
"self",
".",
"client_id",
"=",
"message",
".",
"source",
".",
"Basename",
"(",
")",
"self",
".",
"payload_type",
"=",
"compatibility",
".",
"GetName",
... | 30.1 | 17.9 |
def time_to_jump( self ):
"""
The timestep until the next jump.
Args:
None
Returns:
(Float): The timestep until the next jump.
"""
k_tot = rate_prefactor * np.sum( self.p )
return -( 1.0 / k_tot ) * math.log( random.random() ) | [
"def",
"time_to_jump",
"(",
"self",
")",
":",
"k_tot",
"=",
"rate_prefactor",
"*",
"np",
".",
"sum",
"(",
"self",
".",
"p",
")",
"return",
"-",
"(",
"1.0",
"/",
"k_tot",
")",
"*",
"math",
".",
"log",
"(",
"random",
".",
"random",
"(",
")",
")"
] | 24.833333 | 17.916667 |
def build_tree_from_alignment(aln, moltype=DNA, best_tree=False, params={}):
"""Returns a tree from Alignment object aln.
aln: an xxx.Alignment object, or data that can be used to build one.
moltype: cogent.core.moltype.MolType object
best_tree: best_tree suppport is currently not implemented
params: dict of parameters to pass in to the RAxML app controller.
The result will be an xxx.Alignment object, or None if tree fails.
"""
if best_tree:
raise NotImplementedError
if '-m' not in params:
if moltype == DNA or moltype == RNA:
#params["-m"] = 'GTRMIX'
# in version 7.2.3, GTRMIX is no longer supported but says GTRCAT
# behaves like GTRMIX (http://www.phylo.org/tools/raxmlhpc2.html)
params["-m"] = 'GTRGAMMA'
elif moltype == PROTEIN:
params["-m"] = 'PROTGAMMAmatrixName'
else:
raise ValueError, "Moltype must be either DNA, RNA, or PROTEIN"
if not hasattr(aln, 'toPhylip'):
aln = Alignment(aln)
seqs, align_map = aln.toPhylip()
# generate temp filename for output
params["-w"] = "/tmp/"
params["-n"] = get_tmp_filename().split("/")[-1]
params["-k"] = True
params["-p"] = randint(1,100000)
params["-x"] = randint(1,100000)
ih = '_input_as_multiline_string'
raxml_app = Raxml(params=params,
InputHandler=ih,
WorkingDir=None,
SuppressStderr=True,
SuppressStdout=True)
raxml_result = raxml_app(seqs)
tree = DndParser(raxml_result['Bootstrap'], constructor=PhyloNode)
for node in tree.tips():
node.Name = align_map[node.Name]
raxml_result.cleanUp()
return tree | [
"def",
"build_tree_from_alignment",
"(",
"aln",
",",
"moltype",
"=",
"DNA",
",",
"best_tree",
"=",
"False",
",",
"params",
"=",
"{",
"}",
")",
":",
"if",
"best_tree",
":",
"raise",
"NotImplementedError",
"if",
"'-m'",
"not",
"in",
"params",
":",
"if",
"m... | 30.803571 | 20.321429 |
def _attempt_set_timeout(self, timeout):
"""Sets a timeout on the inner watchman client's socket."""
try:
self.client.setTimeout(timeout)
except Exception:
self._logger.debug('failed to set post-startup watchman timeout to %s', self._timeout)
else:
self._logger.debug('set post-startup watchman timeout to %s', self._timeout) | [
"def",
"_attempt_set_timeout",
"(",
"self",
",",
"timeout",
")",
":",
"try",
":",
"self",
".",
"client",
".",
"setTimeout",
"(",
"timeout",
")",
"except",
"Exception",
":",
"self",
".",
"_logger",
".",
"debug",
"(",
"'failed to set post-startup watchman timeout ... | 44 | 22.375 |
def GetHist(tag_name, start_time, end_time, period=5, mode="raw",
desc_as_label=False, label=None, high_speed=False, utc=False):
"""
Retrieves data from eDNA history for a given tag.
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param period: specify the number of seconds for the pull interval
:param mode: "raw", "snap", "avg", "interp", "max", "min"
See eDNA documentation for more information.
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:param high_speed: if True, pull millisecond data
:param utc: if True, use the integer time format instead of DateTime
:return: a pandas DataFrame with timestamp, value, and status
"""
# Check if the point even exists
if not DoesIDExist(tag_name):
warnings.warn("WARNING- " + tag_name + " does not exist or " +
"connection was dropped. Try again if tag does exist.")
return pd.DataFrame()
# Define all required variables in the correct ctypes format
szPoint = c_char_p(tag_name.encode('utf-8'))
tStart = c_long(StringToUTCTime(start_time))
tEnd = c_long(StringToUTCTime(end_time))
tPeriod = c_long(period)
pulKey = c_ulong(0)
# Initialize the data pull using the specified pulKey, which is an
# identifier that tells eDNA which data pull is occurring
mode = mode.lower().strip()
if not high_speed:
if mode == "avg":
nRet = dna_dll.DnaGetHistAvgUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
if mode == "interp":
nRet = dna_dll.DnaGetHistInterpUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
if mode == "min":
nRet = dna_dll.DnaGetHistMinUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
if mode == "max":
nRet = dna_dll.DnaGetHistMaxUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
if mode == "snap":
nRet = dna_dll.DnaGetHistSnapUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
else:
nRet = dna_dll.DnaGetHistRawUTC(szPoint, tStart, tEnd, byref(pulKey))
time_, val, stat = _GetNextHistSmallUTC(pulKey, nRet)
else:
nStartMillis = c_ushort(0)
nEndMillis = c_ushort(0)
nRet = dna_dll.DnaGetHSHistRawUTC(szPoint, tStart, nStartMillis,
tEnd, nEndMillis, byref(pulKey))
time_, val, stat = _GetNextHSHistUTC(pulKey, nRet)
# The history request must be cancelled to free up network resources
dna_dll.DnaCancelHistRequest(pulKey)
# To construct the pandas DataFrame, the tag name will be used as the
# column name, and the index (which is in the strange eDNA format) must be
# converted to an actual DateTime
d = {tag_name + ' Status': stat, tag_name: val}
df = pd.DataFrame(data=d, index=time_)
if not utc:
if not high_speed:
df.index = pd.to_datetime(df.index, unit="s")
else:
df.index = pd.to_datetime(df.index, unit="ms")
if df.empty:
warnings.warn('WARNING- No data retrieved for ' + tag_name + '. ' +
'Check eDNA connection, ensure that the start time is ' +
'not later than the end time, verify that the ' +
'DateTime formatting matches eDNA requirements, and ' +
'check that data exists in the query time period.')
# Check if the user would rather use the description as the column name
if desc_as_label or label:
if label:
new_label = label
else:
new_label = _GetLabel(tag_name)
df.rename(inplace=True, columns={tag_name: new_label,
tag_name + " Status": new_label + " Status"})
return df | [
"def",
"GetHist",
"(",
"tag_name",
",",
"start_time",
",",
"end_time",
",",
"period",
"=",
"5",
",",
"mode",
"=",
"\"raw\"",
",",
"desc_as_label",
"=",
"False",
",",
"label",
"=",
"None",
",",
"high_speed",
"=",
"False",
",",
"utc",
"=",
"False",
")",
... | 47.285714 | 23.214286 |
def get_plaintext_to_sign(self):
"""
Get back the plaintext that will be signed.
It is derived from the serialized zone file strings,
but encoded as a single string (omitting the signature field,
if already given)
"""
as_strings = self.pack_subdomain()
if self.sig is not None:
# don't sign the signature
as_strings = as_strings[:-1]
return ",".join(as_strings) | [
"def",
"get_plaintext_to_sign",
"(",
"self",
")",
":",
"as_strings",
"=",
"self",
".",
"pack_subdomain",
"(",
")",
"if",
"self",
".",
"sig",
"is",
"not",
"None",
":",
"# don't sign the signature",
"as_strings",
"=",
"as_strings",
"[",
":",
"-",
"1",
"]",
"... | 34.307692 | 10.769231 |
def _parse_command_line():
"""Configure and parse our command line flags."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--portserver_static_pool',
type=str,
default='15000-24999',
help='Comma separated N-P Range(s) of ports to manage (inclusive).')
parser.add_argument(
'--portserver_unix_socket_address',
type=str,
default='@unittest-portserver',
help='Address of AF_UNIX socket on which to listen (first @ is a NUL).')
parser.add_argument('--verbose',
action='store_true',
default=False,
help='Enable verbose messages.')
parser.add_argument('--debug',
action='store_true',
default=False,
help='Enable full debug messages.')
return parser.parse_args(sys.argv[1:]) | [
"def",
"_parse_command_line",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"'--portserver_static_pool'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'15000-24999'",
",",
"help",
"=",
"'Comma ... | 40.590909 | 11.272727 |
async def send(self, request):
"""
Sends and receives the result for the given request.
"""
body = bytes(request)
msg_id = self._state._get_new_msg_id()
await self._connection.send(
struct.pack('<qqi', 0, msg_id, len(body)) + body
)
body = await self._connection.recv()
if len(body) < 8:
raise InvalidBufferError(body)
with BinaryReader(body) as reader:
auth_key_id = reader.read_long()
assert auth_key_id == 0, 'Bad auth_key_id'
msg_id = reader.read_long()
assert msg_id != 0, 'Bad msg_id'
# ^ We should make sure that the read ``msg_id`` is greater
# than our own ``msg_id``. However, under some circumstances
# (bad system clock/working behind proxies) this seems to not
# be the case, which would cause endless assertion errors.
length = reader.read_int()
assert length > 0, 'Bad length'
# We could read length bytes and use those in a new reader to read
# the next TLObject without including the padding, but since the
# reader isn't used for anything else after this, it's unnecessary.
return reader.tgread_object() | [
"async",
"def",
"send",
"(",
"self",
",",
"request",
")",
":",
"body",
"=",
"bytes",
"(",
"request",
")",
"msg_id",
"=",
"self",
".",
"_state",
".",
"_get_new_msg_id",
"(",
")",
"await",
"self",
".",
"_connection",
".",
"send",
"(",
"struct",
".",
"p... | 40.935484 | 17.903226 |
def fromMarkdown(md, *args, **kwargs):
"""
Creates abstraction using path to file
:param str path: path to markdown file
:return: TreeOfContents object
"""
return TOC.fromHTML(markdown(md, *args, **kwargs)) | [
"def",
"fromMarkdown",
"(",
"md",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"TOC",
".",
"fromHTML",
"(",
"markdown",
"(",
"md",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")"
] | 31 | 9.25 |
def dfa_to_json(dfa: dict, name: str, path: str = './'):
""" Exports a DFA to a JSON file.
If *path* do not exists, it will be created.
:param dict dfa: DFA to export;
:param str name: name of the output file;
:param str path: path where to save the JSON file (default:
working directory)
"""
out = {
'alphabet': list(dfa['alphabet']),
'states': list(dfa['states']),
'initial_state': dfa['initial_state'],
'accepting_states': list(dfa['accepting_states']),
'transitions': list()
}
for t in dfa['transitions']:
out['transitions'].append(
[t[0], t[1], dfa['transitions'][t]])
if not os.path.exists(path):
os.makedirs(path)
file = open(os.path.join(path, name + '.json'), 'w')
json.dump(out, file, sort_keys=True, indent=4)
file.close() | [
"def",
"dfa_to_json",
"(",
"dfa",
":",
"dict",
",",
"name",
":",
"str",
",",
"path",
":",
"str",
"=",
"'./'",
")",
":",
"out",
"=",
"{",
"'alphabet'",
":",
"list",
"(",
"dfa",
"[",
"'alphabet'",
"]",
")",
",",
"'states'",
":",
"list",
"(",
"dfa",... | 31.555556 | 15.407407 |
def _router_numbers(self):
"""A tuple of the numbers of all "routing" basins."""
return tuple(up for up in self._up2down.keys()
if up in self._up2down.values()) | [
"def",
"_router_numbers",
"(",
"self",
")",
":",
"return",
"tuple",
"(",
"up",
"for",
"up",
"in",
"self",
".",
"_up2down",
".",
"keys",
"(",
")",
"if",
"up",
"in",
"self",
".",
"_up2down",
".",
"values",
"(",
")",
")"
] | 48.5 | 10.25 |
def path(self, which=None):
"""Extend ``nailgun.entity_mixins.Entity.path``.
This method contains a workaround for `Bugzilla #1202917`_.
Most entities are uniquely identified by an ID. ``System`` is a bit
different: it has both an ID and a UUID, and the UUID is used to
uniquely identify a ``System``.
Return a path in the format ``katello/api/v2/systems/<uuid>`` if a UUID
is available and:
* ``which is None``, or
* ``which == 'this'``.
.. _Bugzilla #1202917:
https://bugzilla.redhat.com/show_bug.cgi?id=1202917
Finally, return a path in the form
``katello/api/v2/systems/<uuid>/subscriptions`` if ``'subscriptions'``
is passed in.
"""
if which == 'subscriptions':
return '{0}/{1}/{2}'.format(
super(System, self).path('base'),
self.uuid, # pylint:disable=no-member
which,
)
if hasattr(self, 'uuid') and (which is None or which == 'self'):
return '{0}/{1}'.format(
super(System, self).path('base'),
self.uuid # pylint:disable=no-member
)
return super(System, self).path(which) | [
"def",
"path",
"(",
"self",
",",
"which",
"=",
"None",
")",
":",
"if",
"which",
"==",
"'subscriptions'",
":",
"return",
"'{0}/{1}/{2}'",
".",
"format",
"(",
"super",
"(",
"System",
",",
"self",
")",
".",
"path",
"(",
"'base'",
")",
",",
"self",
".",
... | 35.114286 | 20.457143 |
def find_fixtures(fixtures_base_dir: str) -> Iterable[Tuple[str, str]]:
"""
Finds all of the (fixture_path, fixture_key) pairs for a given path under
the JSON test fixtures directory.
"""
all_fixture_paths = find_fixture_files(fixtures_base_dir)
for fixture_path in sorted(all_fixture_paths):
with open(fixture_path) as fixture_file:
fixtures = json.load(fixture_file)
for fixture_key in sorted(fixtures.keys()):
yield (fixture_path, fixture_key) | [
"def",
"find_fixtures",
"(",
"fixtures_base_dir",
":",
"str",
")",
"->",
"Iterable",
"[",
"Tuple",
"[",
"str",
",",
"str",
"]",
"]",
":",
"all_fixture_paths",
"=",
"find_fixture_files",
"(",
"fixtures_base_dir",
")",
"for",
"fixture_path",
"in",
"sorted",
"(",... | 38.461538 | 16.307692 |
def feature_selection(df, labels, n_features, method='chi2'):
"""
Reduces the number of features in the imput dataframe.
Ex: labels = gs.meta['biospecimen_sample__sample_type_id'].apply(int).apply(lambda x: 0 if x < 10 else 1)
chi2_fs(gs.data, labels, 50)
:param df: The input dataframe
:param labels: Labels for each row in the df. Type: Pandas.Series
:param no_features: The desired number of features
:param method: The feature selection method to be employed. It is set to 'chi2' by default
To select the features using mutual information, the method value should be set to 'mi'
To select the features using ANOVA, the method value should be set to 'ANOVA'
:return: Returns the dataframe with the selected features
"""
fs_obj = None
if method == 'chi2':
fs_obj = chi2
elif method == 'ANOVA':
fs_obj = f_classif
elif method == 'mi':
fs_obj = mutual_info_classif
else:
raise ValueError('The method is not recognized')
fs = SelectKBest(fs_obj, k=n_features)
fs.fit_transform(df, labels)
df_reduced = df.loc[:, fs.get_support()]
return df_reduced | [
"def",
"feature_selection",
"(",
"df",
",",
"labels",
",",
"n_features",
",",
"method",
"=",
"'chi2'",
")",
":",
"fs_obj",
"=",
"None",
"if",
"method",
"==",
"'chi2'",
":",
"fs_obj",
"=",
"chi2",
"elif",
"method",
"==",
"'ANOVA'",
":",
"fs_obj",
"=",
"... | 42.931034 | 21.551724 |
def _loadHandlers(self):
"""
creates a dictionary of named handler instances
:return: the dictionary
"""
return {handler.name: handler for handler in map(self.createHandler, self.config['handlers'])} | [
"def",
"_loadHandlers",
"(",
"self",
")",
":",
"return",
"{",
"handler",
".",
"name",
":",
"handler",
"for",
"handler",
"in",
"map",
"(",
"self",
".",
"createHandler",
",",
"self",
".",
"config",
"[",
"'handlers'",
"]",
")",
"}"
] | 39 | 17 |
def _get_file_version(self, infile):
"""Returns infile version string."""
# Determine file version
for line1 in infile:
if line1.strip() != "[Pyspread save file version]":
raise ValueError(_("File format unsupported."))
break
for line2 in infile:
return line2.strip() | [
"def",
"_get_file_version",
"(",
"self",
",",
"infile",
")",
":",
"# Determine file version",
"for",
"line1",
"in",
"infile",
":",
"if",
"line1",
".",
"strip",
"(",
")",
"!=",
"\"[Pyspread save file version]\"",
":",
"raise",
"ValueError",
"(",
"_",
"(",
"\"Fi... | 31.181818 | 17.545455 |
def _node_is_match(qualified_name, package_names, fqn):
"""Determine if a qualfied name matches an fqn, given the set of package
names in the graph.
:param List[str] qualified_name: The components of the selector or node
name, split on '.'.
:param Set[str] package_names: The set of pacakge names in the graph.
:param List[str] fqn: The node's fully qualified name in the graph.
"""
if len(qualified_name) == 1 and fqn[-1] == qualified_name[0]:
return True
if qualified_name[0] in package_names:
if is_selected_node(fqn, qualified_name):
return True
for package_name in package_names:
local_qualified_node_name = [package_name] + qualified_name
if is_selected_node(fqn, local_qualified_node_name):
return True
return False | [
"def",
"_node_is_match",
"(",
"qualified_name",
",",
"package_names",
",",
"fqn",
")",
":",
"if",
"len",
"(",
"qualified_name",
")",
"==",
"1",
"and",
"fqn",
"[",
"-",
"1",
"]",
"==",
"qualified_name",
"[",
"0",
"]",
":",
"return",
"True",
"if",
"quali... | 36.772727 | 21.272727 |
def get_job_statuses(github_token, api_url, build_id,
polling_interval, job_number):
"""Wait for all the travis jobs to complete.
Once the other jobs are complete, return a list of booleans,
indicating whether or not the job was successful. Ignore jobs
marked "allow_failure".
"""
auth = get_json('{api_url}/auth/github'.format(api_url=api_url),
data={'github_token': github_token})['access_token']
while True:
build = get_json('{api_url}/builds/{build_id}'.format(
api_url=api_url, build_id=build_id), auth=auth)
jobs = [job for job in build['jobs']
if job['number'] != job_number and
not job['allow_failure']] # Ignore allowed failures
if all(job['finished_at'] for job in jobs):
break # All the jobs have completed
elif any(job['state'] != 'passed'
for job in jobs if job['finished_at']):
break # Some required job that finished did not pass
print('Waiting for jobs to complete: {job_numbers}'.format(
job_numbers=[job['number'] for job in jobs
if not job['finished_at']]))
time.sleep(polling_interval)
return [job['state'] == 'passed' for job in jobs] | [
"def",
"get_job_statuses",
"(",
"github_token",
",",
"api_url",
",",
"build_id",
",",
"polling_interval",
",",
"job_number",
")",
":",
"auth",
"=",
"get_json",
"(",
"'{api_url}/auth/github'",
".",
"format",
"(",
"api_url",
"=",
"api_url",
")",
",",
"data",
"="... | 44.034483 | 18.827586 |
def is_all_field_none(self):
"""
:rtype: bool
"""
if self._notification_delivery_method is not None:
return False
if self._notification_target is not None:
return False
if self._category is not None:
return False
return True | [
"def",
"is_all_field_none",
"(",
"self",
")",
":",
"if",
"self",
".",
"_notification_delivery_method",
"is",
"not",
"None",
":",
"return",
"False",
"if",
"self",
".",
"_notification_target",
"is",
"not",
"None",
":",
"return",
"False",
"if",
"self",
".",
"_c... | 20.4 | 19.333333 |
def reset(self, **kwargs):
""" Reset the triplestore with all of the data
"""
self.drop_all(**kwargs)
file_locations = self.__file_locations__
self.__file_locations__ = []
self.load(file_locations, **kwargs) | [
"def",
"reset",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"drop_all",
"(",
"*",
"*",
"kwargs",
")",
"file_locations",
"=",
"self",
".",
"__file_locations__",
"self",
".",
"__file_locations__",
"=",
"[",
"]",
"self",
".",
"load",
"(",... | 35.571429 | 5.428571 |
def json_post(methodname, rtype, key):
"""decorator factory for json POST queries"""
return compose(
reusable,
map_return(registry(rtype), itemgetter(key)),
basic_interaction,
map_yield(partial(_json_as_post, methodname)),
oneyield,
) | [
"def",
"json_post",
"(",
"methodname",
",",
"rtype",
",",
"key",
")",
":",
"return",
"compose",
"(",
"reusable",
",",
"map_return",
"(",
"registry",
"(",
"rtype",
")",
",",
"itemgetter",
"(",
"key",
")",
")",
",",
"basic_interaction",
",",
"map_yield",
"... | 30.888889 | 16.111111 |
def _accept(self):
"""
Work loop runs forever (or until running is False)
:return:
"""
logger.warning("Reactor " + self._name + " is starting")
while self.running:
try:
self._completeTask()
except:
logger.exception("Unexpected exception during request processing")
logger.warning("Reactor " + self._name + " is terminating") | [
"def",
"_accept",
"(",
"self",
")",
":",
"logger",
".",
"warning",
"(",
"\"Reactor \"",
"+",
"self",
".",
"_name",
"+",
"\" is starting\"",
")",
"while",
"self",
".",
"running",
":",
"try",
":",
"self",
".",
"_completeTask",
"(",
")",
"except",
":",
"l... | 35.416667 | 18.25 |
def on_done(self):
"""
Reimplemented from :meth:`~AsyncViewBase.on_done`
"""
if self._d:
self._d.callback(self)
self._d = None | [
"def",
"on_done",
"(",
"self",
")",
":",
"if",
"self",
".",
"_d",
":",
"self",
".",
"_d",
".",
"callback",
"(",
"self",
")",
"self",
".",
"_d",
"=",
"None"
] | 25.142857 | 11.428571 |
def createSensorToClassifierLinks(network, sensorRegionName,
classifierRegionName):
"""Create required links from a sensor region to a classifier region."""
network.link(sensorRegionName, classifierRegionName, "UniformLink", "",
srcOutput="bucketIdxOut", destInput="bucketIdxIn")
network.link(sensorRegionName, classifierRegionName, "UniformLink", "",
srcOutput="actValueOut", destInput="actValueIn")
network.link(sensorRegionName, classifierRegionName, "UniformLink", "",
srcOutput="categoryOut", destInput="categoryIn") | [
"def",
"createSensorToClassifierLinks",
"(",
"network",
",",
"sensorRegionName",
",",
"classifierRegionName",
")",
":",
"network",
".",
"link",
"(",
"sensorRegionName",
",",
"classifierRegionName",
",",
"\"UniformLink\"",
",",
"\"\"",
",",
"srcOutput",
"=",
"\"bucketI... | 66.666667 | 22.888889 |
def height(self) -> int:
"""Determines how many entry rows are in the diagram."""
max_y = -1.0
for _, y in self.entries.keys():
max_y = max(max_y, y)
for h in self.horizontal_lines:
max_y = max(max_y, h.y)
for v in self.vertical_lines:
max_y = max(max_y, v.y1, v.y2)
return 1 + int(max_y) | [
"def",
"height",
"(",
"self",
")",
"->",
"int",
":",
"max_y",
"=",
"-",
"1.0",
"for",
"_",
",",
"y",
"in",
"self",
".",
"entries",
".",
"keys",
"(",
")",
":",
"max_y",
"=",
"max",
"(",
"max_y",
",",
"y",
")",
"for",
"h",
"in",
"self",
".",
... | 36.3 | 6.5 |
def append(self, state, symbol, action, destinationstate, production = None):
"""Appends a new rule"""
if action not in (None, "Accept", "Shift", "Reduce"):
raise TypeError
rule = {"action":action, "dest":destinationstate}
if action == "Reduce":
if rule is None:
raise TypeError("Expected production parameter")
rule["rule"] = production
while isinstance(symbol, TerminalSymbol) and isinstance(symbol.gd, Iterable) and len(symbol.gd) == 1 and isinstance(list(symbol.gd)[0], Grammar):
symbol = TerminalSymbol(list(symbol.gd)[0]) #Reduces symbol if its gd is a Sequence/Choice of 1 element
if not isinstance(symbol, Symbol):
raise TypeError("Expected symbol, got %s" % symbol)
self[state][symbol] = rule | [
"def",
"append",
"(",
"self",
",",
"state",
",",
"symbol",
",",
"action",
",",
"destinationstate",
",",
"production",
"=",
"None",
")",
":",
"if",
"action",
"not",
"in",
"(",
"None",
",",
"\"Accept\"",
",",
"\"Shift\"",
",",
"\"Reduce\"",
")",
":",
"ra... | 58.571429 | 25.428571 |
def validateOneNamespace(self, ctxt, elem, prefix, ns, value):
"""Try to validate a single namespace declaration for an
element basically it does the following checks as described
by the XML-1.0 recommendation: - [ VC: Attribute Value Type
] - [ VC: Fixed Attribute Default ] - [ VC: Entity Name ] -
[ VC: Name Token ] - [ VC: ID ] - [ VC: IDREF ] - [ VC:
Entity Name ] - [ VC: Notation Attributes ] The ID/IDREF
uniqueness and matching are done separately """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
if elem is None: elem__o = None
else: elem__o = elem._o
if ns is None: ns__o = None
else: ns__o = ns._o
ret = libxml2mod.xmlValidateOneNamespace(ctxt__o, self._o, elem__o, prefix, ns__o, value)
return ret | [
"def",
"validateOneNamespace",
"(",
"self",
",",
"ctxt",
",",
"elem",
",",
"prefix",
",",
"ns",
",",
"value",
")",
":",
"if",
"ctxt",
"is",
"None",
":",
"ctxt__o",
"=",
"None",
"else",
":",
"ctxt__o",
"=",
"ctxt",
".",
"_o",
"if",
"elem",
"is",
"No... | 52.5 | 17.375 |
def subject_key_identifier(self):
"""The :py:class:`~django_ca.extensions.SubjectKeyIdentifier` extension, or ``None`` if it doesn't
exist."""
try:
ext = self.x509.extensions.get_extension_for_oid(ExtensionOID.SUBJECT_KEY_IDENTIFIER)
except x509.ExtensionNotFound:
return None
return SubjectKeyIdentifier(ext) | [
"def",
"subject_key_identifier",
"(",
"self",
")",
":",
"try",
":",
"ext",
"=",
"self",
".",
"x509",
".",
"extensions",
".",
"get_extension_for_oid",
"(",
"ExtensionOID",
".",
"SUBJECT_KEY_IDENTIFIER",
")",
"except",
"x509",
".",
"ExtensionNotFound",
":",
"retur... | 45.75 | 13.875 |
def parse_xml(self, key_xml):
'''
Parse a VocabularyKey from an Xml as per Healthvault
schema.
:param key_xml: lxml.etree.Element representing a single VocabularyKey
'''
xmlutils = XmlUtils(key_xml)
self.name = xmlutils.get_string_by_xpath('name')
self.family = xmlutils.get_string_by_xpath('family')
self.version = xmlutils.get_string_by_xpath('version')
self.description = xmlutils.get_string_by_xpath('description')
self.language = xmlutils.get_lang() | [
"def",
"parse_xml",
"(",
"self",
",",
"key_xml",
")",
":",
"xmlutils",
"=",
"XmlUtils",
"(",
"key_xml",
")",
"self",
".",
"name",
"=",
"xmlutils",
".",
"get_string_by_xpath",
"(",
"'name'",
")",
"self",
".",
"family",
"=",
"xmlutils",
".",
"get_string_by_x... | 38.785714 | 23.642857 |
def delete(self, project_status, params={}, **options):
"""Deletes a specific, existing project status update.
Returns an empty data record.
Parameters
----------
project-status : {Id} The project status update to delete.
"""
path = "/project_statuses/%s" % (project_status)
return self.client.delete(path, params, **options) | [
"def",
"delete",
"(",
"self",
",",
"project_status",
",",
"params",
"=",
"{",
"}",
",",
"*",
"*",
"options",
")",
":",
"path",
"=",
"\"/project_statuses/%s\"",
"%",
"(",
"project_status",
")",
"return",
"self",
".",
"client",
".",
"delete",
"(",
"path",
... | 35.454545 | 17.727273 |
def call_functions_parallel(*worker_defs):
"""Call specified functions in parallel.
:param *worker_defs: Each positional argument can be either of
a function to be called or a tuple which consists of a function,
a list of positional arguments) and keyword arguments (optional).
If you need to pass arguments, you need to pass a tuple.
Example usages are like:
call_functions_parallel(func1, func2, func3)
call_functions_parallel(func1, (func2, [1, 2]))
call_functions_parallel((func1, [], {'a': 1}),
(func2, [], {'a': 2, 'b': 10}))
:returns: a tuple of values returned from individual functions.
None is returned if a corresponding function does not return.
It is better to return values other than None from individual
functions.
"""
# TODO(amotoki): Needs to figure out what max_workers can be specified.
# According to e0ne, the apache default configuration in devstack allows
# only 10 threads. What happens if max_worker=11 is specified?
max_workers = len(worker_defs)
# Prepare a list with enough length.
futures = [None] * len(worker_defs)
with futurist.ThreadPoolExecutor(max_workers=max_workers) as e:
for index, func_def in enumerate(worker_defs):
if callable(func_def):
func_def = [func_def]
args = func_def[1] if len(func_def) > 1 else []
kwargs = func_def[2] if len(func_def) > 2 else {}
func = functools.partial(func_def[0], *args, **kwargs)
futures[index] = e.submit(fn=func)
return tuple(f.result() for f in futures) | [
"def",
"call_functions_parallel",
"(",
"*",
"worker_defs",
")",
":",
"# TODO(amotoki): Needs to figure out what max_workers can be specified.",
"# According to e0ne, the apache default configuration in devstack allows",
"# only 10 threads. What happens if max_worker=11 is specified?",
"max_work... | 50.272727 | 18.787879 |
def bounds_at_zoom(self, zoom=None):
"""
Return process bounds for zoom level.
Parameters
----------
zoom : integer or list
Returns
-------
process bounds : tuple
left, bottom, right, top
"""
return () if self.area_at_zoom(zoom).is_empty else Bounds(
*self.area_at_zoom(zoom).bounds) | [
"def",
"bounds_at_zoom",
"(",
"self",
",",
"zoom",
"=",
"None",
")",
":",
"return",
"(",
")",
"if",
"self",
".",
"area_at_zoom",
"(",
"zoom",
")",
".",
"is_empty",
"else",
"Bounds",
"(",
"*",
"self",
".",
"area_at_zoom",
"(",
"zoom",
")",
".",
"bound... | 25 | 15.8 |
def external_incompatibilities(self): # type: () -> Generator[Incompatibility]
"""
Returns all external incompatibilities in this incompatibility's
derivation graph.
"""
if isinstance(self._cause, ConflictCause):
cause = self._cause # type: ConflictCause
for incompatibility in cause.conflict.external_incompatibilities:
yield incompatibility
for incompatibility in cause.other.external_incompatibilities:
yield incompatibility
else:
yield self | [
"def",
"external_incompatibilities",
"(",
"self",
")",
":",
"# type: () -> Generator[Incompatibility]",
"if",
"isinstance",
"(",
"self",
".",
"_cause",
",",
"ConflictCause",
")",
":",
"cause",
"=",
"self",
".",
"_cause",
"# type: ConflictCause",
"for",
"incompatibilit... | 40.142857 | 19.428571 |
def find_dependencies(self, dependent_rev, recurse=None):
"""Find all dependencies of the given revision, recursively traversing
the dependency tree if requested.
"""
if recurse is None:
recurse = self.options.recurse
try:
dependent = self.get_commit(dependent_rev)
except InvalidCommitish as e:
abort(e.message())
self.todo.append(dependent)
self.todo_d[dependent.hex] = True
first_time = True
while self.todo:
sha1s = [commit.hex[:8] for commit in self.todo]
if first_time:
self.logger.info("Initial TODO list: %s" % " ".join(sha1s))
first_time = False
else:
self.logger.info(" TODO list now: %s" % " ".join(sha1s))
dependent = self.todo.pop(0)
dependent_sha1 = dependent.hex
del self.todo_d[dependent_sha1]
self.logger.info(" Processing %s from TODO list" %
dependent_sha1[:8])
if dependent_sha1 in self.done_d:
self.logger.info(" %s already done previously" %
dependent_sha1)
continue
self.notify_listeners('new_commit', dependent)
parent = dependent.parents[0]
self.find_dependencies_with_parent(dependent, parent)
self.done.append(dependent_sha1)
self.done_d[dependent_sha1] = True
self.logger.info(" Found all dependencies for %s" %
dependent_sha1[:8])
# A commit won't have any dependencies if it only added new files
dependencies = self.dependencies.get(dependent_sha1, {})
self.notify_listeners('dependent_done', dependent, dependencies)
self.logger.info("Finished processing TODO list")
self.notify_listeners('all_done') | [
"def",
"find_dependencies",
"(",
"self",
",",
"dependent_rev",
",",
"recurse",
"=",
"None",
")",
":",
"if",
"recurse",
"is",
"None",
":",
"recurse",
"=",
"self",
".",
"options",
".",
"recurse",
"try",
":",
"dependent",
"=",
"self",
".",
"get_commit",
"("... | 38.877551 | 17.836735 |
def checkSystemVersion(s, versions=None):
"""
Check if the current version is different from the previously recorded
version. If it is, or if there is no previously recorded version,
create a version matching the current config.
"""
if versions is None:
versions = getSystemVersions()
currentVersionMap = dict([(v.package, v) for v in versions])
mostRecentSystemVersion = s.findFirst(SystemVersion,
sort=SystemVersion.creation.descending)
mostRecentVersionMap = dict([(v.package, v.asVersion()) for v in
s.query(SoftwareVersion,
(SoftwareVersion.systemVersion ==
mostRecentSystemVersion))])
if mostRecentVersionMap != currentVersionMap:
currentSystemVersion = SystemVersion(store=s, creation=Time())
for v in currentVersionMap.itervalues():
makeSoftwareVersion(s, v, currentSystemVersion) | [
"def",
"checkSystemVersion",
"(",
"s",
",",
"versions",
"=",
"None",
")",
":",
"if",
"versions",
"is",
"None",
":",
"versions",
"=",
"getSystemVersions",
"(",
")",
"currentVersionMap",
"=",
"dict",
"(",
"[",
"(",
"v",
".",
"package",
",",
"v",
")",
"fo... | 45.681818 | 21.227273 |
def set_xlabel(self, s, delay_draw=False):
"set plot xlabel"
self.conf.relabel(xlabel=s, delay_draw=delay_draw) | [
"def",
"set_xlabel",
"(",
"self",
",",
"s",
",",
"delay_draw",
"=",
"False",
")",
":",
"self",
".",
"conf",
".",
"relabel",
"(",
"xlabel",
"=",
"s",
",",
"delay_draw",
"=",
"delay_draw",
")"
] | 41.666667 | 11.666667 |
def response(self, status, content_type, content, headers=None):
"""
Send an HTTP response
"""
assert not isinstance(content, (str, bytes)), 'response content cannot be of type str or bytes'
response_headers = [('Content-Type', content_type)]
if headers:
response_headers.extend(headers)
self.start_response(status, response_headers)
return content | [
"def",
"response",
"(",
"self",
",",
"status",
",",
"content_type",
",",
"content",
",",
"headers",
"=",
"None",
")",
":",
"assert",
"not",
"isinstance",
"(",
"content",
",",
"(",
"str",
",",
"bytes",
")",
")",
",",
"'response content cannot be of type str o... | 37.727273 | 19.363636 |
def render_template_string(source, **context):
"""Renders a template from the given template source string
with the given context.
:param source: the sourcecode of the template to be
rendered
:param context: the variables that should be available in the
context of the template.
"""
ctx = _app_ctx_stack.top
ctx.app.update_template_context(context)
return _render(ctx.app.jinja_env.from_string(source),
context, ctx.app) | [
"def",
"render_template_string",
"(",
"source",
",",
"*",
"*",
"context",
")",
":",
"ctx",
"=",
"_app_ctx_stack",
".",
"top",
"ctx",
".",
"app",
".",
"update_template_context",
"(",
"context",
")",
"return",
"_render",
"(",
"ctx",
".",
"app",
".",
"jinja_e... | 38.384615 | 11.769231 |
def inject_request_ids_into_environment(func):
"""Decorator for the Lambda handler to inject request IDs for logging."""
@wraps(func)
def wrapper(event, context):
# This might not always be an API Gateway event, so only log the
# request ID, if it looks like to be coming from there.
if 'requestContext' in event:
os.environ[ENV_APIG_REQUEST_ID] = event['requestContext'].get(
'requestId', 'N/A')
os.environ[ENV_LAMBDA_REQUEST_ID] = context.aws_request_id
return func(event, context)
return wrapper | [
"def",
"inject_request_ids_into_environment",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"event",
",",
"context",
")",
":",
"# This might not always be an API Gateway event, so only log the",
"# request ID, if it looks like to be coming from... | 40.785714 | 19.142857 |
def modify_meta(uid, data_dic, extinfo=None):
'''
update meta of the rec.
'''
if extinfo is None:
extinfo = {}
title = data_dic['title'].strip()
if len(title) < 2:
return False
cur_info = MPost.get_by_uid(uid)
if cur_info:
# ToDo: should not do this. Not for 's'
if DB_CFG['kind'] == 's':
entry = TabPost.update(
title=title,
user_name=data_dic['user_name'],
keywords='',
time_update=tools.timestamp(),
date=datetime.now(),
cnt_md=data_dic['cnt_md'],
memo=data_dic['memo'] if 'memo' in data_dic else '',
logo=data_dic['logo'],
order=data_dic['order'],
cnt_html=tools.markdown2html(data_dic['cnt_md']),
valid=data_dic['valid']
).where(TabPost.uid == uid)
entry.execute()
else:
cur_extinfo = cur_info.extinfo
# Update the extinfo, Not replace
for key in extinfo:
cur_extinfo[key] = extinfo[key]
entry = TabPost.update(
title=title,
user_name=data_dic['user_name'],
keywords='',
time_update=tools.timestamp(),
date=datetime.now(),
cnt_md=data_dic['cnt_md'],
memo=data_dic['memo'] if 'memo' in data_dic else '',
logo=data_dic['logo'],
order=data_dic['order'] if 'order' in data_dic else '',
cnt_html=tools.markdown2html(data_dic['cnt_md']),
extinfo=cur_extinfo,
valid=data_dic['valid']
).where(TabPost.uid == uid)
entry.execute()
else:
return MPost.add_meta(uid, data_dic, extinfo)
return uid | [
"def",
"modify_meta",
"(",
"uid",
",",
"data_dic",
",",
"extinfo",
"=",
"None",
")",
":",
"if",
"extinfo",
"is",
"None",
":",
"extinfo",
"=",
"{",
"}",
"title",
"=",
"data_dic",
"[",
"'title'",
"]",
".",
"strip",
"(",
")",
"if",
"len",
"(",
"title"... | 38.75 | 12.519231 |
def email(self, comment, content_object, request):
"""
Overwritten for a better email notification.
"""
if not self.email_notification:
return
send_comment_posted(comment, request) | [
"def",
"email",
"(",
"self",
",",
"comment",
",",
"content_object",
",",
"request",
")",
":",
"if",
"not",
"self",
".",
"email_notification",
":",
"return",
"send_comment_posted",
"(",
"comment",
",",
"request",
")"
] | 28.25 | 11.25 |
def get_objective_query_session(self, proxy):
"""Gets the ``OsidSession`` associated with the objective query service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveQuerySession``
:rtype: ``osid.learning.ObjectiveQuerySession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_query()`` is ``false``
*compliance: optional -- This method must be implemented if ``supports_objective_query()`` is ``true``.*
"""
if not self.supports_objective_query():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise OperationFailed()
proxy = self._convert_proxy(proxy)
try:
session = sessions.ObjectiveQuerySession(proxy=proxy, runtime=self._runtime)
except AttributeError:
raise OperationFailed()
return session | [
"def",
"get_objective_query_session",
"(",
"self",
",",
"proxy",
")",
":",
"if",
"not",
"self",
".",
"supports_objective_query",
"(",
")",
":",
"raise",
"Unimplemented",
"(",
")",
"try",
":",
"from",
".",
"import",
"sessions",
"except",
"ImportError",
":",
"... | 39.884615 | 18.769231 |
def diff(new_dict, old_dict, iter_prefix='__iter__',
np_allclose=False, **kwargs):
""" return the difference between two dict_like objects
Parameters
----------
new_dict: dict
old_dict: dict
iter_prefix: str
prefix to use for list and tuple indexes
np_allclose: bool
if True, try using numpy.allclose to assess differences
**kwargs:
keyword arguments to parse to numpy.allclose
Returns
-------
outcome: dict
Containing none or more of:
- "insertions" : list of (path, val)
- "deletions" : list of (path, val)
- "changes" : list of (path, (val1, val2))
- "uncomparable" : list of (path, (val1, val2))
Examples
--------
>>> from pprint import pprint
>>> diff({'a':1},{'a':1})
{}
>>> pprint(diff({'a': 1, 'b': 2, 'c': 5},{'b': 3, 'c': 4, 'd': 6}))
{'changes': [(('b',), (2, 3)), (('c',), (5, 4))],
'deletions': [(('d',), 6)],
'insertions': [(('a',), 1)]}
>>> pprint(diff({'a': [{"b":1}, {"c":2}, 1]},{'a': [{"b":1}, {"d":2}, 2]}))
{'changes': [(('a', '__iter__2'), (1, 2))],
'deletions': [(('a', '__iter__1', 'd'), 2)],
'insertions': [(('a', '__iter__1', 'c'), 2)]}
>>> diff({'a':1}, {'a':1+1e-10})
{'changes': [(('a',), (1, 1.0000000001))]}
>>> diff({'a':1}, {'a':1+1e-10}, np_allclose=True)
{}
"""
if np_allclose:
try:
import numpy
except ImportError:
raise ValueError("to use np_allclose, numpy must be installed")
dct1_flat = flatten(new_dict, all_iters=iter_prefix)
dct2_flat = flatten(old_dict, all_iters=iter_prefix)
outcome = {'insertions': [], 'deletions': [],
'changes': [], 'uncomparable': []}
for path, val in dct1_flat.items():
if path not in dct2_flat:
outcome['insertions'].append((path, val))
continue
other_val = dct2_flat.pop(path)
if np_allclose:
try:
if numpy.allclose(val, other_val, **kwargs):
continue
except Exception:
pass
try:
if val != other_val:
outcome['changes'].append((path, (val, other_val)))
except Exception:
outcome['uncomparable'].append((path, (val, other_val)))
for path2, val2 in dct2_flat.items():
outcome['deletions'].append((path2, val2))
# remove any empty lists and sort
for key in list(outcome.keys()):
if not outcome[key]:
outcome.pop(key)
try:
outcome[key] = sorted(outcome[key])
except Exception:
pass
return outcome | [
"def",
"diff",
"(",
"new_dict",
",",
"old_dict",
",",
"iter_prefix",
"=",
"'__iter__'",
",",
"np_allclose",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"np_allclose",
":",
"try",
":",
"import",
"numpy",
"except",
"ImportError",
":",
"raise",
"... | 28.791209 | 20 |
def declare_base(erroName=True):
"""Create a Exception with default message.
:param errorName: boolean, True if you want the Exception name in the
error message body.
"""
if erroName:
class Base(Exception):
def __str__(self):
if len(self.args):
return "%s: %s" % (self.__class__.__name__, self.args[0])
else:
return "%s: %s" % (self.__class__.__name__, self.default)
else:
class Base(Exception):
def __str__(self):
if len(self.args):
return "%s" % self.args[0]
else:
return "%s" % self.default
return Base | [
"def",
"declare_base",
"(",
"erroName",
"=",
"True",
")",
":",
"if",
"erroName",
":",
"class",
"Base",
"(",
"Exception",
")",
":",
"def",
"__str__",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"args",
")",
":",
"return",
"\"%s: %s\"",
"%",
... | 33.571429 | 16.619048 |
def min_depth_img(self, num_img=1):
"""Collect a series of depth images and return the min of the set.
Parameters
----------
num_img : int
The number of consecutive frames to process.
Returns
-------
:obj:`DepthImage`
The min DepthImage collected from the frames.
"""
depths = self._read_depth_images(num_img)
return Image.min_images(depths) | [
"def",
"min_depth_img",
"(",
"self",
",",
"num_img",
"=",
"1",
")",
":",
"depths",
"=",
"self",
".",
"_read_depth_images",
"(",
"num_img",
")",
"return",
"Image",
".",
"min_images",
"(",
"depths",
")"
] | 27.0625 | 18.5 |
def land_surface_temp(self):
"""
Mean values from Allen (2007)
:return:
"""
rp = 0.91
tau = 0.866
rsky = 1.32
epsilon = self.emissivity(approach='tasumi')
radiance = self.radiance(6)
rc = ((radiance - rp) / tau) - ((1 - epsilon) * rsky)
lst = self.k2 / (log((epsilon * self.k1 / rc) + 1))
return lst | [
"def",
"land_surface_temp",
"(",
"self",
")",
":",
"rp",
"=",
"0.91",
"tau",
"=",
"0.866",
"rsky",
"=",
"1.32",
"epsilon",
"=",
"self",
".",
"emissivity",
"(",
"approach",
"=",
"'tasumi'",
")",
"radiance",
"=",
"self",
".",
"radiance",
"(",
"6",
")",
... | 29.538462 | 14 |
def TransferDemo(handler, t):
"""
Demonstration of transfering to another number
"""
t.say ("One moment please.")
t.transfer(MY_PHONE)
t.say("Hi. I am a robot")
json = t.RenderJson()
logging.info ("TransferDemo json: %s" % json)
handler.response.out.write(json) | [
"def",
"TransferDemo",
"(",
"handler",
",",
"t",
")",
":",
"t",
".",
"say",
"(",
"\"One moment please.\"",
")",
"t",
".",
"transfer",
"(",
"MY_PHONE",
")",
"t",
".",
"say",
"(",
"\"Hi. I am a robot\"",
")",
"json",
"=",
"t",
".",
"RenderJson",
"(",
")"... | 28.8 | 8.4 |
def _strip_metachars(val):
"""
When a filter uses a / or - in the search, only the elements
name and comment field is searched. This can cause issues if
searching a network element, i.e. 1.1.1.0/24 where the /24 portion
is not present in the name and only the elements ipv4_network
attribute. If exact_match is not specified, strip off the /24
portion. Queries of this nature should instead use a kw filter
of: ipv4_network='1.1.1.0/24'.
"""
ignore_metachar = r'(.+)([/-].+)'
match = re.search(ignore_metachar, str(val))
if match:
left_half = match.group(1)
return left_half
return val | [
"def",
"_strip_metachars",
"(",
"val",
")",
":",
"ignore_metachar",
"=",
"r'(.+)([/-].+)'",
"match",
"=",
"re",
".",
"search",
"(",
"ignore_metachar",
",",
"str",
"(",
"val",
")",
")",
"if",
"match",
":",
"left_half",
"=",
"match",
".",
"group",
"(",
"1"... | 39.875 | 16.25 |
def bank_identifier(self):
"""Return the IBAN's Bank Identifier."""
end = get_iban_spec(self.country_code).bban_split_pos + 4
return self._id[4:end] | [
"def",
"bank_identifier",
"(",
"self",
")",
":",
"end",
"=",
"get_iban_spec",
"(",
"self",
".",
"country_code",
")",
".",
"bban_split_pos",
"+",
"4",
"return",
"self",
".",
"_id",
"[",
"4",
":",
"end",
"]"
] | 42.25 | 12.25 |
def passphrase_file(passphrase=None):
"""Read passphrase from a file. This should only ever be
used by our built in integration tests. At this time,
during normal operation, only pinentry is supported for
entry of passwords."""
cmd = []
pass_file = None
if not passphrase and 'CRYPTORITO_PASSPHRASE_FILE' in os.environ:
pass_file = os.environ['CRYPTORITO_PASSPHRASE_FILE']
if not os.path.isfile(pass_file):
raise CryptoritoError('CRYPTORITO_PASSPHRASE_FILE is invalid')
elif passphrase:
tmpdir = ensure_tmpdir()
pass_file = "%s/p_pass" % tmpdir
p_handle = open(pass_file, 'w')
p_handle.write(passphrase)
p_handle.close()
if pass_file:
cmd = cmd + ["--batch", "--passphrase-file", pass_file]
vsn = gpg_version()
if vsn[0] >= 2 and vsn[1] >= 1:
cmd = cmd + ["--pinentry-mode", "loopback"]
return cmd | [
"def",
"passphrase_file",
"(",
"passphrase",
"=",
"None",
")",
":",
"cmd",
"=",
"[",
"]",
"pass_file",
"=",
"None",
"if",
"not",
"passphrase",
"and",
"'CRYPTORITO_PASSPHRASE_FILE'",
"in",
"os",
".",
"environ",
":",
"pass_file",
"=",
"os",
".",
"environ",
"... | 35.346154 | 17.038462 |
def _last_in_direction(starting_pos, direction):
"""
move in the tree in given direction and return the last position.
:param starting_pos: position to start at
:param direction: callable that transforms a position into a position.
"""
cur_pos = None
next_pos = starting_pos
while next_pos is not None:
cur_pos = next_pos
next_pos = direction(cur_pos)
return cur_pos | [
"def",
"_last_in_direction",
"(",
"starting_pos",
",",
"direction",
")",
":",
"cur_pos",
"=",
"None",
"next_pos",
"=",
"starting_pos",
"while",
"next_pos",
"is",
"not",
"None",
":",
"cur_pos",
"=",
"next_pos",
"next_pos",
"=",
"direction",
"(",
"cur_pos",
")",... | 34.692308 | 14.538462 |
def parse(self, argument):
"""Determine validity of argument and return the correct element of enum.
If self.enum_values is empty, then all arguments are valid and argument
will be returned.
Otherwise, if argument matches an element in enum, then the first
matching element will be returned.
Args:
argument: The supplied flag value.
Returns:
The matching element from enum_values, or argument if enum_values is
empty.
Raises:
ValueError: enum_values was non-empty, but argument didn't match
anything in enum.
"""
if not self.enum_values:
return argument
elif self.case_sensitive:
if argument not in self.enum_values:
raise ValueError('value should be one of <%s>' %
'|'.join(self.enum_values))
else:
return argument
else:
if argument.upper() not in [value.upper() for value in self.enum_values]:
raise ValueError('value should be one of <%s>' %
'|'.join(self.enum_values))
else:
return [value for value in self.enum_values
if value.upper() == argument.upper()][0] | [
"def",
"parse",
"(",
"self",
",",
"argument",
")",
":",
"if",
"not",
"self",
".",
"enum_values",
":",
"return",
"argument",
"elif",
"self",
".",
"case_sensitive",
":",
"if",
"argument",
"not",
"in",
"self",
".",
"enum_values",
":",
"raise",
"ValueError",
... | 32.628571 | 21.885714 |
def current_state(self, *,
chat: typing.Union[str, int, None] = None,
user: typing.Union[str, int, None] = None) -> FSMContext:
"""
Get current state for user in chat as context
.. code-block:: python3
with dp.current_state(chat=message.chat.id, user=message.user.id) as state:
pass
state = dp.current_state()
state.set_state('my_state')
:param chat:
:param user:
:return:
"""
if chat is None:
chat_obj = types.Chat.get_current()
chat = chat_obj.id if chat_obj else None
if user is None:
user_obj = types.User.get_current()
user = user_obj.id if user_obj else None
return FSMContext(storage=self.storage, chat=chat, user=user) | [
"def",
"current_state",
"(",
"self",
",",
"*",
",",
"chat",
":",
"typing",
".",
"Union",
"[",
"str",
",",
"int",
",",
"None",
"]",
"=",
"None",
",",
"user",
":",
"typing",
".",
"Union",
"[",
"str",
",",
"int",
",",
"None",
"]",
"=",
"None",
")"... | 31.923077 | 20.461538 |
def set_stepdown_window(self, start, end, enabled=True, scheduled=True, weekly=True):
"""Set the stepdown window for this instance.
Date times are assumed to be UTC, so use UTC date times.
:param datetime.datetime start: The datetime which the stepdown window is to open.
:param datetime.datetime end: The datetime which the stepdown window is to close.
:param bool enabled: A boolean indicating whether or not stepdown is to be enabled.
:param bool scheduled: A boolean indicating whether or not to schedule stepdown.
:param bool weekly: A boolean indicating whether or not to schedule compaction weekly.
"""
# Ensure a logical start and endtime is requested.
if not start < end:
raise TypeError('Parameter "start" must occur earlier in time than "end".')
# Ensure specified window is less than a week in length.
week_delta = datetime.timedelta(days=7)
if not ((end - start) <= week_delta):
raise TypeError('Stepdown windows can not be longer than 1 week in length.')
url = self._service_url + 'stepdown/'
data = {
'start': int(start.strftime('%s')),
'end': int(end.strftime('%s')),
'enabled': enabled,
'scheduled': scheduled,
'weekly': weekly,
}
response = requests.post(
url,
data=json.dumps(data),
**self._instances._default_request_kwargs
)
return response.json() | [
"def",
"set_stepdown_window",
"(",
"self",
",",
"start",
",",
"end",
",",
"enabled",
"=",
"True",
",",
"scheduled",
"=",
"True",
",",
"weekly",
"=",
"True",
")",
":",
"# Ensure a logical start and endtime is requested.",
"if",
"not",
"start",
"<",
"end",
":",
... | 43.171429 | 24.828571 |
def generate_key(self, email):
"""
Generate a new email confirmation key and return it.
"""
salt = sha1(str(random())).hexdigest()[:5]
return sha1(salt + email).hexdigest() | [
"def",
"generate_key",
"(",
"self",
",",
"email",
")",
":",
"salt",
"=",
"sha1",
"(",
"str",
"(",
"random",
"(",
")",
")",
")",
".",
"hexdigest",
"(",
")",
"[",
":",
"5",
"]",
"return",
"sha1",
"(",
"salt",
"+",
"email",
")",
".",
"hexdigest",
... | 34.5 | 7.5 |
def unique(series: pd.Series) -> pd.Series:
"""Test that the data items do not repeat."""
return ~series.duplicated(keep=False) | [
"def",
"unique",
"(",
"series",
":",
"pd",
".",
"Series",
")",
"->",
"pd",
".",
"Series",
":",
"return",
"~",
"series",
".",
"duplicated",
"(",
"keep",
"=",
"False",
")"
] | 44.333333 | 1.333333 |
def _ParseContainerConfigJSON(self, parser_mediator, file_object):
"""Extracts events from a Docker container configuration file.
The path of each container config file is:
DOCKER_DIR/containers/<container_id>/config.json
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file is not a valid container config file.
"""
file_content = file_object.read()
file_content = codecs.decode(file_content, self._ENCODING)
json_dict = json.loads(file_content)
if 'Driver' not in json_dict:
raise errors.UnableToParseFile(
'not a valid Docker container configuration file, ' 'missing '
'\'Driver\' key.')
container_id_from_path = self._GetIdentifierFromPath(parser_mediator)
container_id_from_json = json_dict.get('ID', None)
if not container_id_from_json:
raise errors.UnableToParseFile(
'not a valid Docker layer configuration file, the \'ID\' key is '
'missing from the JSON dict (should be {0:s})'.format(
container_id_from_path))
if container_id_from_json != container_id_from_path:
raise errors.UnableToParseFile(
'not a valid Docker container configuration file. The \'ID\' key of '
'the JSON dict ({0:s}) is different from the layer ID taken from the'
' path to the file ({1:s}) JSON file.)'.format(
container_id_from_json, container_id_from_path))
if 'Config' in json_dict and 'Hostname' in json_dict['Config']:
container_name = json_dict['Config']['Hostname']
else:
container_name = 'Unknown container name'
event_data = DockerJSONContainerEventData()
event_data.container_id = container_id_from_path
event_data.container_name = container_name
if 'State' in json_dict:
if 'StartedAt' in json_dict['State']:
event_data.action = 'Container Started'
timestamp = timelib.Timestamp.FromTimeString(
json_dict['State']['StartedAt'])
event = time_events.TimestampEvent(
timestamp, definitions.TIME_DESCRIPTION_START)
parser_mediator.ProduceEventWithEventData(event, event_data)
if 'FinishedAt' in json_dict['State']:
if json_dict['State']['FinishedAt'] != '0001-01-01T00:00:00Z':
event_data.action = 'Container Finished'
# If the timestamp is 0001-01-01T00:00:00Z, the container
# is still running, so we don't generate a Finished event
timestamp = timelib.Timestamp.FromTimeString(
json_dict['State']['FinishedAt'])
event = time_events.TimestampEvent(
timestamp, definitions.TIME_DESCRIPTION_END)
parser_mediator.ProduceEventWithEventData(event, event_data)
created_time = json_dict.get('Created', None)
if created_time:
event_data.action = 'Container Created'
timestamp = timelib.Timestamp.FromTimeString(created_time)
event = time_events.TimestampEvent(
timestamp, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data) | [
"def",
"_ParseContainerConfigJSON",
"(",
"self",
",",
"parser_mediator",
",",
"file_object",
")",
":",
"file_content",
"=",
"file_object",
".",
"read",
"(",
")",
"file_content",
"=",
"codecs",
".",
"decode",
"(",
"file_content",
",",
"self",
".",
"_ENCODING",
... | 40.961538 | 20.589744 |
def add_tgs(self, tgs_rep, enc_tgs_rep_part, override_pp = False): #from AS_REP
"""
Creates credential object from the TGS and adds to the ccache file
The TGS is the native representation of the asn1 encoded TGS_REP data when the user requests a tgs to a specific service principal with a valid TGT
This function doesn't do decryption of the encrypted part of the tgs_rep object, it is expected that the decrypted XXX is supplied in enc_as_rep_part
override_pp: bool to determine if client principal should be used as the primary principal for the ccache file
"""
c = Credential()
c.client = CCACHEPrincipal.from_asn1(tgs_rep['cname'], tgs_rep['crealm'])
if override_pp == True:
self.primary_principal = c.client
c.server = CCACHEPrincipal.from_asn1(enc_tgs_rep_part['sname'], enc_tgs_rep_part['srealm'])
c.time = Times.from_asn1(enc_tgs_rep_part)
c.key = Keyblock.from_asn1(enc_tgs_rep_part['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(enc_tgs_rep_part['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(tgs_rep['ticket']).dump())
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c) | [
"def",
"add_tgs",
"(",
"self",
",",
"tgs_rep",
",",
"enc_tgs_rep_part",
",",
"override_pp",
"=",
"False",
")",
":",
"#from AS_REP",
"c",
"=",
"Credential",
"(",
")",
"c",
".",
"client",
"=",
"CCACHEPrincipal",
".",
"from_asn1",
"(",
"tgs_rep",
"[",
"'cname... | 48.84 | 32.44 |
def preprocess_images(raw_color_im,
raw_depth_im,
camera_intr,
T_camera_world,
workspace_box,
workspace_im,
image_proc_config):
""" Preprocess a set of color and depth images. """
# read params
inpaint_rescale_factor = image_proc_config['inpaint_rescale_factor']
cluster = image_proc_config['cluster']
cluster_tolerance = image_proc_config['cluster_tolerance']
min_cluster_size = image_proc_config['min_cluster_size']
max_cluster_size = image_proc_config['max_cluster_size']
# deproject into 3D world coordinates
point_cloud_cam = camera_intr.deproject(raw_depth_im)
point_cloud_cam.remove_zero_points()
point_cloud_world = T_camera_world * point_cloud_cam
# compute the segmask for points above the box
seg_point_cloud_world, _ = point_cloud_world.box_mask(workspace_box)
seg_point_cloud_cam = T_camera_world.inverse() * seg_point_cloud_world
depth_im_seg = camera_intr.project_to_image(seg_point_cloud_cam)
# mask out objects in the known workspace
env_pixels = depth_im_seg.pixels_farther_than(workspace_im)
depth_im_seg._data[env_pixels[:,0], env_pixels[:,1]] = 0
# REMOVE NOISE
# clip low points
low_indices = np.where(point_cloud_world.data[2,:] < workspace_box.min_pt[2])[0]
point_cloud_world.data[2,low_indices] = workspace_box.min_pt[2]
# clip high points
high_indices = np.where(point_cloud_world.data[2,:] > workspace_box.max_pt[2])[0]
point_cloud_world.data[2,high_indices] = workspace_box.max_pt[2]
# segment out the region in the workspace (including the table)
workspace_point_cloud_world, valid_indices = point_cloud_world.box_mask(workspace_box)
invalid_indices = np.setdiff1d(np.arange(point_cloud_world.num_points),
valid_indices)
if cluster:
# create new cloud
pcl_cloud = pcl.PointCloud(workspace_point_cloud_world.data.T.astype(np.float32))
tree = pcl_cloud.make_kdtree()
# find large clusters (likely to be real objects instead of noise)
ec = pcl_cloud.make_EuclideanClusterExtraction()
ec.set_ClusterTolerance(cluster_tolerance)
ec.set_MinClusterSize(min_cluster_size)
ec.set_MaxClusterSize(max_cluster_size)
ec.set_SearchMethod(tree)
cluster_indices = ec.Extract()
num_clusters = len(cluster_indices)
# read out all points in large clusters
filtered_points = np.zeros([3,workspace_point_cloud_world.num_points])
cur_i = 0
for j, indices in enumerate(cluster_indices):
num_points = len(indices)
points = np.zeros([3,num_points])
for i, index in enumerate(indices):
points[0,i] = pcl_cloud[index][0]
points[1,i] = pcl_cloud[index][1]
points[2,i] = pcl_cloud[index][2]
filtered_points[:,cur_i:cur_i+num_points] = points.copy()
cur_i = cur_i + num_points
# reconstruct the point cloud
all_points = np.c_[filtered_points[:,:cur_i], point_cloud_world.data[:,invalid_indices]]
else:
all_points = point_cloud_world.data
filtered_point_cloud_world = PointCloud(all_points,
frame='world')
# compute the filtered depth image
filtered_point_cloud_cam = T_camera_world.inverse() * filtered_point_cloud_world
depth_im = camera_intr.project_to_image(filtered_point_cloud_cam)
# form segmask
segmask = depth_im_seg.to_binary()
valid_px_segmask = depth_im.invalid_pixel_mask().inverse()
segmask = segmask.mask_binary(valid_px_segmask)
# inpaint
color_im = raw_color_im.inpaint(rescale_factor=inpaint_rescale_factor)
depth_im = depth_im.inpaint(rescale_factor=inpaint_rescale_factor)
return color_im, depth_im, segmask | [
"def",
"preprocess_images",
"(",
"raw_color_im",
",",
"raw_depth_im",
",",
"camera_intr",
",",
"T_camera_world",
",",
"workspace_box",
",",
"workspace_im",
",",
"image_proc_config",
")",
":",
"# read params",
"inpaint_rescale_factor",
"=",
"image_proc_config",
"[",
"'in... | 42.565217 | 20.945652 |
def read_inquiry_mode(sock):
"""returns the current mode, or -1 on failure"""
# save current filter
old_filter = sock.getsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, 14)
# Setup socket filter to receive only events related to the
# read_inquiry_mode command
flt = bluez.hci_filter_new()
opcode = bluez.cmd_opcode_pack(bluez.OGF_HOST_CTL,
bluez.OCF_READ_INQUIRY_MODE)
bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT)
bluez.hci_filter_set_event(flt, bluez.EVT_CMD_COMPLETE);
bluez.hci_filter_set_opcode(flt, opcode)
sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, flt )
# first read the current inquiry mode.
bluez.hci_send_cmd(sock, bluez.OGF_HOST_CTL,
bluez.OCF_READ_INQUIRY_MODE )
pkt = sock.recv(255)
status,mode = struct.unpack("xxxxxxBB", pkt)
if status != 0: mode = -1
# restore old filter
sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, old_filter )
return mode | [
"def",
"read_inquiry_mode",
"(",
"sock",
")",
":",
"# save current filter",
"old_filter",
"=",
"sock",
".",
"getsockopt",
"(",
"bluez",
".",
"SOL_HCI",
",",
"bluez",
".",
"HCI_FILTER",
",",
"14",
")",
"# Setup socket filter to receive only events related to the",
"# r... | 35.296296 | 17.962963 |
def _add_raster_layer(self, raster_layer, layer_name, save_style=False):
"""Add a raster layer to the folder.
:param raster_layer: The layer to add.
:type raster_layer: QgsRasterLayer
:param layer_name: The name of the layer in the datastore.
:type layer_name: str
:param save_style: If we have to save a QML too. Default to False.
Not implemented in geopackage !
:type save_style: bool
:returns: A two-tuple. The first element will be True if we could add
the layer to the datastore. The second element will be the layer
name which has been used or the error message.
:rtype: (bool, str)
.. versionadded:: 4.0
"""
source = gdal.Open(raster_layer.source())
array = source.GetRasterBand(1).ReadAsArray()
x_size = source.RasterXSize
y_size = source.RasterYSize
output = self.raster_driver.Create(
self.uri.absoluteFilePath(),
x_size,
y_size,
1,
gdal.GDT_Byte,
['APPEND_SUBDATASET=YES', 'RASTER_TABLE=%s' % layer_name]
)
output.SetGeoTransform(source.GetGeoTransform())
output.SetProjection(source.GetProjection())
output.GetRasterBand(1).WriteArray(array)
# Once we're done, close properly the dataset
output = None
source = None
return True, layer_name | [
"def",
"_add_raster_layer",
"(",
"self",
",",
"raster_layer",
",",
"layer_name",
",",
"save_style",
"=",
"False",
")",
":",
"source",
"=",
"gdal",
".",
"Open",
"(",
"raster_layer",
".",
"source",
"(",
")",
")",
"array",
"=",
"source",
".",
"GetRasterBand",... | 32.136364 | 20.704545 |
def delete_password(self, service, username):
"""Delete the password for the username of the service.
"""
if not self.connected(service):
# the user pressed "cancel" when prompted to unlock their keyring.
raise PasswordDeleteError("Cancelled by user")
if not self.iface.hasEntry(self.handle, service, username, self.appid):
raise PasswordDeleteError("Password not found")
self.iface.removeEntry(self.handle, service, username, self.appid) | [
"def",
"delete_password",
"(",
"self",
",",
"service",
",",
"username",
")",
":",
"if",
"not",
"self",
".",
"connected",
"(",
"service",
")",
":",
"# the user pressed \"cancel\" when prompted to unlock their keyring.",
"raise",
"PasswordDeleteError",
"(",
"\"Cancelled b... | 56.222222 | 17.111111 |
def encodeAllRecords(self, records=None, toBeAdded=True):
"""Encodes a list of records.
Parameters:
--------------------------------------------------------------------
records: One or more records. (i,j)th element of this 2D array
specifies the value at field j of record i.
If unspecified, records previously generated and stored are
used.
toBeAdded: Whether the encodings corresponding to the record are added to
the corresponding fields
"""
if records is None:
records = self.getAllRecords()
if self.verbosity>0: print 'Encoding', len(records), 'records.'
encodings = [self.encodeRecord(record, toBeAdded) for record in records]
return encodings | [
"def",
"encodeAllRecords",
"(",
"self",
",",
"records",
"=",
"None",
",",
"toBeAdded",
"=",
"True",
")",
":",
"if",
"records",
"is",
"None",
":",
"records",
"=",
"self",
".",
"getAllRecords",
"(",
")",
"if",
"self",
".",
"verbosity",
">",
"0",
":",
"... | 44.705882 | 21.529412 |
def flatten(lst):
"""flatten([["a","btr"],"b", [],["c",["d",["e"], []]]]) will return ['a', 'btr', 'b', 'c', 'd', 'e']"""
def flatten_aux(item, accumulated):
if type(item) != list:
accumulated.append(item)
else:
for l in item:
flatten_aux(l, accumulated)
accumulated = []
flatten_aux(lst,accumulated)
return accumulated | [
"def",
"flatten",
"(",
"lst",
")",
":",
"def",
"flatten_aux",
"(",
"item",
",",
"accumulated",
")",
":",
"if",
"type",
"(",
"item",
")",
"!=",
"list",
":",
"accumulated",
".",
"append",
"(",
"item",
")",
"else",
":",
"for",
"l",
"in",
"item",
":",
... | 35 | 11.636364 |
def roll(self, shifts=None, roll_coords=None, **shifts_kwargs):
"""Roll this dataset by an offset along one or more dimensions.
Unlike shift, roll may rotate all variables, including coordinates
if specified. The direction of rotation is consistent with
:py:func:`numpy.roll`.
Parameters
----------
shifts : dict, optional
A dict with keys matching dimensions and values given
by integers to rotate each of the given dimensions. Positive
offsets roll to the right; negative offsets roll to the left.
roll_coords : bool
Indicates whether to roll the coordinates by the offset
The current default of roll_coords (None, equivalent to True) is
deprecated and will change to False in a future version.
Explicitly pass roll_coords to silence the warning.
**shifts_kwargs : {dim: offset, ...}, optional
The keyword arguments form of ``shifts``.
One of shifts or shifts_kwargs must be provided.
Returns
-------
rolled : Dataset
Dataset with the same coordinates and attributes but rolled
variables.
See also
--------
shift
Examples
--------
>>> ds = xr.Dataset({'foo': ('x', list('abcde'))})
>>> ds.roll(x=2)
<xarray.Dataset>
Dimensions: (x: 5)
Coordinates:
* x (x) int64 3 4 0 1 2
Data variables:
foo (x) object 'd' 'e' 'a' 'b' 'c'
"""
shifts = either_dict_or_kwargs(shifts, shifts_kwargs, 'roll')
invalid = [k for k in shifts if k not in self.dims]
if invalid:
raise ValueError("dimensions %r do not exist" % invalid)
if roll_coords is None:
warnings.warn("roll_coords will be set to False in the future."
" Explicitly set roll_coords to silence warning.",
FutureWarning, stacklevel=2)
roll_coords = True
unrolled_vars = () if roll_coords else self.coords
variables = OrderedDict()
for k, v in self.variables.items():
if k not in unrolled_vars:
variables[k] = v.roll(**{k: s for k, s in shifts.items()
if k in v.dims})
else:
variables[k] = v
return self._replace_vars_and_dims(variables) | [
"def",
"roll",
"(",
"self",
",",
"shifts",
"=",
"None",
",",
"roll_coords",
"=",
"None",
",",
"*",
"*",
"shifts_kwargs",
")",
":",
"shifts",
"=",
"either_dict_or_kwargs",
"(",
"shifts",
",",
"shifts_kwargs",
",",
"'roll'",
")",
"invalid",
"=",
"[",
"k",
... | 36.893939 | 22.5 |
def add_scroller_widget(self, ref, left=1, top=1, right=20, bottom=1, direction="h", speed=1, text="Message"):
""" Add Scroller Widget """
if ref not in self.widgets:
widget = ScrollerWidget(screen=self, ref=ref, left=left, top=top, right=right, bottom=bottom, direction=direction, speed=speed, text=text)
self.widgets[ref] = widget
return self.widgets[ref] | [
"def",
"add_scroller_widget",
"(",
"self",
",",
"ref",
",",
"left",
"=",
"1",
",",
"top",
"=",
"1",
",",
"right",
"=",
"20",
",",
"bottom",
"=",
"1",
",",
"direction",
"=",
"\"h\"",
",",
"speed",
"=",
"1",
",",
"text",
"=",
"\"Message\"",
")",
":... | 53.5 | 32.125 |
def encode_dict(values_dict):
"""Encode a dictionary into protobuf ``Value``-s.
Args:
values_dict (dict): The dictionary to encode as protobuf fields.
Returns:
Dict[str, ~google.cloud.firestore_v1beta1.types.Value]: A
dictionary of string keys and ``Value`` protobufs as dictionary
values.
"""
return {key: encode_value(value) for key, value in six.iteritems(values_dict)} | [
"def",
"encode_dict",
"(",
"values_dict",
")",
":",
"return",
"{",
"key",
":",
"encode_value",
"(",
"value",
")",
"for",
"key",
",",
"value",
"in",
"six",
".",
"iteritems",
"(",
"values_dict",
")",
"}"
] | 34.583333 | 25.416667 |
def tree_csp_solver(csp):
"[Fig. 6.11]"
n = len(csp.vars)
assignment = {}
root = csp.vars[0]
X, parent = topological_sort(csp.vars, root)
for Xj in reversed(X):
if not make_arc_consistent(parent[Xj], Xj, csp):
return None
for Xi in X:
if not csp.curr_domains[Xi]:
return None
assignment[Xi] = csp.curr_domains[Xi][0]
return assignment | [
"def",
"tree_csp_solver",
"(",
"csp",
")",
":",
"n",
"=",
"len",
"(",
"csp",
".",
"vars",
")",
"assignment",
"=",
"{",
"}",
"root",
"=",
"csp",
".",
"vars",
"[",
"0",
"]",
"X",
",",
"parent",
"=",
"topological_sort",
"(",
"csp",
".",
"vars",
",",... | 28.642857 | 15.928571 |
def get_image_dimensions(request):
"""
Verifies or calculates image dimensions.
:param request: OGC-type request
:type request: WmsRequest or WcsRequest
:return: horizontal and vertical dimensions of requested image
:rtype: (int or str, int or str)
"""
if request.service_type is ServiceType.WCS or (isinstance(request.size_x, int) and
isinstance(request.size_y, int)):
return request.size_x, request.size_y
if not isinstance(request.size_x, int) and not isinstance(request.size_y, int):
raise ValueError("At least one of parameters 'width' and 'height' must have an integer value")
missing_dimension = get_image_dimension(request.bbox, width=request.size_x, height=request.size_y)
if request.size_x is None:
return missing_dimension, request.size_y
if request.size_y is None:
return request.size_x, missing_dimension
raise ValueError("Parameters 'width' and 'height' must be integers or None") | [
"def",
"get_image_dimensions",
"(",
"request",
")",
":",
"if",
"request",
".",
"service_type",
"is",
"ServiceType",
".",
"WCS",
"or",
"(",
"isinstance",
"(",
"request",
".",
"size_x",
",",
"int",
")",
"and",
"isinstance",
"(",
"request",
".",
"size_y",
","... | 54.15 | 22.85 |
def convert_constant(params, w_name, scope_name, inputs, layers, weights, names):
"""
Convert constant layer.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
"""
print('Converting constant ...')
params_list = params['value'].numpy()
def target_layer(x, value=params_list):
return tf.constant(value.tolist(), shape=value.shape)
lambda_layer = keras.layers.Lambda(target_layer)
layers[scope_name + '_np'] = params_list # ad-hoc
layers[scope_name] = lambda_layer(layers[list(layers.keys())[0]]) | [
"def",
"convert_constant",
"(",
"params",
",",
"w_name",
",",
"scope_name",
",",
"inputs",
",",
"layers",
",",
"weights",
",",
"names",
")",
":",
"print",
"(",
"'Converting constant ...'",
")",
"params_list",
"=",
"params",
"[",
"'value'",
"]",
".",
"numpy",... | 33.73913 | 15.73913 |
def union_q(token):
"""
Appends all the Q() objects.
"""
query = Q()
operation = 'and'
negation = False
for t in token:
if type(t) is ParseResults: # See tokens recursively
query &= union_q(t)
else:
if t in ('or', 'and'): # Set the new op and go to next token
operation = t
elif t == '-': # Next tokens needs to be negated
negation = True
else: # Append to query the token
if negation:
t = ~t
if operation == 'or':
query |= t
else:
query &= t
return query | [
"def",
"union_q",
"(",
"token",
")",
":",
"query",
"=",
"Q",
"(",
")",
"operation",
"=",
"'and'",
"negation",
"=",
"False",
"for",
"t",
"in",
"token",
":",
"if",
"type",
"(",
"t",
")",
"is",
"ParseResults",
":",
"# See tokens recursively",
"query",
"&=... | 28.041667 | 15.958333 |
def wrsamp(self, expanded=False, write_dir=''):
"""
Write a wfdb header file and any associated dat files from this
object.
Parameters
----------
expanded : bool, optional
Whether to write the expanded signal (e_d_signal) instead
of the uniform signal (d_signal).
write_dir : str, optional
The directory in which to write the files.
"""
# Perform field validity and cohesion checks, and write the
# header file.
self.wrheader(write_dir=write_dir)
if self.n_sig > 0:
# Perform signal validity and cohesion checks, and write the
# associated dat files.
self.wr_dats(expanded=expanded, write_dir=write_dir) | [
"def",
"wrsamp",
"(",
"self",
",",
"expanded",
"=",
"False",
",",
"write_dir",
"=",
"''",
")",
":",
"# Perform field validity and cohesion checks, and write the",
"# header file.",
"self",
".",
"wrheader",
"(",
"write_dir",
"=",
"write_dir",
")",
"if",
"self",
"."... | 35.857143 | 17.666667 |
def project_delete_event(self, proj_info):
    """Process a project-delete event.

    Looks up the project name for the deleted project id, asks DCNM to
    delete the project, records the outcome in the local project-info
    cache, and emits a project-delete notification.

    Parameters
    ----------
    proj_info : dict
        Event payload; its 'resource_info' entry holds the project id.
    """
    LOG.debug("Processing project_delete_event...")
    proj_id = proj_info.get('resource_info')
    proj_name = self.get_project_name(proj_id)
    # NOTE(review): when no project name is found, no DCNM call and no
    # notification are made — preserved from the original flow.
    if proj_name:
        try:
            self.dcnm_client.delete_project(proj_name,
                                            self.cfg.dcnm.
                                            default_partition_name)
        except dexc.DfaClientRequestFailed:
            # Failed to delete project in DCNM.
            # Save the info and mark it as failure and retry it later.
            # BUGFIX: message previously said "create" in the delete path.
            LOG.error("Failed to delete project %s on DCNM.",
                      proj_name)
            self.update_project_info_cache(proj_id, name=proj_name,
                                           opcode='delete',
                                           result=constants.DELETE_FAIL)
        else:
            self.update_project_info_cache(proj_id, opcode='delete')
            LOG.debug('Deleted project:%s', proj_name)
        self.project_delete_notif(proj_id, proj_name)
"def",
"project_delete_event",
"(",
"self",
",",
"proj_info",
")",
":",
"LOG",
".",
"debug",
"(",
"\"Processing project_delete_event...\"",
")",
"proj_id",
"=",
"proj_info",
".",
"get",
"(",
"'resource_info'",
")",
"proj_name",
"=",
"self",
".",
"get_project_name"... | 50.130435 | 19.565217 |
def convert_ensembl_to_entrez(self, ensembl):
    """Convert an Ensembl transcript id (ENST...) to an Entrez Gene id.

    Parameters
    ----------
    ensembl : str
        Ensembl identifier; must contain 'ENST'.

    Returns
    -------
    The gene id parsed from the NCBI eutils response (a string, or a
    list of strings when eutils returns several <Id> entries).

    Raises
    ------
    IndexError
        If `ensembl` is not an ENST identifier (type kept for backward
        compatibility with existing callers).
    TypeError
        If the eutils response carries no id list.
    requests.HTTPError
        If the HTTP request fails.
    """
    # Guard clause replaces the original `pass` / `else: raise` shape.
    if 'ENST' not in ensembl:
        raise IndexError
    # Submit request to NCBI eutils/Gene database.
    server = ("http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?"
              + self.options + "&db=gene&term={0}".format(ensembl))
    r = requests.get(server, headers={"Content-Type": "text/xml"})
    if not r.ok:
        # raise_for_status() always raises when r.ok is False, so the
        # original unreachable sys.exit() after it has been dropped.
        r.raise_for_status()
    # Process the XML response and pull out the gene id.
    info = xmltodict.parse(r.text)
    try:
        geneId = info['eSearchResult']['IdList']['Id']
    except TypeError:
        # Re-raise the original exception instead of raising a bare
        # TypeError class with no message/context.
        raise
    return geneId
"def",
"convert_ensembl_to_entrez",
"(",
"self",
",",
"ensembl",
")",
":",
"if",
"'ENST'",
"in",
"ensembl",
":",
"pass",
"else",
":",
"raise",
"(",
"IndexError",
")",
"# Submit resquest to NCBI eutils/Gene database",
"server",
"=",
"\"http://eutils.ncbi.nlm.nih.gov/entr... | 36.095238 | 18.571429 |
def options(self, session=None):
    """Get the options set on this collection.

    Returns a dictionary of options and their values - see
    :meth:`~pymongo.database.Database.create_collection` for more
    information on the possible options. Returns an empty dictionary
    if the collection has not been created yet.

    :Parameters:
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.

    .. versionchanged:: 3.6
       Added ``session`` parameter.
    """
    dbo = self.__database.client.get_database(
        self.__database.name,
        self.codec_options,
        self.read_preference,
        self.write_concern,
        self.read_concern)
    cursor = dbo.list_collections(
        session=session, filter={"name": self.__name})
    # Take the first matching collection description, if any.
    result = next(iter(cursor), None)
    if not result:
        return {}
    opts = result.get("options", {})
    # "create" is an artifact of collection creation, not a real option.
    opts.pop("create", None)
    return opts
"def",
"options",
"(",
"self",
",",
"session",
"=",
"None",
")",
":",
"dbo",
"=",
"self",
".",
"__database",
".",
"client",
".",
"get_database",
"(",
"self",
".",
"__database",
".",
"name",
",",
"self",
".",
"codec_options",
",",
"self",
".",
"read_pre... | 29.594595 | 17.675676 |
def as_list(self, key):
    """Fetch the value stored under ``key``, guaranteed to be a list.

    Tuples and lists are converted/copied to a new list; any other
    value is wrapped in a single-element list.

    >>> a = ConfigObj()
    >>> a['a'] = 1
    >>> a.as_list('a')
    [1]
    >>> a['a'] = (1,)
    >>> a.as_list('a')
    [1]
    >>> a['a'] = [1]
    >>> a.as_list('a')
    [1]
    """
    value = self[key]
    return list(value) if isinstance(value, (tuple, list)) else [value]
"def",
"as_list",
"(",
"self",
",",
"key",
")",
":",
"result",
"=",
"self",
"[",
"key",
"]",
"if",
"isinstance",
"(",
"result",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"return",
"list",
"(",
"result",
")",
"return",
"[",
"result",
"]"
] | 24.05 | 17.15 |
def call_receivers(self, client, clients_list, message):
    """Dispatch a message to every registered receiver callback.

    Receivers registered with to_json=True get the decoded JSON object
    and are skipped entirely when the message is not valid JSON (or
    decodes to null); the others always get the raw message string.
    """
    # Decode once up front instead of per receiver.
    try:
        decoded = json.loads(message)
    except ValueError:
        decoded = None
    for callback, wants_json in self.receivers:
        if wants_json:
            if decoded is None:
                continue
            callback(client, clients_list, decoded)
        else:
            callback(client, clients_list, message)
"def",
"call_receivers",
"(",
"self",
",",
"client",
",",
"clients_list",
",",
"message",
")",
":",
"# Try to parse JSON",
"try",
":",
"json_message",
"=",
"json",
".",
"loads",
"(",
"message",
")",
"except",
"ValueError",
":",
"json_message",
"=",
"None",
"... | 26.636364 | 14.090909 |
def addBorrowers(self, *borrowers):
    """Add more transformed MIBs repositories to borrow MIBs from.

    Whenever MibCompiler.compile encounters a MIB module that none of
    the *searchers* can find, or a fetched ASN.1 MIB module that cannot
    be parsed (due to syntax errors), these *borrowers* are invoked in
    order of addition, each asked whether an already-transformed MIB
    can be fetched (borrowed).

    Args:
        borrowers: borrower object(s)

    Returns:
        reference to itself (can be used for call chaining)
    """
    self._borrowers.extend(borrowers)
    # Explicit guard replaces the original `x and y(...)` logging idiom.
    if debug.logger & debug.flagCompiler:
        debug.logger('current MIB borrower(s): %s'
                     % ', '.join(str(b) for b in self._borrowers))
    return self
"def",
"addBorrowers",
"(",
"self",
",",
"*",
"borrowers",
")",
":",
"self",
".",
"_borrowers",
".",
"extend",
"(",
"borrowers",
")",
"debug",
".",
"logger",
"&",
"debug",
".",
"flagCompiler",
"and",
"debug",
".",
"logger",
"(",
"'current MIB borrower(s): %s... | 35.909091 | 24.863636 |
def filtered_cls_slots(self, cn: ClassDefinitionName, all_slots: bool=True) \
        -> List[SlotDefinitionName]:
    """ Return the slots of class ``cn`` that meet the filter criteria.

    Slots come back in defining order, with class slots returned last.

    @param cn: name of class to filter
    @param all_slots: True means include attributes
    @return: List of slot names
    """
    cls = self.schema.classes[cn]
    known_classes = self.schema.classes
    return [slot.name
            for slot in self.all_slots(cls, cls_slots_first=True)
            if all_slots or slot.range in known_classes]
"def",
"filtered_cls_slots",
"(",
"self",
",",
"cn",
":",
"ClassDefinitionName",
",",
"all_slots",
":",
"bool",
"=",
"True",
")",
"->",
"List",
"[",
"SlotDefinitionName",
"]",
":",
"rval",
"=",
"[",
"]",
"cls",
"=",
"self",
".",
"schema",
".",
"classes",... | 41.352941 | 14.941176 |
def Add(self, request, callback=None):
    """Add a new request to the batch.

    Args:
      request: A http_wrapper.Request to add to the batch.
      callback: Optional callable invoked as callback(response,
          exception) when this request's response arrives. The first
          parameter is the deserialized response object; the second is
          an apiclient.errors.HttpError exception object if an HTTP
          error occurred while processing the request, or None if no
          errors occurred.

    Returns:
      None
    """
    key = self._NewId()
    self.__request_response_handlers[key] = RequestResponseAndHandler(
        request, None, callback)
"def",
"Add",
"(",
"self",
",",
"request",
",",
"callback",
"=",
"None",
")",
":",
"handler",
"=",
"RequestResponseAndHandler",
"(",
"request",
",",
"None",
",",
"callback",
")",
"self",
".",
"__request_response_handlers",
"[",
"self",
".",
"_NewId",
"(",
... | 40.647059 | 23.705882 |
def check(self, file):
    """Match a given file against all available yara rules.

    :param file: Path to file
    :type file: str
    :returns: String representations of every rule match
    :rtype: list
    """
    # Flatten all matches across all rules into one list of strings.
    return [str(match)
            for rule in self.ruleset
            for match in rule.match(file)]
"def",
"check",
"(",
"self",
",",
"file",
")",
":",
"result",
"=",
"[",
"]",
"for",
"rule",
"in",
"self",
".",
"ruleset",
":",
"matches",
"=",
"rule",
".",
"match",
"(",
"file",
")",
"for",
"match",
"in",
"matches",
":",
"result",
".",
"append",
... | 26.375 | 14.875 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.