Unnamed: 0
int64 0
389k
| code
stringlengths 26
79.6k
| docstring
stringlengths 1
46.9k
|
|---|---|---|
372,800
|
def encode_time(o):
    """Encode a Python datetime.time object as an ECMA-262 compliant time string.

    NOTE(review): the string literals in this block (the ``endswith``
    argument and the suffix appended after truncation) were stripped by
    dataset extraction; the code is not runnable as-is.
    """
    r = o.isoformat()
    if o.microsecond:
        # Truncate sub-millisecond digits.
        r = r[:12]
    if r.endswith():
        r = r[:-6] +
    return r
|
Encodes a Python datetime.time object as an ECMA-262 compliant
time string.
|
372,801
|
def iplot(figure,validate=True,sharing=None,filename=,
          online=None,asImage=False,asUrl=False,asPlot=False,
          dimensions=None,display_image=True,**kwargs):
    """Plot a figure in IPython, create an HTML plot, or generate an image.

    Routes the figure to plotly's offline or online renderers depending on
    the ``online`` flag and the current offline state, optionally saving a
    PNG (``asImage``), opening in the browser (``asPlot``) or returning the
    URL/path (``asUrl``).

    NOTE(review): every string literal (kwarg names, config-file keys,
    format strings, file extensions) was stripped by dataset extraction;
    this block is not syntactically valid and cannot be repaired without
    the original source.
    """
    valid_kwargs=[,,]
    for key in list(kwargs.keys()):
        if key not in valid_kwargs:
            raise Exception("Invalid keyword : ".format(key))
    if in kwargs:
        if in figure:
            figure[].update(showlegend=kwargs[])
    if all([ in kwargs,sharing is None]):
        sharing=kwargs[]
    if isinstance(sharing,bool):
        if sharing:
            sharing=
        else:
            sharing=
    if sharing is None:
        sharing=auth.get_config_file()[]
    if not filename:
        try:
            filename=figure[][]
        except:
            filename=.format(time.strftime("%Y-%m-%d %H:%M:%S"))
    if not dimensions:
        dimensions=(800,500) if not auth.get_config_file()[] else auth.get_config_file()[]
    show_link = auth.get_config_file()[]
    link_text = auth.get_config_file()[]
    config = auth.get_config_file()[]
    if in figure:
        validate = False if in figure[] else validate
    auto_open=True
    if asUrl:
        asPlot=True
        auto_open=False
    if asImage:
        if offline.is_offline() and not online:
            return offline.py_offline.iplot(figure,validate=validate, filename=filename, show_link=show_link,link_text=link_text,
                image=, image_width=dimensions[0], image_height=dimensions[1], config=config)
        else:
            try:
                py.image.save_as(figure,filename=+filename,format=,
                    width=dimensions[0],height=dimensions[1],scale=kwargs.get(,None))
                path=+filename+
            except:
                py.image.save_as(figure,filename=filename,format=,
                    width=dimensions[0],height=dimensions[1],scale=kwargs.get(,None))
                path=filename+
            if display_image:
                return display(Image(path))
            else:
                print(.format(path))
                return None
    if asPlot:
        filename+=
        if offline.is_offline() and not online:
            return offline.py_offline.plot(figure, filename=filename, validate=validate,
                show_link=show_link, link_text=link_text, auto_open=auto_open, config=config)
        else:
            return py.plot(figure, sharing=sharing, filename=filename, validate=validate,
                auto_open=auto_open)
    if offline.is_offline() and not online:
        return offline.py_offline.iplot(figure, validate=validate, filename=filename, show_link=show_link, link_text=link_text, config=config)
    else:
        return py.iplot(figure,validate=validate,sharing=sharing,
            filename=filename)
|
Plots a figure in IPython, creates an HTML or generates an Image
figure : figure
Plotly figure to be charted
validate : bool
If True then all values are validated before
it is charted
sharing : string
Sets the sharing level permission
public - anyone can see this chart
private - only you can see this chart
secret - only people with the link can see the chart
filename : string
Name to be used to save the file in the server, or as an image
online : bool
If True then the chart/image is rendered on the server
even when running in offline mode.
asImage : bool
If True it returns an Image (png)
In ONLINE mode:
Image file is saved in the working directory
Accepts:
filename
dimensions
scale
display_image
In OFFLINE mode:
Image file is downloaded (downloads folder) and a
regular plotly chart is displayed in Jupyter
Accepts:
filename
dimensions
asUrl : bool
If True the chart url/path is returned. No chart is displayed.
If Online : the URL is returned
If Offline : the local path is returned
asPlot : bool
If True the chart opens in browser
dimensions : tuple(int,int)
Dimensions for image
(width,height)
display_image : bool
If true, then the image is displayed after it has been saved
Requires Jupyter Notebook
Only valid when asImage=True
Other Kwargs
============
legend : bool
If False then the legend will not be shown
scale : integer
Increase the resolution of the image by `scale` amount
Only valid when asImage=True
|
372,802
|
def add_to_loader(loader_cls: Type, classes: List[Type]) -> None:
    """Register one or more classes with a YAtiML loader.

    Once registered, a class can be recognized and constructed when
    reading a YAML text.

    Args:
        loader_cls: The loader to register the classes with.
        classes: The class(es) to register; a plain Python class or a
            list of them.

    NOTE(review): the YAML tag format string and the ``hasattr`` attribute
    name were stripped by dataset extraction; not runnable as-is.
    """
    if not isinstance(classes, list):
        classes = [classes]
    for class_ in classes:
        tag = .format(class_.__name__)
        # Choose the constructor type based on the class kind.
        if issubclass(class_, enum.Enum):
            loader_cls.add_constructor(tag, EnumConstructor(class_))
        elif issubclass(class_, str) or issubclass(class_, UserString):
            loader_cls.add_constructor(tag, UserStringConstructor(class_))
        else:
            loader_cls.add_constructor(tag, Constructor(class_))
        if not hasattr(loader_cls, ):
            loader_cls._registered_classes = dict()
        loader_cls._registered_classes[tag] = class_
|
Registers one or more classes with a YAtiML loader.
Once a class has been registered, it can be recognized and \
constructed when reading a YAML text.
Args:
loader_cls: The loader to register the classes with.
classes: The class(es) to register, a plain Python class or a \
list of them.
|
372,803
|
def _list_of_dicts_to_column_headers(list_of_dicts):
if len(list_of_dicts) < 2 or not all(isinstance(item, dict) for item in list_of_dicts):
return None
column_headers = list_of_dicts[0].keys()
for d in list_of_dicts[1:]:
if len(d.keys()) != len(column_headers) or not all(header in d for header in column_headers):
return None
return column_headers
|
Detects if all entries in a list of ``dict``s have identical keys.
Returns the keys if all keys are the same and ``None`` otherwise.
Parameters
----------
list_of_dicts : list
List of dictionaries to test for identical keys.
Returns
-------
list or None
List of column headers if all dictionaries possessed the same keys. Returns ``None`` otherwise.
|
372,804
|
def _get_net_runner_opts():
    """Return the net.find runner options, falling back to module defaults.

    NOTE(review): the configuration key strings were stripped by dataset
    extraction; not runnable as-is.  Relies on the salt-injected
    ``__opts__`` dunder and module-level ``_DEFAULT_*`` constants.
    """
    runner_opts = __opts__.get(, {}).get(, {})
    return {
        : runner_opts.get(, _DEFAULT_TARGET),
        : runner_opts.get(, _DEFAULT_EXPR_FORM),
        : runner_opts.get(, _DEFAULT_IGNORE_INTF),
        : runner_opts.get(, _DEFAULT_DISPLAY),
        : runner_opts.get(, _DEFAULT_OUTPUTTER),
    }
|
Return the net.find runner options.
|
372,805
|
def distanceToMesh(self, actor, signed=False, negate=False):
    """Compute the (signed) distance from this mesh to another.

    The per-point distances are stored as the active scalar array on this
    actor's polydata, and the mapper's scalar range is updated to span
    them.  Returns ``self`` for chaining.
    """
    source_poly = self.polydata()
    target_poly = actor.polydata()

    distance_filter = vtk.vtkDistancePolyDataFilter()
    distance_filter.SetInputData(0, source_poly)
    distance_filter.SetInputData(1, target_poly)
    if signed:
        distance_filter.SignedDistanceOn()
    if negate:
        distance_filter.NegateDistanceOn()
    distance_filter.Update()

    # Attach the computed distances as this mesh's active scalars.
    distances = distance_filter.GetOutput().GetPointData().GetScalars()
    point_data = source_poly.GetPointData()
    point_data.AddArray(distances)
    point_data.SetActiveScalars(distances.GetName())

    low, high = distances.GetRange()
    self.mapper.SetScalarRange(low, high)
    self.mapper.ScalarVisibilityOn()
    return self
|
Computes the (signed) distance from one mesh to another.
.. hint:: |distance2mesh| |distance2mesh.py|_
|
372,806
|
def run_plate_bias(in_prefix, in_type, out_prefix, base_dir, options):
    """Run step 7 (plate bias) of the pyGenClean pipeline.

    Calls the ``pyGenClean.PlateBias.plate_bias`` module on ``bfile``
    input, parses its significant-SNP summary, writes a LaTeX summary
    section and appends counts to ``results_summary.txt``.

    :param in_prefix: the prefix of the input files.
    :param in_type: the type of the input files.
    :param out_prefix: the output prefix.
    :param base_dir: the output directory.
    :param options: the options needed (list).
    :returns: a ``_StepResult`` with the input prefix/type passed through
        (this step produces no usable output files).

    NOTE(review): Python 2 ``print >>`` syntax throughout; one string
    literal near the results-summary write was truncated by dataset
    extraction, so this block is not runnable as-is.
    """
    os.mkdir(out_prefix)
    # This module requires binary plink files.
    required_type = "bfile"
    check_input_files(in_prefix, in_type, required_type)
    script_prefix = os.path.join(out_prefix, "plate_bias")
    options += ["--{}".format(required_type), in_prefix,
                "--out", script_prefix]
    try:
        plate_bias.main(options)
    except plate_bias.ProgramError as e:
        msg = "plate_bias: {}".format(e)
        raise ProgramError(msg)
    filename = script_prefix + ".significant_SNPs.summary"
    if not os.path.isfile(filename):
        raise ProgramError("{}: no such file".format(filename))
    plate_counter = None
    with open(filename, "r") as i_file:
        # Map column name -> index from the header line.
        header = {
            name: i for i, name in
            enumerate(i_file.readline().rstrip("\r\n").split("\t"))
        }
        if "plate" not in header:
            msg = "{}: missing column plate".format(filename)
            raise ProgramError(msg)
        # Count significant markers per plate.
        plate_counter = Counter(
            line.rstrip("\r\n").split("\t")[header["plate"]] for line in i_file
        )
    table = [["plate name", "number of markers"]]
    for plate_name, number in plate_counter.most_common():
        table.append([
            latex_template.sanitize_tex(plate_name),
            "{:,d}".format(number),
        ])
    filename = script_prefix + ".significant_SNPs.txt"
    nb_markers = None
    with open(filename, "r") as i_file:
        # Unique significant markers.
        nb_markers = len({line.rstrip("\r\n") for line in i_file})
    # The p-value threshold: plink default unless --pfilter was given.
    p_threshold = str(plate_bias.parser.get_default("pfilter"))
    if "--pfilter" in options:
        p_threshold = str(options[options.index("--pfilter") + 1])
    latex_file = os.path.join(script_prefix + ".summary.tex")
    try:
        with open(latex_file, "w") as o_file:
            print >>o_file, latex_template.subsection(plate_bias.pretty_name)
            text = (
                "After performing the plate bias analysis using Plink, a "
                "total of {:,d} unique marker{} had a significant result "
                "({} a value less than {}).".format(
                    nb_markers,
                    "s" if nb_markers > 1 else "",
                    r"\textit{i.e.}",
                    latex_template.format_numbers(p_threshold),
                )
            )
            print >>o_file, latex_template.wrap_lines(text)
            if nb_markers > 0:
                # Build a filesystem-safe LaTeX label from the prefix.
                table_label = re.sub(
                    r"[/\\]",
                    "_",
                    script_prefix,
                ) + "_plate_bias"
                text = (
                    r"Table~\ref{" + table_label + "} summarizes the plate "
                    "bias results."
                )
                print >>o_file, latex_template.wrap_lines(text)
                longtable_template = latex_template.jinja2_env.get_template(
                    "longtable_template.tex",
                )
                table_caption = (
                    "Summary of the plate bias analysis performed by Plink. "
                    "For each plate, the number of significant marker{} is "
                    "shown (threshold of {}). The plates are sorted according "
                    "to the total number of significant results.".format(
                        "s" if nb_markers > 1 else "",
                        latex_template.format_numbers(p_threshold),
                    )
                )
                print >>o_file, longtable_template.render(
                    table_caption=table_caption,
                    table_label=table_label,
                    nb_col=len(table[1]),
                    col_alignments="lr",
                    text_size="normalsize",
                    header_data=zip(table[0], [1 for i in table[0]]),
                    tabular_data=table[1:],
                )
    except IOError:
        msg = "{}: cannot write LaTeX summary".format(latex_file)
        raise ProgramError(msg)
    with open(os.path.join(base_dir, "results_summary.txt"), "a") as o_file:
        # NOTE(review): the string on the next line was truncated by
        # dataset extraction (unterminated literal).
        print >>o_file, "
        print >>o_file, ("Number of markers with plate bias (p<{})\t"
                         "{:,d}".format(p_threshold, nb_markers))
        print >>o_file, "---"
    return _StepResult(
        next_file=in_prefix,
        next_file_type=required_type,
        latex_summary=latex_file,
        description=plate_bias.desc,
        long_description=plate_bias.long_desc,
        graph_path=None,
    )
|
Runs step7 (plate bias).
:param in_prefix: the prefix of the input files.
:param in_type: the type of the input files.
:param out_prefix: the output prefix.
:param base_dir: the output directory.
:param options: the options needed.
:type in_prefix: str
:type in_type: str
:type out_prefix: str
:type base_dir: str
:type options: list
:returns: a tuple containing the prefix of the output files (the input
prefix for the next script) and the type of the output files
(``bfile``).
This function calls the :py:mod:`pyGenClean.PlateBias.plate_bias` module.
The required file type for this module is ``bfile``, hence the need to use
the :py:func:`check_input_files` to check if the file input file type is
the good one, or to create it if needed.
.. note::
The :py:mod:`pyGenClean.PlateBias.plate_bias` module doesn't return
usable output files. Hence, this function returns the input file prefix
and its type.
|
372,807
|
def create_cache_subnet_group(name, subnets=None, region=None, key=None, keyid=None, profile=None, **args):
    """Create an ElastiCache subnet group.

    Subnet names are resolved to subnet ids via a salt execution module;
    already-resolved ids (those starting with a given prefix) are passed
    through unchanged.

    NOTE(review): the string literals (dict keys, id prefix, error format
    strings) were stripped by dataset extraction, and the leftover line
    below the signature is a remnant of the original docstring; not
    runnable as-is.
    """
    [myVPCSubnet1,myVPCSubnet2]
    if subnets:
        if not in args:
            args[] = []
        if not isinstance(subnets, list):
            subnets = [subnets]
        for subnet in subnets:
            if subnet.startswith():
                args[] += [subnet]
                continue
            sn = __salt__[](subnet_names=subnet, region=region, key=key,
                            keyid=keyid, profile=profile).get()
            if not sn:
                raise SaltInvocationError(
                    .format(subnet))
            if len(sn) == 1:
                args[] += [sn[0][]]
            elif len(sn) > 1:
                # Ambiguous name: more than one matching subnet.
                raise CommandExecutionError(
                    .format(subnet))
    args = dict([(k, v) for k, v in args.items() if not k.startswith()])
    return _create_resource(name, name_param=, desc=,
                            res_type=,
                            region=region, key=key, keyid=keyid, profile=profile, **args)
|
Create an ElastiCache subnet group
Example:
.. code-block:: bash
salt myminion boto3_elasticache.create_cache_subnet_group name=my-subnet-group \
CacheSubnetGroupDescription="description" \
subnets='[myVPCSubnet1,myVPCSubnet2]'
|
372,808
|
def random_patt_uniform(nrows, ncols, patt_type=scalar):
    """Return a random ScalarPatternUniform or TransversePatternUniform.

    Each element is a complex normal random variable with zero mean and
    unit standard deviation.  *nrows* corresponds to the theta axis;
    *ncols* corresponds to the phi axis and must be even.

    Raises:
        ValueError: if ``ncols`` is odd.
        TypeError: if ``patt_type`` is neither scalar nor vector.

    NOTE(review): the ``err_msg`` dictionary keys were stripped by dataset
    extraction; not runnable as-is.
    """
    if np.mod(ncols, 2) == 1:
        raise ValueError(err_msg[])
    if(patt_type == scalar):
        vec = np.random.normal(0.0, 1.0, nrows * ncols) + \
            1j * np.random.normal(0.0, 1.0, nrows * ncols)
        return ScalarPatternUniform(vec.reshape((nrows, ncols)),
                                    doublesphere=False)
    elif(patt_type == vector):
        vec1 = np.random.normal(0.0, 1.0, nrows * ncols) + \
            1j * np.random.normal(0.0, 1.0, nrows * ncols)
        vec2 = np.random.normal(0.0, 1.0, nrows * ncols) + \
            1j * np.random.normal(0.0, 1.0, nrows * ncols)
        return TransversePatternUniform(vec1.reshape((nrows, ncols)),
                                        vec2.reshape((nrows, ncols)),
                                        doublesphere=False)
    else:
        raise TypeError(err_msg[])
|
Returns a ScalarPatternUniform object or a VectorPatternUniform object
where each of the elements is set to a normal random variable with zero
mean and unit standard deviation. *nrows* is the number of rows in
the pattern, which corresponds to the theta axis. *ncols* must be even
and is the number of columns in the pattern and corresponds to the phi
axis.
(See *ScalarPatternUniform* and *VectorPatternUniform* for details.)
Examples::
>>> f = spherepy.random_patt_uniform(6, 8, coef_type = spherepy.scalar)
>>> f = spherepy.random_patt_uniform(6, 8) # same as above
>>> F = spherepy.random_patt_uniform(6, 8, coef_type = spherepy.vector)
Args:
nrows (int): Number of rows corresponding to the theta axis.
ncols (int): Number of columns corresponding to the phi axis. To get
the speed and accuracy I need, this value **must** be even.
coef_type (int, optional): Set to 0 for scalar, and 1 for vector.
The default option is scalar.
Returns:
coefs: Returns a ScalarPatternUniform object if coef_type is either
blank or set to 0. Returns a VectorPatternUniform object if
coef_type = 1.
Raises:
ValueError: If ncols is not even.
TypeError: If coef_type is anything but 0 or 1.
|
372,809
|
def openByVendorIDAndProductID(
        self, vendor_id, product_id,
        skip_on_access_error=False, skip_on_error=False):
    """Open the first USB device matching the given vendor and product ids.

    Returns a USBDeviceHandle instance, or None when no matching device
    is present.  The ``skip_on_*`` flags are forwarded to
    ``getByVendorIDAndProductID`` (see ``getDeviceList``).
    """
    device = self.getByVendorIDAndProductID(
        vendor_id, product_id,
        skip_on_access_error=skip_on_access_error,
        skip_on_error=skip_on_error)
    if device is None:
        return None
    return device.open()
|
Get the first USB device matching given vendor and product ids.
Returns an USBDeviceHandle instance, or None if no present device
match.
skip_on_error (bool)
(see getDeviceList)
skip_on_access_error (bool)
(see getDeviceList)
|
372,810
|
def selectInvert( self ):
    """Invert the selection state of items on the current layer.

    Items whose layer matches the scene's current layer, and items with
    no layer at all, have their selection toggled; everything else is
    left untouched.
    """
    active_layer = self._currentLayer
    for scene_item in self.items():
        item_layer = scene_item.layer()
        # Toggle items on the active layer, or items without a layer.
        if item_layer == active_layer or not item_layer:
            scene_item.setSelected(not scene_item.isSelected())
|
Inverts the currently selected items in the scene.
|
372,811
|
def decode_to_bin_keypath(path):
    """Decode bytes into a sequence of 0s and 1s.

    Used in decoding the key path of a KV-NODE.
    """
    bits = encode_to_bin(path)
    # Drop the first nibble when the leading bit is set.
    if bits[0] == 1:
        bits = bits[4:]
    assert bits[0:2] == PREFIX_00
    padded_len = TWO_BITS.index(bits[2:4])
    skip = (4 - padded_len) % 4
    return bits[4 + skip:]
|
Decodes bytes into a sequence of 0s and 1s
Used in decoding key path of a KV-NODE
|
372,812
|
def validate_table_name(name):
    """Validate an SQLite table name.

    :param str name: Table name to validate.
    :raises NameValidationError: if the name is empty, contains invalid
        characters, or is an invalid reserved name.
    """
    try:
        validate_sqlite_table_name(name)
    except (InvalidCharError, InvalidReservedNameError) as e:
        raise NameValidationError(e)
    except NullNameError:
        raise NameValidationError("table name is empty")
    except ValidReservedNameError:
        # Reserved-but-valid names are deliberately accepted.
        pass
|
:param str name: Table name to validate.
:raises NameValidationError: |raises_validate_table_name|
|
372,813
|
def dump_bulk(cls, parent=None, keep_ids=True):
    """Dump a tree branch to a python data structure.

    NOTE(review): the serialization format string was stripped by dataset
    extraction, and the loop body references names (``path``, ``newobj``)
    that are not defined in what remains — the inner logic that populates
    ``ret`` is missing.  Not runnable as-is.
    """
    cls = get_result_class(cls)
    qset = cls._get_serializable_model().objects.all()
    if parent:
        # Restrict to the branch rooted at ``parent`` (materialized path).
        qset = qset.filter(path__startswith=parent.path)
    ret, lnk = [], {}
    for pyobj in serializers.serialize(, qset):
        lnk[path] = newobj
    return ret
|
Dumps a tree branch to a python data structure.
|
372,814
|
def post_user_contact_lists_contacts(self, id, contact_list_id, **data):
    """POST /users/:id/contact_lists/:contact_list_id/contacts/

    Adds a new contact to the contact list.  Returns ``{"created": true}``.
    There is no way to update entries in the list; delete the old one and
    add the updated version.
    """
    # Bug fix: the original format string used {0} for both placeholders,
    # so contact_list_id was never interpolated and the user id was sent
    # in its place.
    return self.post(
        "/users/{0}/contact_lists/{1}/contacts/".format(id, contact_list_id),
        data=data)
|
POST /users/:id/contact_lists/:contact_list_id/contacts/
Adds a new contact to the contact list. Returns ``{"created": true}``.
There is no way to update entries in the list; just delete the old one
and add the updated version.
|
372,815
|
def get_first_properties(elt, keys=None, ctx=None):
    """Get first properties related to one input key.

    :param elt: element to read properties from.
    :param keys: a single key or a list of property keys to get.
    :param ctx: context from which to get properties; defaults to ``elt``.
    :return: dict of first values of elt properties.
    """
    # Normalize a lone key into a one-element tuple.
    if isinstance(keys, string_types):
        keys = (keys,)
    return _get_properties(elt, keys=keys, first=True, ctx=ctx)
|
Get first properties related to one input key.
:param elt: first property elt. Not None methods.
:param list keys: property keys to get.
:param ctx: elt ctx from where get properties. Equals elt if None. It
allows to get function properties related to a class or instance if
related function is defined in base class.
:return: dict of first values of elt properties.
|
372,816
|
def create_secgroup_rule(self, protocol, from_port, to_port,
                         source, target):
    """Create a new server security group rule.

    :param str protocol: e.g. ``tcp``, ``icmp``, etc.
    :param int from_port: e.g. ``1``
    :param int to_port: e.g. ``65535``
    :param str source: CIDR or security group name the rule applies to.
    :param str target: the security group in which to create the rule.

    NOTE(review): the kwargs dictionary keys and the membership-test
    literal were stripped by dataset extraction; not runnable as-is.
    """
    kwargs = {
        : protocol,
        : from_port,
        : to_port
    }
    sg = self.find_secgroup(target).ec2sg
    if not sg:
        raise BangError("Security group not found, %s" % target)
    # Source is either a CIDR string or another security group's name.
    if in source:
        kwargs[] = source
    else:
        kwargs[] = self.find_secgroup(source).ec2sg
    sg.authorize(**kwargs)
|
Creates a new server security group rule.
:param str protocol: E.g. ``tcp``, ``icmp``, etc...
:param int from_port: E.g. ``1``
:param int to_port: E.g. ``65535``
:param str source:
:param str target: The target security group. I.e. the group in which
this rule should be created.
|
372,817
|
def load(cls, filename, gzipped, byteorder=):
    """Read, parse and return the file at the specified location.

    ``gzipped`` indicates whether the file is gzip-compressed;
    ``byteorder`` selects big- or little-endian parsing.

    NOTE(review): the ``byteorder`` default and the file-mode string were
    stripped by dataset extraction; not runnable as-is.
    """
    open_file = gzip.open if gzipped else open
    with open_file(filename, ) as buff:
        return cls.from_buffer(buff, byteorder)
|
Read, parse and return the file at the specified location.
The `gzipped` argument is used to indicate if the specified
file is gzipped. The `byteorder` argument lets you specify
whether the file is big-endian or little-endian.
|
372,818
|
def update(self, figure):
    """Update the displayed figure on data change.

    Parameters
    ----------
    figure: matplotlib.figure.Figure
        Matplotlib figure object that is displayed in self.
    """
    # Tear down the previous canvas, if one exists.
    if hasattr(self, "figure_canvas"):
        self.figure_canvas.Destroy()

    canvas = self._get_figure_canvas(figure)
    self.figure_canvas = canvas
    canvas.SetSize(self.GetSize())
    figure.subplots_adjust()

    self.main_sizer.Add(canvas, 1,
                        wx.EXPAND | wx.FIXED_MINSIZE, 0)
    self.Layout()
    canvas.draw()
|
Updates figure on data change
Parameters
----------
* figure: matplotlib.figure.Figure
\tMatplotlib figure object that is displayed in self
|
372,819
|
def process_escape(self, char):
    """Handle the character that follows the escape character."""
    # Restore the regular character handler first.
    self.process_char = self.last_process_char

    # At a part boundary, escape + whitespace stands for the literal
    # escape character itself.
    if self.part == [] and char in self.whitespace:
        self.parts.append(self.escape_char)
        return

    # An escaped end-of-line is swallowed (presumably a line
    # continuation — behavior preserved from the original).
    if char == self.eol_char:
        return

    # Map known escape sequences; default to the raw escape pair.
    self.part.append(self.escape_results.get(char, self.escape_char + char))
|
Handle the char after the escape char
|
372,820
|
def list2pd(all_data, subjindex=None, listindex=None):
    """Make a multi-indexed dataframe of subject data.

    Each row is indexed by (subject, list number); cells hold the item
    presented or recalled at the position given by the column number.

    NOTE(review): the MultiIndex level name strings were stripped by
    dataset extraction; not runnable as-is.
    """
    # Default indices: 0..len for each subject's lists, 0..n for subjects.
    listindex = [[idx for idx in range(len(sub))] for sub in all_data] if not listindex else listindex
    subjindex = [idx for idx,subj in enumerate(all_data)] if not subjindex else subjindex
    def make_multi_index(listindex, sub_num):
        # One (subject, list) tuple per list of this subject.
        return pd.MultiIndex.from_tuples([(sub_num,lst) for lst in listindex], names = [, ])
    listindex = list(listindex)
    subjindex = list(subjindex)
    subs_list_of_dfs = [pd.DataFrame(sub_data, index=make_multi_index(listindex[sub_num], subjindex[sub_num])) for sub_num,sub_data in enumerate(all_data)]
    return pd.concat(subs_list_of_dfs)
|
Makes multi-indexed dataframe of subject data
Parameters
----------
all_data : list of lists of strings
strings are either all presented or all recalled items, in the order of presentation or recall
*should also work for presented / recalled ints and floats, if desired
Returns
----------
subs_list_of_dfs : multi-indexed dataframe
dataframe of subject data (presented or recalled words/items), indexed by subject and list number
cell populated by the term presented or recalled in the position indicated by the column number
|
372,821
|
async def dispatch_request(
    self, request_context: Optional[RequestContext]=None,
) -> ResponseReturnValue:
    """Dispatch the request to the view function.

    Arguments:
        request_context: The request context, optional as Flask
            omits this argument.

    NOTE(review): the HTTP-method comparison literal was stripped by
    dataset extraction; not runnable as-is.
    """
    request_ = (request_context or _request_ctx_stack.top).request
    if request_.routing_exception is not None:
        raise request_.routing_exception
    # Serve an automatic OPTIONS response when the rule asks for one.
    if request_.method == and request_.url_rule.provide_automatic_options:
        return await self.make_default_options_response()
    handler = self.view_functions[request_.url_rule.endpoint]
    return await handler(**request_.view_args)
|
Dispatch the request to the view function.
Arguments:
request_context: The request context, optional as Flask
omits this argument.
|
372,822
|
def add_custom_fields(cls, *args, **kw):
    """Attach every custom field declared in the configuration to *cls*.

    Each factory in ``config.custom_field_factories`` yields field
    objects, which are set on the class under their own names.
    """
    for field_factory in config.custom_field_factories:
        for custom_field in field_factory():
            setattr(cls, custom_field.name, custom_field)
|
Add any custom fields defined in the configuration.
|
372,823
|
def right_join(self, table, one=None, operator=None, two=None):
    """Add a right join to the query.

    :param table: the table to join with, or a JoinClause instance
    :param one: the first column of the join condition
    :param operator: the operator of the join condition
    :param two: the second column of the join condition
    :return: the current QueryBuilder instance
    """
    # A pre-built JoinClause simply has its type forced to "right".
    if isinstance(table, JoinClause):
        table.type = "right"
    return self.join(table, one, operator, two, "right")
|
Add a right join to the query
:param table: The table to join with, can also be a JoinClause instance
:type table: str or JoinClause
:param one: The first column of the join condition
:type one: str
:param operator: The operator of the join condition
:type operator: str
:param two: The second column of the join condition
:type two: str
:return: The current QueryBuilder instance
:rtype: QueryBuilder
|
372,824
|
def from_etree(cls, etree_element):
    """Create a ``SaltEdge`` instance from the etree representation of an
    <edges> element of a SaltXMI file.

    NOTE(review): the attribute-name strings passed to ``get_node_id``
    were stripped by dataset extraction; not runnable as-is.
    """
    ins = SaltElement.from_etree(etree_element)
    # Re-brand the generic element as a SaltEdge.
    ins.__class__ = SaltEdge.mro()[0]
    ins.layers = get_layer_ids(etree_element)
    ins.source = get_node_id(etree_element, )
    ins.target = get_node_id(etree_element, )
    return ins
|
creates a ``SaltEdge`` instance from the etree representation of an
<edges> element from a SaltXMI file.
|
372,825
|
def collect_output(mr_out_dir, out_file=None):
    """Return all mapreduce output in ``mr_out_dir``.

    Append the output to ``out_file`` if provided; otherwise return the
    concatenated result as a single string (the caller must ensure it
    fits into memory).

    NOTE(review): the local-file open mode string was stripped by dataset
    extraction; not runnable as-is.
    """
    if out_file is None:
        output = []
        for fn in iter_mr_out_files(mr_out_dir):
            with hdfs.open(fn, "rt") as f:
                output.append(f.read())
        return "".join(output)
    else:
        # Stream in 16 MiB chunks to bound memory use.
        block_size = 16777216
        with open(out_file, ) as o:
            for fn in iter_mr_out_files(mr_out_dir):
                with hdfs.open(fn) as f:
                    data = f.read(block_size)
                    while len(data) > 0:
                        o.write(data)
                        data = f.read(block_size)
|
Return all mapreduce output in ``mr_out_dir``.
Append the output to ``out_file`` if provided. Otherwise, return
the result as a single string (it is the caller's responsibility to
ensure that the amount of data retrieved fits into memory).
|
372,826
|
def to_json_object(self):
    """Return a dict representation that can be serialized to JSON.

    NOTE(review): the key for the optional ``app`` entry was stripped by
    dataset extraction; not runnable as-is.
    """
    obj_dict = dict(namespace_start=self.namespace_start,
                    namespace_end=self.namespace_end)
    if self.app is not None:
        obj_dict[] = self.app
    return obj_dict
|
Returns a dict representation that can be serialized to JSON.
|
372,827
|
def set_mode_cb(self, mode, tf):
    """Called when one of the Move/Draw/Edit radio buttons is selected.

    NOTE(review): the mode comparison literal was stripped by dataset
    extraction; not runnable as-is.
    """
    if tf:
        self.canvas.set_draw_mode(mode)
        if mode == :
            self.edit_select_cuts()
    return True
|
Called when one of the Move/Draw/Edit radio buttons is selected.
|
372,828
|
def save_and_close_enable(self, top_left, bottom_right):
    """Enable the save-and-close button on a data change event.

    The changed cell range (*top_left*, *bottom_right*) is accepted for
    the signal's signature but not used — any change enables the button.
    """
    button = self.btn_save_and_close
    button.setEnabled(True)
    button.setAutoDefault(True)
    button.setDefault(True)
|
Handle the data change event to enable the save and close button.
|
372,829
|
def _getFromTime(self, atDate=None):
    """Return the time the event starts, in the local time zone.

    ``atDate`` is accepted but not used by this implementation.
    """
    return getLocalTime(self.date_from, self.time_from, self.tz)
|
Time that the event starts (in the local time zone).
|
372,830
|
def decodeLength(length):
    """Decode a length from its variable-width byte encoding.

    :param length: Bytes string to decode.
    :return: Decoded length.

    NOTE(review): the padding bytes literals (``offset = b...``) and the
    struct format string were stripped by dataset extraction; not
    runnable as-is.
    """
    bytes_length = len(length)
    # Pick zero-padding and marker-bit mask by encoded width.
    if bytes_length < 2:
        offset = b
        XOR = 0
    elif bytes_length < 3:
        offset = b
        XOR = 0x8000
    elif bytes_length < 4:
        offset = b
        XOR = 0xC00000
    elif bytes_length < 5:
        offset = b
        XOR = 0xE0000000
    else:
        raise ConnectionError(.format(length))
    decoded = unpack(, (offset + length))[0]
    # Strip the width-marker bits.
    decoded ^= XOR
    return decoded
|
Decode length based on given bytes.
:param length: Bytes string to decode.
:return: Decoded length.
|
372,831
|
def ispercolating(am, inlets, outlets, mode=):
    """Determine whether a percolating cluster spans inlets and outlets.

    :param am: adjacency matrix whose ``data`` attribute marks occupied
        bonds.
    :param inlets: indices of inlet sites.
    :param outlets: indices of outlet sites.
    :param mode: either 'site' or 'bond' percolation.

    NOTE(review): the mode/format string literals were stripped by dataset
    extraction (the bare ``r`` below is the remnant of an r-string
    docstring prefix); not runnable as-is.
    """
    r
    if am.format is not :
        am = am.to_coo()
    ij = sp.vstack((am.col, am.row)).T
    if mode.startswith():
        occupied_sites = sp.zeros(shape=am.shape[0], dtype=bool)
        occupied_sites[ij[am.data].flatten()] = True
        clusters = site_percolation(ij, occupied_sites)
    elif mode.startswith():
        occupied_bonds = am.data
        clusters = bond_percolation(ij, occupied_bonds)
    # Cluster labels present at the inlets/outlets; -1 means unclustered.
    ins = sp.unique(clusters.sites[inlets])
    if ins[0] == -1:
        ins = ins[1:]
    outs = sp.unique(clusters.sites[outlets])
    if outs[0] == -1:
        outs = outs[1:]
    # Percolating iff some cluster label appears on both sides.
    hits = sp.in1d(ins, outs)
    return sp.any(hits)
|
r"""
Determines if a percolating cluster exists in the network spanning
the given inlet and outlet sites
Parameters
----------
am : adjacency_matrix
The adjacency matrix with the ``data`` attribute indicating
if a bond is occupied or not
inlets : array_like
An array of indices indicating which sites are part of the inlets
outlets : array_like
An array of indices indicating which sites are part of the outlets
mode : string
Indicates which type of percolation to apply, either `'site'` or
`'bond'`
|
372,832
|
def get_family_admin_session(self):
    """Get the ``OsidSession`` for the family administrative service.

    return: (osid.relationship.FamilyAdminSession) - a
        ``FamilyAdminSession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_family_admin()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_family_admin()`` is ``true``.*
    """
    if not self.supports_family_admin():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed()
    try:
        return sessions.FamilyAdminSession(proxy=self._proxy,
                                           runtime=self._runtime)
    except AttributeError:
        raise OperationFailed()
|
Gets the ``OsidSession`` associated with the family administrative service.
return: (osid.relationship.FamilyAdminSession) - a
``FamilyAdminSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_family_admin()`` is ``false``
*compliance: optional -- This method must be implemented if ``supports_family_admin()`` is ``true``.*
|
372,833
|
def adjacent(geohash, direction):
    """Return the adjacent geohash for a given direction.

    Recurses into the parent geohash when the last character lies on the
    border for that direction.

    NOTE(review): the direction keys and the neighbor/border character
    tables were stripped by dataset extraction; not runnable as-is.
    Relies on a module-level ``BASESEQUENCE``.
    """
    assert direction in , "Invalid direction: %s"%direction
    assert geohash, "Invalid geohash: %s"%geohash
    neighbor = {
        : [ , ],
        : [ , ],
        : [ , ],
        : [ , ]
    }
    border = {
        : [ , ],
        : [ , ],
        : [ , ],
        : [ , ]
    }
    last = geohash[-1]
    parent = geohash[0:-1]
    # Geohash cell orientation alternates with each character.
    t = len(geohash) % 2
    if (last in border[direction][t]) and (parent):
        parent = adjacent(parent, direction)
    return parent + BASESEQUENCE[neighbor[direction][t].index(last)]
|
Return the adjacent geohash for a given direction.
|
372,834
|
def from_labeled_point(rdd, categorical=False, nb_classes=None):
    """Convert a LabeledPoint RDD back to a pair of numpy arrays.

    :param rdd: LabeledPoint RDD
    :param categorical: boolean, whether labels should be one-hot encoded
    :param nb_classes: optional int, number of class labels
    :return: pair of numpy arrays (features, labels)

    NOTE(review): the ``dtype=`` string literal was stripped by dataset
    extraction; not runnable as-is.
    """
    features = np.asarray(
        rdd.map(lambda lp: from_vector(lp.features)).collect())
    labels = np.asarray(rdd.map(lambda lp: lp.label).collect(), dtype=)
    if categorical:
        if not nb_classes:
            # Infer class count from the largest label.
            nb_classes = np.max(labels) + 1
        temp = np.zeros((len(labels), nb_classes))
        for i, label in enumerate(labels):
            temp[i, label] = 1.
        labels = temp
    return features, labels
|
Convert a LabeledPoint RDD back to a pair of numpy arrays
:param rdd: LabeledPoint RDD
:param categorical: boolean, if labels should be one-hot encode when returned
:param nb_classes: optional int, indicating the number of class labels
:return: pair of numpy arrays, features and labels
|
372,835
|
def _update(self):
    """Update the current model using one round of Gibbs sampling.

    The duration of the full sweep is logged at INFO level.
    """
    initial_time = time.time()
    # One Gibbs sweep: hidden states, then emissions, then transitions.
    self._updateHiddenStateTrajectories()
    self._updateEmissionProbabilities()
    self._updateTransitionMatrix()
    final_time = time.time()
    elapsed_time = final_time - initial_time
    logger().info("BHMM update iteration took %.3f s" % elapsed_time)
|
Update the current model using one round of Gibbs sampling.
|
372,836
|
def addInHeaderInfo(self, name, type, namespace, element_type=0,
                    mustUnderstand=0):
    """Add an input SOAP header description to the call info.

    Returns the newly created HeaderInfo instance.
    """
    info = HeaderInfo(name, type, namespace, element_type)
    # SOAP mustUnderstand is conventionally flagged with 1.
    if mustUnderstand:
        info.mustUnderstand = 1
    self.inheaders.append(info)
    return info
|
Add an input SOAP header description to the call info.
|
372,837
|
def calculate_perimeters(labels, indexes):
    """Count the distances between adjacent pixels in the perimeters of the labels.

    Relies on the module-level ``__perimeter_scoring`` lookup table and
    the ``table_idx_from_labels``/``fixup_scipy_ndimage_result`` helpers.
    """
    m = table_idx_from_labels(labels)
    # Per-pixel perimeter contribution from the precomputed scoring table.
    pixel_score = __perimeter_scoring[m]
    return fixup_scipy_ndimage_result(scind.sum(pixel_score, labels, np.array(indexes,dtype=np.int32)))
|
Count the distances between adjacent pixels in the perimeters of the labels
|
372,838
|
def list(self, verbose=True):
    """Print a table of contents to sys.stdout.

    If `verbose` is False, only member names are printed; if True, an
    `ls -l`-like output is produced.

    NOTE(review): the ``end=`` argument values were stripped by dataset
    extraction; not runnable as-is.
    """
    self._check()
    for tarinfo in self:
        if verbose:
            print(filemode(tarinfo.mode), end=)
            print("%s/%s" % (tarinfo.uname or tarinfo.uid,
                             tarinfo.gname or tarinfo.gid), end=)
            # Device files show major,minor instead of a size.
            if tarinfo.ischr() or tarinfo.isblk():
                print("%10s" % ("%d,%d" \
                                % (tarinfo.devmajor, tarinfo.devminor)), end=)
            else:
                print("%10d" % tarinfo.size, end=)
            print("%d-%02d-%02d %02d:%02d:%02d" \
                  % time.localtime(tarinfo.mtime)[:6], end=)
        print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=)
        if verbose:
            if tarinfo.issym():
                print("->", tarinfo.linkname, end=)
            if tarinfo.islnk():
                print("link to", tarinfo.linkname, end=)
        print()
|
Print a table of contents to sys.stdout. If `verbose' is False, only
the names of the members are printed. If it is True, an `ls -l'-like
output is produced.
|
372,839
|
def publish(self, subject, payload):
    """Send a PUB command to the server on the specified subject.

    ->> PUB hello 5
    ->> MSG_PAYLOAD: world
    <<- MSG hello 2 5
    """
    if self.is_closed:
        raise ErrConnectionClosed
    if self.is_draining_pubs:
        raise ErrConnectionDraining
    payload_size = len(payload)
    # Refuse payloads larger than the connection's maximum.
    if payload_size > self._max_payload:
        raise ErrMaxPayload
    # Generator-based coroutine (pre-async/await asyncio style).
    yield from self._publish(subject, _EMPTY_, payload, payload_size)
|
Sends a PUB command to the server on the specified subject.
->> PUB hello 5
->> MSG_PAYLOAD: world
<<- MSG hello 2 5
|
372,840
|
def get_beta_list(queue, *args):
    """Build the list of adjusted risk factors (beta) for each candidate.

    Used to normalize risk-factor coefficients.

    :param queue: queue of candidate titles.
    :param args: extra arguments forwarded to ``CDM.get_alpha``.
    :return: list of beta values, one per candidate.
    """
    return [CDM(candidate).get_alpha(*args) for candidate in queue]
|
获取调整后的风险因子列表,用于归一化风险因子系数
Keyword arguments:
queue -- 标题候选队列
*args -- 强化ef,客串如多个list
Return:
beta_list -- 所有候选标题的beta,list类型
|
372,841
|
def do_until(lambda_expr, timeout=WTF_TIMEOUT_MANAGER.NORMAL, sleep=0.5, message=None):
    # NOTE(review): the opening quotes of this docstring were lost in
    # dataset extraction (it begins mid-sentence and ends with a stray
    # quote), so the block is not syntactically valid as-is.  The code
    # after the docstring remnant appears intact: retry the expression
    # until it returns, sleeping between attempts, and raise
    # OperationTimeoutError with the last exception on timeout.
    ll keep performing the action until it succeeds.
    (main differnce between do_until and wait_until is do_until will keep trying
    until a value is returned, while wait until will wait until the function
    evaluates True.)
    Args:
    lambda_expr (lambda) : Expression to evaluate.
    Kwargs:
    timeout (number): Timeout period in seconds.
    sleep (number) : Sleep time to wait between iterations
    message (str) : Provide a message for TimeoutError raised.
    Returns:
    The value of the evaluated lambda expression.
    Usage::
    do_until(lambda: driver.find_element_by_id("save").click(),
    timeout=30,
    sleep=0.5)
    Is equivalent to:
    end_time = datetime.now() + timedelta(seconds=30)
    while datetime.now() < end_time:
    try:
    return driver.find_element_by_id("save").click()
    except:
    pass
    time.sleep(0.5)
    raise OperationTimeoutError()
    '
    __check_condition_parameter_is_function(lambda_expr)
    end_time = datetime.now() + timedelta(seconds=timeout)
    last_exception = None
    while datetime.now() < end_time:
        try:
            return lambda_expr()
        except Exception as e:
            # Remember the failure; retry after sleeping.
            last_exception = e
        time.sleep(sleep)
    if message:
        raise OperationTimeoutError(message, last_exception)
    else:
        raise OperationTimeoutError("Operation timed out.", last_exception)
|
A retry wrapper that'll keep performing the action until it succeeds.
(main difference between do_until and wait_until is do_until will keep trying
until a value is returned, while wait until will wait until the function
evaluates True.)
Args:
lambda_expr (lambda) : Expression to evaluate.
Kwargs:
timeout (number): Timeout period in seconds.
sleep (number) : Sleep time to wait between iterations
message (str) : Provide a message for TimeoutError raised.
Returns:
The value of the evaluated lambda expression.
Usage::
do_until(lambda: driver.find_element_by_id("save").click(),
timeout=30,
sleep=0.5)
Is equivalent to:
end_time = datetime.now() + timedelta(seconds=30)
while datetime.now() < end_time:
try:
return driver.find_element_by_id("save").click()
except:
pass
time.sleep(0.5)
raise OperationTimeoutError()
|
372,842
|
def emit(self, event, *args, **kwargs):
    """Call all callback functions registered with an event.

    Positional and keyword arguments are forwarded to the callbacks.
    Returns the list of callback return values, or a single value when
    the single-callback keyword is popped from ``kwargs``.

    NOTE(review): the ``kwargs.pop`` key string was stripped by dataset
    extraction; not runnable as-is.
    """
    callbacks = self._callbacks.get(event, [])
    single = kwargs.pop(, None)
    if single and callbacks:
        # Only the most recently registered callback is invoked.
        return callbacks[-1](*args, **kwargs)
    res = []
    for callback in callbacks:
        res.append(callback(*args, **kwargs))
    return res
|
Call all callback functions registered with an event.
Any positional and keyword arguments can be passed here, and they will
be forwarded to the callback functions.
Return the list of callback return results.
|
372,843
|
def create_symbol(self, type_, **kwargs):
    """Create and register a documentation symbol of the given type.

    Returns the new symbol, or None when a symbol with the same unique
    name already exists.  Aliases are registered as ProxySymbol entries
    pointing at the new symbol.

    NOTE(review): the kwargs key strings and log format strings were
    stripped by dataset extraction; not runnable as-is.
    """
    unique_name = kwargs.get()
    if not unique_name:
        unique_name = kwargs.get()
        kwargs[] = unique_name
    filename = kwargs.get()
    if filename:
        filename = os.path.abspath(filename)
        kwargs[] = os.path.abspath(filename)
    if unique_name in self.__symbols:
        # Duplicate definition: warn and refuse to overwrite.
        warn(, "%s(unique_name=%s, filename=%s, project=%s)"
             " has already been defined: %s" % (type_.__name__, unique_name, filename,
                                                kwargs.get(),
                                                self.get_symbol(unique_name)))
        return None
    aliases = kwargs.pop(, [])
    for alias in aliases:
        self.create_symbol(ProxySymbol,
                           unique_name=alias,
                           target=unique_name)
    symbol = type_()
    debug( % unique_name,
          )
    # Apply all remaining keyword arguments as symbol attributes.
    for key, value in list(kwargs.items()):
        setattr(symbol, key, value)
    self.__symbols[unique_name] = symbol
    for alias in aliases:
        self.__symbols[alias] = symbol
    self.__aliases[unique_name] = aliases
    return symbol
|
Banana banana
|
372,844
|
def render_app_name(context, app, template="/admin_app_name.html"):
    """Render the application name via an app-specific template, falling
    back to the app's name attribute when the template cannot be rendered.

    NOTE(review): the dict keys on `app[]` were stripped from this dump
    (presumably 'app_label'/'name'); restore them before use.
    """
    try:
        template = app[] + template
        text = render_to_string(template, context)
    except:  # NOTE(review): bare except in original — any render error falls back
        text = app[]
    return text
|
Render the application name using the default template name. If it cannot find a
template matching the given path, fallback to the application name.
|
372,845
|
def AgregarFrigorifico(self, cuit, nro_planta):
    """Add the slaughterhouse (frigorífico) to the settlement request (optional).

    NOTE(review): the dict keys below were stripped from this dump
    (presumably 'cuit' and 'nroPlanta', and the solicitud path keys);
    restore them before use.
    """
    frig = {: cuit, : nro_planta}
    self.solicitud[][] = frig
    return True
|
Agrego el frigorifico a la liquidacíon (opcional).
|
372,846
|
def ply2gii(in_file, metadata, out_file=None):
    """Convert a .ply surface file to GIfTI format.

    Loads the point cloud with pyntcloud, records the surface centroid in the
    GIfTI metadata, and writes pointset + triangle data arrays.

    NOTE(review): several string literals (metadata key names, dtype and
    datatype/intent codes, and the output suffix) were stripped from this
    dump; restore them before use.
    """
    from pathlib import Path
    from numpy import eye
    from nibabel.gifti import (
        GiftiMetaData, GiftiCoordSystem, GiftiImage, GiftiDataArray,
    )
    from pyntcloud import PyntCloud
    in_file = Path(in_file)
    surf = PyntCloud.from_file(str(in_file))
    # Store the centroid coordinates in the metadata (key names stripped).
    metadata.update(
        zip((, , ),
            [ % c for c in surf.centroid])
    )
    da = (
        # Pointset array with identity transform.
        GiftiDataArray(
            data=surf.xyz.astype(),
            datatype=,
            intent=,
            meta=GiftiMetaData.from_dict(metadata),
            coordsys=GiftiCoordSystem(xform=eye(4), xformspace=3)),
        # Triangle (face) array; no coordinate system.
        GiftiDataArray(
            data=surf.mesh.values,
            datatype=,
            intent=,
            coordsys=None))
    surfgii = GiftiImage(darrays=da)
    if out_file is None:
        out_file = fname_presuffix(
            in_file.name, suffix=, use_ext=False, newpath=str(Path.cwd()))
    surfgii.to_filename(str(out_file))
    return out_file
|
Convert from ply to GIfTI
|
372,847
|
def picture(self, row):
    """Build a simplified one-character-per-cell representation of *row*
    suitable for pattern matching with a regex.

    NOTE(review): the `template` string literal and the join separator were
    stripped from this dump; restore them before use.
    """
    template =
    types = (type(None), binary_type, int)

    def guess_type(v):
        # Normalise to text (or bytes) and detect the narrowest value type.
        try:
            v = text_type(v).strip()
        except ValueError:
            v = binary_type(v).strip()
        if not bool(v):
            return type(None)
        for t in (float, int, binary_type, text_type):
            try:
                return type(t(v))
            except:  # NOTE(review): bare except in original — tries next type
                pass

    def p(e):
        # Map one cell to its template character via the type map.
        tm = t = None
        try:
            t = guess_type(e)
            tm = self.type_map.get(t, t)
            return template[types.index(tm)]
        except ValueError as e:
            raise ValueError("Type / not in the types list: {} ({})".format(t, tm, types, e))
    return .join(p(e) for e in row)
|
Create a simplified character representation of the data row, which can be pattern matched
with a regex
|
372,848
|
def last(symbol: str):
    """Display the most recent price — for *symbol* when given, else for
    every known symbol."""
    app = PriceDbApplication()
    if not symbol:
        for price in app.get_latest_prices():
            print(f"{price}")
        return
    sec_symbol = SecuritySymbol("", "")
    sec_symbol.parse(symbol.upper())
    latest = app.get_latest_price(sec_symbol)
    assert isinstance(latest, PriceModel)
    print(f"{latest}")
|
displays last price, for symbol if provided
|
372,849
|
def freeze(sess, output_file_path, output_node_names):
    """Freeze and shrink the TF graph from a live session via a temporary
    checkpoint, writing the result to *output_file_path*.

    NOTE(review): the checkpoint filename literal passed to os.path.join was
    stripped from this dump; restore it before use.
    """
    with TemporaryDirectory() as temp_dir_name:
        checkpoint_path = os.path.join(temp_dir_name, )
        tf.train.Saver().save(sess, checkpoint_path)
        freeze_from_checkpoint(checkpoint_path, output_file_path, output_node_names)
|
Freeze and shrink the graph based on a session and the output node names.
|
372,850
|
def patterson_d(aca, acb, acc, acd):
    """Unbiased estimator for Patterson's D(A, B; C, D) — the normalised
    four-population ("ABBA BABA") admixture test.

    Parameters
    ----------
    aca, acb, acc, acd : array_like, int, shape (n_variants, 2)
        Allele counts for populations A, B, C and D (biallelic).

    Returns
    -------
    num : ndarray, float, shape (n_variants,)
        Numerator (un-normalised f4 estimates).
    den : ndarray, float, shape (n_variants,)
        Denominator. Ignore for un-normalized f4 statistics.

    Fix: the assert messages were stripped in this dump, leaving dangling
    commas (a syntax error); messages have been restored.
    """
    aca = AlleleCountsArray(aca, copy=False)
    assert aca.shape[1] == 2, 'only biallelic variants supported'
    acb = AlleleCountsArray(acb, copy=False)
    assert acb.shape[1] == 2, 'only biallelic variants supported'
    acc = AlleleCountsArray(acc, copy=False)
    assert acc.shape[1] == 2, 'only biallelic variants supported'
    acd = AlleleCountsArray(acd, copy=False)
    assert acd.shape[1] == 2, 'only biallelic variants supported'
    check_dim0_aligned(aca, acb, acc, acd)
    # Alternate-allele frequencies per population.
    a = aca.to_frequencies()[:, 1]
    b = acb.to_frequencies()[:, 1]
    c = acc.to_frequencies()[:, 1]
    d = acd.to_frequencies()[:, 1]
    # See Patterson (2012), Appendix A.
    num = (a - b) * (c - d)
    den = (a + b - (2 * a * b)) * (c + d - (2 * c * d))
    return num, den
|
Unbiased estimator for D(A, B; C, D), the normalised four-population
test for admixture between (A or B) and (C or D), also known as the
"ABBA BABA" test.
Parameters
----------
aca : array_like, int, shape (n_variants, 2),
Allele counts for population A.
acb : array_like, int, shape (n_variants, 2)
Allele counts for population B.
acc : array_like, int, shape (n_variants, 2)
Allele counts for population C.
acd : array_like, int, shape (n_variants, 2)
Allele counts for population D.
Returns
-------
num : ndarray, float, shape (n_variants,)
Numerator (un-normalised f4 estimates).
den : ndarray, float, shape (n_variants,)
Denominator.
Notes
-----
See Patterson (2012), main text and Appendix A.
For un-normalized f4 statistics, ignore the `den` return value.
|
372,851
|
def cons(head_ele, l, **kwargs):
    """Prepend *head_ele* to list *l*.

    mode="new" (default) leaves *l* unchanged and returns a new list;
    mode="original" mutates *l* in place (see the module doctest examples).

    Fix: the stripped 'mode' key literal has been restored; the lookup is
    simplified to kwargs.get with a default.
    """
    mode = kwargs.get('mode', "new")
    return prepend(l, head_ele, mode=mode)
|
from elist.elist import *
ol=[1,2,3,4]
id(ol)
new = cons(5,ol)
new
id(new)
####
ol=[1,2,3,4]
id(ol)
rslt = cons(5,ol,mode="original")
rslt
id(rslt)
|
372,852
|
def at(self, timestamp):
    """Force the creation date of a freshly created Versionable object.

    Must be called before the object is cloned; raises SuspiciousOperation
    otherwise, and ValueError when *timestamp* is not a datetime.datetime.

    :param timestamp: a datetime.datetime instance
    :return: self (for chaining)
    """
    if self.version_birth_date != self.version_start_date:
        raise SuspiciousOperation(
            "Cannot relocate this Versionable instance in time, since it "
            "is a versioned instance")
    if not isinstance(timestamp, datetime.datetime):
        raise ValueError("This is not a datetime.datetime timestamp")
    self.version_birth_date = timestamp
    self.version_start_date = timestamp
    return self
|
Force the create date of an object to be at a certain time; This
method can be invoked only on a freshly created Versionable object.
It must not have been cloned yet. Raises a SuspiciousOperation
exception, otherwise.
:param timestamp: a datetime.datetime instance
|
372,853
|
def get_default_template():
    """Return the default getTemplate request specification.

    :return: dict describing the default template request.
    """
    spec = {
        "format": 1,
        "protocol": 1,
        "environment": Environment.DEV,
        "maxtps": "one",
        "core": "empty",
        "persistence": "one_minute",
        "priority": "default",
        "separation": "time",
        "bcr": TemplateFields.yes,
        "unlimited": TemplateFields.yes,
        "clientiv": TemplateFields.yes,
        "clientdiv": TemplateFields.no,
        "resource": "global",
        "credit": 32677,
    }
    # Key-generation strategies for the three key kinds.
    spec[TemplateFields.generation] = {
        TemplateFields.commkey: Gen.CLIENT,
        TemplateFields.billingkey: Gen.LEGACY_RANDOM,
        TemplateFields.appkey: Gen.LEGACY_RANDOM,
    }
    return spec
|
Returns default getTemplate request specification.
:return:
|
372,854
|
def pillar_dir(self):
    """Return the directory of the pillars (repo cache + branch + root),
    falling back to the repo cache directory when nothing matches.

    NOTE(review): the branch-name comparison literals and the path-segment
    literals inside os.path.join were stripped from this dump; restore them
    before use.
    """
    repo_dir = self.repo_dir
    root = self.root
    branch = self.branch
    if branch == or branch == :
        working_dir = os.path.join(repo_dir, , root)
        if not os.path.isdir(working_dir):
            log.error(, self.repo_location, root)
        else:
            return os.path.normpath(working_dir)
    working_dir = os.path.join(repo_dir, , branch, root)
    if os.path.isdir(working_dir):
        return os.path.normpath(working_dir)
    # Second candidate layout nested one level deeper.
    working_dir = os.path.join(working_dir, , branch, root)
    if os.path.isdir(working_dir):
        return os.path.normpath(working_dir)
    log.error(, self.repo_location, branch, root)
    return repo_dir
|
Returns the directory of the pillars (repo cache + branch + root)
|
372,855
|
def register_callbacks(self, on_create, on_modify, on_delete):
    """Store the callbacks invoked on file creation, modification and deletion."""
    self.on_create, self.on_modify, self.on_delete = (
        on_create, on_modify, on_delete)
|
Register callbacks for file creation, modification, and deletion
|
372,856
|
def changes_in(self, rev_or_range):
    """Return the SCM changes in *rev_or_range* relative to the buildroot.

    Wraps any SCM failure in a WorkspaceError.

    :API: public
    """
    try:
        return self._scm.changes_in(rev_or_range, relative_to=get_buildroot())
    except Scm.ScmException as exc:
        raise self.WorkspaceError(
            "Problem detecting changes in {}.".format(rev_or_range), exc)
|
:API: public
|
372,857
|
def get_edxml_with_aws_urls(self):
    """Return the edXML with local file references rewritten to AWS URLs.

    NOTE(review): the BeautifulSoup parser name, the attrs dict keys/values
    (tag attribute names such as src/href), the object_map key and the regex
    suffix literal were all stripped from this dump; restore them before use.
    """
    edxml = self.get_edxml()
    soup = BeautifulSoup(edxml, )
    attrs = {
        : ,
        : ,
        : ,
        :
    }
    if len(self.my_osid_object.object_map[]) > 0:
        file_map = self.my_osid_object.get_files()
        for file_label, url in file_map.items():
            # Match each file label inside the attribute values.
            local_regex = re.compile(file_label + r)
            for key, attr in attrs.items():
                search = {attr: local_regex}
                tags = soup.find_all(**search)
                for item in tags:
                    item[attr] = url
    return soup.find().prettify()
|
stub
|
372,858
|
def check_auth(user):
    """Check whether *user* should be allowed into the system.

    - staff or superuser: login granted
    - user with a related Person that is not "disabled": login granted
    - otherwise: login denied

    Returns the user when access is granted, otherwise None.

    Fix: the original docstring was truncated by the source dump, leaving
    unterminated quote residue that broke the syntax; it has been restored
    as a proper docstring. The code path is unchanged.
    """
    auth = None
    person = None
    if user:
        if user.is_staff or user.is_superuser:
            auth = user
        else:
            person = getattr(user, "person", None)
            if not person:
                # Fall back to a reverse relation holding exactly one Person.
                person_related = getattr(user, "people", None)
                if person_related and person_related.count() == 1:
                    person = person_related.get()
            # A Person with no disable date, or disabled only in the future,
            # is considered active.
            if person and ((person.disabled is None) or (person.disabled > timezone.now())):
                auth = user
    return auth
|
Check if the user should or shouldn't be inside the system:
- If the user is staff or superuser: LOGIN GRANTED
- If the user has a Person and it is not "disabled": LOGIN GRANTED
- Elsewhere: LOGIN DENIED
|
372,859
|
def detect_stream_mode(stream):
    """Detect whether *stream* yields bytes or str.

    Uses the stream's mode string when available, otherwise probes a
    zero-length read/recv; defaults to bytes.

    NOTE(review): the attribute-name literals passed to hasattr (presumably
    'mode', 'read', 'recv') and the mode-character literals ('b'/'t') were
    stripped from this dump; restore them before use.
    """
    if hasattr(stream, ):
        if in stream.mode:
            return bytes
        elif in stream.mode:
            return str
    if hasattr(stream, ):
        # Zero-length read tells us the native type without consuming data.
        zeroStr = stream.read(0)
        if type(zeroStr) is str:
            return str
        return bytes
    elif hasattr(stream, ):
        zeroStr = stream.recv(0)
        if type(zeroStr) is str:
            return str
        return bytes
    return bytes
|
detect_stream_mode - Detect the mode on a given stream
@param stream <object> - A stream object
If "mode" is present, that will be used.
@return <type> - "Bytes" type or "str" type
|
372,860
|
def __clear_in_buffer(self):
    """Zero out the in-buffer.

    NOTE(review): the bytes literal was stripped from this dump — `b` here
    was presumably a zero-byte literal such as b'\\x00'; restore it.
    :return: None
    """
    self.__in_buffer.value = bytes(b * len(self.__in_buffer))
|
Zeros out the in buffer
:return: None
|
372,861
|
def accuracy(self, test_set, format=None):
    """Compute accuracy on *test_set*.

    :param test_set: list of ``(text, label)`` tuples, or a filename.
    :param format: file format when *test_set* is a filename (e.g. "csv");
        ``None`` attempts auto-detection.
    """
    test_data = (self._read_data(test_set)
                 if isinstance(test_set, basestring) else test_set)
    labeled_features = [(self.extract_features(text), label)
                        for text, label in test_data]
    return nltk.classify.accuracy(self.classifier, labeled_features)
|
Compute the accuracy on a test set.
:param test_set: A list of tuples of the form ``(text, label)``, or a
filename.
:param format: If ``test_set`` is a filename, the file format, e.g.
``"csv"`` or ``"json"``. If ``None``, will attempt to detect the
file format.
|
372,862
|
def process(self):
    """Consume events from the underlying queue forever, in order.

    Each event is pre-processed; handlers run only when pre-processing
    approves it. The queue task is always marked done.
    """
    assert self.queue is not None
    while True:
        evt = self.queue.get()
        if self.pre_process_event(evt):
            self.invoke_handlers(evt)
        self.queue.task_done()
|
Loops over the underlying queue of events and processes them in order.
|
372,863
|
def unionfs(rw=, ro=None, union=):
    """Decorator configuring a unionfs overlay for a project build dir.

    Writes go to *rw*; *ro* provides the read-only base; the unified view is
    mounted at *union* inside the project's builddir and unmounted when the
    wrapped function returns.

    NOTE(review): the default string literals for `rw` and `union` were
    stripped from this dump, leaving an invalid signature; restore them
    before use. Indentation below is a reconstruction — verify against the
    original source.
    """
    from functools import wraps

    def wrap_in_union_fs(func):
        @wraps(func)
        def wrap_in_union_fs_func(project, *args, **kwargs):
            container = project.container
            # No container, or already inside one: run unmodified.
            if container is None or in_container():
                return func(project, *args, **kwargs)
            build_dir = local.path(project.builddir)
            LOG.debug("UnionFS - Project builddir: %s", project.builddir)
            if __unionfs_is_active(root=build_dir):
                LOG.debug(
                    "UnionFS already active in %s, nesting not supported.",
                    build_dir)
                return func(project, *args, **kwargs)
            ro_dir = local.path(container.local)
            rw_dir = build_dir / rw
            un_dir = build_dir / union
            LOG.debug("UnionFS - RW: %s", rw_dir)
            unionfs_cmd = __unionfs_set_up(ro_dir, rw_dir, un_dir)
            project_builddir_bak = project.builddir
            project.builddir = un_dir
            proc = unionfs_cmd.popen()
            # Busy-wait for the mount to come up (or the process to die).
            while (not __unionfs_is_active(root=un_dir)) and \
                    (proc.poll() is None):
                pass
            ret = None
            if proc.poll() is None:
                try:
                    with local.cwd(un_dir):
                        ret = func(project, *args, **kwargs)
                finally:
                    project.builddir = project_builddir_bak
                    from signal import SIGINT
                    is_running = proc.poll() is None
                    # Ask unionfs to shut down; escalate to kill on timeout.
                    while __unionfs_is_active(root=un_dir) and is_running:
                        try:
                            proc.send_signal(SIGINT)
                            proc.wait(timeout=3)
                        except subprocess.TimeoutExpired:
                            proc.kill()
                            is_running = False
                    LOG.debug("Unionfs shut down.")
                    if __unionfs_is_active(root=un_dir):
                        raise UnmountError()
            return ret
        return wrap_in_union_fs_func
    return wrap_in_union_fs
|
Decorator for the UnionFS feature.
This configures a unionfs for projects. The given base_dir and/or image_dir
are layered as follows:
image_dir=RW:base_dir=RO
All writes go to the image_dir, while base_dir delivers the (read-only)
versions of the rest of the filesystem.
The unified version will be provided in the project's builddir. Unmouting
is done as soon as the function completes.
Args:
rw: writeable storage area for the unified fuse filesystem.
ro: read-only storage area for the unified fuse filesystem.
union: mountpoint of the unified fuse filesystem.
|
372,864
|
def delete(self):
    """Delete this stream (and its history) from Device Cloud.

    :raises devicecloud.streams.NoSuchStreamException: stream already deleted
    :raises devicecloud.DeviceCloudHttpException: any other HTTP error
    """
    try:
        self._conn.delete("/ws/DataStream/{}".format(self.get_stream_id()))
    except DeviceCloudHttpException as exc:
        if exc.response.status_code != 404:
            raise exc
        # 404 means the stream was already gone.
        raise NoSuchStreamException()
|
Delete this stream from Device Cloud along with its history
This call will return None on success and raise an exception in the event of an error
performing the deletion.
:raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error
:raises devicecloud.streams.NoSuchStreamException: if this stream has already been deleted
|
372,865
|
def get_language(self):
    """Return the article's meta language (2-letter code) when configured
    to use it and present, otherwise the configured target language."""
    if self.config.use_meta_language and self.article.meta_lang:
        return self.article.meta_lang[:2]
    return self.config.target_language
|
Returns the language specified by the article's metadata or,
failing that, the configured target language.
|
372,866
|
def grade(adjective, suffix=COMPARATIVE):
    """Return the comparative or superlative form of the given (inflected)
    adjective."""
    base = predicative(adjective)
    # After s/ß the superlative suffix drops its leading character.
    if suffix == SUPERLATIVE and base.endswith(("s", u"ß")):
        suffix = suffix[1:]
    # Splice the suffix between the predicative stem and the inflection tail.
    stem_len = len(base)
    return adjective[:stem_len] + suffix + adjective[stem_len:]
|
Returns the comparative or superlative form of the given (inflected) adjective.
|
372,867
|
def _startRecording(self, filename):
    """Start recording the session to *filename* for debug purposes.

    NOTE(review): the option-name literals passed to setOption were stripped
    from this dump; restore them before use.
    """
    self.setOption(, filename)
    self.setOption(, True)
    self.setOption(, True)
|
Start recording the session to a file for debug purposes.
|
372,868
|
def insert_paragraph_before(self, text=None, style=None):
    """Insert and return a new paragraph directly before this one.

    *text*, when given, becomes a single run in the new paragraph;
    *style*, when given, is assigned to it.
    """
    new_par = self._insert_paragraph_before()
    if text:
        new_par.add_run(text)
    if style is not None:
        new_par.style = style
    return new_par
|
Return a newly created paragraph, inserted directly before this
paragraph. If *text* is supplied, the new paragraph contains that
text in a single run. If *style* is provided, that style is assigned
to the new paragraph.
|
372,869
|
def _isLastCodeColumn(self, block, column):
return column >= self._lastColumn(block) or \
self._isComment(block, self._nextNonSpaceColumn(block, column + 1))
|
Return true if the given column is at least equal to the column that
contains the last non-whitespace character at the given line, or if
the rest of the line is a comment.
|
372,870
|
def cdsparse(self, record):
    """Record core-gene names and sequences from a SeqIO *record*.

    A gene is "core" when its count equals the number of samples. Record
    ids unknown to the lookup dictionaries are silently ignored.

    :param record: SeqIO record
    """
    try:
        gene = self.genenames[record.id]
        if self.genes[gene] == len(self.runmetadata.samples):
            seq = str(record.seq)
            # setdefault replaces the original try/KeyError initialisation.
            self.genesequence.setdefault(gene, set()).add(seq)
            self.coresequence.setdefault(seq, set()).add(record.id)
    except KeyError:
        pass
|
Finds core genes, and records gene names and sequences in dictionaries
:param record: SeqIO record
|
372,871
|
def consume_item(rlp, start):
    """Read one item from RLP string *rlp* beginning at position *start*.

    :returns: ``(item, per_item_rlp, end)`` as produced by consume_payload,
        where ``end`` is the position of the first unprocessed byte.
    """
    prefix_end, item_type, length, payload_start = consume_length_prefix(rlp, start)
    return consume_payload(rlp, prefix_end, payload_start, item_type, length)
|
Read an item from an RLP string.
:param rlp: the rlp string to read from
:param start: the position at which to start reading
:returns: a tuple ``(item, per_item_rlp, end)``, where ``item`` is
the read item, per_item_rlp is a list containing the RLP
encoding of each item and ``end`` is the position of the
first unprocessed byte
|
372,872
|
def tomof(self, indent=MOF_INDENT, maxline=MAX_MOF_LINE, line_pos=0):
    """Return a MOF string for this CIM qualifier as a qualifier value,
    folding long values across lines on item/word boundaries.

    NOTE(review): the unicode string literals (separators, parentheses/
    braces, the newline test character and the join separator) were stripped
    from this dump; restore them before use.
    """
    mof = []
    mof.append(self.name)
    mof.append(u)
    # Array values open with a brace, scalars with a parenthesis (stripped).
    if isinstance(self.value, list):
        mof.append(u)
    else:
        mof.append(u)
    line_pos += len(u.join(mof))
    val_str, line_pos = _value_tomof(
        self.value, self.type, indent, maxline, line_pos + 1, 3, True)
    if val_str and val_str[0] != :
        mof.append(u)
    else:
        line_pos -= 1
    mof.append(val_str)
    if isinstance(self.value, list):
        mof.append(u)
    else:
        mof.append(u)
    mof_str = u.join(mof)
    return mof_str
|
Return a MOF string with the specification of this CIM qualifier
as a qualifier value.
The items of array values are tried to keep on the same line. If the
generated line would exceed the maximum MOF line length, the value is
split into multiple lines, on array item boundaries, and/or within long
strings on word boundaries.
If a string value (of a scalar value, or of an array item) is split
into multiple lines, the first line of the value is put onto a line on
its own.
Parameters:
indent (:term:`integer`): For a multi-line result, the number of
spaces to indent each line except the first line (on which the
qualifier name appears). For a single-line result, ignored.
Returns:
:term:`unicode string`: MOF string.
|
372,873
|
def to_mongo(self, disjunction=True):
    """Create a valid MongoDB query expression from the current state.

    NOTE(review): the operator key on `q[]` was stripped from this dump
    (presumably '$or'); restore it before use.

    :return: MongoDB query expression
    :rtype: dict
    """
    q = {}
    clauses = [e.expr for e in self._main]
    if clauses:
        if disjunction:
            if len(clauses) + len(self._where) > 1:
                q[] = clauses
            else:
                # Single clause: inline it rather than wrapping.
                q.update(clauses[0])
        else:
            for c in clauses:
                q.update(c)
    return q
|
Create from current state a valid MongoDB query expression.
:return: MongoDB query expression
:rtype: dict
|
372,874
|
def _get_record(self, record_type):
    """Return (lazily initialising) the record for *record_type*.

    This overrides _get_record in osid.Extensible.

    NOTE(review): the key on `self._my_map[]` was stripped from this dump
    (presumably 'recordTypeIds'); restore it before use.
    """
    if (not self.has_record_type(record_type) and
            record_type.get_identifier() not in self._record_type_data_sets):
        raise errors.Unsupported()
    if str(record_type) not in self._records:
        record_initialized = self._init_record(str(record_type))
        if record_initialized and str(record_type) not in self._my_map[]:
            self._my_map[].append(str(record_type))
    return self._records[str(record_type)]
|
This overrides _get_record in osid.Extensible.
Perhaps we should leverage it somehow?
|
372,875
|
def get_default_datatable_kwargs(self, **kwargs):
    """Build the default kwargs for initializing a Datatable class.

    NOTE(review): the kwargs key was stripped from this dump (presumably
    'view'); restore it before use.
    """
    kwargs[] = self
    return kwargs
|
Builds the default set of kwargs for initializing a Datatable class. Note that by default
the MultipleDatatableMixin does not support any configuration via the view's class
attributes, and instead relies completely on the Datatable class itself to declare its
configuration details.
|
372,876
|
async def flush(self, request: Request, stacks: List[Stack]):
    """Insert a typing stack after each stack, then forward downstream.

    The trailing typing indicator is switched off so the conversation does
    not end on an active "typing" state.
    """
    expanded: List[Stack] = []
    for stack in stacks:
        expanded.extend(self.typify(stack))
    if len(expanded) > 1 and expanded[-1] == Stack([lyr.Typing()]):
        expanded[-1].get_layer(lyr.Typing).active = False
    await self.next(request, expanded)
|
Add a typing stack after each stack.
|
372,877
|
def namedb_select_where_unexpired_names(current_block, only_registered=True):
    """Generate a WHERE-clause fragment (and its args) selecting name
    records joined with namespaces that are not expired at *current_block*.

    NOTE(review): the epoch-feature string arguments and the wrapping format
    template applied when only_registered=True were stripped from this dump;
    restore them before use.
    """
    ns_lifetime_multiplier = get_epoch_namespace_lifetime_multiplier(current_block, )
    ns_grace_period = get_epoch_namespace_lifetime_grace_period(current_block, )
    # Ready namespaces: alive within lifetime*multiplier + grace period;
    # revealed namespaces: alive until the reveal-expiry window closes.
    unexpired_query_fragment = "(" + \
        "(" + \
        "namespaces.op = ? AND " + \
        "(" + \
        "(namespaces.ready_block + ((namespaces.lifetime * {}) + {}) > ?) OR ".format(ns_lifetime_multiplier, ns_grace_period) + \
        "(name_records.last_renewed + ((namespaces.lifetime * {}) + {}) >= ?)".format(ns_lifetime_multiplier, ns_grace_period) + \
        ")" + \
        ") OR " + \
        "(" + \
        "namespaces.op = ? AND namespaces.reveal_block <= ? AND ? < namespaces.reveal_block + ?" + \
        ")" + \
        ")"
    unexpired_query_args = (NAMESPACE_READY,
                            current_block,
                            current_block,
                            NAMESPACE_REVEAL, current_block, current_block, NAMESPACE_REVEAL_EXPIRE)
    if only_registered:
        unexpired_query_fragment = .format(unexpired_query_fragment)
        unexpired_query_args = (current_block,) + unexpired_query_args
    return (unexpired_query_fragment, unexpired_query_args)
|
Generate part of a WHERE clause that selects from name records joined with namespaces
(or projections of them) that are not expired.
Also limit to names that are registered at this block, if only_registered=True.
If only_registered is False, then as long as current_block is before the expire block, then the name will be returned (but the name may not have existed at that block)
|
372,878
|
def R(self, *args, **kwargs):
    """Return the cylindrical radius R(t) of the orbit.

    Evaluates the orbit at the given time(s) and returns its first
    coordinate: a scalar for a 1-D evaluation, a row for a 2-D one.
    """
    coords = self(*args, **kwargs)
    if len(coords.shape) == 1:
        return coords[0]
    return coords[0, :]
|
NAME:
R
PURPOSE:
return cylindrical radius at time t
INPUT:
t - (optional) time at which to get the radius
ro= (Object-wide default) physical scale for distances to use to convert
use_physical= use to override Object-wide default for using a physical scale for output
OUTPUT:
R(t)
HISTORY:
2010-09-21 - Written - Bovy (NYU)
|
372,879
|
def _load_data(self):
    """Populate self.raw_data from self.file_path when needed.

    No-op when raw_data is already set or the format is native Python.
    Raises ArgumentInvalid for a missing path or an unreadable file.
    """
    if self.raw_data is not None or self.data_format is FormatType.PYTHON:
        return
    if self.file_path is None:
        raise ArgumentInvalid()
    if not (os.path.isfile(self.file_path) and os.access(self.file_path, os.R_OK)):
        raise ArgumentInvalid()
    with open(self.file_path) as handle:
        self.raw_data = handle.read()
|
Load data from raw_data or file_path
|
372,880
|
def cur_model(model=None):
    """Get and/or set the current model.

    With *model* (a Model object or its name) set it as current and return
    its interface; with no argument return the current model's interface,
    or None when no model is current.
    """
    if model is not None:
        impl = model._impl if isinstance(model, _Model) else _system.models[model]
        _system.currentmodel = impl
        return _system.currentmodel.interface
    if _system.currentmodel is None:
        return None
    return _system.currentmodel.interface
|
Get and/or set the current model.
If ``model`` is given, set the current model to ``model`` and return it.
``model`` can be the name of a model object, or a model object itself.
If ``model`` is not given, the current model is returned.
|
372,881
|
def clean_all(G, settings):
    """Remove all output files from every target in graph *G*.

    Returns 0 on full success, 1 when at least one removal failed.
    In recon mode only reports what would be removed.

    NOTE(review): some message literals appear truncated in this dump
    (e.g. the errmes template has lost its '{}' placeholder, and the
    per-file verbose messages their filename interpolation); verify
    against the original source.
    """
    quiet = settings["quiet"]
    recon = settings["recon"]
    sprint = settings["sprint"]
    error = settings["error"]
    all_outputs = []
    for node in G.nodes(data=True):
        if "output" in node[1]:
            for item in get_all_outputs(node[1]):
                all_outputs.append(item)
    # The sha store is always cleaned alongside the declared outputs.
    all_outputs.append(".shastore")
    retcode = 0
    for item in sorted(all_outputs):
        if os.path.isfile(item):
            if recon:
                sprint("Would remove file: {}".format(item))
                continue
            sprint("Attempting to remove file ", level="verbose")
            try:
                os.remove(item)
                sprint("Removed file", level="verbose")
            except:  # NOTE(review): bare except in original — any failure counted
                errmes = "Error: file failed to be removed"
                error(errmes.format(item))
                retcode = 1
    if not retcode and not recon:
        sprint("All clean", color=True)
    return retcode
|
Removes all the output files from all targets. Takes
the graph as the only argument
Args:
The networkx graph object
The settings dictionary
Returns:
0 if successful
1 if removing even one file failed
|
372,882
|
def parse_full_atom(data):
    """Split a versioned atom into ``(version, flags, payload)``.

    Raises ValueError when *data* is shorter than the 4-byte header.
    """
    if len(data) < 4:
        raise ValueError("not enough data")
    version = ord(data[0:1])
    # Flags are a 24-bit big-endian integer; pad to 4 bytes for the helper.
    flags = cdata.uint_be(b"\x00" + data[1:4])
    payload = data[4:]
    return version, flags, payload
|
Some atoms are versioned. Split them up in (version, flags, payload).
Can raise ValueError.
|
372,883
|
def clean(self, data):
    """Return a cleaned list of stock closing prices, e.g.
    dict(date=datetime.date(2015, 1, 2), price='23.21').

    NOTE(review): the dict keys indexed on `item[]` and the strptime format
    string were stripped from this dump; restore them before use.
    """
    cleaned_data = list()
    if not isinstance(data, list):
        data = [data]
    for item in data:
        date = datetime.datetime.strptime(item[], ).date()
        cleaned_data.append(dict(price=item[], date=date))
    return cleaned_data
|
Method returns cleaned list of stock closing prices
(i.e. dict(date=datetime.date(2015, 1, 2), price='23.21')).
|
372,884
|
def read_one(self, sequence):
check_not_negative(sequence, "sequence can't be smaller than 0")
return self._encode_invoke(ringbuffer_read_one_codec, sequence=sequence)
|
Reads one item from the Ringbuffer. If the sequence is one beyond the current tail, this call blocks until an
item is added. Currently it isn't possible to control how long this call is going to block.
:param sequence: (long), the sequence of the item to read.
:return: (object), the read item.
|
372,885
|
def update_subscription_user_settings(self, user_settings, subscription_id, user_id):
    """[Preview API] Update the given user's settings for a (shared)
    subscription — typically used to opt in or out.

    NOTE(review): the route-value keys, serializer type names, HTTP method,
    location id and API version literals were stripped from this dump;
    restore them before use.

    :param user_settings: SubscriptionUserSettings body
    :param str subscription_id:
    :param str user_id: ID of the user
    :rtype: SubscriptionUserSettings
    """
    route_values = {}
    if subscription_id is not None:
        route_values[] = self._serialize.url(, subscription_id, )
    if user_id is not None:
        route_values[] = self._serialize.url(, user_id, )
    content = self._serialize.body(user_settings, )
    response = self._send(http_method=,
                          location_id=,
                          version=,
                          route_values=route_values,
                          content=content)
    return self._deserialize(, response)
|
UpdateSubscriptionUserSettings.
[Preview API] Update the specified user's settings for the specified subscription. This API is typically used to opt in or out of a shared subscription. User settings can only be applied to shared subscriptions, like team subscriptions or default subscriptions.
:param :class:`<SubscriptionUserSettings> <azure.devops.v5_0.notification.models.SubscriptionUserSettings>` user_settings:
:param str subscription_id:
:param str user_id: ID of the user
:rtype: :class:`<SubscriptionUserSettings> <azure.devops.v5_0.notification.models.SubscriptionUserSettings>`
|
372,886
|
def _receive(self, root, directory, dirs, files, include, exclude):
    """Process one yield from os.walk: filter files, advance the
    include/exclude FileSetState, and prune subtrees that cannot match.

    Returns ``(matched, include_state, exclude_state)``.
    """
    self._received += 1
    if not self.symlinks:
        # Drop symlinked files when symlink traversal is disabled.
        prefix = root + os.path.sep + directory + os.path.sep
        files = [name for name in files if not os.path.islink(prefix + name)]
    include = FileSetState("Include", directory, include,
                           None if include else self.include)
    exclude = FileSetState("Exclude", directory, exclude,
                           None if exclude else self.exclude)
    if exclude.matches_all_files_all_subdirs():
        # Everything below is excluded — stop walking this subtree.
        del dirs[:]
        matched = set()
    else:
        if include.no_possible_matches_in_subdirs():
            del dirs[:]
        matched = include.match(set(files))
        matched -= exclude.match(matched)
    return matched, include, exclude
|
Internal function processing each yield from os.walk.
|
372,887
|
def run(self, cmd=None):
    """Run the wrapped command via the shell, logging timing, resource
    usage and output-pattern diagnostics. Call at the end of __init__.

    NOTE(review): many string literals (redirection prefixes like '2>',
    file-open modes, join separators, shell '-c' flag, diagnostics keys and
    the pager argv) were stripped from this dump; restore them before use.
    """
    diagnostics.prefix.append(self.name)
    if not cmd:
        cmd = self.cmd
    stderr = os.path.abspath(self.name + )
    self.args.append(+stderr)
    if self.pipe:
        self.args += (, self.pipe, +stderr)
    if self.stdout:
        stdout = os.path.abspath(self.stdout)
        self.args.append(+stdout)
        diagnostics.log(, stdout)
    elif self.stdout_append:
        stdout = os.path.abspath(self.stdout_append)
        self.args.append(+stdout)
        diagnostics.log(, stdout)
    else:
        self.args.append(+stderr)
    open(stderr, ).write("[biolite] timestamp=%s\n" % utils.timestamp())
    diagnostics.log(, stderr)
    cmd = .join(chain(cmd, map(str, self.args)))
    diagnostics.log(, cmd)
    start = time.time()
    save_cwd = os.getcwd()
    try:
        os.chdir(self.cwd)
        # Spawn via the shell so redirections in args take effect.
        spawn_pid = os.spawnle(os.P_NOWAIT, self.shell, self.shell, , cmd, self.env)
        wait_pid, retcode, rusage = os.wait4(spawn_pid, 0)
        if wait_pid != spawn_pid:
            utils.die("could not wait for process %d: got %d" % (spawn_pid, wait_pid))
        os.chdir(save_cwd)
    except OSError as e:
        utils.info(e)
        utils.die("could not run wrapper for command:\n%s" % cmd)
    elapsed = time.time() - start
    retcode = os.WEXITSTATUS(retcode)
    if (self.return_ok is not None) and (self.return_ok != retcode):
        if os.path.isfile(stderr):
            subprocess.call([, , stderr])
        utils.die("non-zero return (%d) from command:\n%s" % (retcode, cmd))
    diagnostics.prefix.append()
    diagnostics.log(, self.name)
    diagnostics.log(, retcode)
    diagnostics.log(, elapsed)
    diagnostics.log(, rusage.ru_utime)
    diagnostics.log(, rusage.ru_stime)
    # ru_maxrss is bytes on macOS, KiB elsewhere — normalise.
    if config.uname == :
        diagnostics.log(, rusage.ru_maxrss / 1024)
    else:
        diagnostics.log(, rusage.ru_maxrss)
    diagnostics.prefix.pop()
    if self.output_patterns:
        self.output_patterns.reverse()
        diagnostics.log_program_output(stderr, self.output_patterns)
    diagnostics.prefix.pop()
|
Call this function at the end of your class's `__init__` function.
|
372,888
|
def debug_log_template(self, record):
    """Return the colourised prefix template used by the log Formatter.

    NOTE(review): the format-placeholder string literals (e.g.
    '%(levelname)s', '%(asctime)s', '%(name)s', padding) were stripped
    from this dump, leaving dangling '+' operators; restore them before use.

    Parameters
    ----------
    record : logging.LogRecord

    Returns
    -------
    str
        Log template.
    """
    reset = Style.RESET_ALL
    levelname = (
        LEVEL_COLORS.get(record.levelname)
        + Style.BRIGHT
        +
        + Style.RESET_ALL
        +
    )
    asctime = (
        + Fore.BLACK
        + Style.DIM
        + Style.BRIGHT
        +
        + Fore.RESET
        + Style.RESET_ALL
        +
    )
    name = (
        + Fore.WHITE
        + Style.DIM
        + Style.BRIGHT
        +
        + Fore.RESET
        + Style.RESET_ALL
        +
    )
    module_funcName = Fore.GREEN + Style.BRIGHT +
    lineno = (
        Fore.BLACK
        + Style.DIM
        + Style.BRIGHT
        +
        + Style.RESET_ALL
        + Fore.CYAN
        +
    )
    tpl = reset + levelname + asctime + name + module_funcName + lineno + reset
    return tpl
|
Return the prefix for the log message. Template for Formatter.
Parameters
----------
record : :py:class:`logging.LogRecord`
This is passed in from inside the :py:meth:`logging.Formatter.format`
record.
Returns
-------
str
Log template.
|
372,889
|
def bridge_list():
    """List all existing real and fake Open vSwitch bridges.

    Returns a list of bridges (possibly empty), or False on failure.

    NOTE(review): this dump stripped the docstring (the stray '*' below is
    residue of the CLI example ``salt '*' openvswitch.bridge_list``), the
    command string, the __salt__ runner key and the result dict keys
    ('retcode'/'stdout'); restore them before use.
    """
    *
    cmd =
    result = __salt__[](cmd)
    retcode = result[]
    stdout = result[]
    return _stdout_list_split(retcode, stdout)
|
Lists all existing real and fake bridges.
Returns:
List of bridges (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_list
|
372,890
|
def profiling_query_formatter(view, context, query_document, name):
    """Format a ProfilingQuery entry for a ProfilingRequest detail field.

    NOTE(review): the HTML fragment literals, the join separator, the URL
    format template and the formatter arguments were stripped from this
    dump; restore them before use.

    Parameters
    ----------
    query_document : model.ProfilingQuery
    """
    return Markup(
        .join(
            [
                ,
                ,
                .format(query_document.get_admin_url(_external=True)),
                mongo_command_name_formatter(
                    view, context, query_document,
                ),
                ,
                ,
                profiling_pure_query_formatter(
                    None, None, query_document, , tag=
                ),
                ,
                ,
                .format(query_document.duration),
                ,
                ,
                ,
            ]
        )
    )
|
Format a ProfilingQuery entry for a ProfilingRequest detail field
Parameters
----------
query_document : model.ProfilingQuery
|
372,891
|
def plot_shapes(df_shapes, shape_i_columns, axis=None, autoxlim=True,
                autoylim=True, **kwargs):
    """Plot polygons from a dataframe where each row is one shape vertex,
    grouping vertices by *shape_i_columns*.

    NOTE(review): the column-name literals ('x'/'y'), the rcParams key,
    the kwargs color key, the property key and the describe row labels
    ('min'/'max') were stripped from this dump; restore them before use.
    Also note `props.next()` is Python-2 iterator syntax.
    """
    if axis is None:
        fig, axis = plt.subplots()
    # Cycle through the default property cycle for per-shape face colors.
    props = itertools.cycle(mpl.rcParams[])
    color = kwargs.pop(, None)
    patches = [Polygon(df_shape_i[[, ]].values, fc=props.next()[]
                       if color is None else color, **kwargs)
               for shape_i, df_shape_i in df_shapes.groupby(shape_i_columns)]
    collection = PatchCollection(patches)
    axis.add_collection(collection)
    xy_stats = df_shapes[[, ]].describe()
    if autoxlim:
        axis.set_xlim(*xy_stats.x.loc[[, ]])
    if autoylim:
        axis.set_ylim(*xy_stats.y.loc[[, ]])
    return axis
|
Plot shapes from table/data-frame where each row corresponds to a vertex of
a shape. Shape vertices are grouped by `shape_i_columns`.
For example, consider the following dataframe:
shape_i vertex_i x y
0 0 0 81.679949 264.69306
1 0 1 81.679949 286.51788
2 0 2 102.87004 286.51788
3 0 3 102.87004 264.69306
4 1 0 103.11417 264.40011
5 1 1 103.11417 242.72177
6 1 2 81.435824 242.72177
7 1 3 81.435824 264.40011
8 2 0 124.84134 264.69306
9 2 1 103.65125 264.69306
10 2 2 103.65125 286.37141
11 2 3 124.84134 286.37141
This dataframe corresponds to three shapes, with (ordered) shape vertices
grouped by `shape_i`. Note that the column `vertex_i` is not required.
|
372,892
|
def find(self, *args, **kwargs):
    """Create a MotorCursor; same parameters as PyMongo's
    Collection.find.

    No server operation is performed here — cursor methods such as
    to_list do the actual work, so no ``await`` is required.
    """
    delegate_cursor = self.delegate.find(*unwrap_args_session(args),
                                         **unwrap_kwargs_session(kwargs))
    cursor_class = create_class_with_framework(
        AgnosticCursor, self._framework, self.__module__)
    return cursor_class(delegate_cursor, self)
|
Create a :class:`MotorCursor`. Same parameters as for
PyMongo's :meth:`~pymongo.collection.Collection.find`.
Note that ``find`` does not require an ``await`` expression, because
``find`` merely creates a
:class:`MotorCursor` without performing any operations on the server.
``MotorCursor`` methods such as :meth:`~MotorCursor.to_list`
perform actual operations.
|
372,893
|
def password(self, password):
    """Hash *password* with the passlib context and store the digest."""
    from boiler.user.util.passlib import passlib_context
    self._password = passlib_context.encrypt(str(password))
|
Encode a string and set as password
|
372,894
|
def log(verbose=False):
    """Print a log test.

    NOTE(review): the message literals passed to the info() calls were
    stripped from this dump; restore them before use.

    :param verbose: show more logs
    """
    terminal.log.config(verbose=verbose)
    terminal.log.info()
    terminal.log.verbose.info()
|
print a log test
:param verbose: show more logs
|
372,895
|
def new(preset, name, silent, update):
    """Create a new project from *preset* (CLI command).

    Picks a random unique project name when none is given, resolves the
    preset locally or from GitHub (optionally as ``user/preset``), pulls
    and copies it into a fresh project directory.

    NOTE(review): this function references ``self`` (isactive, verbose)
    although it is defined as a plain function — presumably a CLI-framework
    binding; verify against the original source.
    """
    if self.isactive():
        lib.echo("Please exit current preset before starting a new")
        sys.exit(lib.USER_ERROR)
    if not name:
        # Generate a random, unused project name (give up after 10 tries).
        count = 0
        name = lib.random_name()
        while name in _extern.projects():
            if count > 10:
                lib.echo("ERROR: Couldn't come up with a unique name :(")
                sys.exit(lib.USER_ERROR)
            name = lib.random_name()
            count += 1
    project_dir = lib.project_dir(_extern.cwd(), name)
    if os.path.exists(project_dir):
        lib.echo("\"%s\" already exists" % name)
        sys.exit(lib.USER_ERROR)
    # "user/preset" → remote preset of another user; bare name → relative.
    username, preset = ([None] + preset.split("/", 1))[-2:]
    presets_dir = _extern.presets_dir()
    preset_dir = os.path.join(presets_dir, preset)
    relative = False if username else True
    try:
        if not update and preset in _extern.local_presets():
            _extern.copy_preset(preset_dir, project_dir)
        else:
            lib.echo("Finding preset for \"%s\".. " % preset, silent)
            time.sleep(1 if silent else 0)
            if relative:
                presets = _extern.github_presets()
                if preset not in presets:
                    sys.stdout.write("\"%s\" not found" % preset)
                    sys.exit(lib.USER_ERROR)
                time.sleep(1 if silent else 0)
                repository = presets[preset]
            else:
                repository = username + "/" + preset
            lib.echo("Pulling %s.. " % repository, silent)
            repository = _extern.fetch_release(repository)
            # Replace any stale local copy before pulling.
            if preset in _extern.local_presets():
                _extern.remove_preset(preset)
            try:
                _extern.pull_preset(repository, preset_dir)
            except IOError as e:
                lib.echo("ERROR: Sorry, something went wrong.\n"
                         "Use be --verbose for more")
                lib.echo(e)
                sys.exit(lib.USER_ERROR)
            try:
                _extern.copy_preset(preset_dir, project_dir)
            finally:
                # Foreign-user presets are not cached locally.
                if not relative:
                    _extern.remove_preset(preset)
    except IOError as exc:
        if self.verbose:
            lib.echo("ERROR: %s" % exc)
        else:
            lib.echo("ERROR: Could not write, do you have permission?")
        sys.exit(lib.PROGRAM_ERROR)
    lib.echo("\"%s\" created" % name, silent)
|
Create new default preset
\b
Usage:
$ be new ad
"blue_unicorn" created
$ be new film --name spiderman
"spiderman" created
|
372,896
|
def multiChoiceParam(parameters, name, type_converter=str):
    """Return the chosen values of a multi-choice parameter.

    The parameter stores the list of all possible values under a
    ``Values`` element and the selected entries as indices into that
    list under a ``Value`` element.

    NOTE(review): the string literals in this block were stripped by the
    extraction; the XPath and the ``Value``/``Values``/``Item`` tag names
    below are a reconstruction -- confirm against the original XML schema.

    :param parameters: the parameters element tree.
    :param name: the name of the parameter.
    :param type_converter: function applied to each chosen value
        (e.g. str, float, int). default = str
    :returns: list of the chosen values, converted by *type_converter*.
    """
    # Locate the named multi-choice parameter anywhere under the tree.
    param = parameters.find(".//MultiChoiceParam[@Name='{name}']".format(name=name))
    value = param.find('Value')    # selected indices
    values = param.find('Values')  # all possible values
    # Each <Item> under <Value> is an index into the <Values> list.
    return [type_converter(values[int(item.text)].text) for item in value.findall('Item')]
|
Get the chosen values of a multi-choice parameter.
:param parameters: the parameters tree.
:param name: the name of the parameter.
:param type_converter: function to convert each chosen value to a different type (e.g. str, float, int). default = 'str'
:returns: list of the chosen values, converted by type_converter
|
372,897
|
def reply_bytes(self, request):
    """Take a `Request` and return an OP_MSG message as bytes.

    The message is: standard 16-byte wire header (messageLength,
    requestID, responseTo, opCode) followed by the OP_MSG flag bits,
    a kind-0 (body) section marker, and the BSON-encoded document.
    """
    flags = struct.pack("<I", self._flags)
    payload_type = struct.pack("<b", 0)  # kind 0: single BSON body
    payload_data = bson.BSON.encode(self.doc)
    # Fixed stripped literal: join with an empty bytes separator.
    data = b"".join([flags, payload_type, payload_data])
    # Arbitrary request id for the reply; the client only checks responseTo.
    reply_id = random.randint(0, 1000000)
    response_to = request.request_id
    # messageLength includes the 16-byte header itself.
    header = struct.pack(
        "<iiii", 16 + len(data), reply_id, response_to, OP_MSG)
    return header + data
|
Take a `Request` and return an OP_MSG message as bytes.
|
372,898
|
def save_data(self, idx):
    """Save the internal data of all sequences with an activated flag.

    Write to file if the corresponding disk flag is activated; store
    in working memory if the corresponding ram flag is activated.

    NOTE(review): the attribute-name format strings in this block were
    stripped by the extraction; the ``_<name>_diskflag`` /
    ``_<name>_ramflag`` / ``_<name>_file`` / ``_<name>_ndim`` /
    ``_<name>_length_<j>`` / ``_<name>_array`` templates and the ``'d'``
    (double) pack format are a reconstruction -- confirm against the
    original source.

    :param idx: time-step index at which to store values in RAM arrays.
    """
    # `self` iterates over the names of its sequences.
    for name in self:
        actual = getattr(self, name)
        diskflag = getattr(self, '_%s_diskflag' % name)
        ramflag = getattr(self, '_%s_ramflag' % name)
        if diskflag:
            file_ = getattr(self, '_%s_file' % name)
            ndim = getattr(self, '_%s_ndim' % name)
            # Total number of scalar values across all dimensions.
            length_tot = 1
            for jdx in range(ndim):
                length = getattr(self, '_%s_length_%d' % (name, jdx))
                length_tot *= length
            if ndim:
                # Multi-dimensional: flatten and pack all doubles at once.
                raw = struct.pack(length_tot * 'd', *actual.flatten())
            else:
                # Scalar: pack a single double.
                raw = struct.pack('d', actual)
            file_.write(raw)
        elif ramflag:
            # Keep the value in working memory at the given time index.
            array = getattr(self, '_%s_array' % name)
            array[idx] = actual
|
Save the internal data of all sequences with an activated flag.
Write to file if the corresponding disk flag is activated; store
in working memory if the corresponding ram flag is activated.
|
372,899
|
def list_deploy_keys(self, auth, username, repo_name):
    """List deploy keys for the specified repo.

    :param auth.Authentication auth: authentication object
    :param str username: username of owner of repository
    :param str repo_name: the name of the repo
    :return: a list of deploy keys for the repo
    :rtype: List[GogsRepo.DeployKey]
    :raises NetworkFailure: if there is an error communicating with the server
    :raises ApiFailure: if the request cannot be serviced
    """
    path = "/repos/{u}/{r}/keys".format(u=username, r=repo_name)
    response = self.get(path, auth=auth)
    keys = []
    for key_json in response.json():
        keys.append(GogsRepo.DeployKey.from_json(key_json))
    return keys
|
List deploy keys for the specified repo.
:param auth.Authentication auth: authentication object
:param str username: username of owner of repository
:param str repo_name: the name of the repo
:return: a list of deploy keys for the repo
:rtype: List[GogsRepo.DeployKey]
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.