Unnamed: 0
int64 0
389k
| code
stringlengths 26
79.6k
| docstring
stringlengths 1
46.9k
|
|---|---|---|
372,200
|
def exists(name, attributes):
# Ensure the given attributes exist on the file/directory *name*.
# NOTE(review): the string literals in this block (dict keys, messages)
# were stripped during extraction, so it is not runnable as-is.  The keys
# are presumably salt-state style ('name', 'result', 'comment',
# 'changes') -- TODO confirm against the original module.
ret = {: name,
: True,
: ,
: {}}
# If the target path does not exist, flag failure and return early.
if not os.path.exists(name):
ret[] = False
ret[] = "File or directory doesnxattr.listxattr.readchangesxattr.writechangescommentAll values existed correctly.'
return ret
|
Make sure the given attributes exist on the file/directory
name
The path to the file/directory
attributes
The attributes that should exist on the file/directory, this is accepted as
an array, with key and value split with an equals sign, if you want to specify
a hex value then add 0x to the beginning of the value.
|
372,201
|
def view_isometric(self):
    """Reset the camera to a default isometric view showing all the
    actors in the scene."""
    default_position = self.get_default_cam_pos()
    self.camera_position = default_position
    self.camera_set = False
    return self.reset_camera()
|
Resets the camera to a default isometric view showing all the
actors in the scene.
|
372,202
|
def get_files_to_remove(self):
    """Return orphaned media files to be removed, grouped by resource type.

    Any file whose path starts with one of the exclude paths is ignored;
    everything uploaded but not in the needed set counts as orphaned.
    """
    orphans = {}
    needed = self.get_needful_files()
    for resource_type, uploaded in self.get_uploaded_resources():
        # Exclude paths are re-fetched per resource type, as before.
        excluded_prefixes = self.get_exclude_paths()
        kept = {path for path in uploaded
                if not path.startswith(excluded_prefixes)}
        orphans[resource_type] = kept - needed
    return orphans
|
Returns orphaned media files to be removed grouped by resource type.
All files which paths start with any of exclude paths are ignored.
|
372,203
|
def parse_slab_stats(slab_stats):
# Convert output from memcached's `stats slabs` into a Python dict.
# NOTE(review): string literals were stripped during extraction (empty
# subscripts/comparisons below).  Per the accompanying doctest the outer
# key is presumably 'slabs', the terminator line is 'END', and data lines
# look like 'STAT <slab>:<key> <value>' -- TODO confirm.
stats_dict = {: defaultdict(lambda: {})}
for line in slab_stats.splitlines():
# Stop at the terminator line.
if line == :
break
cmd, key, value = line.split()
# Skip anything that is not a STAT line.
if cmd != :
continue
# Keys without a colon are global stats (e.g. active_slabs).
if ":" not in key:
stats_dict[key] = int(value)
continue
# Keys like '1:chunk_size' are per-slab stats.
slab, key = key.split()
stats_dict[][int(slab)][key] = int(value)
return stats_dict
|
Convert output from memcached's `stats slabs` into a Python dict.
Newlines are returned by memcached along with carriage returns
(i.e. '\r\n').
>>> parse_slab_stats(
"STAT 1:chunk_size 96\r\nSTAT 1:chunks_per_page 10922\r\nSTAT "
"active_slabs 1\r\nSTAT total_malloced 1048512\r\nEND\r\n")
{
'slabs': {
1: {
'chunk_size': 96,
'chunks_per_page': 10922,
# ...
},
},
'active_slabs': 1,
'total_malloced': 1048512,
}
|
372,204
|
def stack_frames(
        sig,
        sampling_frequency,
        frame_length=0.020,
        frame_stride=0.020,
        filter=lambda x: np.ones((x,)),
        zero_padding=True):
    """Frame a signal into overlapping frames.

    Args:
        sig (array): The audio signal to frame, of size (N,).
        sampling_frequency (int): The sampling frequency of the signal.
        frame_length (float): The length of each frame in seconds.
        frame_stride (float): The stride between frames in seconds.
        filter (callable): Given the frame length in samples, returns the
            time-domain window applied to each frame.  Defaults to a
            rectangular (all-ones) window, i.e. no change.
        zero_padding (bool): If True, zero-pad the signal so the last
            partial frame is generated; otherwise drop trailing samples
            that do not fill a whole frame.

    Returns:
        array: Frames of size (number_of_frames x frame_sample_length).
    """
    # Fixed defects: removed a leftover debug print of the frame geometry
    # and corrected the "dimention" typo in the assertion message.
    assert sig.ndim == 1, (
        "Signal dimension should be of the format of (N,) but it is %s instead"
        % str(sig.shape))
    length_signal = sig.shape[0]
    # Convert the frame geometry from seconds to samples.
    frame_sample_length = int(np.round(sampling_frequency * frame_length))
    frame_stride = float(np.round(sampling_frequency * frame_stride))
    if zero_padding:
        numframes = int(math.ceil(
            (length_signal - frame_sample_length) / frame_stride))
        len_sig = int(numframes * frame_stride + frame_sample_length)
        additive_zeros = np.zeros((len_sig - length_signal,))
        signal = np.concatenate((sig, additive_zeros))
    else:
        numframes = int(math.floor(
            (length_signal - frame_sample_length) / frame_stride))
        len_sig = int((numframes - 1) * frame_stride + frame_sample_length)
        signal = sig[0:len_sig]
    # Index matrix: row i holds the sample indices belonging to frame i.
    indices = (
        np.tile(np.arange(0, frame_sample_length), (numframes, 1))
        + np.tile(np.arange(0, numframes * frame_stride, frame_stride),
                  (frame_sample_length, 1)).T)
    indices = np.array(indices, dtype=np.int32)
    frames = signal[indices]
    # Apply the time-domain window to every frame.
    window = np.tile(filter(frame_sample_length), (numframes, 1))
    return frames * window
|
Frame a signal into overlapping frames.
Args:
sig (array): The audio signal to frame of size (N,).
sampling_frequency (int): The sampling frequency of the signal.
frame_length (float): The length of the frame in second.
frame_stride (float): The stride between frames.
filter (array): The time-domain filter for applying to each frame.
By default it is one so nothing will be changed.
zero_padding (bool): If the samples is not a multiple of
frame_length(number of frames sample), zero padding will
be done for generating last frame.
Returns:
array: Stacked_frames-Array of frames of size (number_of_frames x frame_len).
|
372,205
|
def from_args(cls: Type[ConfigT], args: Namespace) -> ConfigT:
    """Build and return a new :class:`IMAPConfig` using command-line
    arguments.

    Args:
        args: The arguments parsed from the command-line.
    """
    extra_kwargs = cls.parse_args(args)
    return cls(
        args,
        host=args.host,
        port=args.port,
        debug=args.debug,
        reject_insecure_auth=not args.insecure_login,
        cert_file=args.cert,
        key_file=args.key,
        **extra_kwargs)
|
Build and return a new :class:`IMAPConfig` using command-line
arguments.
Args:
args: The arguments parsed from the command-line.
|
372,206
|
def main():
# Entry point: validate the JSON tag arguments, then staticfy the HTML
# file and write the result out.
# NOTE(review): string literals (the JSON fallback defaults and the error
# message) were stripped during extraction, so this block is incomplete.
args = parse_cmd_arguments()
html_file = args.file
try:
json.loads(args.add_tags or )
json.loads(args.exc_tags or )
except ValueError:
# Invalid JSON for the tag arguments: report usage and exit non-zero.
print( +
.format({"img": "data-url"}\) + )
sys.exit(1)
staticfied = staticfy(html_file, args=args).encode()
file_ops(staticfied, args=args)
|
Main method.
|
372,207
|
def _ldtpize_accessible(self, acc):
    """Get LDTP format accessible name.

    @param acc: Accessible handle
    @type acc: object
    @return: abbreviated object type and the stripped object name
    @rtype: tuple
    """
    actual_role = self._get_role(acc)
    label = self._get_title(acc)
    # Window titles only lose spaces/newlines; other widgets are also
    # stripped of colons, dots and underscores.
    if re.match("AXWindow", actual_role, re.M | re.U | re.L):
        pattern = r"( |\n)"
    else:
        pattern = r"( |:|\.|_|\n)"
    if label:
        label = re.sub(pattern, u"", label)
    role = abbreviated_roles.get(actual_role, "ukn")
    if role == "ukn" and self._ldtp_debug:
        # Surface unknown roles while debugging so the table can grow.
        print(actual_role, acc)
    return role, label
|
Get LDTP format accessible name
@param acc: Accessible handle
@type acc: object
@return: object type, stripped object name (associated / direct),
associated label
@rtype: tuple
|
372,208
|
def numBlast_sort(blast, numHits, evalueT, bitT):
# Parse tabular BLAST (b6) output, filter hits by e-value and bit-score
# thresholds, and yield the best numHits rows per query.
# NOTE(review): the twelve column-name literals and several other string
# literals were stripped during extraction; this block is not runnable.
header = [, , , , , ,
, , , , , ]
yield header
hmm = {h:[] for h in header}
for line in blast:
# Skip comment lines (presumably those starting with '#').
if line.startswith():
continue
line = line.strip().split()
# In b6 format columns 10 and 11 are the e-value and bit score.
line[10], line[11] = float(line[10]), float(line[11])
evalue, bit = line[10], line[11]
# A threshold of False disables the corresponding filter.
if evalueT is not False and evalue > evalueT:
continue
if bitT is not False and bit < bitT:
continue
for i, h in zip(line, header):
hmm[h].append(i)
hmm = pd.DataFrame(hmm)
# Group per query, sort (presumably by bit score, descending -- TODO
# confirm the lost column literals) and keep the top numHits rows.
for query, df in hmm.groupby(by = []):
df = df.sort_values(by = [], ascending = False)
for hit in df[header].values[0:numHits]:
yield hit
|
parse b6 output with sorting
|
372,209
|
def gaps(args):
# %prog gaps agpfile
# Print the distribution of gap sizes; --merge merges adjacent gaps (used
# by tidy()).
# NOTE(review): this block was truncated by extraction -- the loop that
# builds `alines`, `b` and `sizes`, and the `priorities` constant, are
# missing, so several names below are undefined.
from jcvi.graphics.histogram import loghistogram
p = OptionParser(gaps.__doc__)
p.add_option("--merge", dest="merge", default=False, action="store_true",
help="Merge adjacent gaps (to conform to AGP specification)")
p.add_option("--header", default=False, action="store_true",
help="Produce an AGP header [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
merge = opts.merge
agpfile, = args
if merge:
merged_agpfile = agpfile.replace(".agp", ".merged.agp")
fw = open(merged_agpfile, "w")
agp = AGP(agpfile)
sizes = []
data = []
# Pick the highest-priority gap type present among the merged lines.
gtypes = [x.gap_type for x in alines]
for gtype in priorities:
if gtype in gtypes:
b.gap_type = gtype
break
# Prefer "no" linkage when any merged gap lacks linkage evidence.
linkages = [x.linkage for x in alines]
for linkage in ("no", "yes"):
if linkage in linkages:
b.linkage = linkage
break
alines = [b]
data.extend(alines)
loghistogram(sizes)
if opts.header:
AGP.print_header(fw, organism="Medicago truncatula",
taxid=3880, source="J. Craig Venter Institute")
if merge:
# Renumber part numbers per object before writing merged AGP lines.
for ob, bb in groupby(data, lambda x: x.object):
for i, b in enumerate(bb):
b.part_number = i + 1
print(b, file=fw)
return merged_agpfile
|
%prog gaps agpfile
Print out the distribution of gapsizes. Option --merge allows merging of
adjacent gaps which is used by tidy().
|
372,210
|
def generate_doxygen_xml(app):
# Run the doxygen make commands if we're on the ReadTheDocs server.
# NOTE(review): string literals were stripped during extraction -- the
# env var is presumably 'READTHEDOCS' compared against 'True'.
read_the_docs_build = os.environ.get(, None) ==
if read_the_docs_build:
run_doxygen()
sys.stderr.write()
run_build_lib()
# Log the build directory contents for debugging, then load the lib.
sys.stderr.write( % str(os.listdir()))
rabit._loadlib()
|
Run the doxygen make commands if we're on the ReadTheDocs server
|
372,211
|
def _parse_modes(mode_string, unary_modes=""):
    """Parse an IRC-style mode string and return a list of triples.

    Each triple is ``[sign, mode_char, arg_or_None]``.  An empty list is
    returned when the string is empty or does not start with a sign.

    :param mode_string: e.g. ``"+abc foo"``
    :param unary_modes: mode characters that each consume one argument
    :return: list of ``[sign, char, arg]`` triples
    """
    # NOTE(review): the two string literals below were lost in extraction
    # and have been reconstructed from this function's own doctests
    # (empty string -> [], no sign -> [], space-separated args).
    if not mode_string or not mode_string[0] in "+-":
        return []
    modes = []
    parts = mode_string.split(" ")
    mode_part, args = parts[0], parts[1:]
    for ch in mode_part:
        if ch in "+-":
            # A sign applies to every following mode char until the next sign.
            sign = ch
            continue
        # Unary modes consume the next argument when one remains; unused
        # trailing arguments are silently discarded.
        arg = args.pop(0) if ch in unary_modes and args else None
        modes.append([sign, ch, arg])
    return modes
|
Parse the mode_string and return a list of triples.
If no string is supplied return an empty list.
>>> _parse_modes('')
[]
If no sign is supplied, return an empty list.
>>> _parse_modes('ab')
[]
Discard unused args.
>>> _parse_modes('+a foo bar baz')
[['+', 'a', None]]
Return none for unary args when not provided
>>> _parse_modes('+abc foo', unary_modes='abc')
[['+', 'a', 'foo'], ['+', 'b', None], ['+', 'c', None]]
This function never throws an error:
>>> import random
>>> def random_text(min_len = 3, max_len = 80):
... len = random.randint(min_len, max_len)
... chars_to_choose = [chr(x) for x in range(0,1024)]
... chars = (random.choice(chars_to_choose) for x in range(len))
... return ''.join(chars)
>>> def random_texts(min_len = 3, max_len = 80):
... while True:
... yield random_text(min_len, max_len)
>>> import itertools
>>> texts = itertools.islice(random_texts(), 1000)
>>> set(type(_parse_modes(text)) for text in texts) == {list}
True
|
372,212
|
def newAddress(self, currency=, label=):
# Send a request for a new cryptocurrency deposit address with an
# optional label; return the raw response.
# NOTE(review): string literals (request path, param keys, defaults)
# were stripped during extraction, so this block is incomplete.
request = + currency +
url = self.base_url + request
params = {
: request,
: self.get_nonce()
}
# Only attach the label parameter when one was supplied.
if label != :
params[] = label
# prepare() presumably signs the payload into auth headers -- TODO confirm.
return requests.post(url, headers=self.prepare(params))
|
Send a request for a new cryptocurrency deposit address
with an optional label. Return the response.
Arguments:
currency -- a Gemini supported cryptocurrency (btc, eth)
label -- optional label for the deposit address
|
372,213
|
def stop(self, signal=None):
    """Stop the heroku local subprocess and all of its children.

    Falls back to ``self.int_signal`` when no signal is given.
    """
    sig = signal or self.int_signal
    self.out.log("Cleaning up local Heroku process...")
    if self._process is None:
        self.out.log("No local Heroku process was running.")
        return
    try:
        # Signal the whole process group so child processes die too.
        os.killpg(os.getpgid(self._process.pid), sig)
    except OSError:
        self.out.log("Local Heroku was already terminated.")
        self.out.log(traceback.format_exc())
    else:
        self.out.log("Local Heroku process terminated.")
    finally:
        self._process = None
|
Stop the heroku local subprocess and all of its children.
|
372,214
|
def get_submission(submission_uuid, read_replica=False):
    """Retrieve a single submission by uuid, preferring the cache.

    Args:
        submission_uuid (str): Identifier for the submission.

    Kwargs:
        read_replica (bool): If true, attempt to use the read replica
            database; fall back to the default database otherwise.

    Raises:
        SubmissionNotFoundError: the submission does not exist.
        SubmissionRequestError: the identifier is not a string or UUID.
        SubmissionInternalError: any other unexpected failure.
    """
    # Normalize the identifier: UUIDs are serialized, anything else rejected.
    if not isinstance(submission_uuid, six.string_types):
        if not isinstance(submission_uuid, UUID):
            raise SubmissionRequestError(
                msg="submission_uuid ({!r}) must be serializable".format(submission_uuid)
            )
        submission_uuid = six.text_type(submission_uuid)
    cache_key = Submission.get_cache_key(submission_uuid)
    # Cache errors are non-fatal: treat them as a miss.
    try:
        cached = cache.get(cache_key)
    except Exception:
        logger.exception("Error occurred while retrieving submission from the cache")
        cached = None
    if cached:
        logger.info("Get submission {} (cached)".format(submission_uuid))
        return cached
    try:
        model = _get_submission_model(submission_uuid, read_replica)
        data = SubmissionSerializer(model).data
        cache.set(cache_key, data)
    except Submission.DoesNotExist:
        logger.error("Submission {} not found.".format(submission_uuid))
        raise SubmissionNotFoundError(
            u"No submission matching uuid {}".format(submission_uuid)
        )
    except Exception as exc:
        err_msg = "Could not get submission due to error: {}".format(exc)
        logger.exception(err_msg)
        raise SubmissionInternalError(err_msg)
    logger.info("Get submission {}".format(submission_uuid))
    return data
|
Retrieves a single submission by uuid.
Args:
submission_uuid (str): Identifier for the submission.
Kwargs:
read_replica (bool): If true, attempt to use the read replica database.
If no read replica is available, use the default database.
Raises:
SubmissionNotFoundError: Raised if the submission does not exist.
SubmissionRequestError: Raised if the search parameter is not a string.
SubmissionInternalError: Raised for unknown errors.
Examples:
>>> get_submission("20b78e0f32df805d21064fc912f40e9ae5ab260d")
{
'student_item': 2,
'attempt_number': 1,
'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
'answer': u'The answer is 42.'
}
|
372,215
|
def _disc_kn(clearness_index, airmass, max_airmass=12):
    """Calculate Kn for `disc`.

    Parameters
    ----------
    clearness_index : numeric
    airmass : numeric
    max_airmass : float
        airmass > max_airmass is set to max_airmass before being used
        in calculating Kn.

    Returns
    -------
    Kn : numeric
    am : numeric
        airmass used in the calculation of Kn. am <= max_airmass.
    """
    kt = clearness_index
    # Clip airmass to the model's valid range.
    am = np.minimum(airmass, max_airmass)
    kt2 = kt * kt
    kt3 = kt2 * kt
    # Piecewise cubic coefficients of the DISC model, split at kt = 0.6.
    low_kt = (kt <= 0.6)
    a = np.where(low_kt,
                 0.512 - 1.56*kt + 2.286*kt2 - 2.222*kt3,
                 -5.743 + 21.77*kt - 27.49*kt2 + 11.56*kt3)
    b = np.where(low_kt,
                 0.37 + 0.962*kt,
                 41.4 - 118.5*kt + 66.05*kt2 + 31.9*kt3)
    c = np.where(low_kt,
                 -0.28 + 0.932*kt - 2.048*kt2,
                 -47.01 + 184.2*kt - 222.0*kt2 + 73.81*kt3)
    delta_kn = a + b * np.exp(c * am)
    # Quartic polynomial in airmass for the clear-sky term.
    knc = 0.866 - 0.122*am + 0.0121*am**2 - 0.000653*am**3 + 1.4e-05*am**4
    return knc - delta_kn, am
|
Calculate Kn for `disc`
Parameters
----------
clearness_index : numeric
airmass : numeric
max_airmass : float
airmass > max_airmass is set to max_airmass before being used
in calculating Kn.
Returns
-------
Kn : numeric
am : numeric
airmass used in the calculation of Kn. am <= max_airmass.
|
372,216
|
def get_hash(key: str) -> int:
    """Gets a hash of the provided key.

    Parameters
    ----------
    key :
        A string used to create a seed for the random number generator.

    Returns
    -------
    int
        SHA-1 digest of *key*, reduced modulo 4294967295 (2**32 - 1).
    """
    digest = hashlib.sha1(key.encode()).hexdigest()
    return int(digest, 16) % 4294967295
|
Gets a hash of the provided key.
Parameters
----------
key :
A string used to create a seed for the random number generator.
Returns
-------
int
A hash of the provided key.
|
372,217
|
def GetErrorText(
self,
Error,
Language = 0):
# Return the text description of a TPCANStatus error code via the PCAN
# DLL.  Result is a 2-tuple: (TPCANStatus result, error text bytes).
# Language is a 'Primary language ID' (0 = Neutral).
try:
# The DLL writes the translated message into this fixed-size buffer.
mybuffer = create_string_buffer(256)
res = self.__m_dllBasic.CAN_GetErrorText(Error,Language,byref(mybuffer))
return TPCANStatus(res),mybuffer.value
except:
# Log and re-raise so callers still see the original exception.
logger.error("Exception on PCANBasic.GetErrorText")
raise
|
Gets the text description for a PCAN error code
Remarks:
The current languages available for translation are:
Neutral (0x00), German (0x07), English (0x09), Spanish (0x0A),
Italian (0x10) and French (0x0C)
The return value of this method is a 2-tuple, where
the first value is the result (TPCANStatus) of the method and
the second one, the error text
Parameters:
Error : A TPCANStatus error code
Language : Indicates a 'Primary language ID' (Default is Neutral(0))
Returns:
A tuple with 2 values
|
372,218
|
def read_elements(fd, endian, mtps, is_name=False):
# Read elements from the file; when a list of allowed matrix data types
# (mtps) is given, verify the element's type against it.
# NOTE(review): several string literals (etypes keys, the error message,
# the null separator) were stripped during extraction.
mtpn, num_bytes, data = read_element_tag(fd, endian)
if mtps and mtpn not in [etypes[mtp][] for mtp in mtps]:
raise ParseError(.format(
mtpn, .join(.format(
etypes[mtp][], mtp) for mtp in mtps)))
if not data:
data = fd.read(num_bytes)
# Element payloads are padded to 8-byte boundaries; skip the padding.
mod8 = num_bytes % 8
if mod8:
fd.seek(8 - mod8, 1)
if is_name:
# Names are separated strings: decode zero, one, or many of them.
fmt =
val = [unpack(endian, fmt, s)
for s in data.split(b) if s]
if len(val) == 0:
val =
elif len(val) == 1:
val = asstr(val[0])
else:
val = [asstr(s) for s in val]
else:
# Numeric data: look up the struct format for this type code.
fmt = etypes[inv_etypes[mtpn]][]
val = unpack(endian, fmt, data)
return val
|
Read elements from the file.
If list of possible matrix data types mtps is provided, the data type
of the elements are verified.
|
372,219
|
def delete_validating_webhook_configuration(self, name, **kwargs):
# delete a ValidatingWebhookConfiguration.  Synchronous by default; pass
# async_req=True for an asynchronous request that returns a thread.
# NOTE(review): the kwargs keys below were stripped during extraction --
# presumably '_return_http_data_only' and 'async_req' per the generated
# Kubernetes client convention.
kwargs[] = True
if kwargs.get():
return self.delete_validating_webhook_configuration_with_http_info(name, **kwargs)
else:
(data) = self.delete_validating_webhook_configuration_with_http_info(name, **kwargs)
return data
|
delete a ValidatingWebhookConfiguration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_validating_webhook_configuration(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ValidatingWebhookConfiguration (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
|
372,220
|
def enrich_rnas_with_genes(graph):
    """Add the corresponding gene node for each RNA/miRNA node and connect
    them with a transcription edge.

    :param pybel.BELGraph graph: A BEL graph
    """
    # Snapshot the node list since edges/nodes are added while iterating.
    for node in list(graph):
        is_plain_rna = (node[FUNCTION] in {MIRNA, RNA}
                        and FUSION not in node
                        and VARIANTS not in node)
        if not is_plain_rna:
            continue
        graph.add_transcription(node.get_gene(), node)
|
Add the corresponding gene node for each RNA/miRNA node and connect them with a transcription edge.
:param pybel.BELGraph graph: A BEL graph
|
372,221
|
def calc_requiredremotesupply_v1(self):
    """Calculate the required maximum supply from another location that
    can be discharged into the dam.

    Basic equation:
        RequiredRemoteSupply = HighestRemoteSupply *
        smooth_logistic1(WaterLevelSupplyThreshold - WaterLevel,
                         WaterLevelSupplySmoothPar)
    """
    control = self.parameters.control.fastaccess
    derived = self.parameters.derived.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    aides = self.sequences.aides.fastaccess
    # Time-of-year index selects the seasonally varying parameters.
    idx = derived.toy[self.idx_sim]
    gap = control.waterlevelsupplythreshold[idx] - aides.waterlevel
    fluxes.requiredremotesupply = (
        control.highestremotesupply[idx] *
        smoothutils.smooth_logistic1(
            gap, derived.waterlevelsupplysmoothpar[idx]))
|
Calculate the required maximum supply from another location
that can be discharged into the dam.
Required control parameters:
|HighestRemoteSupply|
|WaterLevelSupplyThreshold|
Required derived parameter:
|WaterLevelSupplySmoothPar|
Required aide sequence:
|WaterLevel|
Calculated flux sequence:
|RequiredRemoteSupply|
Basic equation:
:math:`RequiredRemoteSupply = HighestRemoteSupply \\cdot
smooth_{logistic1}(WaterLevelSupplyThreshold-WaterLevel,
WaterLevelSupplySmoothPar)`
Used auxiliary method:
|smooth_logistic1|
Examples:
Method |calc_requiredremotesupply_v1| is functionally identical
with method |calc_allowedremoterelieve_v2|. Hence the following
examples serve for testing purposes only (see the documentation
on function |calc_allowedremoterelieve_v2| for more detailed
information):
>>> from hydpy import pub
>>> pub.timegrids = '2001.03.30', '2001.04.03', '1d'
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> highestremotesupply(_11_1_12=1.0, _03_31_12=1.0,
... _04_1_12=2.0, _10_31_12=2.0)
>>> waterlevelsupplythreshold(_11_1_12=3.0, _03_31_12=2.0,
... _04_1_12=4.0, _10_31_12=4.0)
>>> waterlevelsupplytolerance(_11_1_12=0.0, _03_31_12=0.0,
... _04_1_12=1.0, _10_31_12=1.0)
>>> derived.waterlevelsupplysmoothpar.update()
>>> derived.toy.update()
>>> from hydpy import UnitTest
>>> test = UnitTest(model,
... model.calc_requiredremotesupply_v1,
... last_example=9,
... parseqs=(aides.waterlevel,
... fluxes.requiredremotesupply))
>>> test.nexts.waterlevel = range(9)
>>> model.idx_sim = pub.timegrids.init['2001.03.30']
>>> test(first_example=2, last_example=6)
| ex. | waterlevel | requiredremotesupply |
-------------------------------------------
| 3 | 1.0 | 1.0 |
| 4 | 2.0 | 1.0 |
| 5 | 3.0 | 0.0 |
| 6 | 4.0 | 0.0 |
>>> model.idx_sim = pub.timegrids.init['2001.04.01']
>>> test()
| ex. | waterlevel | requiredremotesupply |
-------------------------------------------
| 1 | 0.0 | 2.0 |
| 2 | 1.0 | 1.999998 |
| 3 | 2.0 | 1.999796 |
| 4 | 3.0 | 1.98 |
| 5 | 4.0 | 1.0 |
| 6 | 5.0 | 0.02 |
| 7 | 6.0 | 0.000204 |
| 8 | 7.0 | 0.000002 |
| 9 | 8.0 | 0.0 |
|
372,222
|
def image_question_encoder(encoder_inputs,
encoder_self_attention_bias,
hparams,
query=None,
name="image_question_encoder",
save_weights_to=None,
make_image_summary=True):
# A stack of self-attention layers for VQA: each layer applies
# self-attention, optional encoder-decoder attention against *query*,
# and a feed-forward block, each wrapped in pre/post-processing.
# Norms of intermediate activations are collected for monitoring.
x = encoder_inputs
with tf.variable_scope(name):
for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers):
with tf.variable_scope("layer_%d" % layer):
# Self-attention over the (image+question) encoder inputs.
with tf.variable_scope("self_attention"):
y = vqa_layers.multihead_attention(
common_layers.layer_preprocess(x, hparams),
None,
encoder_self_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=hparams.self_attention_type,
block_length=hparams.block_length,
save_weights_to=save_weights_to,
make_image_summary=make_image_summary,
scale_dotproduct=hparams.scale_dotproduct,
)
utils.collect_named_outputs(
"norms", "encoder_self_attention_%d"%(layer),
tf.norm(y, axis=-1))
x = common_layers.layer_postprocess(x, y, hparams)
utils.collect_named_outputs(
"norms", "encoder_self_attention_postprocess_%d"%(layer),
tf.norm(x, axis=-1))
# Optional cross-attention with the query memory.
if query is not None:
with tf.variable_scope("encdec_attention"):
y = common_attention.multihead_attention(
common_layers.layer_preprocess(x, hparams),
query,
None,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=hparams.self_attention_type,
block_length=hparams.block_length,
save_weights_to=save_weights_to,
make_image_summary=make_image_summary,
scale_dotproduct=hparams.scale_dotproduct,
)
utils.collect_named_outputs(
"norms",
"encoder_decoder_attention_%d"%(layer),
tf.norm(y, axis=-1))
x = common_layers.layer_postprocess(x, y, hparams)
utils.collect_named_outputs(
"norms",
"encoder_decoder_attention_post_%d"%(layer),
tf.norm(x, axis=-1))
# Position-wise feed-forward block.
with tf.variable_scope("ffn"):
y = common_layers.dense_relu_dense(
common_layers.layer_preprocess(x, hparams),
hparams.filter_size,
hparams.hidden_size,
dropout=hparams.relu_dropout,
)
utils.collect_named_outputs(
"norms", "encoder_ffn_%d"%(layer), tf.norm(y, axis=-1))
x = common_layers.layer_postprocess(x, y, hparams)
utils.collect_named_outputs(
"norms", "encoder_ffn_postprocess_%d"%(layer),
tf.norm(x, axis=-1))
# Final pre-process (normalization) of the stack output.
return common_layers.layer_preprocess(x, hparams)
|
A stack of self attention layers.
|
372,223
|
def new_file(self, path: str, checksum: str=None, to_archive: bool=False,
             tags: List[models.Tag]=None) -> models.File:
    """Create and return a new file record (not yet persisted)."""
    return self.File(path=path, checksum=checksum,
                     to_archive=to_archive, tags=tags)
|
Create a new file.
|
372,224
|
def get_lib_name(self):
    """Parse Cargo.toml to get the name of the shared library."""
    import toml
    manifest = toml.load(self.path)
    # Prefer [lib].name, falling back to [package].name.
    name = manifest.get("lib", {}).get("name")
    if name is None:
        name = manifest.get("package", {}).get("name")
    if name is None:
        raise Exception(
            "Can not parse library name from Cargo.toml. "
            "Cargo.toml missing value for key "
            "in both the [package] section and the [lib] section"
        )
    # Cargo replaces these characters with underscores in artifact names.
    return re.sub(r"[./\\-]", "_", name)
|
Parse Cargo.toml to get the name of the shared library.
|
372,225
|
def score(self, obs, pstates):
    """Compute the log probability of *obs* under the model, conditioned
    on the given sequence of production states."""
    state_indices = np.array([self.e[state] for state in pstates])
    framelogprob = self._compute_log_likelihood(obs, state_indices)
    logprob, _ = self._do_forward_pass(framelogprob, state_indices)
    return logprob
|
Compute the log probability under the model.
|
372,226
|
def request(self, url, params={}, headers={}):
# Retrieve SDMX messages over HTTP, spooling the body to a temp file.
# Returns (source_file_or_None, final_url, headers, status_code).
# NOTE(review): config-key literals (presumably 'headers'), the
# content-type substring, and the file-mode literals were stripped by
# extraction.
cur_config = self.config.copy()
# Merge per-call headers over the configured defaults.
if in cur_config:
cur_config[] = cur_config[].copy()
cur_config[].update(headers)
else:
cur_config[] = headers
with closing(requests.get(url, params=params, **cur_config)) as response:
if response.status_code == requests.codes.OK:
# Choose text vs binary spooling based on the content type header.
if (response.headers.get()
and ( in response.headers[])):
enc, fmode = response.encoding,
else:
enc, fmode = None,
# STF is presumably a spooled temp file class -- TODO confirm.
if str_type == str:
source = STF(
max_size=self.max_size, mode=fmode, encoding=enc)
else:
source = STF(max_size=self.max_size, mode=fmode)
# Stream the body into the temp file in 1 MB chunks.
for c in response.iter_content(chunk_size=1000000,
decode_unicode=bool(enc)):
source.write(c)
else:
source = None
code = int(response.status_code)
# Client errors (4xx) are raised; other codes are returned as-is.
if 400 <= code <= 499:
raise response.raise_for_status()
return source, response.url, response.headers, code
|
Retrieve SDMX messages.
If needed, override in subclasses to support other data providers.
:param url: The URL of the message.
:type url: str
:return: the xml data as file-like object
|
372,227
|
def __assert_equal(expected, returned, assert_print_result=True):
    """Test if two objects are equal.

    Returns "Pass" on equality, otherwise "Fail: <message>"; the message
    includes the compared values only when *assert_print_result* is true.
    """
    # NOTE(review): relies on `assert`, which is stripped under -O.
    try:
        if assert_print_result:
            assert (expected == returned), \
                "{0} is not equal to {1}".format(expected, returned)
        else:
            assert (expected == returned), "Result is not equal"
    except AssertionError as err:
        return "Fail: " + six.text_type(err)
    return "Pass"
|
Test if two objects are equal
|
372,228
|
def resolve_local(self, uri, base_uri, ref):
# Resolve a local file:// ``uri`` by reading the referenced schema file
# from disk; the store is not consulted first.  Returns the parsed JSON.
# NOTE(review): two lines below were truncated by extraction (the
# `ref.find(u"` / `ref.split(u"` pair is incomplete).
file_path = None
item_name = None
if (uri.startswith(u"file") or
uri.startswith(u"File")):
# Normalize relative "./" prefixes on the reference.
if ref.startswith(u"./"):
ref = ref.split(u"./")[-1]
org_ref = ref
if ref.find(u"
ref = ref.split(u"
if ref.find(u".json") != -1:
item_name = ref.split(u".json")[0]
# Strip the scheme so the remainder is a filesystem path.
if base_uri.startswith(u"file://") is True:
base_uri = base_uri.split(u"file://")[1]
elif base_uri.startswith(u"File://") is True:
base_uri = base_uri.split(u"File://")[1]
file_path = os.path.join(base_uri, ref)
result = None
try:
# NOTE(review): the file handle is never closed, and .decode() on the
# read result suggests Python 2 era code.
schema_file = open(file_path, "r").read()
result = json.loads(schema_file.decode("utf-8"))
except IOError as e:
log.error(u"file not found %s" % e)
msg = "Could not find schema file. %s" % file_path
raise SalesKingException("SCHEMA_NOT_FOUND", msg)
# Optionally cache the resolved document under the original URI.
if self.cache_remote:
self.store[uri] = result
return result
|
Resolve a local ``uri``.
Does not check the store first.
:argument str uri: the URI to resolve
:returns: the retrieved document
|
372,229
|
def features(self, expand=False):
    """Return the list of feature-value pairs in the conjunction."""
    collected = []
    for term in self._terms:
        # Only AVM terms contribute features; other term kinds are skipped.
        if isinstance(term, AVM):
            collected.extend(term.features(expand=expand))
    return collected
|
Return the list of feature-value pairs in the conjunction.
|
372,230
|
def get_snapshot_command_history(self, name, limit=20, offset=0, view=None):
# Retrieve a paginated list of commands triggered by a snapshot policy
# (API v6+); results are materialized as ApiSnapshotCommand objects.
# NOTE(review): the param-dict keys were stripped during extraction --
# presumably 'limit', 'offset' and 'view'.
params = {
: limit,
: offset,
}
# Only send the view parameter when one was requested.
if view:
params[] = view
return self._get("snapshots/policies/%s/history" % name, ApiSnapshotCommand, True,
params=params, api_version=6)
|
Retrieve a list of commands triggered by a snapshot policy.
@param name: The name of the snapshot policy.
@param limit: Maximum number of commands to retrieve.
@param offset: Index of first command to retrieve.
@param view: View to materialize. Valid values are 'full', 'summary', 'export', 'export_redacted'.
@return: List of commands triggered by a snapshot policy.
@since: API v6
|
372,231
|
def _signed_add_overflow(state, a, b):
    """Return the condition that signed 256-bit ``a + b`` overflows.

    Both operands are sign-extended to 512 bits so the addition is exact,
    then the sum is checked against the signed 256-bit range.
    """
    total = Operators.SEXTEND(a, 256, 512) + Operators.SEXTEND(b, 256, 512)
    limit = 1 << 255
    return Operators.OR(total < -limit, total >= limit)
|
Sign extend the value to 512 bits and check the result can be represented
in 256. Following there is a 32 bit excerpt of this condition:
a + b -80000000 -3fffffff -00000001 +00000000 +00000001 +3fffffff +7fffffff
+80000000 True True True False False False False
+c0000001 True False False False False False False
+ffffffff True False False False False False False
+00000000 False False False False False False False
+00000001 False False False False False False True
+3fffffff False False False False False False True
+7fffffff False False False False True True True
|
372,232
|
def mavlink_packet(self, msg):
    """Handle an incoming mavlink packet: prune dead graphs, then feed
    the packet to every surviving graph."""
    # Walk backwards so popping entries does not shift later indices.
    for idx in reversed(range(len(self.graphs))):
        if not self.graphs[idx].is_alive():
            self.graphs[idx].close()
            self.graphs.pop(idx)
    for graph in self.graphs:
        graph.add_mavlink_packet(msg)
|
handle an incoming mavlink packet
|
372,233
|
def get(self, chargeback_id, **params):
# Verify the chargeback ID format, then fetch the chargeback from the
# API via the parent class implementation.
# NOTE(review): the error message lost its placeholders in extraction --
# presumably '{id}' and '{prefix}', matching the format() kwargs below.
if not chargeback_id or not chargeback_id.startswith(self.RESOURCE_ID_PREFIX):
raise IdentifierError(
"Invalid chargeback ID: . A chargeback ID should start with .".format(
id=chargeback_id, prefix=self.RESOURCE_ID_PREFIX)
)
return super(Chargebacks, self).get(chargeback_id, **params)
|
Verify the chargeback ID and retrieve the chargeback from the API.
|
372,234
|
def datatype2schemacls(
    _datatype, _registry=None, _factory=None, _force=True,
    _besteffort=True, **kwargs
):
    """Get the schema class associated with the input data type.

    Resolution order: registry lookup by data type, then factory lookup,
    then (when ``_force``) building a brand-new schema class.

    :param type _datatype: data type from where get associated schema.
    :param _registry: registry providing ``getbydatatype``; default is
        the global registry.
    :param _factory: factory providing ``getschemacls``/``build``;
        default is the global factory.
    :param bool _force: if true (default), build a schema class when no
        existing association is found.
    :param bool _besteffort: if True (default), try to resolve the schema
        by inheritance.
    :param dict kwargs: factory builder kwargs.
    :rtype: type
    :return: associated schema class, or None when nothing is found and
        ``_force`` is False.
    """
    if _registry is None:
        result = getbydatatype(_datatype, besteffort=_besteffort)
    else:
        result = _registry.getbydatatype(_datatype, besteffort=_besteffort)
    if result is None:
        if _factory is None:
            result = getschemacls(_datatype, besteffort=_besteffort)
        else:
            result = _factory.getschemacls(_datatype, besteffort=_besteffort)
    if result is None and _force:
        builder = build if _factory is None else _factory.build
        result = builder(_resource=_datatype, **kwargs)
    return result
|
Get a schema class which has been associated to input data type by the
registry or the factory in this order.
:param type datatype: data type from where get associated schema.
:param SchemaRegisgry _registry: registry from where call the getbydatatype
. Default is the global registry.
:param SchemaFactory _factory: factory from where call the getschemacls if
getbydatatype returns None. Default is the global factory.
:param bool _force: if true (default), force the building of schema class
if no schema is associated to input data type.
:param bool _besteffort: if True (default), try to resolve schema by
inheritance.
:param dict kwargs: factory builder kwargs.
:rtype: type
:return: Schema associated to input registry or factory. None if no
association found.
|
372,235
|
def _print_header(data):
# Create a VCF header so the output is a valid VCF file.
# NOTE(review): every print payload below was truncated or stripped by
# extraction -- the `print("` lines are incomplete in the source and the
# bare `print(, ...)` calls lost their string arguments entirely.
print("
print("
print("
for pos in data:
print("
print(, file=STDOUT, end="")
print(, file=STDOUT, end="")
print(, file=STDOUT, end="")
print(, file=STDOUT, end="")
print("
|
Create vcf header to make
a valid vcf.
|
372,236
|
def generate(self, api):
# Generate a Python module for each namespace in the Stone spec, after
# copying the static runtime resource files into the target folder.
# NOTE(review): the resource folder name, the copied file names, the log
# messages, and the module filename format strings were all stripped
# during extraction.
rsrc_folder = os.path.join(os.path.dirname(__file__), )
self.logger.info()
shutil.copy(os.path.join(rsrc_folder, ),
self.target_folder_path)
self.logger.info()
shutil.copy(os.path.join(rsrc_folder, ),
self.target_folder_path)
self.logger.info()
shutil.copy(os.path.join(rsrc_folder, ),
self.target_folder_path)
for namespace in api.namespaces.values():
# Namespaces whose names collide with reserved words get a mangled
# module name plus a dummy module under the original name.
reserved_namespace_name = fmt_namespace(namespace.name)
with self.output_to_relative_path(.format(reserved_namespace_name)):
self._generate_base_namespace_module(api, namespace)
if reserved_namespace_name != namespace.name:
with self.output_to_relative_path(.format(namespace.name)):
self._generate_dummy_namespace_module(reserved_namespace_name)
|
Generates a module for each namespace.
Each namespace will have Python classes to represent data types and
routes in the Stone spec.
|
372,237
|
def order_by(self, column, direction="asc"):
    """Add an "order by" clause to the query.

    :param column: The order by column
    :type column: str
    :param direction: The direction of the order; anything other than
        "asc" (case-insensitively) is treated as "desc"
    :type direction: str
    :return: The current QueryBuilder instance
    :rtype: QueryBuilder
    """
    # Union queries keep their ordering separate from the base query.
    target = "union_orders" if self.unions else "orders"
    normalized = "asc" if direction.lower() == "asc" else "desc"
    getattr(self, target).append({"column": column, "direction": normalized})
    return self
|
Add a "order by" clause to the query
:param column: The order by column
:type column: str
:param direction: The direction of the order
:type direction: str
:return: The current QueryBuilder instance
:rtype: QueryBuilder
|
372,238
|
def _main(self):
# Module "main" wrapper: set the process title and signal handlers, run
# the module's main(), then always stop cleanly and exit.  Only used by
# external modules.
self.set_proctitle(self.name)
self.set_signal_handler()
logger.info("process for module %s is now running (pid=%d)", self.name, os.getpid())
try:
self.main()
except (IOError, EOFError):
# Broken pipe / EOF from the daemon link is a normal shutdown path.
pass
except Exception as exp:
# NOTE(review): the log format string was stripped during extraction.
logger.exception(, exp)
self.do_stop()
logger.info("process for module %s is now exiting (pid=%d)", self.name, os.getpid())
exit()
|
module "main" method. Only used by external modules.
:return: None
|
372,239
|
def user_provenance(self, document):
    """Add the user provenance (account and person agents) to *document*."""
    self.self_check()
    (username, fullname) = _whoami()
    # Fall back to the system-reported full name when none was given.
    if not self.full_name:
        self.full_name = fullname
    for namespace in (UUID, ORCID, FOAF):
        document.add_namespace(namespace)
    # The machine account the run was executed under.
    account_attrs = {provM.PROV_TYPE: FOAF["OnlineAccount"],
                     "prov:label": username,
                     FOAF["accountName"]: username}
    account = document.agent(ACCOUNT_UUID, account_attrs)
    # The human researcher, identified by ORCID when available.
    person_attrs = {provM.PROV_TYPE: PROV["Person"],
                    "prov:label": self.full_name,
                    FOAF["name"]: self.full_name,
                    FOAF["account"]: account}
    user = document.agent(self.orcid or USER_UUID, person_attrs)
    document.actedOnBehalfOf(account, user)
|
Add the user provenance.
|
372,240
|
def validate(spec, data):
# Validate *data* against the spec's known sections and build the
# corresponding config objects; returns {section: config_object}.
# NOTE(review): the PolyaxonfileError message format string was stripped
# during extraction.
data = copy.deepcopy(data)
validated_data = {}
def validate_keys(section, config, section_data):
# The MODEL section is free-form; every other dict section may only
# contain keys declared by the section's schema.
if not isinstance(section_data, dict) or section == spec.MODEL:
return
extra_args = [key for key in section_data.keys() if key not in config.SCHEMA().fields]
if extra_args:
raise PolyaxonfileError(.format(
section, extra_args))
def add_validated_section(section, config):
# Sections absent (or falsy) in the data are simply skipped.
if data.get(section):
section_data = data[section]
validate_keys(section=section, config=config, section_data=section_data)
validated_data[section] = config.from_dict(section_data)
add_validated_section(spec.ENVIRONMENT, spec.ENVIRONMENT_CONFIG)
add_validated_section(spec.BUILD, BuildConfig)
add_validated_section(spec.RUN, RunConfig)
add_validated_section(spec.MODEL, ModelConfig)
add_validated_section(spec.TRAIN, TrainConfig)
add_validated_section(spec.EVAL, EvalConfig)
return validated_data
|
Validates the data and creates the config objects
|
372,241
|
def operands(self):
    """Return an iterator over this instruction's operands.

    The iterator yields a ValueRef for each operand.  Raises ValueError
    when this value is not an instruction.
    """
    if not self.is_instruction:
        # NOTE(review): the error-message literal was stripped during
        # extraction.
        raise ValueError(
            % (self._kind,))
    it = ffi.lib.LLVMPY_InstructionOperandsIter(self)
    parents = self._parents.copy()
    parents.update(instruction=self)
    return _OperandsIterator(it, parents)
|
Return an iterator over this instruction's operands.
The iterator will yield a ValueRef for each operand.
|
372,242
|
def get_next_action(self, request, application, label, roles):
    """Process the get_next_action request at the current step.

    Handles the case where a different (or already-registered) person
    accesses the applicant's secret URL, offering to "steal" the
    application; otherwise falls through to the superclass behaviour.
    """
    # NOTE(review): many string literals (role names, dict keys, template
    # names) were stripped from this block during extraction.
    if in roles:
        new_person = None
        reason = None
        details = None
        attrs, _ = saml.parse_attributes(request)
        saml_id = attrs[]
        if saml_id is not None:
            query = Person.objects.filter(saml_id=saml_id)
            if application.content_type.model == "person":
                query = query.exclude(pk=application.applicant.pk)
            if query.count() > 0:
                new_person = Person.objects.get(saml_id=saml_id)
                reason = "SAML id is already in use by existing person."
                details = (
                    "It is not possible to continue this application "
                    + "as is because the saml identity already exists "
                    + "as a registered user.")
            del query
        if request.user.is_authenticated:
            new_person = request.user
            reason = "%s was logged in " \
                "and accessed the secret URL." % new_person
            details = (
                "If you want to access this application "
                + "as %s " % application.applicant
                + "without %s stealing it, " % new_person
                + "you will have to ensure %s is " % new_person
                + "logged out first.")
        if new_person is not None:
            if application.applicant != new_person:
                if in request.POST:
                    # Confirmed: reassign the application to the new person.
                    old_applicant = application.applicant
                    application.applicant = new_person
                    application.save()
                    log.change(
                        application.application_ptr,
                        "Stolen application from %s" % old_applicant)
                    messages.success(
                        request,
                        "Stolen application from %s" % old_applicant)
                    url = base.get_url(request, application, roles, label)
                    return HttpResponseRedirect(url)
                else:
                    return render(
                        template_name=
                        ,
                        context={
                            : application,
                            : new_person,
                            : reason,
                            : details,
                        },
                        request=request)
    if ( in roles or in roles) \
            and not in roles \
            and not in roles:
        actions = []
        if in request.POST:
            return
        return render(
            template_name=,
            context={: application,
                     : actions, : roles, },
            request=request)
    return super(StateApplicantEnteringDetails, self) \
        .get_next_action(request, application, label, roles)
|
Process the get_next_action request at the current step.
|
372,243
|
def invalid_index(self, name):
    """Show an invalid-index error message listing the supported indices."""
    write = self.stderr.write
    write("Unknown index: {}".format(name))
    write("Supported indices are:")
    for candidate in index_builder.indexes:
        write(" * {}".format(candidate.__class__.__name__))
|
Show an invalid index error message.
|
372,244
|
def _walk_modules(modules, class_name, path, ignored_formats, args):
    """Traverse modules and yield a flattened iterator.

    Yields (module, section, lecture, resource) tuples for every resource
    reachable from the given modules.
    """
    module_iter = _iter_modules(modules=modules,
                                class_name=class_name,
                                path=path,
                                ignored_formats=ignored_formats,
                                args=args)
    for module in module_iter:
        flattened = (
            (module, section, lecture, resource)
            for section in module.sections
            for lecture in section.lectures
            for resource in lecture.resources
        )
        for item in flattened:
            yield item
|
Helper generator that traverses modules in returns a flattened
iterator.
|
372,245
|
def tunnel_settings_system_tunnel_replicator_load_balance(self, **kwargs):
    """Auto Generated Code: build the brocade-tunnels replicator
    load-balance config element and pass it to the callback."""
    config = ET.Element("config")
    tunnel_settings = ET.SubElement(config, "tunnel-settings", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    system = ET.SubElement(tunnel_settings, "system")
    tunnel = ET.SubElement(system, "tunnel")
    replicator = ET.SubElement(tunnel, "replicator")
    load_balance = ET.SubElement(replicator, "load-balance")
    # NOTE(review): the kwargs.pop key literal was stripped during
    # extraction (presumably 'callback').
    callback = kwargs.pop(, self._callback)
    return callback(config)
|
Auto Generated Code
|
372,246
|
def system_methodHelp(self, method_name: str) -> str:
    """Return the help text (docstring) for a registered method.

    system.methodHelp('add') => "Adds two integers together"

    Return:
        (str): the method's help text, or "" when it cannot be resolved.
    """
    if method_name in self.funcs:
        target = self.funcs[method_name]
    elif self.instance is not None:
        # Fall back to attribute resolution on the registered instance.
        try:
            target = resolve_dotted_attribute(
                self.instance,
                method_name,
                self.allow_dotted_names,
            )
        except AttributeError:
            target = None
    else:
        target = None
    return pydoc.getdoc(target) if target is not None else ""
|
将docstring返回.
system.methodHelp('add') => "Adds two integers together"
Return:
(str): - 函数的帮助文本
|
372,247
|
def ReadBytes(self, address, num_bytes):
    """Read at most *num_bytes* from the target process starting at *address*.

    Error 299 (ERROR_PARTIAL_COPY) is treated as success and the partial
    data is returned; any other failure raises ProcessError.
    """
    addr = int(address)
    buf = ctypes.create_string_buffer(num_bytes)
    read_count = ctypes.c_size_t(0)
    ok = ReadProcessMemory(self.h_process, addr, buf, num_bytes,
                           ctypes.byref(read_count))
    if ok != 0:
        return buf.raw[:read_count.value]
    err = wintypes.GetLastError()
    if err != 299:
        raise process_error.ProcessError("Error in ReadProcessMemory: %d" % err)
    return buf.raw[:read_count.value]
|
Reads at most num_bytes starting from offset <address>.
|
372,248
|
def ReleaseFileSystem(self, file_system):
    """Release a cached file system object.

    Args:
      file_system (FileSystem): file system object.

    Returns:
      bool: True if the file system object can be closed.

    Raises:
      RuntimeError: if the file system object is not cached or an
          inconsistency is detected in the cache.
    """
    cache = self._file_system_cache
    identifier, cache_value = cache.GetCacheValueByObject(file_system)
    if not identifier or not cache_value:
        raise RuntimeError()
    cache.ReleaseObject(identifier)
    dereferenced = cache_value.IsDereferenced()
    if dereferenced:
        # Fully released: drop the object from the cache.
        cache.RemoveObject(identifier)
    return dereferenced
|
Releases a cached file system object.
Args:
file_system (FileSystem): file system object.
Returns:
bool: True if the file system object can be closed.
Raises:
PathSpecError: if the path specification is incorrect.
RuntimeError: if the file system object is not cached or an inconsistency
is detected in the cache.
|
372,249
|
def servicenames(self):
    "Give the list of services available in this folder."
    # NOTE(review): the dict-key literals were stripped during extraction
    # (presumably a 'name' key on each service and a 'services' key on the
    # json struct); the rstrip/split arguments are likewise missing.
    return set([service[].rstrip().split()[-1]
                for service in self._json_struct.get(, [])])
|
Give the list of services available in this folder.
|
372,250
|
def process_tree(self, channel_node):
    """Return a list of all unique, non-empty file names in a tree.

    :param channel_node: Root node of the channel being processed
    :return: The list of unique truthy file names in `channel_node`.
    """
    collected = []
    self.process_tree_recur(collected, channel_node)
    unique_names = set(collected)
    return [name for name in unique_names if name]
|
Returns a list of all file names associated with a tree. Profiling suggests using a global list with `extend`
is faster than using a global set or deque.
:param channel_node: Root node of the channel being processed
:return: The list of unique file names in `channel_node`.
|
372,251
|
def select(self, **kws):
    """Find all stored servers matching the given criteria, shuffled.

    Each stored value's own ``select(**kws)`` does the filtering (protocol,
    TOR support, pruning level, ...); the result order is randomised.
    """
    matches = []
    for candidate in self.values():
        if candidate.select(**kws):
            matches.append(candidate)
    random.shuffle(matches)
    return matches
|
Find all servers with indicated protocol support. Shuffled.
Filter by TOR support, and pruning level.
|
372,252
|
def calculate_bidirectional_lstm_output_shapes(operator):
    # NOTE(review): the original docstring and the assignments of
    # `state_shape`/`output_shape` were garbled or lost during extraction;
    # the two lines below are docstring remnants and are not valid Python.
    # The intent (per the recorded docstring) is: "See bidirectional LSTM's
    # conversion function for its output shapes."
    s conversion function for its output shapes.
    Input must be a 2-D or 4-D tensorNoneNones _fix_shapes function
    if len(operator.inputs) > 1:
        # Propagate the state shape onto the initial-h inputs (fwd + rev).
        Y_h_in = operator.inputs[1]
        Y_h_in.type.shape = state_shape
        Y_h_rev_in = operator.inputs[3]
        Y_h_rev_in.type.shape = state_shape
    if len(operator.inputs) > 2:
        # Same for the initial cell-state inputs.
        Y_c_in = operator.inputs[2]
        Y_c_in.type.shape = state_shape
        Y_c_rev_in = operator.inputs[4]
        Y_c_rev_in.type.shape = state_shape
    operator.outputs[0].type.shape = output_shape
    if len(operator.outputs) > 1:
        operator.outputs[1].type.shape = state_shape
        operator.outputs[3].type.shape = state_shape
    if len(operator.outputs) > 2:
        operator.outputs[2].type.shape = state_shape
        operator.outputs[4].type.shape = state_shape
|
See bidirectional LSTM's conversion function for its output shapes.
|
372,253
|
def apply_exclude(self, high):
    """Read the __exclude__ list and remove all excluded objects from the
    high data."""
    # NOTE(review): the key literals ('__exclude__', 'sls', 'id', '__sls__',
    # presumably) were stripped from this block during extraction.
    if not in high:
        return high
    ex_sls = set()
    ex_id = set()
    exclude = high.pop()
    for exc in exclude:
        if isinstance(exc, six.string_types):
            # Bare strings exclude whole sls files.
            ex_sls.add(exc)
        if isinstance(exc, dict):
            # Explicit {sls: ...} / {id: ...} exclusions.
            if len(exc) != 1:
                continue
            key = next(six.iterkeys(exc))
            if key == :
                ex_sls.add(exc[])
            elif key == :
                ex_id.add(exc[])
    if ex_sls:
        # Resolve excluded sls files down to the ids they declare.
        for name, body in six.iteritems(high):
            if name.startswith():
                continue
            if body.get(, ) in ex_sls:
                ex_id.add(name)
    for id_ in ex_id:
        if id_ in high:
            high.pop(id_)
    return high
|
Read in the __exclude__ list and remove all excluded objects from the
high data
|
372,254
|
def send(token, title, **kwargs):
    """Send a Boxcar notification.

    Site: https://boxcar.io/
    API: http://help.boxcar.io/knowledgebase/topics/48115-boxcar-api

    Raises BoxcarError unless the service answers 201 Created.
    """
    headers = {
        "Content-type": "application/x-www-form-urlencoded",
        "User-Agent": "DBMail/%s" % get_version(),
    }
    data = {
        "user_credentials": token,
        "notification[title]": from_unicode(title),
        "notification[sound]": "notifier-2"
    }
    for k, v in kwargs.items():
        # NOTE(review): the format literal was stripped during extraction
        # (presumably "notification[%s]").  Also note an api_url kwarg would
        # be serialised here before being popped below -- confirm intent.
        data[ % k] = from_unicode(v)
    http = HTTPSConnection(kwargs.pop("api_url", "new.boxcar.io"))
    http.request(
        "POST", "/api/notifications",
        headers=headers,
        body=urlencode(data))
    response = http.getresponse()
    if response.status != 201:
        raise BoxcarError(response.reason)
    return True
|
Site: https://boxcar.io/
API: http://help.boxcar.io/knowledgebase/topics/48115-boxcar-api
Desc: Best app for system administrators
|
372,255
|
def _has_expired(self):
    """Has this HIT expired yet?

    Raises ValueError when the HIT carries no Expiration attribute.
    """
    # NOTE(review): the hasattr attribute name and the strptime format
    # literal were stripped during extraction (presumably 'Expiration'
    # and an ISO-8601 format).
    expired = False
    if hasattr(self, ):
        now = datetime.datetime.utcnow()
        expiration = datetime.datetime.strptime(self.Expiration, )
        expired = (now >= expiration)
    else:
        raise ValueError("ERROR: Request for expired property, but no Expiration in HIT!")
    return expired
|
Has this HIT expired yet?
|
372,256
|
def parse_args():
    """Return parsed command-line arguments."""
    # NOTE(review): the option-name, metavar, default and help-text string
    # literals were stripped from the add_argument calls below during
    # extraction; the calls are not valid as-is.
    threshold_choices = docutils.frontend.OptionParser.threshold_choices
    parser = argparse.ArgumentParser(
        description=__doc__ + (
             if SPHINX_INSTALLED else ),
        prog=)
    parser.add_argument(, nargs=, type=decode_filename,
                        help=)
    parser.add_argument(, metavar=, default=None,
                        help=)
    parser.add_argument(, , action=,
                        help=)
    parser.add_argument(, metavar=,
                        choices=threshold_choices,
                        default=,
                        help=
                        +
                        .join(choice for choice in threshold_choices
                              if not choice.isdigit()) +
                        )
    parser.add_argument(, ,
                        metavar=, default=,
                        help=)
    parser.add_argument(,
                        metavar=, default=,
                        help=)
    parser.add_argument(,
                        metavar=, default=,
                        help=)
    parser.add_argument(,
                        metavar=, default=,
                        help=)
    parser.add_argument(,
                        metavar=, default=,
                        help=)
    parser.add_argument(, action=,
                        help=)
    parser.add_argument(, action=,
                        version= + __version__)
    args = parser.parse_args()
    if in args.files:
        # stdin ('-') may only be checked on its own.
        if len(args.files) > 1:
            parser.error(" for standard in can only be checked alone")
    else:
        args.files = list(find_files(filenames=args.files,
                                     recursive=args.recursive))
    return args
|
Return parsed command-line arguments.
|
372,257
|
def exec_python(attr, src, executable="python"):
    """Run a python subprocess to calculate a package attribute.

    Args:
        attr (str): Name of package attribute being created.
        src (str or list of str): Python code to execute; a list is joined
            into a single semicolon-delimited line.
        executable (str): Python interpreter to invoke.

    Returns:
        str: Output (stdout) of the python process, stripped.

    Raises:
        InvalidPackageError: if the subprocess exits with a nonzero status.
    """
    import subprocess
    if isinstance(src, basestring):
        src = [src]
    p = popen([executable, "-c", "; ".join(src)],
              stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    if p.returncode:
        from rez.exceptions import InvalidPackageError
        # BUG FIX: the format string previously had a single %s placeholder
        # but was given two arguments (attr, err), which raised TypeError
        # instead of the intended InvalidPackageError message.
        raise InvalidPackageError(
            "Error determining package attribute %s:\n%s" % (attr, err))
    return out.strip()
|
Runs a python subproc to calculate a package attribute.
Args:
attr (str): Name of package attribute being created.
src (list of str): Python code to execute, will be converted into
semicolon-delimited single line of code.
Returns:
str: Output of python process.
|
372,258
|
def get(self, idx, default=None):
    """Return the first placeholder shape with matching *idx* value, or
    *default* if not found."""
    matching = (ph for ph in self if ph.element.ph_idx == idx)
    return next(matching, default)
|
Return the first placeholder shape with matching *idx* value, or
*default* if not found.
|
372,259
|
def build(self, builder):
    """Build XML by appending a <SourceID> element (with this object's
    source_id as text) to *builder*."""
    tag = "SourceID"
    builder.start(tag, {})
    builder.data(self.source_id)
    builder.end(tag)
|
Build XML by appending to builder
|
372,260
|
def __nn_filter_helper(R_data, R_indices, R_ptr, S, aggregate):
    """Nearest-neighbor filter helper (internal to the decompose module).

    The CSR arrays (R_data, R_indices, R_ptr) describe, for each
    observation i (first axis of S), which rows of S are its neighbors.
    Rows with no neighbors are passed through unchanged.  When *aggregate*
    is np.average, R_data supplies the averaging weights.

    Returns an array like S containing the filtered data.
    """
    out = np.empty_like(S)
    for row in range(len(R_ptr) - 1):
        start, stop = R_ptr[row], R_ptr[row + 1]
        neighbor_idx = R_indices[start:stop]
        if not len(neighbor_idx):
            out[row] = S[row]
        elif aggregate is np.average:
            out[row] = aggregate(np.take(S, neighbor_idx, axis=0),
                                 axis=0, weights=R_data[start:stop])
        else:
            out[row] = aggregate(np.take(S, neighbor_idx, axis=0), axis=0)
    return out
|
Nearest-neighbor filter helper function.
This is an internal function, not for use outside of the decompose module.
It applies the nearest-neighbor filter to S, assuming that the first index
corresponds to observations.
Parameters
----------
R_data, R_indices, R_ptr : np.ndarrays
The `data`, `indices`, and `indptr` of a scipy.sparse matrix
S : np.ndarray
The observation data to filter
aggregate : callable
The aggregation operator
Returns
-------
S_out : np.ndarray like S
The filtered data array
|
372,261
|
def validate_is_non_abstract_vertex_type(self, vertex_classname):
    """Validate that a vertex classname corresponds to a non-abstract
    vertex class."""
    element = self.get_vertex_schema_element_or_raise(vertex_classname)
    if element.abstract:
        # NOTE(review): the error-message literal was stripped during
        # extraction (the bare `u` prefix remains).
        raise InvalidClassError(u
                                .format(vertex_classname))
|
Validate that a vertex classname corresponds to a non-abstract vertex class.
|
372,262
|
def docker_version(host=None, component=):
    """Return the version of Docker [Server].

    :param host: host or IP of the machine Docker is running on (master
        when None)
    :param component: Docker component ('server' or 'client', presumably)
    :return: Docker version string, or a fallback value on failure
    """
    # NOTE(review): the component-name, command and fallback string
    # literals were stripped from this block during extraction.
    if component.lower() == :
        component =
    else:
        component =
    command = .format(component)
    if host is None:
        success, output = shakedown.run_command_on_master(command, None, None, False)
    else:
        success, output = shakedown.run_command_on_host(host, command, None, None, False)
    if success:
        return output
    else:
        return
|
Return the version of Docker [Server]
:param host: host or IP of the machine Docker is running on
:type host: str
:param component: Docker component
:type component: str
:return: Docker version
:rtype: str
|
372,263
|
def relation_types():
    """Get a list of relation types supported by this charm."""
    # NOTE(review): the metadata section-name literals were stripped during
    # extraction (presumably 'provides', 'requires' and 'peers').
    rel_types = []
    md = metadata()
    for key in (, , ):
        section = md.get(key)
        if section:
            rel_types.extend(section.keys())
    return rel_types
|
Get a list of relation types supported by this charm
|
372,264
|
def get_last_result(self):
    """Read the last conversion result when in continuous conversion mode.

    Returns a signed integer value.
    """
    raw = self._device.readList(ADS1x15_POINTER_CONVERSION, 2)
    high_byte, low_byte = raw[0], raw[1]
    return self._conversion_value(low_byte, high_byte)
|
Read the last conversion result when in continuous conversion mode.
Will return a signed integer value.
|
372,265
|
def copy_value(self, orig_key, new_key):
    """Copy the value stored at *orig_key* into *new_key* in the model data."""
    data = self.model.get_data()
    if isinstance(data, list):
        # NOTE(review): this branch is not exclusive -- a list also falls
        # through to the else-assignment below; confirm whether an
        # if/elif chain was intended.
        data.append(data[orig_key])
    if isinstance(data, set):
        # NOTE(review): sets do not support indexing, so data[orig_key]
        # here would raise TypeError -- confirm the intended behaviour.
        data.add(data[orig_key])
    else:
        data[new_key] = data[orig_key]
    self.set_data(data)
|
Copy value
|
372,266
|
def beacon(config):
    # NOTE(review): this block was destroyed during extraction -- docstring
    # fragments and a run of concatenated key literals are all that remain,
    # and the lines below are not valid Python.  Per the recorded docstring
    # it scanned the configured services (with onchangeonly / delay /
    # emitatstartup / uncleanshutdown options) and fired status events.
    uncleanshutdowns pid file in `/run`.
    .. code-block:: yaml
        beacons:
          service:
            - services:
                nginx:
                  onchangeonly: True
                  delay: 30
                  uncleanshutdown: /run/nginx.pid
    servicesservicesrunningservice.statusservice_nametagoncleanshutdownoncleanshutdownemitatstartupemitatstartuponchangeonlyonchangeonlydelaydelayuncleanshutdownrunninguncleanshutdownuncleanshutdownonchangeonlyonchangeonlydelaytimeemitatstartuprunningrunningdelaytimetimetimedelaytime']
    ret.append(ret_dict)
    else:
        ret.append(ret_dict)
    return ret
|
Scan for the configured services and fire events
Example Config
.. code-block:: yaml
beacons:
service:
- services:
salt-master: {}
mysql: {}
The config above sets up beacons to check for
the salt-master and mysql services.
The config also supports two other parameters for each service:
`onchangeonly`: when `onchangeonly` is True the beacon will fire
events only when the service status changes. Otherwise, it will fire an
event at each beacon interval. The default is False.
`delay`: when `delay` is greater than 0 the beacon will fire events only
after the service status changes, and the delay (in seconds) has passed.
Applicable only when `onchangeonly` is True. The default is 0.
`emitatstartup`: when `emitatstartup` is False the beacon will not fire
event when the minion is reload. Applicable only when `onchangeonly` is True.
The default is True.
`uncleanshutdown`: If `uncleanshutdown` is present it should point to the
location of a pid file for the service. Most services will not clean up
this pid file if they are shutdown uncleanly (e.g. via `kill -9`) or if they
are terminated through a crash such as a segmentation fault. If the file is
present, then the beacon will add `uncleanshutdown: True` to the event. If
not present, the field will be False. The field is only added when the
service is NOT running. Omitting the configuration variable altogether will
turn this feature off.
Please note that some init systems can remove the pid file if the service
registers as crashed. One such example is nginx on CentOS 7, where the
service unit removes the pid file when the service shuts down (IE: the pid
file is observed as removed when kill -9 is sent to the nginx master
process). The 'uncleanshutdown' option might not be of much use there,
unless the unit file is modified.
Here is an example that will fire an event 30 seconds after the state of nginx
changes and report an uncleanshutdown. This example is for Arch, which
places nginx's pid file in `/run`.
.. code-block:: yaml
beacons:
service:
- services:
nginx:
onchangeonly: True
delay: 30
uncleanshutdown: /run/nginx.pid
|
372,267
|
def sort_values(self, axis=0, ascending=True, inplace=False,
                kind=, na_position=):
    """Sort the Series by its values.

    Returns a new Series unless ``inplace`` is True (then updates in place
    and returns None).  NaNs are grouped at the start or end according to
    ``na_position`` ('first' / 'last').
    """
    # NOTE(review): the default literals for `kind` / `na_position` and
    # several string literals below were stripped during extraction.
    inplace = validate_bool_kwarg(inplace, )
    self._get_axis_number(axis)
    if inplace and self._is_cached:
        raise ValueError("This Series is a view of some other array, to "
                         "sort in-place you must create a copy")
    def _try_kind_sort(arr):
        # Fall back to the default sort kind for dtypes that reject `kind`.
        try:
            return arr.argsort(kind=kind)
        except TypeError:
            return arr.argsort(kind=)
    arr = self._values
    sortedIdx = np.empty(len(self), dtype=np.int32)
    bad = isna(arr)
    good = ~bad
    idx = ibase.default_index(len(self))
    argsorted = _try_kind_sort(arr[good])
    if is_list_like(ascending):
        # A 1-element list is accepted for DataFrame.sort_values compat.
        if len(ascending) != 1:
            raise ValueError(
                % (len(ascending)))
        ascending = ascending[0]
    if not is_bool(ascending):
        raise ValueError()
    if not ascending:
        argsorted = argsorted[::-1]
    if na_position == :
        n = good.sum()
        sortedIdx[:n] = idx[good][argsorted]
        sortedIdx[n:] = idx[bad]
    elif na_position == :
        n = bad.sum()
        sortedIdx[n:] = idx[good][argsorted]
        sortedIdx[:n] = idx[bad]
    else:
        raise ValueError(.format(na_position))
    result = self._constructor(arr[sortedIdx], index=self.index[sortedIdx])
    if inplace:
        self._update_inplace(result)
    else:
        return result.__finalize__(self)
|
Sort by the values.
Sort a Series in ascending or descending order by some
criterion.
Parameters
----------
axis : {0 or 'index'}, default 0
Axis to direct sorting. The value 'index' is accepted for
compatibility with DataFrame.sort_values.
ascending : bool, default True
If True, sort values in ascending order, otherwise descending.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort' or 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. 'mergesort' is the only stable algorithm.
na_position : {'first' or 'last'}, default 'last'
Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at
the end.
Returns
-------
Series
Series ordered by values.
See Also
--------
Series.sort_index : Sort by the Series indices.
DataFrame.sort_values : Sort DataFrame by the values along either axis.
DataFrame.sort_index : Sort DataFrame by indices.
Examples
--------
>>> s = pd.Series([np.nan, 1, 3, 10, 5])
>>> s
0 NaN
1 1.0
2 3.0
3 10.0
4 5.0
dtype: float64
Sort values ascending order (default behaviour)
>>> s.sort_values(ascending=True)
1 1.0
2 3.0
4 5.0
3 10.0
0 NaN
dtype: float64
Sort values descending order
>>> s.sort_values(ascending=False)
3 10.0
4 5.0
2 3.0
1 1.0
0 NaN
dtype: float64
Sort values inplace
>>> s.sort_values(ascending=False, inplace=True)
>>> s
3 10.0
4 5.0
2 3.0
1 1.0
0 NaN
dtype: float64
Sort values putting NAs first
>>> s.sort_values(na_position='first')
0 NaN
1 1.0
2 3.0
4 5.0
3 10.0
dtype: float64
Sort a series of strings
>>> s = pd.Series(['z', 'b', 'd', 'a', 'c'])
>>> s
0 z
1 b
2 d
3 a
4 c
dtype: object
>>> s.sort_values()
3 a
1 b
4 c
2 d
0 z
dtype: object
|
372,268
|
def has_noquorum(self):
    """Return True when less than 1/3 of the known votes are on the same
    block (i.e. no quorum is possible)."""
    assert self.is_valid
    ranked = self.blockhashes()
    limit = 1 / 3. * self.num_eligible_votes
    if ranked and ranked[0][1] > limit:
        # The leading block holds more than a third of the votes.
        return None
    assert not self.has_quorum_possible
    return True
|
less than 1/3 of the known votes are on the same block
|
372,269
|
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """Compute mean ln(ground motion) and standard deviations.

    See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for spec of input and result values.
    """
    C_SR = self.COEFFS_SOIL_RESPONSE[imt]
    C = self.COEFFS_AC10[PGA()]
    # Rock PGA used for nonlinear site amplification; the 1e-2 / g factor
    # presumably converts cm/s^2 to g -- confirm against the GMPE paper.
    pga4nl = np.exp(
        self._compute_mean(C, rup.mag, dists.rjb, rup.rake)) * 1e-2 / g
    if imt == PGA():
        mean = (np.log(pga4nl) +
                self._get_site_amplification_linear(sites.vs30, C_SR) +
                self._get_site_amplification_non_linear(sites.vs30, pga4nl,
                                                        C_SR))
    else:
        C = self.COEFFS_AC10[imt]
        mean = (self._compute_mean(C, rup.mag, dists.rjb, rup.rake) +
                self._get_site_amplification_linear(sites.vs30, C_SR) +
                self._get_site_amplification_non_linear(sites.vs30, pga4nl,
                                                        C_SR))
    if imt.name == "SA":
        # Spectral acceleration gets the same unit conversion applied.
        mean = np.log(np.exp(mean) * 1e-2 / g)
    stddevs = self._get_stddevs(C, stddev_types, num_sites=len(sites.vs30))
    return mean, stddevs
|
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
|
372,270
|
def _set_command_line_arguments(self, args):
    """Set internal configuration variables according to the input
    parameters.

    Applies verbosity, stats reporting, sleep intervals, tracing mode and
    the recipe list onto Global.CONFIG_MANAGER.
    """
    Global.LOGGER.debug("setting command line arguments")
    if args.VERBOSE:
        Global.LOGGER.debug("verbose mode active")
        Global.CONFIG_MANAGER.log_level = logging.DEBUG
        Global.LOGGER_INSTANCE.reconfigure_log_level()
    if args.STATS > 0:
        Global.LOGGER.debug(f"stats requested every {args.STATS} seconds")
        Global.CONFIG_MANAGER.show_stats = True
        Global.CONFIG_MANAGER.stats_timeout = args.STATS
    if args.INTERVAL > 0:
        # INTERVAL is given in milliseconds; stored as seconds.
        Global.LOGGER.debug(f"setting sleep interval to {args.INTERVAL} milliseconds")
        Global.CONFIG_MANAGER.sleep_interval = float(args.INTERVAL)/1000
    if args.TRACE:
        Global.LOGGER.debug("tracing mode active")
        Global.CONFIG_MANAGER.tracing_mode = True
        Global.CONFIG_MANAGER.log_level = logging.DEBUG
        Global.LOGGER_INSTANCE.reconfigure_log_level()
    if args.MESSAGEINTERVAL is not None and args.MESSAGEINTERVAL > 0:
        # MESSAGEINTERVAL appears to be expressed in tenths of a
        # millisecond (hence /10 for the log and /10000 for seconds).
        Global.LOGGER.debug(f"setting message fetcher sleep interval to {args.MESSAGEINTERVAL/10} milliseconds")
        Global.CONFIG_MANAGER.message_fetcher_sleep_interval = float(args.MESSAGEINTERVAL)/10000
        Global.CONFIG_MANAGER.fixed_message_fetcher_interval = True
    Global.LOGGER.debug(f"recipes to be parsed: {args.FILENAME}")
    Global.CONFIG_MANAGER.recipes = (args.FILENAME)
|
Set internal configuration variables according to
the input parameters
|
372,271
|
def clearReqVars(self):
    """Reset all request-tracking attributes to None.

    @return RETURN: None
    """
    for attr in ("errorMsg", "payload", "url", "resp",
                 "res", "method", "printErrors"):
        setattr(self, attr, None)
|
Function clearReqVars
Clear the variables used for the last request
@return RETURN: None
|
372,272
|
def set_maxrad(self, newrad):
    """Set the max allowed radius on every non-specific population.

    Bare numbers are interpreted as arcseconds; populations without a
    settable ``maxrad`` are silently skipped.
    """
    radius = newrad if isinstance(newrad, Quantity) else newrad * u.arcsec
    for population in self.poplist:
        if population.is_specific:
            continue
        try:
            population.maxrad = radius
        except AttributeError:
            pass
|
Sets max allowed radius in populations.
Doesn't operate via the :class:`stars.Constraint`
protocol; rather just rescales the sky positions
for the background objects and recalculates
sky area, etc.
|
372,273
|
def _cl_int_from_learner(cls, learn:Learner, ds_type:DatasetType=DatasetType.Valid, tta=False):
    """Create an instance of `ClassificationInterpretation`.

    `tta` indicates if we want to use Test Time Augmentation when
    computing the predictions.
    """
    predict = learn.TTA if tta else learn.get_preds
    preds = predict(ds_type=ds_type, with_loss=True)
    return cls(learn, *preds, ds_type=ds_type)
|
Create an instance of `ClassificationInterpretation`. `tta` indicates if we want to use Test Time Augmentation.
|
372,274
|
def _delete_node(self, tree, node):
    """Return a deep copy of *tree* with *node* eliminated (turned into a
    leaf).

    Parameters
    ----------
    tree : object
    node : int
        node to be eliminated from tree

    Returns
    -------
    pruned_tree : object
    """
    # NOTE(review): the dict-key literals used inside the recursion were
    # stripped during extraction, so the body is not valid as-is.
    temp_tree = copy.deepcopy(tree)
    def recourse(temp_tree_, del_node):
        if isinstance(temp_tree_, dict):
            if temp_tree_[] != -1:
                if temp_tree_[] == del_node:
                    # Found the node: drop its children/metadata and mark
                    # it as a leaf (-1 sentinel).
                    del temp_tree_[]
                    del temp_tree_[]
                    del temp_tree_[]
                    temp_tree_[] = -1
                else:
                    for k in [, ]:
                        recourse(temp_tree_[k], del_node)
        return None
    recourse(temp_tree, node)
    return temp_tree
|
Private function that eliminate node from tree.
Parameters
----------
tree : object
node : int
node to be eliminated from tree
Returns
-------
pruned_tree : object
|
372,275
|
def running_jobs(self, exit_on_error=True):
    """Initialize multiprocessing for the duration of the ``yield``.

    When job-based execution is enabled, a ProcessPoolExecutor is bound to
    ``self.executor`` while the caller's block runs and is always cleared
    afterwards; otherwise control is yielded directly.  When
    *exit_on_error* is true, ``self.exit_on_error()`` runs after the block.
    """
    with self.handling_exceptions():
        if self.using_jobs:
            from concurrent.futures import ProcessPoolExecutor
            try:
                # The executor is exposed on self so workers elsewhere can
                # submit jobs while the block is active.
                with ProcessPoolExecutor(self.jobs) as self.executor:
                    yield
            finally:
                self.executor = None
        else:
            yield
    if exit_on_error:
        self.exit_on_error()
|
Initialize multiprocessing.
|
372,276
|
def datetime_to_knx(datetimeval, clock_synced_external=1):
    """Convert a Python timestamp to an 8-byte KNX time and date object.

    Only years 1900-2155 are representable.  Byte 6 packs the working-day
    flag, a fixed "working day valid" bit and a DST flag (last Sunday of
    March through last Sunday of October); byte 7 flags an externally
    synchronised clock.
    """
    year = datetimeval.year
    if (year < 1900) or (year > 2155):
        raise KNXException("Only years between 1900 and 2155 supported")
    knx = [
        year - 1900,
        datetimeval.month,
        datetimeval.day,
        (datetimeval.isoweekday() << 5) + datetimeval.hour,
        datetimeval.minute,
        datetimeval.second,
        0,
        0,
    ]
    is_working_day = 1 if datetimeval.isoweekday() < 6 else 0
    # DST window: back up from April 1st / November 1st to the previous
    # Sunday, giving the last Sundays of March and October.
    date1 = datetime(year, 4, 1)
    dston = date1 - timedelta(days=date1.weekday() + 1)
    date2 = datetime(year, 11, 1)
    dstoff = date2 - timedelta(days=date2.weekday() + 1)
    dst = 1 if dston <= datetimeval.replace(tzinfo=None) < dstoff else 0
    knx[6] = (is_working_day << 6) + (1 << 5) + dst
    knx[7] = 128 if clock_synced_external else 0
    return knx
|
Convert a Python timestamp to an 8 byte KNX time and date object
|
372,277
|
def unix(value):
    """Convert a date or datetime (or numeric timestamp) to a unix-seconds
    string.

    Numbers below 1e10 are treated as unix seconds, larger ones as
    milliseconds.
    """
    if not isinstance(value, (date, builtin_datetime)):
        converter = unix2datetime if value < 10000000000 else milli2datetime
        value = converter(value)
    return str(datetime2unix(value))
|
Convert a date, or datetime to unix timestamp
:param value:
:return:
|
372,278
|
def _extract_clublog_header(self, cty_xml_filename):
    """Extract the header of the Clublog XML File.

    Reads the first line and pulls out the date (and, presumably, the xml
    namespace); returns a dict, or None on error.
    """
    # NOTE(review): parts of this block were garbled during extraction --
    # the regex/replace literals are mangled and `cty_ns` is referenced
    # without being assigned.
    cty_header = {}
    try:
        with open(cty_xml_filename, "r") as cty:
            raw_header = cty.readline()
        cty_date = re.search("date=", raw_header)
        if cty_date:
            cty_date = cty_date.group(0).replace("date=", "").replace("%Y-%m-%dT%H:%M:%S.+[", "")
            cty_header[] = cty_ns
        if len(cty_header) == 2:
            self._logger.debug("Header successfully retrieved from CTY File")
        elif len(cty_header) < 2:
            self._logger.warning("Header could only be partically retrieved from CTY File")
            self._logger.warning("Content of Header: ")
            for key in cty_header:
                self._logger.warning(str(key)+": "+str(cty_header[key]))
        return cty_header
    except Exception as e:
        self._logger.error("Clublog CTY File could not be opened / modified")
        self._logger.error("Error Message: " + str(e))
        return
|
Extract the header of the Clublog XML File
|
372,279
|
def get_yeast_sequence(chromosome, start, end, reverse_complement=False):
    """Acquire a sequence from SGD (http://www.yeastgenome.org).

    :param chromosome: Yeast chromosome.
    :type chromosome: int
    :param start: A biostart.
    :type start: int
    :param end: A bioend.
    :type end: int
    :param reverse_complement: Get the reverse complement.
    :type reverse_complement: bool
    :returns: A DNA sequence.
    :rtype: coral.DNA
    """
    import requests
    # NOTE(review): the URL fragments and rev_option literals were stripped
    # during extraction, and `sequence` is used below without being
    # assigned (presumably derived from res).
    if start != end:
        if reverse_complement:
            rev_option =
        else:
            rev_option =
        param_url = + str(chromosome) + + str(start) + \
            + str(end) + + rev_option
        url = + \
            param_url
        res = requests.get(url)
        return coral.DNA(sequence)
|
Acquire a sequence from SGD http://www.yeastgenome.org
:param chromosome: Yeast chromosome.
:type chromosome: int
:param start: A biostart.
:type start: int
:param end: A bioend.
:type end: int
:param reverse_complement: Get the reverse complement.
:type revervse_complement: bool
:returns: A DNA sequence.
:rtype: coral.DNA
|
372,280
|
def _update_font_weight(self, font_weight):
    """Update the bold-toggle widget.

    Parameters
    ----------
    font_weight: Integer
    \tButton is down iff font_weight has the wx.FONTWEIGHT_BOLD bit set
    """
    is_bold = (font_weight & wx.FONTWEIGHT_BOLD) == wx.FONTWEIGHT_BOLD
    self.ToggleTool(wx.FONTFLAG_BOLD, is_bold)
|
Updates font weight widget
Parameters
----------
font_weight: Integer
\tButton down iff font_weight == wx.FONTWEIGHT_BOLD
|
372,281
|
def resolve_absolute_name(self, name):
    """Resolve a field from an absolute name.

    An absolute name is just like a unix absolute path: it starts with '/'
    and each name component is separated by '/'.

    :param name: absolute name, e.g. "/container/subcontainer/field"
    :return: field with this absolute name
    """
    # NOTE(review): the '/' literals for the comparison and split were
    # stripped during extraction, and the stray `//` line below is an
    # extraction artifact.
    //
    current = self
    while current.enclosing:
        current = current.enclosing
    if name != :
        components = name.split()[1:]
        for component in components:
            current = current.get_field_by_name(component)
    return current
|
Resolve a field from an absolute name.
An absolute name is just like unix absolute path,
starts with '/' and each name component is separated by '/'.
:param name: absolute name, e.g. "/container/subcontainer/field"
:return: field with this absolute name
:raises: KittyException if field could not be resolved
|
372,282
|
def _clean_doc(self, doc=None):
    """Clean the doc before writing it, removing unnecessary properties.

    Operates on ``self.doc`` when no doc is supplied.
    """
    if doc is None:
        doc = self.doc
    # NOTE(review): the dict-key literal was stripped during extraction,
    # and `resources` is extracted but never used here -- confirm intent.
    resources = doc[]
    return doc
|
Clean the doc before writing it, removing unnecessary properties and doing other operations.
|
372,283
|
def regrep(filename, patterns, reverse=False, terminate_on_match=False,
           postprocess=str):
    """A powerful regular expression version of grep.

    Args:
        filename (str): Filename to grep.
        patterns (dict): Mapping of key -> regex pattern string.
        reverse (bool): Read the file in reverse.  Useful for large files,
            especially when combined with terminate_on_match.
        terminate_on_match (bool): Stop reading once every key has at
            least one match.
        postprocess (callable): Applied to every captured group.
            Defaults to str, i.e. no change.

    Returns:
        dict of the form {key: [[[matches...], lineno], ...], ...}.
        For reverse reads the lineno is negative; 0-based indexing is used.
    """
    compiled = {k: re.compile(v) for k, v in patterns.items()}
    matches = collections.defaultdict(list)
    gen = reverse_readfile(filename) if reverse else zopen(filename, "rt")
    for i, l in enumerate(gen):
        for k, p in compiled.items():
            m = p.search(l)
            if m:
                matches[k].append([[postprocess(g) for g in m.groups()],
                                   -i if reverse else i])
        if terminate_on_match and all(
                len(matches.get(k, [])) for k in compiled.keys()):
            break
    try:
        gen.close()
    except Exception:
        # BUG FIX: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; close remains best-effort (some sources may
        # not support a usable close()).
        pass
    return matches
|
A powerful regular expression version of grep.
Args:
filename (str): Filename to grep.
patterns (dict): A dict of patterns, e.g.,
{"energy": "energy\(sigma->0\)\s+=\s+([\d\-\.]+)"}.
reverse (bool): Read files in reverse. Defaults to false. Useful for
large files, especially when used with terminate_on_match.
terminate_on_match (bool): Whether to terminate when there is at
least one match in each key in pattern.
postprocess (callable): A post processing function to convert all
matches. Defaults to str, i.e., no change.
Returns:
A dict of the following form:
{key1: [[[matches...], lineno], [[matches...], lineno],
[[matches...], lineno], ...],
key2: ...}
For reverse reads, the lineno is given as a -ve number. Please note
that 0-based indexing is used.
|
372,284
|
def convert(cls, style_dict, num_format_str=None):
    """Convert a style_dict to an xlsxwriter format dict.

    Parameters
    ----------
    style_dict : style dictionary to convert
    num_format_str : optional number format string
    """
    # NOTE(review): most string literals (dict keys, border-style and
    # alignment names) were stripped from this block during extraction.
    props = {}
    if num_format_str is not None:
        props[] = num_format_str
    if style_dict is None:
        return props
    if in style_dict:
        style_dict = style_dict.copy()
        style_dict[] = style_dict.pop()
    for style_group_key, style_group in style_dict.items():
        for src, dst in cls.STYLE_MAPPING.get(style_group_key, []):
            # First declaration wins; skip keys already set.
            if dst in props:
                continue
            v = style_group
            for k in src:
                try:
                    v = v[k]
                except (KeyError, TypeError):
                    break
            else:
                props[dst] = v
    if isinstance(props.get(), str):
        props[] = 0 if props[] == else 1
    for k in [, , , , ]:
        if isinstance(props.get(k), str):
            try:
                # Map a border-style name to its xlsxwriter index.
                props[k] = [, , , , ,
                            , , , ,
                            , , ,
                            ,
                            ].index(props[k])
            except ValueError:
                props[k] = 2
    if isinstance(props.get(), str):
        props[] = [, ,
                   ].index(props[])
    if isinstance(props.get(), str):
        props[] = {: 0, : 1, : 2,
                   : 33,
                   : 34}[props[]]
    return props
|
converts a style_dict to an xlsxwriter format dict
Parameters
----------
style_dict : style dictionary to convert
num_format_str : optional number format string
|
372,285
|
def get_workflow_actions_for(brain_or_object):
    """Return the actions (transitions) supported by the workflows the
    object passed in is bound to.

    Note this returns all actions, not only those allowed for the object
    based on its current state and permissions.  Results are memoized per
    portal type in ``actions_by_type``.
    """
    portal_type = api.get_portal_type(brain_or_object)
    cached = actions_by_type.get(portal_type, None)
    if cached:
        return cached
    wf_tool = api.get_tool("portal_workflow")
    collected = set()
    for wf_id in get_workflow_ids_for(brain_or_object):
        workflow = wf_tool.getWorkflowById(wf_id)
        collected.update(
            transition_id for transition_id, _ in workflow.transitions.items())
    result = list(collected)
    actions_by_type[portal_type] = result
    return result
|
Returns a list with the actions (transitions) supported by the workflows
the object pass in is bound to. Note it returns all actions, not only those
allowed for the object based on its current state and permissions.
|
372,286
|
def is_siemens(dicom_input):
    """Detect whether a dicom series is a Siemens dataset.

    :param dicom_input: directory with dicom files for 1 scan
    """
    # Only the first header is inspected.
    header = dicom_input[0]
    # NOTE(review): the attribute-name and value literals were stripped
    # during extraction (presumably 'Modality'/'Manufacturer' checks for
    # an MR scan from a SIEMENS scanner).
    if not in header or not in header:
        return False
    if header.Modality.upper() != :
        return False
    if not in header.Manufacturer.upper():
        return False
    return True
|
Use this function to detect if a dicom series is a siemens dataset
:param dicom_input: directory with dicom files for 1 scan
|
372,287
|
def load_weights(self, weight_file: str) -> None:
    """Load the pre-trained weights from the file.

    Reads per-layer, per-direction LSTM parameters from an HDF5 file,
    reorders the TF gate layout into the torch layout, and copies the
    tensors into the stacked forward/backward layers.

    NOTE(review): several HDF5 key strings and the h5py file-mode
    argument were stripped from this copy (the empty subscripts and
    arguments below), so this block does not parse as-is; the missing
    string literals must be restored from the original project.
    """
    # Remember the configured trainability so restored tensors match it.
    requires_grad = self.requires_grad
    # The file-mode literal is missing here (presumably 'r') -- TODO confirm.
    with h5py.File(cached_path(weight_file), ) as fin:
        for i_layer, lstms in enumerate(
            zip(self.forward_layers, self.backward_layers)
        ):
            for j_direction, lstm in enumerate(lstms):
                cell_size = lstm.cell_size
                # Group/dataset names for direction and layer are
                # missing from these subscripts -- TODO restore from
                # the upstream checkpoint layout.
                dataset = fin[ % j_direction][][][ % i_layer
                ][]
                # TF stores weight matrices transposed relative to torch.
                tf_weights = numpy.transpose(dataset[][...])
                torch_weights = tf_weights.copy()
                input_size = lstm.input_size
                # Split the packed matrix into input vs recurrent halves.
                input_weights = torch_weights[:, :input_size]
                recurrent_weights = torch_weights[:, input_size:]
                tf_input_weights = tf_weights[:, :input_size]
                tf_recurrent_weights = tf_weights[:, input_size:]
                # Swap the 2nd and 3rd gate blocks: TF and torch order
                # the LSTM gates differently.
                for torch_w, tf_w in [[input_weights, tf_input_weights],
                                      [recurrent_weights, tf_recurrent_weights]]:
                    torch_w[(1 * cell_size):(2 * cell_size), :] = tf_w[(2 * cell_size):(3 * cell_size), :]
                    torch_w[(2 * cell_size):(3 * cell_size), :] = tf_w[(1 * cell_size):(2 * cell_size), :]
                lstm.input_linearity.weight.data.copy_(torch.FloatTensor(input_weights))
                lstm.state_linearity.weight.data.copy_(torch.FloatTensor(recurrent_weights))
                lstm.input_linearity.weight.requires_grad = requires_grad
                lstm.state_linearity.weight.requires_grad = requires_grad
                tf_bias = dataset[][...]
                # +1 offset applied to one gate block's bias --
                # presumably a forget-gate bias initialization; verify
                # against the original project.
                tf_bias[(2 * cell_size):(3 * cell_size)] += 1
                torch_bias = tf_bias.copy()
                # Mirror the same gate-block swap for the bias vector.
                torch_bias[(1 * cell_size):(2 * cell_size)
                ] = tf_bias[(2 * cell_size):(3 * cell_size)]
                torch_bias[(2 * cell_size):(3 * cell_size)
                ] = tf_bias[(1 * cell_size):(2 * cell_size)]
                lstm.state_linearity.bias.data.copy_(torch.FloatTensor(torch_bias))
                lstm.state_linearity.bias.requires_grad = requires_grad
                # Projection matrix, also transposed from the TF layout.
                proj_weights = numpy.transpose(dataset[][...])
                lstm.state_projection.weight.data.copy_(torch.FloatTensor(proj_weights))
                lstm.state_projection.weight.requires_grad = requires_grad
|
Load the pre-trained weights from the file.
|
372,288
|
def get_group(self, name):
    """Get contact group by name.

    :param name: name of the group to look up
    :return: dict with the group's data
    :raises FMBaseError: when no group with that name exists

    NOTE(review): the dict-key literal and the error-message template
    were stripped from this copy (empty subscript and bare assignment
    below); the code does not parse until they are restored.
    """
    groups = self.get_groups()
    for group in groups:
        # The lookup-key literal is missing -- presumably 'name'.
        if group[] == name:
            return group
    # The message template literal is missing; it is formatted with the
    # requested group name below.
    msg =
    raise FMBaseError(msg.format(name=name))
|
Get contact group by name
:param name: name of group
:type name: ``str``, ``unicode``
:rtype: ``dict`` with group data
|
372,289
|
def get_depth(self, update=False):
    """Return the depth (level) of the node, with a root node at depth 1.

    The computed value is cached on the instance (``_cached_depth``) to
    speed up repeated calls inside loops.

    :param update: when True, discard the cached value and recompute.
    """
    # Root nodes are always at depth 1; no caching is done for them.
    if self.parent_id is None:
        return 1
    if update:
        # Drop any stale cached value before recomputing.
        try:
            del self._cached_depth
        except AttributeError:
            pass
    else:
        try:
            return self._cached_depth
        except AttributeError:
            pass
    # Walk up the ancestor chain, counting every hop (self included).
    depth = 0
    ancestor = self
    while ancestor:
        ancestor = ancestor.parent
        depth += 1
    self._cached_depth = depth
    return depth
|
:returns: the depth (level) of the node
Caches the result in the object itself to help in loops.
:param update: Updates the cached value.
|
372,290
|
def header_match(cls, header):
    """Parse the 4-line (320-byte) library member header.

    :param header: raw header bytes matched against ``cls.header_re``.
    :return: dict of decoded and converted header fields.
    :raises ValueError: when the header does not match the expected
        layout.

    NOTE(review): the named-group literals of the regex match and the
    error-message f-string were stripped from this copy (the empty
    subscripts below); the code does not parse until they are restored.
    """
    mo = cls.header_re.match(header)
    if mo is None:
        # The f-string message literal is missing here.
        msg = f
        raise ValueError(msg)
    # Each field is taken from a named regex group; the group-name
    # literals and the result-dict keys are missing in this copy.
    return {
        : mo[].decode().strip(),
        : mo[].decode().strip(),
        : mo[].decode().strip(),
        : strptime(mo[]),
        : strptime(mo[]),
        : float(mo[]),
        : mo[].decode().strip(),
        : mo[],
    }
|
Parse the 4-line (320-byte) library member header.
|
372,291
|
def _handle_userInfo(self, data):
    """Handle user information.

    Mirrors each key/value onto the room (or onto the room's user for
    the nick) and forwards it through the connection queue. The last
    processed pair is also published as ``user_info``.
    """
    for key, value in data.items():
        if key == "nick":
            # The server encodes an anonymous user as the string "None".
            if value == "None":
                value = "Volaphile"
            setattr(self.room.user, key, value)
            self.conn.enqueue_data(key, self.room.user.nick)
        elif key != "profile":
            if not hasattr(self.room, key):
                warnings.warn(f"Skipping unset property {key}", ResourceWarning)
                continue
            setattr(self.room, key, value)
            self.conn.enqueue_data(key, getattr(self.room, key))
    # NOTE: relies on the loop variables of the final iteration, so an
    # empty payload would raise NameError (same as the original).
    self.room.user_info = key, value
    self.conn.enqueue_data("user_info", self.room.user_info)
|
Handle user information
|
372,292
|
def ossos_discoveries(directory=parameters.REAL_KBO_AST_DIR,
                      suffix=,
                      no_nt_and_u=False,
                      single_object=None,
                      all_objects=True,
                      data_release=None,
                      ):
    """Return a list of TNO objects holding orbfit.Orbfit data, with the
    observations stored in the Orbfit.observations field.

    Default is to return only the objects corresponding to the current
    data release.

    NOTE(review): the default for ``suffix``, the filename-extension
    literals, the release-table column key, and the substring/prefix
    literals were stripped from this copy (empty arguments below), so
    this block does not parse as-is.
    """
    retval = []
    # The extension literals are missing -- presumably astrometry-file
    # endings; verify against the original project.
    files = [f for f in os.listdir(directory) if (f.endswith() or f.endswith() or f.endswith())]
    if single_object is not None:
        # Restrict to files for a single named object.
        files = filter(lambda name: name.startswith(single_object), files)
    elif all_objects and data_release is not None:
        # Restrict to object names listed in the requested data release.
        data_release = ossos_release_parser(table=True, data_release=data_release)
        objects = data_release[]
        files = filter(lambda name: name.partition(suffix)[0].rstrip() in objects, files)
    for filename in files:
        # Optionally skip designations flagged 'nt'/'u' (literals missing).
        if no_nt_and_u and (filename.__contains__() or filename.startswith()):
            continue
        mpc_filename = directory + filename
        # The matching .abg (orbit-fit) file lives in a sibling
        # directory (path-suffix literal missing).
        abg_filename = os.path.abspath(directory + ) + "/" + os.path.splitext(filename)[0] + ".abg"
        obj = TNO(None, ast_filename=mpc_filename, abg_filename=abg_filename)
        retval.append(obj)
    return retval
|
Returns a list of objects holding orbfit.Orbfit objects with the observations in the Orbfit.observations field.
Default is to return only the objects corresponding to the current Data Release.
|
372,293
|
def to_json(self):
    """Returns the JSON representation of the space membership.

    Extends the base class representation with the admin flag and the
    membership's roles.

    NOTE(review): the two dict-key literals were stripped from this
    copy (empty keys below) -- presumably 'admin' and 'roles'; the code
    does not parse until they are restored.
    """
    result = super(SpaceMembership, self).to_json()
    result.update({
        : self.admin,
        : self.roles
    })
    return result
|
Returns the JSON representation of the space membership.
|
372,294
|
def get_revoked(self):
    """Return the revocations in this certificate revocation list.

    Each entry is duplicated before being wrapped, so the revocations
    are provided by value, not by reference: mutating the returned
    objects does not affect this CRL.

    :return: tuple of :class:`Revoked`, or ``None`` when the CRL holds
        no revocations.
    """
    revoked_stack = _lib.X509_CRL_get_REVOKED(self._crl)
    count = _lib.sk_X509_REVOKED_num(revoked_stack)
    entries = []
    for index in range(count):
        raw = _lib.sk_X509_REVOKED_value(revoked_stack, index)
        # Duplicate the entry so the wrapper owns independent memory,
        # then tie its lifetime to the Python object via ffi.gc.
        duplicate = _lib.Cryptography_X509_REVOKED_dup(raw)
        wrapper = Revoked.__new__(Revoked)
        wrapper._revoked = _ffi.gc(duplicate, _lib.X509_REVOKED_free)
        entries.append(wrapper)
    # Preserve the historical contract: an empty CRL yields None.
    if entries:
        return tuple(entries)
|
Return the revocations in this certificate revocation list.
These revocations will be provided by value, not by reference.
That means it's okay to mutate them: it won't affect this CRL.
:return: The revocations in this CRL.
:rtype: :class:`tuple` of :class:`Revocation`
|
372,295
|
def partition(pred, iterable, tolist=False):
    """Use a predicate to partition entries into false entries and true
    entries.

    :param pred: one-argument predicate applied to each entry.
    :param iterable: source entries; consumed lazily via ``itertools.tee``.
    :param tolist: when True, return two lists instead of two iterators.
    :return: ``(false_entries, true_entries)``.
    """
    # Tee so the single input can feed both filters independently.
    t1, t2 = itertools.tee(iterable)
    # itertools.filterfalse and the builtin filter are the Python 3
    # equivalents of the six.moves aliases used before; six is a py2/3
    # compatibility shim that is no longer needed.
    ifalse = itertools.filterfalse(pred, t1)
    itrue = filter(pred, t2)
    if tolist:
        return list(ifalse), list(itrue)
    return ifalse, itrue
|
Use a predicate to partition entries into false entries and true entries
|
372,296
|
def raw_data(self, filename):
    """Return the raw pickled data from `filename`.

    :param filename: path to a pickle file written by this module.
    :return: the unpickled object.
    """
    if self.debug and self.debug.should():
        self.debug.write("Reading data from %r" % (filename,))
    # Pickle files must be opened in binary mode ('rb'); the original
    # copy had the mode literal stripped, which would make pickle.load
    # fail on a text-mode handle. The context manager also guarantees
    # the file is closed even if unpickling raises.
    with open(filename, 'rb') as fdata:
        return pickle.load(fdata)
|
Return the raw pickled data from `filename`.
|
372,297
|
def _set_fcoeport(self, v, load=False):
    """Setter method for fcoeport, mapped from YANG variable
    /interface/port_channel/fcoeport (container).

    If this variable is read-only (config: false) in the source YANG
    file, then _set_fcoeport is considered as a private method.
    Backends looking to populate this variable should do so via
    calling thisObj._set_fcoeport() directly.

    NOTE(review): many string literals (YANG type name, namespace,
    extension keys/values, error-message keys, callback name) were
    stripped from this copy (the empty arguments and dict keys below),
    so this block does not parse as-is.
    """
    # Unwrap values that carry their own YANG type converter.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=fcoeport.fcoeport, is_container=, presence=False, yang_name="fcoeport", rest_name="fcoeport", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u, u: u, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a structured error describing the expected type
        # (the key literals are missing in this copy).
        raise ValueError({
            : ,
            : "container",
            : ,
        })
    self.__fcoeport = t
    # Invoke the dependent-recalculation hook when one is defined
    # (attribute-name literal missing).
    if hasattr(self, ):
        self._set()
|
Setter method for fcoeport, mapped from YANG variable /interface/port_channel/fcoeport (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_fcoeport is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_fcoeport() directly.
|
372,298
|
def getMaxPacketSize(self, endpoint):
    """Get device's max packet size for given endpoint.

    Warning: this function will not always give you the expected result.
    See https://libusb.org/ticket/77 . You should instead consult the
    endpoint descriptor of current configuration and alternate setting.
    """
    size = libusb1.libusb_get_max_packet_size(self.device_p, endpoint)
    # Negative results are libusb error codes; translate them into
    # exceptions before returning the size.
    mayRaiseUSBError(size)
    return size
|
Get device's max packet size for given endpoint.
Warning: this function will not always give you the expected result.
See https://libusb.org/ticket/77 . You should instead consult the
endpoint descriptor of current configuration and alternate setting.
|
372,299
|
def and_(self, other):
    """Create a new compound query using the
    <orb.QueryCompound.Op.And> type.

    :param other: <Query> || <orb.QueryCompound>
    :return: <orb.QueryCompound>
    :sa: __and__

    :usage: |>>> from orb import Query as Q
            |>>> query = (Q('test') != 1).and_((Q('name') == 'Eric')
            |>>> print query
            |(test is not 1 and name is Eric)
    """
    # A null or foreign operand contributes nothing: keep a copy of self.
    if not isinstance(other, (Query, QueryCompound)) or other.isNull():
        return self.copy()
    # An empty self defers entirely to the other operand.
    if not self:
        return other.copy()
    return orb.QueryCompound(self, other, op=orb.QueryCompound.Op.And)
|
Creates a new compound query using the
<orb.QueryCompound.Op.And> type.
:param other <Query> || <orb.QueryCompound>
:return <orb.QueryCompound>
:sa __and__
:usage |>>> from orb import Query as Q
|>>> query = (Q('test') != 1).and_((Q('name') == 'Eric')
|>>> print query
|(test is not 1 and name is Eric)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.