docstring stringlengths 52 499 | function stringlengths 67 35.2k | __index_level_0__ int64 52.6k 1.16M |
|---|---|---|
Return the list of 'MUST' validators for the correct version of STIX.
Args:
options: ValidationOptions instance with validation options for this
def _get_musts(options):
    """Return the list of 'MUST' validators for the requested STIX version.

    Args:
        options: ValidationOptions instance with validation options for this
            validation run, including the STIX spec version.
    """
    # STIX 2.0 and 2.1 each ship their own registry of MUST validators.
    if options.version == '2.0':
        return musts20.list_musts(options)
    return musts21.list_musts(options)
Return the list of 'SHOULD' validators for the correct version of STIX.
Args:
options: ValidationOptions instance with validation options for this
def _get_shoulds(options):
    """Return the list of 'SHOULD' validators for the requested STIX version.

    Args:
        options: ValidationOptions instance with validation options for this
            validation run, including the STIX spec version.
    """
    # Dispatch to the registry matching the spec version in use.
    registry = shoulds20 if options.version == '2.0' else shoulds21
    return registry.list_shoulds(options)
Perform STIX JSON Schema validation against STIX input.
Find the correct schema by looking at the 'type' property of the
`instance` JSON object.
Args:
instance: A Python dictionary representing a STIX object with a
'type' property.
options: ValidationOptions instance with valid... | def validate_instance(instance, options=None):
if 'type' not in instance:
raise ValidationError("Input must be an object with a 'type' property.")
if not options:
options = ValidationOptions()
error_gens = []
# Schema validation
if instance['type'] == 'bundle' and 'objects' i... | 780,736 |
Print JSON Schema validation errors to stdout.
Args:
results: An instance of ObjectValidationResults.
def print_schema_results(results, level=0):
    """Print JSON Schema validation errors to stdout.

    Args:
        results: An instance of ObjectValidationResults.
        level: The indentation level at which to print the results.
    """
    for schema_error in results.errors:
        print_level(logger.error, _RED + "[X] %s", level, schema_error)
Print the results of validating an object.
Args:
def print_object_results(obj_result):
    """Print the results of validating a single object.

    Args:
        obj_result: An ObjectValidationResults instance.
    """
    print_results_header(obj_result.object_id, obj_result.is_valid)
    # Warnings and errors are reported one indentation level deep.
    if obj_result.warnings:
        print_warning_results(obj_result, 1)
    if obj_result.errors:
        print_schema_results(obj_result, 1)
Print the results of validating a file.
Args:
file_result: A FileValidationResults instance. | def print_file_results(file_result):
print_results_header(file_result.filepath, file_result.is_valid)
for object_result in file_result.object_results:
if object_result.warnings:
print_warning_results(object_result, 1)
if object_result.errors:
print_schema_results(ob... | 780,805 |
Print `results` (the results of validation) to stdout.
Args:
results: A list of FileValidationResults or ObjectValidationResults
instances. | def print_results(results):
if not isinstance(results, list):
results = [results]
for r in results:
try:
r.log()
except AttributeError:
raise ValueError('Argument to print_results() must be a list of '
'FileValidationResults or O... | 780,806 |
Parses a list of command line arguments into a ValidationOptions object.
Args:
cmd_args (list of str): The list of command line arguments to be parsed.
is_script: Whether the arguments are intended for use in a stand-alone
script or imported into another tool.
Returns:
Inst... | def parse_args(cmd_args, is_script=False):
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=NewlinesHelpFormatter,
epilog=CODES_TABLE
)
# Input options
if is_script:
parser.add_argument(
"files",
metavar="FILES",
... | 780,817 |
Power off a VM. You may pass either the VM object or simply the ID
of the VM that you want to power off.
Args:
server: VM Object that represent the VM to power off,
server_id: Int or Str representing the ID of the VM to power off.
Returns:
return True if json_obj[... | def poweroff_server(self, server=None, server_id=None):
sid = server_id if server_id is not None else server.sid
if sid is None:
raise Exception('No Server Specified.')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerPowerOff', dict(ServerId=sid))
json_obj = ... | 781,351 |
Terminates the session token, effectively logging out the user
from all crowd-enabled services.
Args:
token: The session token.
Returns:
True: If session terminated
None: If session termination failed | def terminate_session(self, token):
url = self.rest_url + "/session/%s" % token
response = self._delete(url)
# For consistency between methods use None rather than False
# If token validation failed for any reason return None
if not response.ok:
return None... | 781,406 |
Set the active state of a user
Args:
username: The account username
active_state: True or False
Returns:
True: If successful
None: If no user or failure occurred | def set_active(self, username, active_state):
if active_state not in (True, False):
raise ValueError("active_state must be True or False")
user = self.get_user(username)
if user is None:
return None
if user['active'] is active_state:
# Alre... | 781,409 |
Change the password for a user
Args:
username: The account username.
newpassword: The account new password.
raise_on_error: optional (default: False)
Returns:
True: Succeeded
False: If unsuccessful | def change_password(self, username, newpassword, raise_on_error=False):
response = self._put(self.rest_url + "/user/password",
data=json.dumps({"value": newpassword}),
params={"username": username})
if response.ok:
return T... | 781,413 |
Sends the user a password reset link (by email)
Args:
username: The account username.
Returns:
True: Succeeded
def send_password_reset_link(self, username):
    """Send the user a password reset link by email.

    Args:
        username: The account username.

    Returns:
        True: Succeeded
        False: If unsuccessful
    """
    reset_url = self.rest_url + "/user/mail/password"
    response = self._post(reset_url, params={"username": username})
    if not response.ok:
        return False
    return True
Retrieve a list of all group names that have <username> as a direct or indirect member.
Args:
username: The account username.
Returns:
list:
def get_nested_groups(self, username):
    """Retrieve all group names that have <username> as a direct or indirect member.

    Args:
        username: The account username.

    Returns:
        list: A list of strings of group names, or None if the request failed.
    """
    response = self._get(self.rest_url + "/user/group/nested",
                         params={"username": username})
    if response.ok:
        return [group['name'] for group in response.json()['groups']]
    return None
Retrieves a list of all users that directly or indirectly belong to the given groupname.
Args:
groupname: The group name.
Returns:
list:
A list of strings of user names. | def get_nested_group_users(self, groupname):
response = self._get(self.rest_url + "/group/user/nested",
params={"groupname": groupname,
"start-index": 0,
"max-results": 99999})
if not respon... | 781,416 |
Determines if the user exists.
Args:
username: The user name.
Returns:
bool:
def user_exists(self, username):
    """Determine whether the user exists.

    Args:
        username: The user name.

    Returns:
        bool: True if the user exists in the Crowd application,
        None otherwise.
    """
    response = self._get(self.rest_url + "/user",
                         params={"username": username})
    # Mirror this API's convention: None (not False) signals a failed lookup.
    return True if response.ok else None
Return the JSON mapping file for an index.
Mappings are stored as JSON files in the mappings subdirectory of this
app. They must be saved as {{index}}.json.
Args:
def get_index_mapping(index):
    """Return the JSON mapping file for an index.

    Mappings are stored as JSON files named {{index}}.json inside the
    configured mappings directory.

    Args:
        index: string, the name of the index to look for.
    """
    mapping_path = os.path.join(get_setting("mappings_dir"), "%s.json" % index)
    with open(mapping_path, "r") as mapping_file:
        return json.load(mapping_file)
Return list of models configured for a named index.
Args:
def get_index_models(index):
    """Return the list of model classes configured for a named index.

    Args:
        index: string, the name of the index to look up.
    """
    # Each entry is an "app.Model" dotted pair; a malformed entry raises
    # ValueError on unpacking, matching the original behaviour.
    pairs = (entry.split(".") for entry in get_index_config(index).get("models"))
    return [apps.get_model(app_label, model_name) for app_label, model_name in pairs]
Return list of all indexes in which a model is configured.
A model may be configured to appear in multiple indexes. This function
will return the names of the indexes as a list of strings. This is
useful if you want to know which indexes need updating when a model
is saved.
Args:
def get_model_indexes(model):
    """Return the names of all indexes in which *model* is configured.

    A model may be configured to appear in multiple indexes; this is useful
    for deciding which indexes need updating when a model is saved.

    Args:
        model: a model class (as yielded by get_index_models).
    """
    # Nested comprehension mirrors the original loop, including the
    # possibility of duplicate index names if a model is listed twice.
    return [
        index
        for index in get_index_names()
        for configured_model in get_index_models(index)
        if configured_model == model
    ]
Execute the indicated action within the environment and
return the resulting immediate reward dictated by the reward
program.
Usage:
immediate_reward = scenario.execute(selected_action)
Arguments:
action: The action to be executed within the current situation.
def execute(self, action):
    """Execute the indicated action and return the immediate reward.

    The first ``address_size`` bits of the current situation form an
    address; the action is rewarded when it matches the addressed data bit.

    Arguments:
        action: The action to be executed within the current situation.
    """
    assert action in self.possible_actions
    self.remaining_cycles -= 1
    address = int(bitstrings.BitString(
        self.current_situation[:self.address_size]
    ))
    data_bit = self.current_situation[self.address_size + address]
    return action == data_bit
Execute the indicated action within the environment and
return the resulting immediate reward dictated by the reward
program.
Usage:
immediate_reward = scenario.execute(selected_action)
Arguments:
action: The action to be executed within the current situation.
def execute(self, action):
    """Execute the indicated action and return the immediate reward.

    The reward is True exactly when the action equals the needle value.

    Arguments:
        action: The action to be executed within the current situation.
    """
    assert action in self.possible_actions
    # One scenario cycle is consumed per executed action.
    self.remaining_cycles -= 1
    matched = (action == self.needle_value)
    return matched
Execute the indicated action within the environment and
return the resulting immediate reward dictated by the reward
program.
Usage:
immediate_reward = scenario.execute(selected_action)
Arguments:
action: The action to be executed within the current situation.
... | def execute(self, action):
self.logger.debug('Executing action: %s', action)
reward = self.wrapped.execute(action)
if reward:
self.total_reward += reward
self.steps += 1
self.logger.debug('Reward received on this step: %.5f',
rewa... | 782,274 |
Execute the indicated action within the environment and
return the resulting immediate reward dictated by the reward
program.
Usage:
immediate_reward = scenario.execute(selected_action)
Arguments:
action: The action to be executed within the current situation.
def execute(self, action):
    """Execute the indicated action and return the immediate reward.

    The reward function compares the action against the expected
    classification for the current step; the running total and step
    counter are updated as side effects.

    Arguments:
        action: The action to be executed within the current situation.
    """
    expected = self.classifications[self.steps]
    reward = self.reward_function(action, expected)
    self.total_reward += reward
    self.steps += 1
    return reward
Create and return a new classifier set initialized for handling
the given scenario.
Usage:
scenario = MUXProblem()
model = algorithm.new_model(scenario)
model.run(scenario, learn=True)
Arguments:
scenario: A Scenario instance.
Return:
def new_model(self, scenario):
    """Create a new classifier set initialized for the given scenario.

    Arguments:
        scenario: A Scenario instance.

    Return:
        A ClassifierSet over the scenario's possible actions.
    """
    assert isinstance(scenario, scenarios.Scenario)
    actions = scenario.get_possible_actions()
    return ClassifierSet(self, actions)
Returns the maximum number of reads for the given solver parameters.
Args:
**params:
Parameters for the sampling method. Relevant to num_reads:
- annealing_time
- readout_thermalization
- num_reads
- programming_therma... | def max_num_reads(self, **params):
# dev note: in the future it would be good to have a way of doing this
# server-side, as we are duplicating logic here.
properties = self.properties
if self.software or not params:
# software solvers don't use any of the above par... | 782,318 |
Internal method for both sample_ising and sample_qubo.
Args:
linear (list/dict): Linear terms of the model.
quadratic (dict of (int, int):float): Quadratic terms of the model.
**params: Parameters for the sampling method, specified per solver.
Returns:
:... | def _sample(self, type_, linear, quadratic, params):
# Check the problem
if not self.check_problem(linear, quadratic):
raise ValueError("Problem graph incompatible with solver.")
# Mix the new parameters with the default parameters
combined_params = dict(self._param... | 782,321 |
Resume polling for a problem previously submitted.
Args:
id_: Identification of the query.
Returns:
def _retrieve_problem(self, id_):
    """Resume polling for a problem previously submitted.

    Args:
        id_: Identification of the query.

    Returns:
        :obj:`Future`
    """
    # Bind a Future to the existing remote problem and start polling it.
    result = Future(self, id_, self.return_matrix, None)
    self.client._poll(result)
    return result
Calculates the entropy of the attribute attr in given data set data.
Parameters:
data<dict|list> :=
if dict, treated as value counts of the given attribute name
if list, treated as a raw list from which the value counts will be generated
attr<string> := the name of the class attribute | def entropy(data, class_attr=None, method=DEFAULT_DISCRETE_METRIC):
assert (class_attr is None and isinstance(data, dict)) \
or (class_attr is not None and isinstance(data, list))
if isinstance(data, dict):
counts = data
else:
counts = defaultdict(float) # {attr:count}
f... | 782,342 |
Calculates the information gain (reduction in entropy) that would
result by splitting the data on the chosen attribute (attr).
Parameters:
prefer_fewer_values := Weights the gain by the count of the attribute's
unique values. If multiple attributes have the same gain, but one has
s... | def get_gain(data, attr, class_attr,
method=DEFAULT_DISCRETE_METRIC,
only_sub=0, prefer_fewer_values=False, entropy_func=None):
entropy_func = entropy_func or entropy
val_freq = defaultdict(float)
subset_entropy = 0.0
# Calculate the frequency of each of the values in the target attribute
... | 782,344 |
Attempts to predict the value of the class attribute by aggregating
the predictions of each tree.
Parameters:
weighting_formula := a callable that takes a list of trees and
returns a list of weights. | def predict(self, record):
# Get raw predictions.
# {tree:raw prediction}
predictions = {}
for tree in self.trees:
_p = tree.predict(record)
if _p is None:
continue
if isinstance(_p, CDist):
if _p.mean ... | 782,404 |
Handle the results of a problem submission or results request.
This method checks the status of the problem and puts it in the correct queue.
Args:
message (dict): Update message from the SAPI server wrt. this problem.
future `Future`: future corresponding to the problem
... | def _handle_problem_status(self, message, future):
try:
_LOGGER.trace("Handling response: %r", message)
_LOGGER.debug("Handling response for %s with status %s", message.get('id'), message.get('status'))
# Handle errors in batch mode
if 'error_code' in me... | 782,437 |
Encode the binary quadratic problem for submission to a given solver,
using the `qp` format for data.
Args:
solver (:class:`dwave.cloud.solver.Solver`):
The solver used.
linear (dict[variable, bias]/list[variable, bias]):
Linear terms of the model.
quadratic (d... | def encode_bqm_as_qp(solver, linear, quadratic):
active = active_qubits(linear, quadratic)
# Encode linear terms. The coefficients of the linear terms of the objective
# are encoded as an array of little endian 64 bit doubles.
# This array is then base64 encoded into a string safe for json.
# ... | 782,443 |
Helper for decode_qp, turns a single byte into a list of bits.
Args:
byte: byte to be decoded
Returns:
list of bits corresponding to byte | def _decode_byte(byte):
bits = []
for _ in range(8):
bits.append(byte & 1)
byte >>= 1
return bits | 782,445 |
Helper for decode_qp, decodes a double array.
The double array is stored as little endian 64 bit doubles.
The array has then been base64 encoded. Since we are decoding we do these
steps in reverse.
Args:
message: the double array
Returns:
decoded double array | def _decode_doubles(message):
binary = base64.b64decode(message)
return struct.unpack('<' + ('d' * (len(binary) // 8)), binary) | 782,447 |
Calculate the energy of a state given the Hamiltonian.
Args:
linear: Linear Hamiltonian terms.
quad: Quadratic Hamiltonian terms.
state: Vector of spins describing the system state.
Returns:
Energy of the state evaluated by the given energy function. | def evaluate_ising(linear, quad, state):
# If we were given a numpy array cast to list
if _numpy and isinstance(state, np.ndarray):
return evaluate_ising(linear, quad, state.tolist())
# Accumulate the linear and quadratic values
energy = 0.0
for index, value in uniform_iterator(linear... | 782,451 |
Calculate a set of all active qubits. Qubit is "active" if it has
bias or coupling attached.
Args:
linear (dict[variable, bias]/list[variable, bias]):
Linear terms of the model.
quadratic (dict[(variable, variable), bias]):
Quadratic terms of the model.
Returns:
def active_qubits(linear, quadratic):
    """Calculate the set of all active qubits.

    A qubit is "active" if it has a bias or a coupling attached.

    Args:
        linear (dict[variable, bias]/list[variable, bias]):
            Linear terms of the model.
        quadratic (dict[(variable, variable), bias]):
            Quadratic terms of the model.

    Returns:
        set of active qubit indices.
    """
    active = set()
    for qubit, _bias in uniform_iterator(linear):
        active.add(qubit)
    for edge in six.iterkeys(quadratic):
        active.update(edge)
    return active
Loads a GermaNet instance connected to the given MongoDB instance.
Arguments:
- `host`: the hostname of the MongoDB instance
- `port`: the port number of the MongoDB instance
- `database_name`: the name of the GermaNet database on the
def load_germanet(host=None, port=None, database_name='germanet'):
    """Load a GermaNet instance connected to the given MongoDB instance.

    Arguments:
    - `host`: the hostname of the MongoDB instance
    - `port`: the port number of the MongoDB instance
    - `database_name`: the name of the GermaNet database on the
      MongoDB instance
    """
    connection = MongoClient(host, port)
    return GermaNet(connection[database_name])
Creates a new GermaNet object.
Arguments:
- `mongo_db`: a pymongo.database.Database object containing
the GermaNet lexicon | def __init__(self, mongo_db, cache_size = DEFAULT_CACHE_SIZE):
self._mongo_db = mongo_db
self._lemma_cache = None
self._synset_cache = None
self.max_min_depths = {}
try:
self.__dict__.update((k, v) for (k, v)
in self._... | 782,499 |
Looks up lemmas in the GermaNet database.
Arguments:
- `lemma`:
- `pos`: | def lemmas(self, lemma, pos = None):
if pos is not None:
if pos not in SHORT_POS_TO_LONG:
return None
pos = SHORT_POS_TO_LONG[pos]
lemma_dicts = self._mongo_db.lexunits.find({'orthForm': lemma,
... | 782,502 |
Looks up synsets in the GermaNet database.
Arguments:
- `lemma`:
def synsets(self, lemma, pos=None):
    """Look up synsets in the GermaNet database.

    Arguments:
    - `lemma`: the lemma to look up
    - `pos`: optional part-of-speech filter

    Returns a sorted list of the distinct synsets containing the lemma.
    """
    distinct = {lemma_obj.synset for lemma_obj in self.lemmas(lemma, pos)}
    return sorted(distinct)
Looks up a synset in GermaNet using its string representation.
Arguments:
- `synset_repr`: a unicode string containing the lemma, part
of speech, and sense number of the first lemma of the synset
>>> gn.synset(u'funktionieren.v.2')
Synset(funktionieren.v.2) | def synset(self, synset_repr):
parts = synset_repr.split('.')
if len(parts) != 3:
return None
lemma, pos, sensenum = parts
if not sensenum.isdigit() or pos not in SHORT_POS_TO_LONG:
return None
sensenum = int(sensenum, 10)
pos = S... | 782,505 |
Builds a Synset object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object | def get_synset_by_id(self, mongo_id):
cache_hit = None
if self._synset_cache is not None:
cache_hit = self._synset_cache.get(mongo_id)
if cache_hit is not None:
return cache_hit
synset_dict = self._mongo_db.synsets.find_one({'_id': mongo_id})
if s... | 782,506 |
Builds a Lemma object from the database entry with the given
ObjectId.
Arguments:
- `mongo_id`: a bson.objectid.ObjectId object | def get_lemma_by_id(self, mongo_id):
cache_hit = None
if self._lemma_cache is not None:
cache_hit = self._lemma_cache.get(mongo_id)
if cache_hit is not None:
return cache_hit
lemma_dict = self._mongo_db.lexunits.find_one({'_id': mongo_id})
if lemm... | 782,507 |
Globs the XML files contained in the given directory and sorts
them into sections for import into the MongoDB database.
Arguments:
- `xml_path`: the path to the directory containing the GermaNet
XML files | def find_germanet_xml_files(xml_path):
xml_files = sorted(glob.glob(os.path.join(xml_path, '*.xml')))
# sort out the lexical files
lex_files = [xml_file for xml_file in xml_files if
re.match(r'(adj|nomen|verben)\.',
os.path.basename(xml_file).lower())]
xm... | 782,509 |
Reads in a GermaNet lexical information file and returns its
contents as a list of dictionary structures.
Arguments:
- `filename`: the name of the XML file to read | def read_lexical_file(filename):
with open(filename, 'rb') as input_file:
doc = etree.parse(input_file)
synsets = []
assert doc.getroot().tag == 'synsets'
for synset in doc.getroot():
if synset.tag != 'synset':
print('unrecognised child of <synsets>', synset)
... | 782,511 |
Reads the GermaNet relation file ``gn_relations.xml`` which lists
all the relations holding between lexical units and synsets.
Arguments:
- `filename`: | def read_relation_file(filename):
with open(filename, 'rb') as input_file:
doc = etree.parse(input_file)
lex_rels = []
con_rels = []
assert doc.getroot().tag == 'relations'
for child in doc.getroot():
if child.tag == 'lex_rel':
if 0 < len(child):
pri... | 782,512 |
Reads in a GermaNet wiktionary paraphrase file and returns its
contents as a list of dictionary structures.
Arguments:
- `filename`: | def read_paraphrase_file(filename):
with open(filename, 'rb') as input_file:
doc = etree.parse(input_file)
assert doc.getroot().tag == 'wiktionaryParaphrases'
paraphrases = []
for child in doc.getroot():
if child.tag == 'wiktionaryParaphrase':
paraphrase = child
... | 782,513 |
Reads in the given lexical information files and inserts their
contents into the given MongoDB database.
Arguments:
- `germanet_db`: a pymongo.database.Database object
- `lex_files`: a list of paths to XML files containing lexial
information | def insert_lexical_information(germanet_db, lex_files):
# drop the database collections if they already exist
germanet_db.lexunits.drop()
germanet_db.synsets.drop()
# inject data from XML files into the database
for lex_file in lex_files:
synsets = read_lexical_file(lex_file)
fo... | 782,514 |
Reads in the given GermaNet relation file and inserts its contents
into the given MongoDB database.
Arguments:
- `germanet_db`: a pymongo.database.Database object
- `gn_rels_file`: | def insert_relation_information(germanet_db, gn_rels_file):
lex_rels, con_rels = read_relation_file(gn_rels_file)
# cache the lexunits while we work on them
lexunits = {}
for lex_rel in lex_rels:
if lex_rel['from'] not in lexunits:
lexunits[lex_rel['from']] = germanet_db.lexuni... | 782,515 |
Reads in the given GermaNet relation file and inserts its contents
into the given MongoDB database.
Arguments:
- `germanet_db`: a pymongo.database.Database object
- `wiktionary_files`: | def insert_paraphrase_information(germanet_db, wiktionary_files):
num_paraphrases = 0
# cache the lexunits while we work on them
lexunits = {}
for filename in wiktionary_files:
paraphrases = read_paraphrase_file(filename)
num_paraphrases += len(paraphrases)
for paraphrase in... | 782,516 |
Creates the lemmatiser collection in the given MongoDB instance
using the data derived from the Projekt deutscher Wortschatz.
Arguments:
- `germanet_db`: a pymongo.database.Database object | def insert_lemmatisation_data(germanet_db):
# drop the database collection if it already exists
germanet_db.lemmatiser.drop()
num_lemmas = 0
input_file = gzip.open(os.path.join(os.path.dirname(__file__),
LEMMATISATION_FILE))
for line in input_file:
... | 782,517 |
For every synset in GermaNet, inserts count information derived
from SDEWAC.
Arguments:
- `germanet_db`: a pymongo.database.Database object | def insert_infocontent_data(germanet_db):
gnet = germanet.GermaNet(germanet_db)
# use add one smoothing
gn_counts = defaultdict(lambda: 1.)
total_count = 1
input_file = gzip.open(os.path.join(os.path.dirname(__file__),
WORD_COUNT... | 782,518 |
For every part of speech in GermaNet, computes the maximum
min_depth in that hierarchy.
Arguments:
- `germanet_db`: a pymongo.database.Database object | def compute_max_min_depth(germanet_db):
gnet = germanet.GermaNet(germanet_db)
max_min_depths = defaultdict(lambda: -1)
for synset in gnet.all_synsets():
min_depth = synset.min_depth
if max_min_depths[synset.category] < min_depth:
max_min_depths[synset.category] = m... | 782,519 |
Parse a 'Value' declaration.
Args:
value: String line from a template file, must begin with 'Value '.
Raises:
TextFSMTemplateError: Value declaration contains an error. | def Parse(self, value):
value_line = value.split(' ')
if len(value_line) < 3:
raise TextFSMTemplateError('Expect at least 3 tokens on line.')
if not value_line[2].startswith('('):
# Options are present
options = value_line[1]
for option in options.split(','):
self._Add... | 782,585 |
Passes the line through each rule until a match is made.
Args:
line: A string, the current input line. | def _CheckLine(self, line):
for rule in self._cur_state:
matched = self._CheckRule(rule, line)
if matched:
for value in matched.groupdict():
self._AssignVar(matched, value)
if self._Operations(rule):
# Not a Continue so check for state transition.
if r... | 782,586 |
RimeSolver Constructor
Parameters:
slvr_cfg : SolverConfiguration
Solver Configuration variables | def __init__(self, slvr_cfg):
super(RimeSolver, self).__init__(slvr_cfg)
#=========================================
# Register hypercube Dimensions
#=========================================
cube, slvr_cfg = self.hypercube, self.config()
_setup_hypercube(cube,... | 783,068 |
Sets a descendant node as the outgroup of a tree. This function
can be used to root a tree or even an internal node.
Parameters:
-----------
outgroup:
a node instance within the same tree structure that will be
used as a basal node. | def set_outgroup(self, outgroup):
outgroup = _translate_nodes(self, outgroup)
if self == outgroup:
##return
## why raise an error for this?
raise TreeError("Cannot set myself as outgroup")
parent_outgroup = outgroup.up
# Detects (sub)tree r... | 783,868 |
Returns a string containing an ascii drawing of the tree.
Parameters:
-----------
show_internal:
include internal edge names.
compact:
use exactly one line per tip.
attributes:
def get_ascii(self, show_internal=True, compact=False, attributes=None):
    """Return a string containing an ascii drawing of the tree.

    Parameters:
    -----------
    show_internal:
        include internal edge names.
    compact:
        use exactly one line per tip.
    attributes:
        A list of node attributes to be shown in the ASCII representation.
    """
    art_lines, _mid = self._asciiArt(show_internal=show_internal,
                                     compact=compact,
                                     attributes=attributes)
    return '\n' + '\n'.join(art_lines)
Returns a dictionary pointing to the preloaded content of each
internal node under this tree. Such a dictionary is intended
to work as a cache for operations that require many traversal
operations.
Parameters:
-----------
store_attr:
Specifies the no... | def get_cached_content(self, store_attr=None, container_type=set, _store=None):
if _store is None:
_store = {}
for ch in self.children:
ch.get_cached_content(store_attr=store_attr,
container_type=container_type,
... | 783,874 |
Draw a slice of x*y trees into a x,y grid non-overlapping.
Parameters:
-----------
x (int):
Number of grid cells in x dimension. Default=automatically set.
y (int):
Number of grid cells in y dimension. Default=automatically set.
start (int):
... | def draw_tree_grid(self,
nrows=None,
ncols=None,
start=0,
fixed_order=False,
shared_axis=False,
**kwargs):
# return nothing if tree is empty
if not self.treelist:
print("Treelist is empty")
return None, None
... | 783,966 |
Returns a copy of the tree with the selected tips removed. The entered
value can be a name or list of names. To prune on an internal node to
create a subtree see the .prune() function instead.
Parameters:
tips: list of tip names.
# example:
ptre = tre.drop_tips(['a', 'b... | def drop_tips(self, names=None, wildcard=None, regex=None):
# make a deepcopy of the tree
nself = self.copy()
# return if nothing to drop
if not any([names, wildcard, regex]):
return nself
# get matching names list with fuzzy match
tipnames = fuzzy_... | 784,021 |
Create a TransitionList object from a 'transitions' Workflow attribute.
Args:
tdef: list of transition definitions
states (StateList): already parsed state definitions.
prev (TransitionList): transition definitions from a parent.
Returns:
TransitionList: the list of transitions... | def _setup_transitions(tdef, states, prev=()):
trs = list(prev)
for transition in tdef:
if len(transition) == 3:
(name, source, target) = transition
if is_string(source) or isinstance(source, State):
source = [source]
source = [states[src] for src... | 784,093 |
Create a TransitionList.
Args:
transitions (list of (name, source, target) tuple): the transitions
def __init__(self, transitions):
    """Create a TransitionList.

    Args:
        transitions (list of (name, source, target) tuple): the transitions
            to include.
    """
    # Keep both a by-name mapping and the original declaration order.
    self._transitions = {}
    self._order = []
    for transition in transitions:
        self._transitions[transition.name] = transition
        self._order.append(transition.name)
Whether this hook applies to the given transition/state.
Args:
transition (Transition): the transition to check
from_state (State or None): the state to check. If absent, the check
is 'might this hook apply to the related transition, given a
valid source ... | def applies_to(self, transition, from_state=None):
if '*' in self.names:
return True
elif self.kind in (HOOK_BEFORE, HOOK_AFTER, HOOK_CHECK):
return self._match_transition(transition)
elif self.kind == HOOK_ON_ENTER:
return self._match_state(transitio... | 784,109 |
Import previously defined implementations.
Args:
parent_implems (ImplementationList): List of implementations defined
def load_parent_implems(self, parent_implems):
    """Import previously defined implementations.

    Args:
        parent_implems (ImplementationList): List of implementations defined
            in a parent class.
    """
    for entry in parent_implems.get_custom_implementations():
        transition_name, attribute, implementation = entry
        self.custom_implems.add(transition_name)
        self.transitions_at[transition_name] = attribute
        # Copy so the child list can be mutated without touching the parent's.
        self.implementations[transition_name] = implementation.copy()
Add an implementation.
Args:
transition (Transition): the transition for which the implementation
is added
attribute (str): the name of the attribute where the implementation
will be available
function (callable): the actual implementation fun... | def add_implem(self, transition, attribute, function, **kwargs):
implem = ImplementationProperty(
field_name=self.state_field,
transition=transition,
workflow=self.workflow,
implementation=function,
**kwargs)
self.implementations[trans... | 784,131 |
Log a transition.
Args:
transition (Transition): the name of the performed transition
from_state (State): the source state
instance (object): the modified object
Kwargs:
Any passed when calling the transition | def log_transition(self, transition, from_state, instance, *args, **kwargs):
logger = logging.getLogger('xworkflows.transitions')
try:
instance_repr = u(repr(instance), 'ignore')
except (UnicodeEncodeError, UnicodeDecodeError):
instance_repr = u("<bad repr>")
... | 784,142 |
Iterates over (valid) attributes of a class.
Args:
cls (object): the class to iterate over
Yields:
def iterclass(cls):
    """Iterate over (valid) attributes of a class.

    Args:
        cls (object): the class to iterate over

    Yields:
        (str, obj) tuples: the class-level attributes.
    """
    for attr_name in dir(cls):
        # dir() may list names that fail on access (e.g. broken descriptors);
        # hasattr() filters those out before yielding.
        if not hasattr(cls, attr_name):
            continue
        yield attr_name, getattr(cls, attr_name)
Calling the Player Stats API
Args:
player_key: Key of the player
board_key: key of the board
Return:
def get_player_stats(self, player_key, board_key):
    """Call the Player Stats API.

    Args:
        player_key: Key of the player
        board_key: key of the board

    Return:
        json data
    """
    endpoint = self.api_path + 'player/' + player_key + '/league/' + board_key + '/stats/'
    return self.get_response(endpoint)
Returns a list of all the airports
For a given country this returns a list of dicts, one for each airport, with information like the iata code of the airport etc
Args:
country (str): The country for which the airports will be fetched
Example::
def get_airports(self, country):
    """Return a list of all the airports for a given country.

    Each entry is a dict with airport information such as the iata code.

    Args:
        country (str): The country for which the airports will be fetched
    """
    country_slug = country.replace(" ", "-")
    return self._fr24.get_airports_data(AIRPORT_BASE.format(country_slug))
Simple method that decodes a given metar string.
Args:
metar (str): The metar data
Returns:
The metar data in readable format
Example::
from pyflightdata import FlightData
f=FlightData()
def decode_metar(self, metar):
    """Decode a METAR string into a human-readable report.

    Args:
        metar (str): The metar data

    Returns:
        The metar data in readable format, or an explanatory message when
        the optional ``metar`` parser package is not installed.
    """
    try:
        from metar import Metar
    except ImportError:
        # Narrowed from a bare ``except``: only a missing dependency should
        # trigger the fallback message; any other error propagates.
        return "Unable to parse metars. Please install parser from https://github.com/tomp/python-metar."
    report = Metar.Metar(metar)
    return report.string()
Convert compiled .ui file from PySide2 to Qt.py
Arguments:
lines (list): Each line of of .ui file
Usage:
>> with open("myui.py") as f:
.. lines = convert(f.readlines()) | def convert(lines):
def parse(line):
line = line.replace("from PySide2 import", "from Qt import")
line = line.replace("QtWidgets.QApplication.translate",
"Qt.QtCompat.translate")
return line
parsed = list()
for line in lines:
line = parse(li... | 786,458 |
Copies a POSIX timestamp from a date and time string.
Args:
time_string (str): date and time value formatted as:
YYYY-MM-DD hh:mm:ss.######[+-]##:##
Where # are numeric digits ranging from 0 to 9 and the seconds
fraction can be either 3 or 6 digits. The time of day, seconds
... | def CopyFromDateTimeString(self, time_string):
date_time_values = self._CopyDateTimeFromString(time_string)
year = date_time_values.get('year', 0)
month = date_time_values.get('month', 0)
day_of_month = date_time_values.get('day_of_month', 0)
hours = date_time_values.get('hours', 0)
minute... | 786,503 |
Copies a POSIX timestamp from a date and time string.
Args:
time_string (str): date and time value formatted as:
YYYY-MM-DD hh:mm:ss.######[+-]##:##
Where # are numeric digits ranging from 0 to 9 and the seconds
fraction can be either 3 or 6 digits. The time of day, seconds
... | def CopyFromDateTimeString(self, time_string):
date_time_values = self._CopyDateTimeFromString(time_string)
year = date_time_values.get('year', 0)
month = date_time_values.get('month', 0)
day_of_month = date_time_values.get('day_of_month', 0)
hours = date_time_values.get('hours', 0)
minute... | 786,506 |
Initializes a POSIX timestamp in nanoseconds.
Args:
timestamp (Optional[int]): POSIX timestamp in nanoseconds. | def __init__(self, timestamp=None):
super(PosixTimeInNanoseconds, self).__init__()
self._precision = definitions.PRECISION_1_NANOSECOND
self._timestamp = timestamp | 786,508 |
Copies a POSIX timestamp from a date and time string.
Args:
time_string (str): date and time value formatted as:
YYYY-MM-DD hh:mm:ss.######[+-]##:##
Where # are numeric digits ranging from 0 to 9 and the seconds
fraction can be either 3 or 6 digits. The time of day, seconds
... | def _CopyFromDateTimeString(self, time_string):
date_time_values = self._CopyDateTimeFromString(time_string)
year = date_time_values.get('year', 0)
month = date_time_values.get('month', 0)
day_of_month = date_time_values.get('day_of_month', 0)
hours = date_time_values.get('hours', 0)
minut... | 786,510 |
Initializes a Delphi TDateTime timestamp.
Args:
timestamp (Optional[float]): Delphi TDateTime timestamp. | def __init__(self, timestamp=None):
super(DelphiDateTime, self).__init__()
self._precision = definitions.PRECISION_1_MILLISECOND
self._timestamp = timestamp | 786,512 |
Copies a Delphi TDateTime timestamp from a string.
Args:
time_string (str): date and time value formatted as:
YYYY-MM-DD hh:mm:ss.######[+-]##:##
Where # are numeric digits ranging from 0 to 9 and the seconds
fraction can be either 3 or 6 digits. The time of day, seconds
... | def CopyFromDateTimeString(self, time_string):
date_time_values = self._CopyDateTimeFromString(time_string)
year = date_time_values.get('year', 0)
month = date_time_values.get('month', 0)
day_of_month = date_time_values.get('day_of_month', 0)
hours = date_time_values.get('hours', 0)
minute... | 786,514 |
Initializes a date time epoch.
Args:
year (int): year that is the start of the epoch e.g. 1970.
month (int): month that is the start of the epoch, where 1 represents
January.
day_of_month (int): day of the month that is the start of the epoch,
where 1 represents the first day. | def __init__(self, year, month, day_of_month):
super(DateTimeEpoch, self).__init__()
self.day_of_month = day_of_month
self.month = month
self.year = year | 786,528 |
Determines if the date time values are equal to other.
Args:
other (DateTimeValues): date time values to compare against.
Returns:
bool: True if the date time values are equal to other. | def __eq__(self, other):
if not isinstance(other, DateTimeValues):
return False
normalized_timestamp = self._GetNormalizedTimestamp()
other_normalized_timestamp = other._GetNormalizedTimestamp() # pylint: disable=protected-access
if normalized_timestamp is None and other_normalized_timesta... | 786,530 |
Determines if the date time values are greater than or equal to other.
Args:
other (DateTimeValues): date time values to compare against.
Returns:
bool: True if the date time values are greater than or equal to other.
Raises:
ValueError: if other is not an instance of DateTimeValues. | def __ge__(self, other):
if not isinstance(other, DateTimeValues):
raise ValueError('Other not an instance of DateTimeValues')
normalized_timestamp = self._GetNormalizedTimestamp()
other_normalized_timestamp = other._GetNormalizedTimestamp() # pylint: disable=protected-access
if normalized... | 786,531 |
Determines if the date time values are greater than other.
Args:
other (DateTimeValues): date time values to compare against.
Returns:
bool: True if the date time values are greater than other.
Raises:
ValueError: if other is not an instance of DateTimeValues. | def __gt__(self, other):
if not isinstance(other, DateTimeValues):
raise ValueError('Other not an instance of DateTimeValues')
normalized_timestamp = self._GetNormalizedTimestamp()
other_normalized_timestamp = other._GetNormalizedTimestamp() # pylint: disable=protected-access
if normalized... | 786,532 |
Determines if the date time values are less than other.
Args:
other (DateTimeValues): date time values to compare against.
Returns:
bool: True if the date time values are less than other.
Raises:
ValueError: if other is not an instance of DateTimeValues. | def __lt__(self, other):
if not isinstance(other, DateTimeValues):
raise ValueError('Other not an instance of DateTimeValues')
normalized_timestamp = self._GetNormalizedTimestamp()
other_normalized_timestamp = other._GetNormalizedTimestamp() # pylint: disable=protected-access
if normalized... | 786,533 |
Determines if the date time values are not equal to other.
Args:
other (DateTimeValues): date time values to compare against.
Returns:
bool: True if the date time values are not equal to other. | def __ne__(self, other):
if not isinstance(other, DateTimeValues):
return True
normalized_timestamp = self._GetNormalizedTimestamp()
other_normalized_timestamp = other._GetNormalizedTimestamp() # pylint: disable=protected-access
if normalized_timestamp is None and other_normalized_timestam... | 786,534 |
Copies a date from a string.
Args:
date_string (str): date value formatted as: YYYY-MM-DD
Returns:
tuple[int, int, int]: year, month, day of month.
Raises:
ValueError: if the date string is invalid or not supported. | def _CopyDateFromString(self, date_string):
date_string_length = len(date_string)
# The date string should at least contain 'YYYY-MM-DD'.
if date_string_length < 10:
raise ValueError('Date string too short.')
if date_string[4] != '-' or date_string[7] != '-':
raise ValueError('Invalid... | 786,536 |
Determines date values.
Args:
number_of_days (int): number of days since epoch.
date_time_epoch (DateTimeEpoch): date and time of the epoch.
Returns:
tuple[int, int, int]: year, month, day of month. | def _GetDateValuesWithEpoch(self, number_of_days, date_time_epoch):
return self._GetDateValues(
number_of_days, date_time_epoch.year, date_time_epoch.month,
date_time_epoch.day_of_month) | 786,539 |
Retrieves the day of the year for a specific day of a month in a year.
Args:
year (int): year e.g. 1970.
month (int): month, where 1 represents January.
day_of_month (int): day of the month, where 1 represents the first day.
Returns:
int: day of year.
Raises:
ValueError: if ... | def _GetDayOfYear(self, year, month, day_of_month):
if month not in range(1, 13):
raise ValueError('Month value out of bounds.')
days_per_month = self._GetDaysPerMonth(year, month)
if day_of_month < 1 or day_of_month > days_per_month:
raise ValueError('Day of month value out of bounds.')
... | 786,540 |
Retrieves the number of days in a month of a specific year.
Args:
year (int): year e.g. 1970.
month (int): month, where 1 represents January.
Returns:
int: number of days in the month.
Raises:
ValueError: if the month value is out of bounds. | def _GetDaysPerMonth(self, year, month):
if month not in range(1, 13):
raise ValueError('Month value out of bounds.')
days_per_month = self._DAYS_PER_MONTH[month - 1]
if month == 2 and self._IsLeapYear(year):
days_per_month += 1
return days_per_month | 786,541 |
Retrieves the number of days in a century.
Args:
year (int): year in the century e.g. 1970.
Returns:
int: number of (remaining) days in the century.
Raises:
ValueError: if the year value is out of bounds. | def _GetNumberOfDaysInCentury(self, year):
if year < 0:
raise ValueError('Year value out of bounds.')
year, _ = divmod(year, 100)
if self._IsLeapYear(year):
return 36525
return 36524 | 786,542 |
Determines time values.
Args:
number_of_seconds (int|decimal.Decimal): number of seconds.
Returns:
tuple[int, int, int, int]: days, hours, minutes, seconds. | def _GetTimeValues(self, number_of_seconds):
number_of_seconds = int(number_of_seconds)
number_of_minutes, seconds = divmod(number_of_seconds, 60)
number_of_hours, minutes = divmod(number_of_minutes, 60)
number_of_days, hours = divmod(number_of_hours, 24)
return number_of_days, hours, minutes, ... | 786,544 |
Copies a fake timestamp from a date and time string.
Args:
time_string (str): date and time value formatted as:
YYYY-MM-DD hh:mm:ss.######[+-]##:##
Where # are numeric digits ranging from 0 to 9 and the seconds
fraction can be either 3 or 6 digits. The time of day, seconds
... | def CopyFromDateTimeString(self, time_string):
date_time_values = self._CopyDateTimeFromString(time_string)
year = date_time_values.get('year', 0)
month = date_time_values.get('month', 0)
day_of_month = date_time_values.get('day_of_month', 0)
hours = date_time_values.get('hours', 0)
minute... | 786,553 |
Initializes a RFC2579 date-time.
Args:
rfc2579_date_time_tuple:
(Optional[tuple[int, int, int, int, int, int, int]]):
RFC2579 date-time time, contains year, month, day of month, hours,
minutes, seconds and deciseconds.
Raises:
ValueError: if the system time is invalid... | def __init__(self, rfc2579_date_time_tuple=None):
super(RFC2579DateTime, self).__init__()
self._number_of_seconds = None
self._precision = definitions.PRECISION_100_MILLISECONDS
self.day_of_month = None
self.hours = None
self.deciseconds = None
self.minutes = None
self.month = None
... | 786,555 |
Initializes a semantic time.
Args:
string (str): semantic representation of the time, such as:
"Never", "Not set". | def __init__(self, string=None):
super(SemanticTime, self).__init__()
self._string = string | 786,565 |
Determines if the date time values are equal to other.
Args:
other (DateTimeValues): date time values to compare against.
Returns:
bool: True if the date time values are equal to other. | def __eq__(self, other):
if not isinstance(other, SemanticTime):
return False
return self._SORT_ORDER == other._SORT_ORDER | 786,566 |
Determines if the date time values are greater than or equal to other.
Args:
other (DateTimeValues): date time values to compare against.
Returns:
bool: True if the date time values are greater than or equal to other.
Raises:
ValueError: if other is not an instance of DateTimeValues. | def __ge__(self, other):
if not isinstance(other, interface.DateTimeValues):
raise ValueError('Other not an instance of DateTimeValues')
if not isinstance(other, SemanticTime):
return False
return self._SORT_ORDER >= other._SORT_ORDER | 786,567 |
Determines if the date time values are less than other.
Args:
other (DateTimeValues): date time values to compare against.
Returns:
bool: True if the date time values are less than other.
Raises:
ValueError: if other is not an instance of DateTimeValues. | def __lt__(self, other):
if not isinstance(other, interface.DateTimeValues):
raise ValueError('Other not an instance of DateTimeValues')
if not isinstance(other, SemanticTime):
return True
return self._SORT_ORDER < other._SORT_ORDER | 786,568 |
Determines if the date time values are not equal to other.
Args:
other (DateTimeValues): date time values to compare against.
Returns:
bool: True if the date time values are not equal to other. | def __ne__(self, other):
if not isinstance(other, SemanticTime):
return True
return self._SORT_ORDER != other._SORT_ORDER | 786,569 |
Determines if the date time values are greater than other.
Args:
other (DateTimeValues): date time values to compare against.
Returns:
bool: True if the date time values are greater than other.
Raises:
ValueError: if other is not an instance of DateTimeValues. | def __gt__(self, other):
if not isinstance(other, interface.DateTimeValues):
raise ValueError('Other not an instance of DateTimeValues')
return not isinstance(other, Never) | 786,570 |
Determines if the date time values are less than or equal to other.
Args:
other (DateTimeValues): date time values to compare against.
Returns:
bool: True if the date time values are greater than or equal to other.
Raises:
ValueError: if other is not an instance of DateTimeValues. | def __le__(self, other):
if not isinstance(other, interface.DateTimeValues):
raise ValueError('Other not an instance of DateTimeValues')
return isinstance(other, Never) | 786,571 |
Initializes a FAT date time.
Args:
fat_date_time (Optional[int]): FAT date time. | def __init__(self, fat_date_time=None):
number_of_seconds = None
if fat_date_time is not None:
number_of_seconds = self._GetNumberOfSeconds(fat_date_time)
super(FATDateTime, self).__init__()
self._precision = definitions.PRECISION_2_SECONDS
self._number_of_seconds = number_of_seconds | 786,577 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.