docstring
stringlengths
52
499
function
stringlengths
67
35.2k
__index_level_0__
int64
52.6k
1.16M
Return the list of 'MUST' validators for the correct version of STIX. Args: options: ValidationOptions instance with validation options for this validation run, including the STIX spec version.
def _get_musts(options):
    """Return the list of 'MUST' validators for the requested STIX version.

    Args:
        options: ValidationOptions instance; its ``version`` attribute
            selects between the STIX 2.0 and 2.1 validator lists.
    """
    if options.version == '2.0':
        return musts20.list_musts(options)
    return musts21.list_musts(options)
780,733
Return the list of 'SHOULD' validators for the correct version of STIX. Args: options: ValidationOptions instance with validation options for this validation run, including the STIX spec version.
def _get_shoulds(options):
    """Return the list of 'SHOULD' validators for the requested STIX version.

    Args:
        options: ValidationOptions instance; its ``version`` attribute
            selects between the STIX 2.0 and 2.1 validator lists.
    """
    if options.version == '2.0':
        return shoulds20.list_shoulds(options)
    return shoulds21.list_shoulds(options)
780,734
Perform STIX JSON Schema validation against STIX input. Find the correct schema by looking at the 'type' property of the `instance` JSON object. Args: instance: A Python dictionary representing a STIX object with a 'type' property. options: ValidationOptions instance with validation options for this validation run. Returns: A dictionary of validation results
def validate_instance(instance, options=None):
    """Perform STIX JSON Schema validation against STIX input.

    Find the correct schema by looking at the 'type' property of the
    `instance` JSON object.

    Args:
        instance: A Python dictionary representing a STIX object with a
            'type' property.
        options: ValidationOptions instance with validation options for this
            validation run.

    Returns:
        An ObjectValidationResults instance describing the outcome.

    Raises:
        ValidationError: if `instance` (or a bundled object) lacks 'type'.
        SchemaInvalidError: if a JSON schema reference fails to resolve.
    """
    if 'type' not in instance:
        raise ValidationError("Input must be an object with a 'type' property.")
    if not options:
        options = ValidationOptions()

    error_gens = []

    # Schema validation
    if instance['type'] == 'bundle' and 'objects' in instance:
        # Validate each object in a bundle separately
        for sdo in instance['objects']:
            if 'type' not in sdo:
                raise ValidationError("Each object in bundle must have a 'type' property.")
            error_gens += _schema_validate(sdo, options)
    else:
        error_gens += _schema_validate(instance, options)

    # Custom validation
    must_checks = _get_musts(options)
    should_checks = _get_shoulds(options)
    output.info("Running the following additional checks: %s."
                % ", ".join(x.__name__ for x in chain(must_checks, should_checks)))
    try:
        errors = _iter_errors_custom(instance, must_checks, options)
        warnings = _iter_errors_custom(instance, should_checks, options)

        if options.strict:
            # In strict mode warnings are treated as errors.
            chained_errors = chain(errors, warnings)
            warnings = []
        else:
            chained_errors = errors
            warnings = [pretty_error(x, options.verbose) for x in warnings]
    except schema_exceptions.RefResolutionError:
        raise SchemaInvalidError('Invalid JSON schema: a JSON reference '
                                 'failed to resolve')

    # List of error generators and message prefixes (to denote which object
    # the error comes from)
    error_gens += [(chained_errors, '')]

    # Prepare the list of errors (this actually triggers the custom
    # validation functions).
    error_list = []
    for gen, prefix in error_gens:
        for error in gen:
            error_list.append(SchemaError(prefix + pretty_error(error, options.verbose)))

    return ObjectValidationResults(is_valid=not error_list,
                                   object_id=instance.get('id', ''),
                                   errors=error_list,
                                   warnings=warnings)
780,736
Print JSON Schema validation errors to stdout. Args: results: An instance of ObjectValidationResults. level: The level at which to print the results.
def print_schema_results(results, level=0):
    """Print JSON Schema validation errors to stdout.

    Args:
        results: An instance of ObjectValidationResults.
        level: The indentation level at which to print the results.
    """
    for err in results.errors:
        print_level(logger.error, _RED + "[X] %s", level, err)
780,801
Print the results of validating an object. Args: obj_result: An ObjectValidationResults instance.
def print_object_results(obj_result):
    """Print the results of validating a single object.

    Args:
        obj_result: An ObjectValidationResults instance.
    """
    print_results_header(obj_result.object_id, obj_result.is_valid)

    # Warnings first, then errors, each indented one level.
    if obj_result.warnings:
        print_warning_results(obj_result, 1)
    if obj_result.errors:
        print_schema_results(obj_result, 1)
780,804
Print the results of validating a file. Args: file_result: A FileValidationResults instance.
def print_file_results(file_result):
    """Print the results of validating a file.

    Args:
        file_result: A FileValidationResults instance.
    """
    print_results_header(file_result.filepath, file_result.is_valid)

    for obj_result in file_result.object_results:
        if obj_result.warnings:
            print_warning_results(obj_result, 1)
        if obj_result.errors:
            print_schema_results(obj_result, 1)

    # A fatal error (e.g. unparseable JSON) is reported last.
    if file_result.fatal:
        print_fatal_results(file_result.fatal, 1)
780,805
Print `results` (the results of validation) to stdout. Args: results: A list of FileValidationResults or ObjectValidationResults instances.
def print_results(results):
    """Print `results` (the results of validation) to stdout.

    Args:
        results: A list of FileValidationResults or ObjectValidationResults
            instances, or a single such instance.

    Raises:
        ValueError: if an element does not provide a ``log`` method.
    """
    if not isinstance(results, list):
        results = [results]

    for result in results:
        try:
            result.log()
        except AttributeError:
            raise ValueError('Argument to print_results() must be a list of '
                             'FileValidationResults or ObjectValidationResults.')
780,806
Parses a list of command line arguments into a ValidationOptions object. Args: cmd_args (list of str): The list of command line arguments to be parsed. is_script: Whether the arguments are intended for use in a stand-alone script or imported into another tool. Returns: Instance of ``ValidationOptions``
def parse_args(cmd_args, is_script=False):
    """Parse a list of command line arguments into a ValidationOptions object.

    Args:
        cmd_args (list of str): The list of command line arguments to be parsed.
        is_script: Whether the arguments are intended for use in a stand-alone
            script or imported into another tool.

    Returns:
        Instance of ``ValidationOptions``.
    """
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=NewlinesHelpFormatter,
        epilog=CODES_TABLE
    )

    # Input options
    if is_script:
        parser.add_argument(
            "files",
            metavar="FILES",
            nargs="*",
            default=sys.stdin,
            help="A whitespace separated list of STIX files or directories of "
                 "STIX files to validate. If none given, stdin will be used."
        )
    # NOTE(review): store_true with default=True makes this flag a no-op;
    # behavior preserved here — confirm intent with the original author.
    parser.add_argument(
        "-r",
        "--recursive",
        dest="recursive",
        action="store_true",
        default=True,
        help="Recursively descend into input directories."
    )
    parser.add_argument(
        "-s",
        "--schemas",
        dest="schema_dir",
        help="Custom schema directory. If provided, input will be validated "
             "against these schemas in addition to the STIX schemas bundled "
             "with this script."
    )
    parser.add_argument(
        "--version",
        dest="version",
        default=DEFAULT_VER,
        help="The version of the STIX specification to validate against (e.g. "
             "\"2.0\")."
    )

    # Output options
    parser.add_argument(
        "-v",
        "--verbose",
        dest="verbose",
        action="store_true",
        default=False,
        help="Print informational notes and more verbose error messages."
    )
    parser.add_argument(
        "-q",
        "--silent",
        dest="silent",
        action="store_true",
        default=False,
        help="Silence all output to stdout."
    )
    parser.add_argument(
        "-d",
        "--disable",
        "--ignore",
        dest="disabled",
        default="",
        help="A comma-separated list of recommended best practice checks to "
             "skip. By default, no checks are disabled. \n\n"
             "Example: --disable 202,210"
    )
    parser.add_argument(
        "-e",
        "--enable",
        "--select",
        dest="enabled",
        default="",
        help="A comma-separated list of recommended best practice checks to "
             "enable. If the --disable option is not used, no other checks "
             "will be run. By default, all checks are enabled.\n\n"
             "Example: --enable 218"
    )
    parser.add_argument(
        "--strict",
        dest="strict",
        action="store_true",
        default=False,
        help="Treat warnings as errors and fail validation if any are found."
    )
    parser.add_argument(
        "--strict-types",
        dest="strict_types",
        action="store_true",
        default=False,
        help="Ensure that no custom object types are used, only those defined"
             " in the STIX specification."
    )
    parser.add_argument(
        "--strict-properties",
        dest="strict_properties",
        action="store_true",
        default=False,
        help="Ensure that no custom properties are used, only those defined"
             " in the STIX specification."
    )
    parser.add_argument(
        "--no-cache",
        dest="no_cache",
        action="store_true",
        default=False,
        help="Disable the caching of external source values."
    )
    parser.add_argument(
        "--refresh-cache",
        dest="refresh_cache",
        action="store_true",
        default=False,
        help="Clears the cache of external source values, then "
             "during validation downloads them again."
    )
    parser.add_argument(
        "--clear-cache",
        dest="clear_cache",
        action="store_true",
        default=False,
        help="Clear the cache of external source values after validation."
    )
    parser.add_argument(
        "--enforce-refs",
        dest="enforce_refs",
        action="store_true",
        default=False,
        help="Ensures that all SDOs being referenced by SROs are contained "
             "within the same bundle."
    )

    args = parser.parse_args(cmd_args)

    if not is_script:
        args.files = ""
    if not args.version:
        args.version = DEFAULT_VER

    return ValidationOptions(args)
780,817
Power off a VM. Accepts either the VM object or simply the ID of the VM that we want to power off. Args: server: VM object that represents the VM to power off. server_id: Int or str representing the ID of the VM to power off. Returns: True if json_obj['Success'] is 'True', else False
def poweroff_server(self, server=None, server_id=None):
    """Power off a VM, identified either by object or by ID.

    Args:
        server: VM object representing the VM to power off.
        server_id: Int or str ID of the VM to power off.

    Returns:
        bool: True if the API response reports ``Success`` as 'True',
        otherwise False.

    Raises:
        Exception: if neither ``server`` nor ``server_id`` identifies a VM.
    """
    sid = server_id if server_id is not None else server.sid
    if sid is None:
        raise Exception('No Server Specified.')
    json_scheme = self.gen_def_json_scheme('SetEnqueueServerPowerOff',
                                           dict(ServerId=sid))
    json_obj = self.call_method_post('SetEnqueueServerPowerOff',
                                     json_scheme=json_scheme)
    # Bug fix: the original used `is 'True'`, which compares object identity
    # and only appeared to work because of CPython string interning.  Use
    # equality so any 'True' string from the API compares correctly.
    return json_obj['Success'] == 'True'
781,351
Terminates the session token, effectively logging out the user from all crowd-enabled services. Args: token: The session token. Returns: True: If session terminated None: If session termination failed
def terminate_session(self, token):
    """Terminate the session token, logging the user out of all
    crowd-enabled services.

    Args:
        token: The session token.

    Returns:
        True: if the session was terminated.
        None: if session termination failed.
    """
    url = self.rest_url + "/session/%s" % token
    resp = self._delete(url)

    # For consistency between methods use None rather than False
    # if token invalidation failed for any reason.
    return True if resp.ok else None
781,406
Set the active state of a user Args: username: The account username active_state: True or False Returns: True: If successful None: If no user or failure occurred
def set_active(self, username, active_state):
    """Set the active state of a user.

    Args:
        username: The account username.
        active_state: True or False.

    Returns:
        True: if successful (or the user is already in the desired state).
        None: if no such user exists or the update failed.

    Raises:
        ValueError: if ``active_state`` is not a boolean.
    """
    if active_state not in (True, False):
        raise ValueError("active_state must be True or False")

    user = self.get_user(username)
    if user is None:
        return None

    # Nothing to do if the user is already in the desired state.
    if user['active'] is active_state:
        return True

    user['active'] = active_state
    resp = self._put(self.rest_url + "/user",
                     params={"username": username},
                     data=json.dumps(user))
    return True if resp.status_code == 204 else None
781,409
Change the password for a user. Args: username: The account username. newpassword: The account's new password. raise_on_error: optional (default: False) Returns: True: Succeeded False: If unsuccessful
def change_password(self, username, newpassword, raise_on_error=False):
    """Change the password for a user.

    Args:
        username: The account username.
        newpassword: The account's new password.
        raise_on_error: optional (default: False); raise on API failure.

    Returns:
        True: if the change succeeded.
        False: if it failed and ``raise_on_error`` is False.

    Raises:
        RuntimeError: on failure when ``raise_on_error`` is True.
    """
    resp = self._put(self.rest_url + "/user/password",
                     data=json.dumps({"value": newpassword}),
                     params={"username": username})

    if resp.ok:
        return True
    if raise_on_error:
        raise RuntimeError(resp.json()['message'])
    return False
781,413
Sends the user a password reset link (by email) Args: username: The account username. Returns: True: Succeeded False: If unsuccessful
def send_password_reset_link(self, username):
    """Send the user a password reset link (by email).

    Args:
        username: The account username.

    Returns:
        True: if the request succeeded.
        False: otherwise.
    """
    resp = self._post(self.rest_url + "/user/mail/password",
                      params={"username": username})
    return bool(resp.ok)
781,414
Retrieve a list of all group names that have <username> as a direct or indirect member. Args: username: The account username. Returns: list: A list of strings of group names.
def get_nested_groups(self, username):
    """Retrieve all group names that have <username> as a direct or
    indirect member.

    Args:
        username: The account username.

    Returns:
        list: Group names (strings), or None if the request failed.
    """
    resp = self._get(self.rest_url + "/user/group/nested",
                     params={"username": username})
    if not resp.ok:
        return None
    return [group['name'] for group in resp.json()['groups']]
781,415
Retrieves a list of all users that directly or indirectly belong to the given groupname. Args: groupname: The group name. Returns: list: A list of strings of user names.
def get_nested_group_users(self, groupname):
    """Retrieve all users that directly or indirectly belong to the
    given group.

    Args:
        groupname: The group name.

    Returns:
        list: User names (strings), or None if the request failed.
    """
    resp = self._get(self.rest_url + "/group/user/nested",
                     params={"groupname": groupname,
                             "start-index": 0,
                             "max-results": 99999})
    if not resp.ok:
        return None
    return [user['name'] for user in resp.json()['users']]
781,416
Determines if the user exists. Args: username: The user name. Returns: bool: True if the user exists in the Crowd application.
def user_exists(self, username):
    """Determine whether the user exists.

    Args:
        username: The user name.

    Returns:
        True if the user exists in the Crowd application; None if the
        lookup failed (note: None, not False, for consistency with the
        other methods).
    """
    resp = self._get(self.rest_url + "/user",
                     params={"username": username})
    return True if resp.ok else None
781,417
Return the JSON mapping file for an index. Mappings are stored as JSON files in the mappings subdirectory of this app. They must be saved as {{index}}.json. Args: index: string, the name of the index to look for.
def get_index_mapping(index):
    """Return the JSON mapping file for an index.

    Mappings are stored as JSON files in the mappings subdirectory of this
    app; they must be saved as {{index}}.json.

    Args:
        index: string, the name of the index to look for.
    """
    mappings_dir = get_setting("mappings_dir")
    path = os.path.join(mappings_dir, "%s.json" % index)
    with open(path, "r") as mapping_file:
        return json.load(mapping_file)
781,541
Return list of models configured for a named index. Args: index: string, the name of the index to look up.
def get_index_models(index):
    """Return the list of models configured for a named index.

    Args:
        index: string, the name of the index to look up.
    """
    configured = get_index_config(index).get("models")
    models = []
    for app_model in configured:
        app_label, model_name = app_model.split(".")
        models.append(apps.get_model(app_label, model_name))
    return models
781,543
Return list of all indexes in which a model is configured. A model may be configured to appear in multiple indexes. This function will return the names of the indexes as a list of strings. This is useful if you want to know which indexes need updating when a model is saved. Args: model: a Django model class.
def get_model_indexes(model):
    """Return the list of all indexes in which a model is configured.

    A model may appear in multiple indexes; this is useful for knowing
    which indexes need updating when a model is saved.

    Args:
        model: a Django model class.
    """
    return [index
            for index in get_index_names()
            for configured_model in get_index_models(index)
            if configured_model == model]
781,544
Execute the indicated action within the environment and return the resulting immediate reward dictated by the reward program. Usage: immediate_reward = scenario.execute(selected_action) Arguments: action: The action to be executed within the current situation. Return: A float, the reward received for the action that was executed, or None if no reward is offered.
def execute(self, action):
    """Execute the indicated action within the environment and return the
    resulting immediate reward.

    Arguments:
        action: The action to be executed within the current situation.

    Return:
        Whether the action matched the addressed data bit (truthy reward).
    """
    assert action in self.possible_actions

    self.remaining_cycles -= 1
    # The first `address_size` bits select which data bit is the answer.
    address = int(bitstrings.BitString(
        self.current_situation[:self.address_size]
    ))
    target_bit = self.current_situation[self.address_size + address]
    return action == target_bit
782,266
Execute the indicated action within the environment and return the resulting immediate reward dictated by the reward program. Usage: immediate_reward = scenario.execute(selected_action) Arguments: action: The action to be executed within the current situation. Return: A float, the reward received for the action that was executed, or None if no reward is offered.
def execute(self, action):
    """Execute the indicated action within the environment and return the
    resulting immediate reward.

    Arguments:
        action: The action to be executed within the current situation.

    Return:
        Whether the action matched the needle value (truthy reward).
    """
    assert action in self.possible_actions
    self.remaining_cycles -= 1
    return self.needle_value == action
782,270
Execute the indicated action within the environment and return the resulting immediate reward dictated by the reward program. Usage: immediate_reward = scenario.execute(selected_action) Arguments: action: The action to be executed within the current situation. Return: A float, the reward received for the action that was executed, or None if no reward is offered.
def execute(self, action):
    """Execute the indicated action in the wrapped scenario, logging the
    reward and running totals, and return the immediate reward.

    Arguments:
        action: The action to be executed within the current situation.

    Return:
        The reward from the wrapped scenario (may be None).
    """
    self.logger.debug('Executing action: %s', action)

    reward = self.wrapped.execute(action)
    # Guard against a None reward before accumulating.
    if reward:
        self.total_reward += reward
    self.steps += 1

    self.logger.debug('Reward received on this step: %.5f', reward or 0)
    self.logger.debug('Average reward per step: %.5f',
                      self.total_reward / self.steps)

    return reward
782,274
Execute the indicated action within the environment and return the resulting immediate reward dictated by the reward program. Usage: immediate_reward = scenario.execute(selected_action) Arguments: action: The action to be executed within the current situation. Return: A float, the reward received for the action that was executed, or None if no reward is offered.
def execute(self, action):
    """Execute the indicated action and return the immediate reward from
    the reward function for the current classification step.

    Arguments:
        action: The action to be executed within the current situation.

    Return:
        The reward value returned by the reward function.
    """
    step_reward = self.reward_function(
        action,
        self.classifications[self.steps]
    )
    self.total_reward += step_reward
    self.steps += 1
    return step_reward
782,277
Create and return a new classifier set initialized for handling the given scenario. Usage: scenario = MUXProblem() model = algorithm.new_model(scenario) model.run(scenario, learn=True) Arguments: scenario: A Scenario instance. Return: A new, untrained classifier set, suited for the given scenario.
def new_model(self, scenario):
    """Create and return a new classifier set initialized for handling the
    given scenario.

    Arguments:
        scenario: A Scenario instance.

    Return:
        A new, untrained classifier set, suited for the given scenario.
    """
    assert isinstance(scenario, scenarios.Scenario)
    possible_actions = scenario.get_possible_actions()
    return ClassifierSet(self, possible_actions)
782,281
Returns the maximum number of reads for the given solver parameters. Args: **params: Parameters for the sampling method. Relevant to num_reads: - annealing_time - readout_thermalization - num_reads - programming_thermalization Returns: int: The maximum number of reads.
def max_num_reads(self, **params):
    """Return the maximum number of reads for the given solver parameters.

    Args:
        **params: Sampling parameters. Relevant to num_reads:
            annealing_time, readout_thermalization, num_reads,
            programming_thermalization.

    Returns:
        int: The maximum number of reads.
    """
    # dev note: in the future it would be good to have a way of doing this
    # server-side, as we are duplicating logic here.
    props = self.properties

    # Software solvers don't use any of the timing parameters.
    if self.software or not params:
        return props['num_reads_range'][1]

    # QPU: reads must fit within the allowed problem run duration.
    _, duration = props['problem_run_duration_range']
    anneal = params.get('annealing_time',
                        props['default_annealing_time'])
    readout = params.get('readout_thermalization',
                         props['default_readout_thermalization'])
    programming = params.get('programming_thermalization',
                             props['default_programming_thermalization'])

    return min(props['num_reads_range'][1],
               int((duration - programming) / (anneal + readout)))
782,318
Internal method for both sample_ising and sample_qubo. Args: linear (list/dict): Linear terms of the model. quadratic (dict of (int, int):float): Quadratic terms of the model. **params: Parameters for the sampling method, specified per solver. Returns: :obj: `Future`
def _sample(self, type_, linear, quadratic, params):
    """Internal method for both sample_ising and sample_qubo.

    Args:
        type_: Problem type string (e.g. 'ising' or 'qubo').
        linear (list/dict): Linear terms of the model.
        quadratic (dict of (int, int):float): Quadratic terms of the model.
        params: Parameters for the sampling method, specified per solver.

    Returns:
        :obj:`Future`

    Raises:
        ValueError: if the problem graph is incompatible with the solver.
        KeyError: if a parameter is not recognized by this solver.
    """
    # Check the problem
    if not self.check_problem(linear, quadratic):
        raise ValueError("Problem graph incompatible with solver.")

    # Mix the new parameters with the default parameters
    combined_params = dict(self._params)
    combined_params.update(params)

    # Check the parameters before submitting
    for key in combined_params:
        if key not in self.parameters and not key.startswith('x_'):
            raise KeyError("{} is not a parameter of this solver.".format(key))

    # transform some of the parameters in-place
    self._format_params(type_, combined_params)

    body = json.dumps({
        'solver': self.id,
        'data': encode_bqm_as_qp(self, linear, quadratic),
        'type': type_,
        'params': combined_params
    })
    _LOGGER.trace("Encoded sample request: %s", body)

    future = Future(solver=self, id_=None, return_matrix=self.return_matrix,
                    submission_data=(type_, linear, quadratic, params))

    _LOGGER.debug("Submitting new problem to: %s", self.id)
    self.client._submit(body, future)

    return future
782,321
Resume polling for a problem previously submitted. Args: id_: Identification of the query. Returns: :obj: `Future`
def _retrieve_problem(self, id_):
    """Resume polling for a previously submitted problem.

    Args:
        id_: Identification of the query.

    Returns:
        :obj:`Future`
    """
    future = Future(self, id_, self.return_matrix, None)
    self.client._poll(future)
    return future
782,324
Calculates the entropy of the attribute attr in given data set data. Parameters: data<dict|list> := if dict, treated as value counts of the given attribute name if list, treated as a raw list from which the value counts will be generated attr<string> := the name of the class attribute
def entropy(data, class_attr=None, method=DEFAULT_DISCRETE_METRIC):
    """Calculate the entropy of the attribute in the given data set.

    Parameters:
        data<dict|list> := if dict, treated as value counts of the given
            attribute name; if list, treated as a raw list from which the
            value counts will be generated.
        class_attr<string> := the name of the class attribute.
        method := which entropy variant to compute (ENTROPY1/2/3).
    """
    assert (class_attr is None and isinstance(data, dict)) \
        or (class_attr is not None and isinstance(data, list))

    if isinstance(data, dict):
        counts = data
    else:
        counts = defaultdict(float)  # {attr: count}
        for record in data:
            # Note: A missing attribute is treated like an attribute with a
            # value of None, representing the attribute is "irrelevant".
            counts[record.get(class_attr)] += 1.0

    len_data = float(sum(cnt for _, cnt in iteritems(counts)))
    n = max(2, len(counts))
    total = float(sum(counts.values()))
    assert total, "There must be at least one non-zero count."

    if method == ENTROPY1:
        return -sum((count / len_data) * math.log(count / len_data, n)
                    for count in itervalues(counts) if count)
    elif method == ENTROPY2:
        # Same as ENTROPY1 but penalized by the number of distinct values.
        return -sum((count / len_data) * math.log(count / len_data, n)
                    for count in itervalues(counts) if count) \
            - ((len(counts) - 1) / float(total))
    elif method == ENTROPY3:
        # Same penalty as ENTROPY2 but weighted 100x.
        return -sum((count / len_data) * math.log(count / len_data, n)
                    for count in itervalues(counts) if count) \
            - 100 * ((len(counts) - 1) / float(total))
    else:
        raise Exception("Unknown entropy method %s." % method)
782,342
Calculates the information gain (reduction in entropy) that would result by splitting the data on the chosen attribute (attr). Parameters: prefer_fewer_values := Weights the gain by the count of the attribute's unique values. If multiple attributes have the same gain, but one has slightly fewer attributes, this will cause the one with fewer attributes to be preferred.
def get_gain(data, attr, class_attr,
             method=DEFAULT_DISCRETE_METRIC,
             only_sub=0, prefer_fewer_values=False, entropy_func=None):
    """Calculate the information gain (reduction in entropy) that would
    result by splitting the data on the chosen attribute (attr).

    Parameters:
        prefer_fewer_values := Weights the gain by the count of the
            attribute's unique values. If multiple attributes have the same
            gain, but one has slightly fewer attributes, this will cause
            the one with fewer attributes to be preferred.
    """
    entropy_func = entropy_func or entropy
    val_freq = defaultdict(float)
    subset_entropy = 0.0

    # Calculate the frequency of each of the values in the target attribute.
    for record in data:
        val_freq[record.get(attr)] += 1.0

    # Calculate the sum of the entropy for each subset of records weighted
    # by their probability of occurring in the training set.
    freq_total = sum(val_freq.values())
    for val, freq in val_freq.items():
        val_prob = freq / freq_total
        data_subset = [record for record in data if record.get(attr) == val]
        subset_entropy += val_prob * entropy_func(data_subset, class_attr,
                                                  method=method)

    if only_sub:
        return subset_entropy

    # Subtract the entropy of the chosen attribute from the entropy of the
    # whole data set with respect to the target attribute (and return it).
    main_entropy = entropy_func(data, class_attr, method=method)

    # Prefer gains on attributes with fewer values.
    if prefer_fewer_values:
        return ((main_entropy - subset_entropy), 1. / len(val_freq))
    return (main_entropy - subset_entropy)
782,344
Attempts to predict the value of the class attribute by aggregating the predictions of each tree. Parameters: weighting_formula := a callable that takes a list of trees and returns a list of weights.
def predict(self, record):
    """Predict the value of the class attribute by aggregating the
    predictions of each tree.

    Returns None when no tree produces a usable prediction or when the
    weighting method yields no weights.
    """
    # Gather raw per-tree predictions: {tree: raw prediction}.
    predictions = {}
    for tree in self.trees:
        raw = tree.predict(record)
        if raw is None:
            continue
        if isinstance(raw, CDist):
            if raw.mean is None:
                continue
        elif isinstance(raw, DDist):
            if not raw.count:
                continue
        predictions[tree] = raw
    if not predictions:
        return

    # Normalize weights and aggregate the final prediction.
    # `weights` is a sequence of (weight, tree) pairs.
    weights = self.weighting_method(predictions.keys())
    if not weights:
        return

    if self.data.is_continuous_class:
        # Merge continuous class predictions (weighted mean).
        return sum(w * predictions[tree].mean for w, tree in weights)

    # Merge discrete class predictions into a single distribution.
    total = DDist()
    for weight, tree in weights:
        prediction = predictions[tree]
        for cls_value, cls_prob in prediction.probs:
            total.add(cls_value, cls_prob * weight)
    return total
782,404
Handle the results of a problem submission or results request. This method checks the status of the problem and puts it in the correct queue. Args: message (dict): Update message from the SAPI server wrt. this problem. future `Future`: future corresponding to the problem Note: This method is always run inside of a daemon thread.
def _handle_problem_status(self, message, future):
    """Handle the results of a problem submission or results request.

    Checks the status of the problem and puts it in the correct queue.

    Args:
        message (dict): Update message from the SAPI server wrt. this problem.
        future `Future`: future corresponding to the problem.

    Note:
        This method is always run inside of a daemon thread; any error is
        forwarded to the future rather than raised.
    """
    try:
        _LOGGER.trace("Handling response: %r", message)
        _LOGGER.debug("Handling response for %s with status %s",
                      message.get('id'), message.get('status'))

        # Handle errors in batch mode
        if 'error_code' in message and 'error_msg' in message:
            raise SolverFailureError(message['error_msg'])

        if 'status' not in message:
            raise InvalidAPIResponseError("'status' missing in problem description response")
        if 'id' not in message:
            raise InvalidAPIResponseError("'id' missing in problem description response")

        future.id = message['id']
        future.remote_status = status = message['status']

        # The future may not have the ID set yet
        with future._single_cancel_lock:
            # This handles the case where cancel has been called on a future
            # before that future received the problem id
            if future._cancel_requested:
                if not future._cancel_sent and status == self.STATUS_PENDING:
                    # The problem has been canceled but the status says its
                    # still in queue; try to cancel it
                    self._cancel(message['id'], future)
                # If a cancel request could meaningfully be sent it has been now
                future._cancel_sent = True

        if not future.time_received and message.get('submitted_on'):
            future.time_received = parse_datetime(message['submitted_on'])
        if not future.time_solved and message.get('solved_on'):
            future.time_solved = parse_datetime(message['solved_on'])
        if not future.eta_min and message.get('earliest_estimated_completion'):
            future.eta_min = parse_datetime(message['earliest_estimated_completion'])
        if not future.eta_max and message.get('latest_estimated_completion'):
            future.eta_max = parse_datetime(message['latest_estimated_completion'])

        if status == self.STATUS_COMPLETE:
            # TODO: find a better way to differentiate between
            # `completed-on-submit` and `completed-on-poll`.
            # Loading should happen only once, not every time when response
            # doesn't contain 'answer'.

            # If the message is complete, forward it to the future object
            if 'answer' in message:
                future._set_message(message)
            # If the problem is complete, but we don't have the result data
            # put the problem in the queue for loading results.
            else:
                self._load(future)
        elif status in self.ANY_STATUS_ONGOING:
            # If the response is pending add it to the queue.
            self._poll(future)
        elif status == self.STATUS_CANCELLED:
            # If canceled return error
            raise CanceledFutureError()
        else:
            # Return an error to the future object
            errmsg = message.get('error_message', 'An unknown error has occurred.')
            if 'solver is offline' in errmsg.lower():
                raise SolverOfflineError(errmsg)
            else:
                raise SolverFailureError(errmsg)

    except Exception as error:
        # If there were any unhandled errors we need to release the
        # lock in the future, otherwise deadlock occurs.
        future._set_error(error, sys.exc_info())
782,437
Encode the binary quadratic problem for submission to a given solver, using the `qp` format for data. Args: solver (:class:`dwave.cloud.solver.Solver`): The solver used. linear (dict[variable, bias]/list[variable, bias]): Linear terms of the model. quadratic (dict[(variable, variable), bias]): Quadratic terms of the model. Returns: encoded submission dictionary
def encode_bqm_as_qp(solver, linear, quadratic):
    """Encode the binary quadratic problem for submission to a given solver,
    using the `qp` format for data.

    Args:
        solver (:class:`dwave.cloud.solver.Solver`): The solver used.
        linear (dict[variable, bias]/list[variable, bias]): Linear terms.
        quadratic (dict[(variable, variable), bias]): Quadratic terms.

    Returns:
        encoded submission dictionary
    """
    active = active_qubits(linear, quadratic)

    # Encode linear terms. The coefficients of the linear terms of the
    # objective are encoded as an array of little endian 64 bit doubles,
    # then base64 encoded into a string safe for json. The order of the
    # terms is determined by the _encoding_qubits property specified by the
    # server. Note: only active qubits are coded with double, inactive with NaN.
    nan = float('nan')
    lin = [uniform_get(linear, qubit, 0 if qubit in active else nan)
           for qubit in solver._encoding_qubits]
    lin = base64.b64encode(struct.pack('<' + ('d' * len(lin)), *lin))

    # Encode the coefficients of the quadratic terms of the objective in the
    # same manner as the linear terms, in the order given by the
    # _encoding_couplers property, discarding tailing zero couplings.
    quad = [quadratic.get((q1, q2), 0) + quadratic.get((q2, q1), 0)
            for (q1, q2) in solver._encoding_couplers
            if q1 in active and q2 in active]
    quad = base64.b64encode(struct.pack('<' + ('d' * len(quad)), *quad))

    # The name for this encoding is 'qp' and is explicitly included in the
    # message for easier extension in the future.
    return {
        'format': 'qp',
        'lin': lin.decode('utf-8'),
        'quad': quad.decode('utf-8')
    }
782,443
Helper for decode_qp, turns a single byte into a list of bits. Args: byte: byte to be decoded Returns: list of bits corresponding to byte
def _decode_byte(byte): bits = [] for _ in range(8): bits.append(byte & 1) byte >>= 1 return bits
782,445
Helper for decode_qp, decodes a double array. The double array is stored as little endian 64 bit doubles. The array has then been base64 encoded. Since we are decoding we do these steps in reverse. Args: message: the double array Returns: decoded double array
def _decode_doubles(message): binary = base64.b64decode(message) return struct.unpack('<' + ('d' * (len(binary) // 8)), binary)
782,447
Calculate the energy of a state given the Hamiltonian. Args: linear: Linear Hamiltonian terms. quad: Quadratic Hamiltonian terms. state: Vector of spins describing the system state. Returns: Energy of the state evaluated by the given energy function.
def evaluate_ising(linear, quad, state):
    """Calculate the energy of a state given the Hamiltonian.

    Args:
        linear: Linear Hamiltonian terms.
        quad: Quadratic Hamiltonian terms.
        state: Vector of spins describing the system state.

    Returns:
        Energy of the state evaluated by the given energy function.
    """
    # If we were given a numpy array cast to list
    if _numpy and isinstance(state, np.ndarray):
        return evaluate_ising(linear, quad, state.tolist())

    # Accumulate the linear and quadratic contributions separately.
    energy = 0.0
    for index, bias in uniform_iterator(linear):
        energy += bias * state[index]
    for (idx_a, idx_b), coupling in six.iteritems(quad):
        energy += coupling * state[idx_a] * state[idx_b]
    return energy
782,451
Calculate a set of all active qubits. Qubit is "active" if it has bias or coupling attached. Args: linear (dict[variable, bias]/list[variable, bias]): Linear terms of the model. quadratic (dict[(variable, variable), bias]): Quadratic terms of the model. Returns: set: Active qubits' indices.
def active_qubits(linear, quadratic):
    """Calculate the set of all active qubits.

    A qubit is "active" if it has a bias or coupling attached.

    Args:
        linear (dict[variable, bias]/list[variable, bias]): Linear terms.
        quadratic (dict[(variable, variable), bias]): Quadratic terms.

    Returns:
        set: Active qubits' indices.
    """
    active = set(idx for idx, _ in uniform_iterator(linear))
    for edge, _ in six.iteritems(quadratic):
        active.update(edge)
    return active
782,452
Loads a GermaNet instance connected to the given MongoDB instance. Arguments: - `host`: the hostname of the MongoDB instance - `port`: the port number of the MongoDB instance - `database_name`: the name of the GermaNet database on the MongoDB instance
def load_germanet(host = None, port = None, database_name = 'germanet'):
    """Load a GermaNet instance connected to the given MongoDB instance.

    Arguments:
    - `host`: the hostname of the MongoDB instance
    - `port`: the port number of the MongoDB instance
    - `database_name`: the name of the GermaNet database on the MongoDB
      instance
    """
    mongo_client = MongoClient(host, port)
    return GermaNet(mongo_client[database_name])
782,498
Creates a new GermaNet object. Arguments: - `mongo_db`: a pymongo.database.Database object containing the GermaNet lexicon
def __init__(self, mongo_db, cache_size = DEFAULT_CACHE_SIZE):
    """Create a new GermaNet object.

    Arguments:
    - `mongo_db`: a pymongo.database.Database object containing the
      GermaNet lexicon
    - `cache_size`: capacity of the LRU caches for lemmas and synsets
    """
    self._mongo_db = mongo_db
    self._lemma_cache = None
    self._synset_cache = None
    self.max_min_depths = {}
    try:
        self.__dict__.update(
            (key, value)
            for (key, value) in self._mongo_db.metainfo.find_one().items()
            if key not in GERMANET_METAINFO_IGNORE_KEYS)
    except AttributeError:
        # ignore error generated if metainfo is not included in
        # the mongo DB
        pass
    try:
        self._lemma_cache = repoze.lru.LRUCache(cache_size)
        self._synset_cache = repoze.lru.LRUCache(cache_size)
    except NameError:
        # repoze is optional; run without caching when it is absent
        pass
782,499
Looks up lemmas in the GermaNet database. Arguments: - `lemma`: - `pos`:
def lemmas(self, lemma, pos = None):
    """Look up lemmas in the GermaNet database.

    Arguments:
    - `lemma`: orthographic form to look up
    - `pos`: optional short part-of-speech tag; when given and unknown,
      returns None
    """
    if pos is not None:
        if pos not in SHORT_POS_TO_LONG:
            return None
        query = {'orthForm': lemma,
                 'category': SHORT_POS_TO_LONG[pos]}
    else:
        query = {'orthForm': lemma}
    lemma_dicts = self._mongo_db.lexunits.find(query)
    return sorted(Lemma(self, lemma_dict) for lemma_dict in lemma_dicts)
782,502
Looks up synsets in the GermaNet database. Arguments: - `lemma`: - `pos`:
def synsets(self, lemma, pos = None):
    """Look up synsets in the GermaNet database.

    Arguments:
    - `lemma`: orthographic form to look up
    - `pos`: optional short part-of-speech tag
    """
    unique_synsets = {lemma_obj.synset
                      for lemma_obj in self.lemmas(lemma, pos)}
    return sorted(unique_synsets)
782,504
Looks up a synset in GermaNet using its string representation. Arguments: - `synset_repr`: a unicode string containing the lemma, part of speech, and sense number of the first lemma of the synset >>> gn.synset(u'funktionieren.v.2') Synset(funktionieren.v.2)
def synset(self, synset_repr):
    """Look up a synset in GermaNet using its string representation.

    Arguments:
    - `synset_repr`: a unicode string containing the lemma, part of
      speech, and sense number of the first lemma of the synset

    >>> gn.synset(u'funktionieren.v.2')
    Synset(funktionieren.v.2)
    """
    parts = synset_repr.split('.')
    if len(parts) != 3:
        return None
    lemma, pos, sensenum = parts
    if not sensenum.isdigit() or pos not in SHORT_POS_TO_LONG:
        return None

    lemma_dict = self._mongo_db.lexunits.find_one(
        {'orthForm': lemma,
         'category': SHORT_POS_TO_LONG[pos],
         'sense': int(sensenum, 10)})
    if lemma_dict:
        return Lemma(self, lemma_dict).synset
782,505
Builds a Synset object from the database entry with the given ObjectId. Arguments: - `mongo_id`: a bson.objectid.ObjectId object
def get_synset_by_id(self, mongo_id):
    """Build a Synset object from the database entry with the given
    ObjectId, using the LRU cache when available.

    Arguments:
    - `mongo_id`: a bson.objectid.ObjectId object
    """
    # Try the cache first.
    if self._synset_cache is not None:
        cached = self._synset_cache.get(mongo_id)
        if cached is not None:
            return cached

    synset_dict = self._mongo_db.synsets.find_one({'_id': mongo_id})
    if synset_dict is None:
        return None
    synset = Synset(self, synset_dict)
    if self._synset_cache is not None:
        self._synset_cache.put(mongo_id, synset)
    return synset
782,506
Builds a Lemma object from the database entry with the given ObjectId. Arguments: - `mongo_id`: a bson.objectid.ObjectId object
def get_lemma_by_id(self, mongo_id):
    """
    Build a Lemma object from the database entry with the given
    ObjectId, consulting the LRU cache first when one is available.

    Arguments:
    - `mongo_id`: a bson.objectid.ObjectId object

    Returns the Lemma, or None if no document has that id.
    """
    # fast path: serve from the cache when possible
    if self._lemma_cache is not None:
        cached = self._lemma_cache.get(mongo_id)
        if cached is not None:
            return cached
    record = self._mongo_db.lexunits.find_one({'_id': mongo_id})
    if record is None:
        return None
    lemma = Lemma(self, record)
    if self._lemma_cache is not None:
        self._lemma_cache.put(mongo_id, lemma)
    return lemma
782,507
Globs the XML files contained in the given directory and sorts them into sections for import into the MongoDB database. Arguments: - `xml_path`: the path to the directory containing the GermaNet XML files
def find_germanet_xml_files(xml_path):
    """
    Glob the XML files contained in the given directory and sort them
    into the sections needed for import into the MongoDB database.

    Arguments:
    - `xml_path`: the path to the directory containing the GermaNet
      XML files

    Returns a 4-tuple (lex_files, gn_rels_file, wiktionary_files,
    ili_files); gn_rels_file is a single path or None.
    """
    remaining = sorted(glob.glob(os.path.join(xml_path, '*.xml')))

    # lexical information files: adj.*, nomen.*, verben.*
    lex_files = [path for path in remaining
                 if re.match(r'(adj|nomen|verben)\.',
                             os.path.basename(path).lower())]
    remaining = sorted(set(remaining) - set(lex_files))
    if not lex_files:
        print('ERROR: cannot find lexical information files')

    # the single GermaNet relations file
    gn_rels_file = [path for path in remaining
                    if os.path.basename(path).lower() == 'gn_relations.xml']
    remaining = sorted(set(remaining) - set(gn_rels_file))
    if not gn_rels_file:
        print('ERROR: cannot find relations file gn_relations.xml')
        gn_rels_file = None
    else:
        if 1 < len(gn_rels_file):
            print('WARNING: more than one relations file gn_relations.xml, '
                  'taking first match')
        gn_rels_file = gn_rels_file[0]

    # the wiktionary paraphrase files
    wiktionary_files = [path for path in remaining
                        if re.match(r'wiktionaryparaphrases-',
                                    os.path.basename(path).lower())]
    remaining = sorted(set(remaining) - set(wiktionary_files))
    if not wiktionary_files:
        print('WARNING: cannot find wiktionary paraphrase files')

    # the interlingual index files
    ili_files = [path for path in remaining
                 if os.path.basename(path).lower().startswith(
                     'interlingualindex')]
    remaining = sorted(set(remaining) - set(ili_files))
    if not ili_files:
        print('WARNING: cannot find interlingual index file')

    # anything left over is unexpected
    if remaining:
        print('WARNING: unrecognised xml files:', remaining)

    return lex_files, gn_rels_file, wiktionary_files, ili_files
782,509
Reads in a GermaNet lexical information file and returns its contents as a list of dictionary structures. Arguments: - `filename`: the name of the XML file to read
def read_lexical_file(filename):
    """
    Read a GermaNet lexical information file (adj.*, nomen.*,
    verben.*) and return its contents as a list of dictionaries, one
    per <synset> element.  Each synset dictionary carries a
    'lexunits' list of lexical-unit dictionaries; each lexical unit
    carries 'examples' and 'frames' lists.  Malformed content is
    reported via print() and skipped where possible.

    Arguments:
    - `filename`: the name of the XML file to read
    """
    with open(filename, 'rb') as input_file:
        doc = etree.parse(input_file)

    synsets = []
    assert doc.getroot().tag == 'synsets'
    for synset in doc.getroot():
        if synset.tag != 'synset':
            print('unrecognised child of <synsets>', synset)
            continue
        # the synset's XML attributes become its dictionary keys
        synset_dict = dict(synset.items())
        # location string used to prefix all diagnostics for this synset
        synloc = '{0} synset {1},'.format(filename,
                                          synset_dict.get('id', '???'))
        warn_attribs(synloc, synset, SYNSET_ATTRIBS)
        synset_dict['lexunits'] = []
        synsets.append(synset_dict)
        for child in synset:
            if child.tag == 'lexUnit':
                lexunit = child
                lexunit_dict = dict(lexunit.items())
                lexloc = synloc + ' lexUnit {0},'.format(
                    lexunit_dict.get('id', '???'))
                warn_attribs(lexloc, lexunit, LEXUNIT_ATTRIBS)
                # convert some properties to booleans
                for key in ['styleMarking', 'artificial', 'namedEntity']:
                    if key in lexunit_dict:
                        if lexunit_dict[key] not in MAP_YESNO_TO_BOOL:
                            print(lexloc, ('lexunit property {0} has '
                                           'non-boolean value').format(key),
                                  lexunit_dict[key])
                            continue
                        lexunit_dict[key] = MAP_YESNO_TO_BOOL[
                            lexunit_dict[key]]
                # convert sense to integer number
                if 'sense' in lexunit_dict:
                    if lexunit_dict['sense'].isdigit():
                        lexunit_dict['sense'] = int(lexunit_dict['sense'], 10)
                    else:
                        print(lexloc,
                              'lexunit property sense has non-numeric value',
                              lexunit_dict['sense'])
                synset_dict['lexunits'].append(lexunit_dict)
                lexunit_dict['examples'] = []
                lexunit_dict['frames'] = []
                # NOTE: the loop variable 'child' is deliberately reused
                # below for the lexunit's children; the outer iterator is
                # unaffected by the rebinding.
                for child in lexunit:
                    if child.tag in ['orthForm', 'orthVar',
                                     'oldOrthForm', 'oldOrthVar']:
                        warn_attribs(lexloc, child, set())
                        if not child.text:
                            print(lexloc, '{0} with no text'.format(child.tag))
                            continue
                        if child.tag in lexunit_dict:
                            print(lexloc,
                                  'more than one {0}'.format(child.tag))
                        lexunit_dict[child.tag] = str(child.text)
                    elif child.tag == 'example':
                        example = child
                        text = [child for child in example
                                if child.tag == 'text']
                        if len(text) != 1 or not text[0].text:
                            print(lexloc, '<example> tag without text')
                        # NOTE(review): if there is no <text> child at all,
                        # text[0] raises IndexError here — confirm inputs
                        # always carry one.
                        example_dict = {'text': str(text[0].text)}
                        for child in example:
                            if child.tag == 'text':
                                continue
                            elif child.tag == 'exframe':
                                if 'exframe' in example_dict:
                                    print(lexloc, 'more than one <exframe> '
                                          'for <example>')
                                warn_attribs(lexloc, child, set())
                                if not child.text:
                                    print(lexloc, '<exframe> with no text')
                                    continue
                                example_dict['exframe'] = str(child.text)
                            else:
                                print(lexloc,
                                      'unrecognised child of <example>',
                                      child)
                        lexunit_dict['examples'].append(example_dict)
                    elif child.tag == 'frame':
                        frame = child
                        warn_attribs(lexloc, frame, set())
                        if 0 < len(frame):
                            print(lexloc, 'unrecognised <frame> children',
                                  list(frame))
                        if not frame.text:
                            print(lexloc, '<frame> without text')
                            continue
                        lexunit_dict['frames'].append(str(frame.text))
                    elif child.tag == 'compound':
                        compound = child
                        warn_attribs(lexloc, compound, set())
                        compound_dict = {}
                        for child in compound:
                            if child.tag == 'modifier':
                                modifier_dict = dict(child.items())
                                warn_attribs(lexloc, child,
                                             MODIFIER_ATTRIBS, set())
                                if not child.text:
                                    print(lexloc, 'modifier without text')
                                    continue
                                modifier_dict['text'] = str(child.text)
                                if 'modifier' not in compound_dict:
                                    compound_dict['modifier'] = []
                                compound_dict['modifier'].append(
                                    modifier_dict)
                            elif child.tag == 'head':
                                head_dict = dict(child.items())
                                warn_attribs(lexloc, child,
                                             HEAD_ATTRIBS, set())
                                if not child.text:
                                    print(lexloc, '<head> without text')
                                    continue
                                head_dict['text'] = str(child.text)
                                if 'head' in compound_dict:
                                    print(lexloc,
                                          'more than one head in <compound>')
                                compound_dict['head'] = head_dict
                            else:
                                print(lexloc,
                                      'unrecognised child of <compound>',
                                      child)
                                continue
                        # NOTE(review): compound_dict is built here but never
                        # attached to lexunit_dict, so the parsed compound
                        # data is discarded — confirm whether this is
                        # intentional.
                    else:
                        print(lexloc, 'unrecognised child of <lexUnit>',
                              child)
                        continue
            elif child.tag == 'paraphrase':
                paraphrase = child
                warn_attribs(synloc, paraphrase, set())
                # NOTE(review): the paraphrase text is validated but not
                # stored anywhere — confirm whether it should be added to
                # synset_dict.
                paraphrase_text = str(paraphrase.text)
                if not paraphrase_text:
                    print(synloc, 'WARNING: <paraphrase> tag with no text')
            else:
                print(synloc, 'unrecognised child of <synset>', child)
                continue
    return synsets
782,511
Reads the GermaNet relation file ``gn_relations.xml`` which lists all the relations holding between lexical units and synsets. Arguments: - `filename`:
def read_relation_file(filename):
    """
    Read the GermaNet relation file ``gn_relations.xml``, which lists
    all the relations holding between lexical units and synsets.

    Arguments:
    - `filename`: path to the relations XML file

    Returns a pair (lex_rels, con_rels) of lists of attribute
    dictionaries, one per <lex_rel> / <con_rel> element.
    """
    with open(filename, 'rb') as input_file:
        doc = etree.parse(input_file)

    assert doc.getroot().tag == 'relations'
    lex_rels = []
    con_rels = []
    for node in doc.getroot():
        attrs = dict(node.items())
        if node.tag == 'lex_rel':
            # lexical relation between two lexical units
            if 0 < len(node):
                print('<lex_rel> has unexpected child node')
            warn_attribs('', node, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)
            if attrs['dir'] not in LEX_REL_DIRS:
                print('unrecognized <lex_rel> dir', attrs['dir'])
            if attrs['dir'] == 'both' and 'inv' not in attrs:
                print('<lex_rel> has dir=both but does not specify inv')
            lex_rels.append(attrs)
        elif node.tag == 'con_rel':
            # conceptual relation between two synsets
            if 0 < len(node):
                print('<con_rel> has unexpected child node')
            warn_attribs('', node, RELATION_ATTRIBS, RELATION_ATTRIBS_REQD)
            if attrs['dir'] not in CON_REL_DIRS:
                print('unrecognised <con_rel> dir', attrs['dir'])
            if (attrs['dir'] in ['both', 'revert'] and
                    'inv' not in attrs):
                print('<con_rel> has dir={0} but does not specify inv'.format(
                    attrs['dir']))
            con_rels.append(attrs)
        else:
            print('unrecognised child of <relations>', node)
            continue
    return lex_rels, con_rels
782,512
Reads in a GermaNet wiktionary paraphrase file and returns its contents as a list of dictionary structures. Arguments: - `filename`:
def read_paraphrase_file(filename):
    """
    Read a GermaNet wiktionary paraphrase file and return its contents
    as a list of dictionaries, one per <wiktionaryParaphrase> element.
    The 'edited' attribute is converted to bool and
    'wiktionarySenseId' to int where their values are well-formed;
    malformed values are reported via print() and left unconverted.

    Arguments:
    - `filename`: path to the XML file to read
    """
    with open(filename, 'rb') as input_file:
        doc = etree.parse(input_file)

    assert doc.getroot().tag == 'wiktionaryParaphrases'
    paraphrases = []
    for child in doc.getroot():
        if child.tag == 'wiktionaryParaphrase':
            paraphrase = child
            warn_attribs('', paraphrase, PARAPHRASE_ATTRIBS)
            if 0 < len(paraphrase):
                print('unrecognised child of <wiktionaryParaphrase>',
                      list(paraphrase))
            paraphrase_dict = dict(paraphrase.items())
            if paraphrase_dict['edited'] not in MAP_YESNO_TO_BOOL:
                print('<paraphrase> attribute "edited" has unexpected value',
                      paraphrase_dict['edited'])
            else:
                paraphrase_dict['edited'] = MAP_YESNO_TO_BOOL[
                    paraphrase_dict['edited']]
            if not paraphrase_dict['wiktionarySenseId'].isdigit():
                # BUG FIX: report the offending wiktionarySenseId value;
                # this previously printed the unrelated 'edited' attribute.
                print('<paraphrase> attribute "wiktionarySenseId" has '
                      'non-integer value',
                      paraphrase_dict['wiktionarySenseId'])
            else:
                paraphrase_dict['wiktionarySenseId'] = \
                    int(paraphrase_dict['wiktionarySenseId'], 10)
            paraphrases.append(paraphrase_dict)
        else:
            print('unknown child of <wiktionaryParaphrases>', child)
    return paraphrases
782,513
Reads in the given lexical information files and inserts their contents into the given MongoDB database. Arguments: - `germanet_db`: a pymongo.database.Database object - `lex_files`: a list of paths to XML files containing lexial information
def insert_lexical_information(germanet_db, lex_files):
    """
    Read the given lexical information files and insert their contents
    into the 'synsets' and 'lexunits' collections of the given MongoDB
    database, dropping any existing contents first.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `lex_files`: a list of paths to XML files containing lexical
      information
    """
    # drop the database collections if they already exist
    germanet_db.lexunits.drop()
    germanet_db.synsets.drop()
    # inject data from XML files into the database
    # NOTE(review): insert()/save() are the legacy pymongo 2.x API
    # (removed in pymongo 3+) — confirm the project pins pymongo 2.x.
    for lex_file in lex_files:
        synsets = read_lexical_file(lex_file)
        for synset in synsets:
            # normalise synset keys via the rewrite table
            synset = dict((SYNSET_KEY_REWRITES.get(key, key), value)
                          for (key, value) in synset.items())
            lexunits = synset['lexunits']
            # bulk-insert the lexunits; the synset keeps their ObjectIds
            synset['lexunits'] = germanet_db.lexunits.insert(lexunits)
            synset_id = germanet_db.synsets.insert(synset)
            # back-link each lexunit to its synset and copy the category
            for lexunit in lexunits:
                lexunit['synset'] = synset_id
                lexunit['category'] = synset['category']
                germanet_db.lexunits.save(lexunit)
    # index the two collections by id
    germanet_db.synsets.create_index('id')
    germanet_db.lexunits.create_index('id')
    # also index lexunits by lemma, lemma-pos, and lemma-pos-sensenum
    germanet_db.lexunits.create_index([('orthForm', DESCENDING)])
    germanet_db.lexunits.create_index([('orthForm', DESCENDING),
                                       ('category', DESCENDING)])
    germanet_db.lexunits.create_index([('orthForm', DESCENDING),
                                       ('category', DESCENDING),
                                       ('sense', DESCENDING)])
    print('Inserted {0} synsets, {1} lexical units.'.format(
        germanet_db.synsets.count(), germanet_db.lexunits.count()))
782,514
Reads in the given GermaNet relation file and inserts its contents into the given MongoDB database. Arguments: - `germanet_db`: a pymongo.database.Database object - `gn_rels_file`:
def insert_relation_information(germanet_db, gn_rels_file): lex_rels, con_rels = read_relation_file(gn_rels_file) # cache the lexunits while we work on them lexunits = {} for lex_rel in lex_rels: if lex_rel['from'] not in lexunits: lexunits[lex_rel['from']] = germanet_db.lexunits.find_one( {'id': lex_rel['from']}) from_lexunit = lexunits[lex_rel['from']] if lex_rel['to'] not in lexunits: lexunits[lex_rel['to']] = germanet_db.lexunits.find_one( {'id': lex_rel['to']}) to_lexunit = lexunits[lex_rel['to']] if 'rels' not in from_lexunit: from_lexunit['rels'] = set() from_lexunit['rels'].add((lex_rel['name'], to_lexunit['_id'])) if lex_rel['dir'] == 'both': if 'rels' not in to_lexunit: to_lexunit['rels'] = set() to_lexunit['rels'].add((lex_rel['inv'], from_lexunit['_id'])) for lexunit in lexunits.values(): if 'rels' in lexunit: lexunit['rels'] = sorted(lexunit['rels']) germanet_db.lexunits.save(lexunit) # cache the synsets while we work on them synsets = {} for con_rel in con_rels: if con_rel['from'] not in synsets: synsets[con_rel['from']] = germanet_db.synsets.find_one( {'id': con_rel['from']}) from_synset = synsets[con_rel['from']] if con_rel['to'] not in synsets: synsets[con_rel['to']] = germanet_db.synsets.find_one( {'id': con_rel['to']}) to_synset = synsets[con_rel['to']] if 'rels' not in from_synset: from_synset['rels'] = set() from_synset['rels'].add((con_rel['name'], to_synset['_id'])) if con_rel['dir'] in ['both', 'revert']: if 'rels' not in to_synset: to_synset['rels'] = set() to_synset['rels'].add((con_rel['inv'], from_synset['_id'])) for synset in synsets.values(): if 'rels' in synset: synset['rels'] = sorted(synset['rels']) germanet_db.synsets.save(synset) print('Inserted {0} lexical relations, {1} synset relations.'.format( len(lex_rels), len(con_rels)))
782,515
Reads in the given GermaNet wiktionary paraphrase files and attaches their paraphrases to the corresponding lexical units in the given MongoDB database. Arguments: - `germanet_db`: a pymongo.database.Database object - `wiktionary_files`: a list of paths to wiktionary paraphrase XML files
def insert_paraphrase_information(germanet_db, wiktionary_files):
    """
    Read the given wiktionary paraphrase files and attach their
    paraphrases to the corresponding lexical units in the given
    MongoDB database.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    - `wiktionary_files`: a list of paths to wiktionary paraphrase
      XML files
    """
    num_paraphrases = 0
    # cache of lexunit documents keyed by GermaNet lexunit id
    lexunits = {}
    for filename in wiktionary_files:
        paraphrases = read_paraphrase_file(filename)
        num_paraphrases += len(paraphrases)
        for paraphrase in paraphrases:
            lexunit_id = paraphrase['lexUnitId']
            if lexunit_id not in lexunits:
                lexunits[lexunit_id] = germanet_db.lexunits.find_one(
                    {'id': lexunit_id})
            lexunit = lexunits[lexunit_id]
            lexunit.setdefault('paraphrases', []).append(paraphrase)
    # flush all touched lexunits back to the database
    for lexunit in lexunits.values():
        germanet_db.lexunits.save(lexunit)
    print('Inserted {0} wiktionary paraphrases.'.format(num_paraphrases))
782,516
Creates the lemmatiser collection in the given MongoDB instance using the data derived from the Projekt deutscher Wortschatz. Arguments: - `germanet_db`: a pymongo.database.Database object
def insert_lemmatisation_data(germanet_db):
    """
    Create the lemmatiser collection in the given MongoDB instance
    using the data derived from the Projekt deutscher Wortschatz.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    """
    # drop the database collection if it already exists
    germanet_db.lemmatiser.drop()

    num_lemmas = 0
    # FIX: open the gzip file with a context manager so the handle is
    # closed even if an exception interrupts the import (the original
    # only closed it on the success path).
    with gzip.open(os.path.join(os.path.dirname(__file__),
                                LEMMATISATION_FILE)) as input_file:
        # each line is a tab-separated (word, lemma) pair in latin-1
        for line in input_file:
            fields = line.decode('iso-8859-1').strip().split('\t')
            assert len(fields) == 2
            word, lemma = fields
            germanet_db.lemmatiser.insert({'word': word, 'lemma': lemma})
            num_lemmas += 1

    # index the collection on 'word'
    germanet_db.lemmatiser.create_index('word')

    print('Inserted {0} lemmatiser entries.'.format(num_lemmas))
782,517
For every synset in GermaNet, inserts count information derived from SDEWAC. Arguments: - `germanet_db`: a pymongo.database.Database object
def insert_infocontent_data(germanet_db):
    """
    For every synset in GermaNet, insert frequency ('information
    content') data derived from the SDEWAC corpus counts, stored on
    each synset document under the 'infocont' key as a probability.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    """
    gnet = germanet.GermaNet(germanet_db)
    # use add one smoothing
    gn_counts = defaultdict(lambda: 1.)
    total_count = 1
    num_lines_read = 0
    num_lines = 0
    # FIX: open the count file with a context manager so it is closed
    # even if an exception interrupts the loop (the original only
    # closed it on the success path).
    with gzip.open(os.path.join(os.path.dirname(__file__),
                                WORD_COUNT_FILE)) as input_file:
        # each valid line is: count TAB pos TAB word
        for line in input_file:
            line = line.decode('utf-8').strip().split('\t')
            num_lines += 1
            if len(line) != 3:
                continue
            count, pos, word = line
            num_lines_read += 1
            count = int(count)
            synsets = set(gnet.synsets(word, pos))
            if not synsets:
                continue
            # Although Resnik (1995) suggests dividing count by the number
            # of synsets, Patwardhan et al (2003) argue against doing
            # this.
            count = float(count) / len(synsets)
            for synset in synsets:
                total_count += count
                # propagate the count up every hypernym path, splitting
                # it evenly between the paths
                paths = synset.hypernym_paths
                scount = float(count) / len(paths)
                for path in paths:
                    for ss in path:
                        gn_counts[ss._id] += scount
    print('Read {0} of {1} lines from count file.'.format(num_lines_read,
                                                          num_lines))
    print('Recorded counts for {0} synsets.'.format(len(gn_counts)))
    print('Total count is {0}'.format(total_count))
    # update all the synset records in GermaNet
    num_updates = 0
    for synset in germanet_db.synsets.find():
        synset['infocont'] = gn_counts[synset['_id']] / total_count
        germanet_db.synsets.save(synset)
        num_updates += 1
    print('Updated {0} synsets.'.format(num_updates))
782,518
For every part of speech in GermaNet, computes the maximum min_depth in that hierarchy. Arguments: - `germanet_db`: a pymongo.database.Database object
def compute_max_min_depth(germanet_db):
    """
    For every part of speech in GermaNet, compute the maximum
    min_depth in that hierarchy and store the result in the metainfo
    collection under the 'max_min_depths' key.

    Arguments:
    - `germanet_db`: a pymongo.database.Database object
    """
    gnet = germanet.GermaNet(germanet_db)

    # track, per word category, the deepest min_depth seen so far
    max_min_depths = defaultdict(lambda: -1)
    for synset in gnet.all_synsets():
        depth = synset.min_depth
        if max_min_depths[synset.category] < depth:
            max_min_depths[synset.category] = depth

    # persist the result on the (single) metainfo document
    if germanet_db.metainfo.count() == 0:
        germanet_db.metainfo.insert({})
    metainfo = germanet_db.metainfo.find_one()
    metainfo['max_min_depths'] = max_min_depths
    germanet_db.metainfo.save(metainfo)

    print('Computed maximum min_depth for all parts of speech:')
    print(u', '.join(u'{0}: {1}'.format(category, depth)
                     for (category, depth)
                     in sorted(max_min_depths.items())).encode('utf-8'))
782,519
Parse a 'Value' declaration. Args: value: String line from a template file, must begin with 'Value '. Raises: TextFSMTemplateError: Value declaration contains an error.
def Parse(self, value):
    """Parse a 'Value' declaration.

    Args:
      value: String line from a template file, must begin with 'Value '.

    Raises:
      TextFSMTemplateError: Value declaration contains an error.
    """
    value_line = value.split(' ')
    if len(value_line) < 3:
        raise TextFSMTemplateError('Expect at least 3 tokens on line.')

    if not value_line[2].startswith('('):
        # Options are present: token layout is
        # 'Value <options> <name> <regex...>'
        options = value_line[1]
        for option in options.split(','):
            self._AddOption(option)
        # FIX: call the OnCreateOptions callbacks with a plain loop;
        # the original abused a list comprehension for its side effects.
        for option in self.options:
            option.OnCreateOptions()
        self.name = value_line[2]
        self.regex = ' '.join(value_line[3:])
    else:
        # There were no valid options, so there are no options.
        # Treat this argument as the name.
        self.name = value_line[1]
        self.regex = ' '.join(value_line[2:])

    if len(self.name) > self.max_name_len:
        raise TextFSMTemplateError(
            "Invalid Value name '%s' or name too long." % self.name)

    # The regex must be a single balanced '()' group.
    if (not re.match(r'^\(.*\)$', self.regex) or
            self.regex.count('(') != self.regex.count(')')):
        raise TextFSMTemplateError(
            "Value '%s' must be contained within a '()' pair." % self.regex)

    # Rewrite the leading '(' into a named capture group.
    self.template = re.sub(r'^\(', '(?P<%s>' % self.name, self.regex)
782,585
Passes the line through each rule until a match is made. Args: line: A string, the current input line.
def _CheckLine(self, line):
    """Passes the line through each rule until a match is made.

    Args:
      line: A string, the current input line.
    """
    for rule in self._cur_state:
        matched = self._CheckRule(rule, line)
        if matched:
            # copy every captured group into the value table
            for value in matched.groupdict():
                self._AssignVar(matched, value)
            if self._Operations(rule):
                # Not a Continue so check for state transition.
                if rule.new_state:
                    # 'End' and 'EOF' are terminal pseudo-states, not
                    # entries in self.states.
                    if rule.new_state not in ('End', 'EOF'):
                        self._cur_state = self.states[rule.new_state]
                        self._cur_state_name = rule.new_state
                # stop processing further rules for this line
                break
782,586
RimeSolver Constructor Parameters: slvr_cfg : SolverConfiguration Solver Configuration variables
def __init__(self, slvr_cfg):
    """RimeSolver Constructor.

    Sets up the hypercube, default data sources/sinks, memory
    budgeting state, the TensorFlow compute graph and session, and the
    thread pools used to feed, compute and consume problem chunks.

    Parameters:
        slvr_cfg : SolverConfiguration
            Solver Configuration variables
    """
    super(RimeSolver, self).__init__(slvr_cfg)

    #=========================================
    # Register hypercube Dimensions
    #=========================================

    cube, slvr_cfg = self.hypercube, self.config()
    _setup_hypercube(cube, slvr_cfg)

    #=======================
    # Data Sources and Sinks
    #=======================

    # Get the defaults data source (default or test data)
    data_source = slvr_cfg['data_source']
    montblanc.log.info("Defaults Data Source '{}'".format(data_source))

    # Construct list of data sources and sinks
    # internal to the solver.
    # These will be overridden by source and sink
    # providers supplied by the user in the solve()
    # method
    default_prov = _create_defaults_source_provider(cube, data_source)
    self._source_providers = [default_prov]
    self._sink_providers = [NullSinkProvider()]

    #==================
    # Data Source Cache
    #==================

    # Thread-safe dict wrapper: every access is guarded by a lock
    # because feed/compute/consume run on separate executor threads.
    class SourceCache(object):
        def __init__(self):
            self._cache = {}
            self._lock = threading.Lock()

        def __getitem__(self, key):
            with self._lock:
                return self._cache[key]

        def __setitem__(self, key, value):
            with self._lock:
                self._cache[key] = value

        def __delitem__(self, key):
            with self._lock:
                del self._cache[key]

        def pop(self, key, default=None):
            with self._lock:
                return self._cache.pop(key, default)

    self._source_cache = SourceCache()

    #==================
    # Memory Budgeting
    #==================

    # For deciding whether to rebudget
    self._previous_budget = 0
    self._previous_budget_dims = {}

    #================
    # Cube Transcoder
    #================
    self._iter_dims = ['ntime', 'nbl']
    self._transcoder = CubeDimensionTranscoder(self._iter_dims)

    #================================
    # Staging Area Data Source Configuration
    #================================

    # All non-temporary hypercube arrays become staging-area sources.
    # NOTE: iteritems() implies this module targets Python 2.
    dfs = {n: a for n, a in cube.arrays().iteritems()
           if not 'temporary' in a.tags}

    # Descriptors are not user-defined arrays
    # but a variable passed through describing a chunk of the
    # problem. Make it look as if it's an array
    if 'descriptor' in dfs:
        raise KeyError("'descriptor' is reserved, "
                       "please use another array name.")

    dfs['descriptor'] = AttrDict(dtype=np.int32)

    #=========================
    # Tensorflow devices
    #=========================

    from tensorflow.python.client import device_lib
    devices = device_lib.list_local_devices()
    device_type = slvr_cfg['device_type'].upper()
    gpus = [d.name for d in devices if d.device_type == 'GPU']
    cpus = [d.name for d in devices if d.device_type == 'CPU']

    # fall back gracefully when GPUs were requested but none exist
    if device_type == 'GPU' and len(gpus) == 0:
        montblanc.log.warn("No GPUs are present, falling back to CPU.")
        device_type = 'CPU'

    use_cpus = device_type == 'CPU'
    montblanc.log.info("Using '{}' devices for compute".format(device_type))
    self._devices = cpus if use_cpus else gpus

    # Each device runs a fixed number of shards so that feeding and
    # computing can overlap.
    self._shards_per_device = spd = 2
    self._nr_of_shards = shards = len(self._devices)*spd
    # shard_id == d*spd + shard
    self._shard = lambda d, s: d*spd + s

    assert len(self._devices) > 0

    #=========================
    # Tensorflow Compute Graph
    #=========================

    # Create all tensorflow constructs within the compute graph
    with tf.Graph().as_default() as compute_graph:
        # Create our data feeding structure containing
        # input/output staging_areas and feed once variables
        self._tf_feed_data = _construct_tensorflow_feed_data(
            dfs, cube, self._iter_dims, shards)

        # Construct tensorflow expressions for each shard
        self._tf_expr = [_construct_tensorflow_expression(
                slvr_cfg, self._tf_feed_data, dev, self._shard(d, s))
            for d, dev in enumerate(self._devices)
            for s in range(self._shards_per_device)]

        # Initialisation operation
        init_op = tf.global_variables_initializer()

        # Now forbid modification of the graph
        compute_graph.finalize()

    #==========================================
    # Tensorflow Session
    #==========================================

    # Create the tensorflow session object
    # Use supplied target, if present
    tf_server_target = slvr_cfg.get('tf_server_target', '')
    montblanc.log.debug("Attaching session to tensorflow server "
                        "'{tfs}'".format(tfs=tf_server_target))

    session_config = tf.ConfigProto(allow_soft_placement=True)
    self._tf_session = tf.Session(tf_server_target,
                                  graph=compute_graph,
                                  config=session_config)
    self._tf_session.run(init_op)

    #======================
    # Thread pool executors
    #======================

    # One single-threaded executor per role keeps each pipeline stage
    # strictly ordered while allowing the stages to run concurrently.
    tpe = cf.ThreadPoolExecutor

    self._descriptor_executor = tpe(1)
    self._feed_executors = [tpe(1) for i in range(shards)]
    self._compute_executors = [tpe(1) for i in range(shards)]
    self._consumer_executor = tpe(1)

    # Thread-safe per-shard counter of inputs queued but not consumed.
    class InputsWaiting(object):
        def __init__(self, shards):
            self._lock = threading.Lock()
            self._inputs_waiting = np.zeros(shape=(shards,),
                                            dtype=np.int32)

        def get(self):
            with self._lock:
                return self._inputs_waiting

        def increment(self, shard):
            with self._lock:
                self._inputs_waiting[shard] += 1

        def decrement(self, shard):
            with self._lock:
                self._inputs_waiting[shard] -= 1

    self._inputs_waiting = InputsWaiting(shards)

    #======================
    # Tracing
    #======================

    # Accumulates tf.RunMetadata objects from traced session runs and
    # can merge them into a chrome-trace timeline file.
    class RunMetaData(object):
        def __init__(self):
            self._rm = []
            self._lock = threading.Lock()

        def clear(self):
            with self._lock:
                self._rm = []

        def save(self, run_metadata):
            with self._lock:
                self._rm.append(run_metadata)

        def write(self, tag=None):
            with self._lock:
                if len(self._rm) == 0:
                    return

                if tag is None:
                    tag = '0'

                metadata = tf.RunMetadata()
                [metadata.MergeFrom(m) for m in self._rm]
                tl = timeline.Timeline(metadata.step_stats)
                # NOTE(review): '%d' with a string tag (e.g. the '0'
                # default above) raises TypeError — confirm callers pass
                # integer tags or change the format to '%s'.
                trace_filename = 'compute_timeline_%d.json' % tag
                with open(trace_filename, 'w') as f:
                    f.write(tl.generate_chrome_trace_format())
                    f.write('\n')

    #============================
    # Wrap tensorflow Session.run
    #============================

    self._should_trace = False
    self._run_metadata = RunMetaData()

    # Returns a session.run wrapper; when tracing is enabled the
    # wrapper also collects RunMetadata for every call.
    def _tfrunner(session, should_trace=False):
        trace_level = (tf.RunOptions.FULL_TRACE if should_trace
                       else tf.RunOptions.NO_TRACE)
        options = tf.RunOptions(trace_level=trace_level)

        def _runner(*args, **kwargs):
            return session.run(*args, options=options, **kwargs)

        def _meta_runner(*args, **kwargs):
            try:
                run_metadata = tf.RunMetadata()
                return session.run(*args, options=options,
                                   run_metadata=run_metadata, **kwargs)
            finally:
                self._run_metadata.save(run_metadata)

        return _meta_runner if should_trace else _runner

    self._tfrun = _tfrunner(self._tf_session, self._should_trace)
    self._iterations = 0
783,068
Sets a descendant node as the outgroup of a tree. This function can be used to root a tree or even an internal node. Parameters: ----------- outgroup: a node instance within the same tree structure that will be used as a basal node.
def set_outgroup(self, outgroup):
    """
    Sets a descendant node as the outgroup of a tree.  This function
    can be used to root a tree or even an internal node.

    The tree is re-rooted in place by reversing the parent/child
    relationships on the path between the old root (self) and the new
    outgroup, then making the outgroup and the rest of the tree the
    two children of self.

    Parameters:
    -----------
    outgroup: a node instance within the same tree structure that will
        be used as a basal node.
    """
    outgroup = _translate_nodes(self, outgroup)

    if self == outgroup:
        ##return
        ## why raise an error for this?
        raise TreeError("Cannot set myself as outgroup")

    parent_outgroup = outgroup.up

    # Detects (sub)tree root
    n = outgroup
    while n.up is not self:
        n = n.up

    # If outgroup is a child from root, but with more than one
    # sister nodes, creates a new node to group them
    self.children.remove(n)
    if len(self.children) != 1:
        # create a connector node gathering all remaining children
        down_branch_connector = self.__class__()
        down_branch_connector.dist = 0.0
        down_branch_connector.support = n.support
        for ch in self.get_children():
            down_branch_connector.children.append(ch)
            ch.up = down_branch_connector
            self.children.remove(ch)
    else:
        down_branch_connector = self.children[0]

    # Connects down branch to myself or to outgroup
    # (variable names are Spanish: quien_va_ser_padre = "who will be
    # the parent", quien_va_ser_hijo = "who will be the child",
    # quien_fue_padre = "who was the parent")
    quien_va_ser_padre = parent_outgroup
    if quien_va_ser_padre is not self:
        # Parent-child swapping: walk up from the outgroup's parent to
        # the old root, reversing each edge and shifting the branch
        # lengths/supports one edge down so they stay attached to the
        # same physical branch.
        quien_va_ser_hijo = quien_va_ser_padre.up
        quien_fue_padre = None
        buffered_dist = quien_va_ser_padre.dist
        buffered_support = quien_va_ser_padre.support

        while quien_va_ser_hijo is not self:
            quien_va_ser_padre.children.append(quien_va_ser_hijo)
            quien_va_ser_hijo.children.remove(quien_va_ser_padre)

            buffered_dist2 = quien_va_ser_hijo.dist
            buffered_support2 = quien_va_ser_hijo.support
            quien_va_ser_hijo.dist = buffered_dist
            quien_va_ser_hijo.support = buffered_support
            buffered_dist = buffered_dist2
            buffered_support = buffered_support2

            quien_va_ser_padre.up = quien_fue_padre
            quien_fue_padre = quien_va_ser_padre
            quien_va_ser_padre = quien_va_ser_hijo
            quien_va_ser_hijo = quien_va_ser_padre.up

        # attach the rest of the tree below the reversed path
        quien_va_ser_padre.children.append(down_branch_connector)
        down_branch_connector.up = quien_va_ser_padre
        quien_va_ser_padre.up = quien_fue_padre

        down_branch_connector.dist += buffered_dist
        outgroup2 = parent_outgroup
        parent_outgroup.children.remove(outgroup)
        outgroup2.dist = 0
    else:
        outgroup2 = down_branch_connector

    outgroup.up = self
    outgroup2.up = self
    # outgroup is always the first children. Some functions may
    # trust on this fact, so do not change this.
    self.children = [outgroup, outgroup2]
    # split the root branch length evenly between the two children
    middist = (outgroup2.dist + outgroup.dist)/2
    outgroup.dist = middist
    outgroup2.dist = middist
    outgroup2.support = outgroup.support
783,868
Returns a string containing an ascii drawing of the tree. Parameters: ----------- show_internal: include internal edge names. compact: use exactly one line per tip. attributes: A list of node attributes to shown in the ASCII representation.
def get_ascii(self, show_internal=True, compact=False, attributes=None):
    """
    Returns a string containing an ascii drawing of the tree.

    Parameters:
    -----------
    show_internal: include internal edge names.
    compact: use exactly one line per tip.
    attributes: A list of node attributes to shown in the ASCII
        representation.
    """
    lines, _ = self._asciiArt(show_internal=show_internal,
                              compact=compact,
                              attributes=attributes)
    return '\n' + '\n'.join(lines)
783,871
Returns a dictionary pointing to the preloaded content of each internal node under this tree. Such a dictionary is intended to work as a cache for operations that require many traversal operations. Parameters: ----------- store_attr: Specifies the node attribute that should be cached (i.e. name, distance, etc.). When none, the whole node instance is cached. _store: (internal use)
def get_cached_content(self, store_attr=None, container_type=set, _store=None):
    """
    Returns a dictionary pointing to the preloaded content of each
    internal node under this tree.  Such a dictionary is intended to
    work as a cache for operations that require many traversal
    operations.

    Parameters:
    -----------
    store_attr: Specifies the node attribute that should be cached
        (i.e. name, distance, etc.).  When None, the whole node
        instance is cached.
    container_type: the container used for each cache entry; list and
        set (or subclasses of them) are supported.
    _store: (internal use)
    """
    if _store is None:
        _store = {}

    # depth-first: populate the cache for all descendants first
    for ch in self.children:
        ch.get_cached_content(store_attr=store_attr,
                              container_type=container_type,
                              _store=_store)

    if self.children:
        # internal node: aggregate the cached content of its children
        val = container_type()
        for ch in self.children:
            # FIX: use isinstance() instead of exact type comparison so
            # that subclasses of list/set passed as container_type are
            # aggregated instead of silently producing empty caches.
            if isinstance(val, list):
                val.extend(_store[ch])
            elif isinstance(val, set):
                val.update(_store[ch])
        _store[self] = val
    else:
        # leaf: cache the node itself or the requested attribute
        if store_attr is None:
            val = self
        else:
            val = getattr(self, store_attr)
        _store[self] = container_type([val])
    return _store
783,874
Draw a slice of x*y trees into a x,y grid non-overlapping. Parameters: ----------- x (int): Number of grid cells in x dimension. Default=automatically set. y (int): Number of grid cells in y dimension. Default=automatically set. start (int): Starting index of tree slice from .treelist. kwargs (dict): Toytree .draw() arguments as a dictionary.
def draw_tree_grid(self, nrows=None, ncols=None, start=0, fixed_order=False, shared_axis=False, **kwargs):
    """
    Draw a slice of nrows*ncols trees into a grid, non-overlapping.

    Parameters:
    -----------
    nrows (int): Number of grid cells in y dimension.
        Default=automatically set.
    ncols (int): Number of grid cells in x dimension.
        Default=automatically set.
    start (int): Starting index of tree slice from .treelist.
    fixed_order (bool or list): draw all trees with a fixed tip order;
        True uses the tip order of the first tree.
    shared_axis (bool): share one axis across grid cells.
    kwargs (dict): Toytree .draw() arguments as a dictionary.

    Returns (canvas, axes), or a TreeGrid when kwargs['debug'] is set,
    or (None, None) when the treelist is empty.
    """
    # return nothing if tree is empty
    if not self.treelist:
        print("Treelist is empty")
        return None, None

    # make a copy of the treelist so we don't modify the original
    if not fixed_order:
        treelist = self.copy().treelist
    else:
        if fixed_order is True:
            fixed_order = self.treelist[0].get_tip_labels()
        treelist = [
            ToyTree(i, fixed_order=fixed_order)
            for i in self.copy().treelist
        ]

    # apply kwargs styles to the individual tree styles
    for tree in treelist:
        tree.style.update(kwargs)

    # get reasonable values for nrows, ncols given treelist length
    if not (ncols or nrows):
        # neither given: default layout
        ncols = 5
        nrows = 1
    elif not (ncols and nrows):
        # exactly one of ncols/nrows was given; derive the other
        if ncols:
            if ncols == 1:
                nrows = self.ntrees if self.ntrees <= 5 else 2
            else:
                nrows = 2 if self.ntrees <= 10 else 3
        # BUG FIX: this branch was previously a plain 'if nrows:', which
        # also ran after the branch above had just derived nrows and so
        # silently overwrote a user-supplied ncols. Only derive ncols
        # when nrows alone was supplied.
        elif nrows:
            if nrows == 1:
                ncols = self.ntrees if self.ntrees <= 5 else 5
            else:
                ncols = 5 if self.ntrees <= 10 else 3
    else:
        # both given: use as-is
        pass

    # Return TreeGrid object for debugging
    draw = TreeGrid(treelist)
    if kwargs.get("debug"):
        return draw

    # Call update to draw plot. Kwargs still here for width, height, axes
    canvas, axes = draw.update(nrows, ncols, start, shared_axis, **kwargs)
    return canvas, axes
783,966
Returns a copy of the tree with the selected tips removed. The entered value can be a name or list of names. To prune on an internal node to create a subtree see the .prune() function instead. Parameters: tips: list of tip names. # example: ptre = tre.drop_tips(['a', 'b'])
def drop_tips(self, names=None, wildcard=None, regex=None):
    """
    Returns a copy of the tree with the selected tips removed.  The
    entered value can be a name or list of names.  To prune on an
    internal node to create a subtree see the .prune() function
    instead.

    Parameters:
    names: list of tip names.
    wildcard: a substring matched against tip names.
    regex: a regular expression matched against tip names.

    # example:
    ptre = tre.drop_tips(['a', 'b'])
    """
    # operate on a deep copy so the original tree is untouched
    nself = self.copy()

    # nothing was selected: return the untouched copy
    if not any([names, wildcard, regex]):
        return nself

    # resolve the selectors to a list of matching tip names
    tipnames = fuzzy_match_tipnames(
        ttree=nself,
        names=names,
        wildcard=wildcard,
        regex=regex,
        mrca=False,
        mono=False,
    )

    if len(tipnames) == len(nself):
        raise ToytreeError("You cannot drop all tips from the tree.")
    if not tipnames:
        raise ToytreeError("No tips selected.")

    # prune down to the complement of the selected tips
    keeptips = [tip for tip in nself.get_tip_labels()
                if tip not in tipnames]
    nself.treenode.prune(keeptips, preserve_branch_length=True)
    nself._coords.update()
    return nself
784,021
Create a TransitionList object from a 'transitions' Workflow attribute. Args: tdef: list of transition definitions states (StateList): already parsed state definitions. prev (TransitionList): transition definitions from a parent. Returns: TransitionList: the list of transitions defined in the 'tdef' argument.
def _setup_transitions(tdef, states, prev=()):
    """Create a TransitionList object from a 'transitions' attribute.

    Args:
        tdef: list of transition definitions
        states (StateList): already parsed state definitions.
        prev (TransitionList): transition definitions from a parent.

    Returns:
        TransitionList: the list of transitions defined in the 'tdef'
            argument.
    """
    trs = list(prev)
    for transition in tdef:
        if len(transition) != 3:
            raise TypeError(
                "Elements of the 'transition' attribute of a "
                "workflow should be three-tuples; got %r instead." % (transition,)
            )
        name, source, target = transition
        # a single source (string or State) is normalised to a list
        if is_string(source) or isinstance(source, State):
            source = [source]
        tr = Transition(name,
                        [states[src] for src in source],
                        states[target])
        if any(existing.name == tr.name for existing in trs):
            # A transition with this name already exists (e.g. from a
            # parent workflow): replace it in place.
            trs = [tr if existing.name == tr.name else existing
                   for existing in trs]
        else:
            trs.append(tr)
    return TransitionList(trs)
784,093
Create a TransitionList. Args: transitions (list of (name, source, target) tuple): the transitions to include.
def __init__(self, transitions):
    """Create a TransitionList.

    Args:
        transitions (iterable of Transition): the transitions to
            include.
    """
    # Map of name -> transition, plus a list preserving insertion
    # order (duplicated names keep the last definition in the map but
    # appear once per occurrence in the order list, as before).
    self._transitions = {}
    self._order = []
    for transition in transitions:
        self._transitions[transition.name] = transition
        self._order.append(transition.name)
784,103
Whether this hook applies to the given transition/state. Args: transition (Transition): the transition to check from_state (State or None): the state to check. If absent, the check is 'might this hook apply to the related transition, given a valid source state'.
def applies_to(self, transition, from_state=None):
    """Tell whether this hook fires for the given transition/state.

    Args:
        transition (Transition): the transition being considered.
        from_state (State or None): source state to check; when None the
            question becomes "could this hook apply to the transition
            for at least one of its valid source states".
    """
    # A wildcard hook matches everything.
    if '*' in self.names:
        return True
    # Transition-scoped kinds match on the transition itself.
    if self.kind in (HOOK_BEFORE, HOOK_AFTER, HOOK_CHECK):
        return self._match_transition(transition)
    # on_enter hooks match against the destination state.
    if self.kind == HOOK_ON_ENTER:
        return self._match_state(transition.target)
    # Remaining (state-scoped) kinds match against the source side.
    if from_state is not None:
        return self._match_state(from_state)
    return any(self._match_state(src) for src in transition.source)
784,109
Import previously defined implementations. Args: parent_implems (ImplementationList): List of implementations defined in a parent class.
def load_parent_implems(self, parent_implems):
    """Import custom implementations declared on a parent class.

    Args:
        parent_implems (ImplementationList): implementations defined in
            a parent class; each custom one is copied into this list.
    """
    for name, attribute, implem in parent_implems.get_custom_implementations():
        # Copy so the parent's implementation object is never shared.
        self.implementations[name] = implem.copy()
        self.transitions_at[name] = attribute
        self.custom_implems.add(name)
784,130
Add an implementation. Args: transition (Transition): the transition for which the implementation is added attribute (str): the name of the attribute where the implementation will be available function (callable): the actual implementation function **kwargs: extra arguments for the related ImplementationProperty.
def add_implem(self, transition, attribute, function, **kwargs):
    """Register an implementation for a transition.

    Args:
        transition (Transition): the transition being implemented.
        attribute (str): attribute name where the implementation will
            be made available.
        function (callable): the actual implementation function.
        **kwargs: extra arguments forwarded to ImplementationProperty.

    Returns:
        ImplementationProperty: the freshly registered wrapper.
    """
    prop = ImplementationProperty(
        field_name=self.state_field,
        transition=transition,
        workflow=self.workflow,
        implementation=function,
        **kwargs)
    self.implementations[transition.name] = prop
    self.transitions_at[transition.name] = attribute
    return prop
784,131
Log a transition. Args: transition (Transition): the name of the performed transition from_state (State): the source state instance (object): the modified object Kwargs: Any passed when calling the transition
def log_transition(self, transition, from_state, instance, *args, **kwargs):
    """Write an INFO log record for a performed transition.

    Args:
        transition (Transition): the transition that ran.
        from_state (State): the state the instance left.
        instance (object): the modified object.
        *args, **kwargs: arguments the transition was called with.
    """
    logger = logging.getLogger('xworkflows.transitions')
    # repr() of arbitrary objects can fail on bad unicode; degrade to a
    # placeholder rather than letting the transition itself blow up.
    try:
        description = u(repr(instance), 'ignore')
    except (UnicodeEncodeError, UnicodeDecodeError):
        description = u("<bad repr>")
    logger.info(
        u("%s performed transition %s.%s (%s -> %s)"),
        description,
        self.__class__.__name__,
        transition.name,
        from_state.name,
        transition.target.name)
784,142
Iterates over (valid) attributes of a class. Args: cls (object): the class to iterate over Yields: (str, obj) tuples: the class-level attributes.
def iterclass(cls):
    """Iterate over the readable class-level attributes of *cls*.

    Attributes whose access raises AttributeError (e.g. broken
    descriptors) are skipped — the same filtering the original
    hasattr() check provided, but with a single lookup instead of two.

    Args:
        cls (type): the class to iterate over.

    Yields:
        tuple[str, object]: (attribute name, attribute value) pairs.
    """
    for field in dir(cls):
        try:
            value = getattr(cls, field)
        except AttributeError:
            # Unreadable attribute: skip, as hasattr() would have.
            continue
        yield field, value
784,393
Calls the Player Stats API. Args: player_key: Key of the player board_key: key of the board Returns: json data
def get_player_stats(self, player_key, board_key):
    """Call the Player Stats API for a player on a given board.

    Args:
        player_key: key of the player.
        board_key: key of the board.

    Returns:
        The parsed JSON response data.
    """
    url = ''.join([
        self.api_path, 'player/', player_key,
        '/league/', board_key, '/stats/',
    ])
    return self.get_response(url)
784,726
Returns a list of all the airports For a given country this returns a list of dicts, one for each airport, with information like the iata code of the airport etc Args: country (str): The country for which the airports will be fetched Example:: from pyflightdata import FlightData f=FlightData() f.get_airports('India')
def get_airports(self, country):
    """Return all airports of a country.

    For a given country this returns a list of dicts, one per airport,
    with details such as the airport's IATA code.

    Args:
        country (str): country name; spaces are turned into dashes to
            form the URL slug.

    Example::

        from pyflightdata import FlightData
        f=FlightData()
        f.get_airports('India')
    """
    slug = country.replace(" ", "-")
    return self._fr24.get_airports_data(AIRPORT_BASE.format(slug))
785,390
Simple method that decodes a given metar string. Args: metar (str): The metar data Returns: The metar data in readable format Example:: from pyflightdata import FlightData f=FlightData() f.decode_metar('WSSS 181030Z 04009KT 010V080 9999 FEW018TCU BKN300 29/22 Q1007 NOSIG')
def decode_metar(self, metar):
    """Decode a METAR string into human-readable text.

    Args:
        metar (str): the raw METAR report.

    Returns:
        str: the decoded report, or a message asking the user to
        install the optional parser when it is missing.

    Example::

        from pyflightdata import FlightData
        f=FlightData()
        f.decode_metar('WSSS 181030Z 04009KT 010V080 9999 FEW018TCU BKN300 29/22 Q1007 NOSIG')
    """
    try:
        from metar import Metar
    except ImportError:
        # Previously a bare 'except' swallowed *every* error here; only
        # a missing package should trigger the install hint.
        return "Unable to parse metars. Please install parser from https://github.com/tomp/python-metar."
    m = Metar.Metar(metar)
    return m.string()
785,402
Convert compiled .ui file from PySide2 to Qt.py Arguments: lines (list): Each line of of .ui file Usage: >> with open("myui.py") as f: .. lines = convert(f.readlines())
def convert(lines):
    """Convert a compiled PySide2 .ui module to target Qt.py.

    Arguments:
        lines (list): each line of the compiled .ui file.

    Usage:
        >> with open("myui.py") as f:
        ..   lines = convert(f.readlines())
    """
    # (old, new) substitutions applied to every line, in order.
    replacements = (
        ("from PySide2 import", "from Qt import"),
        ("QtWidgets.QApplication.translate", "Qt.QtCompat.translate"),
    )
    converted = []
    for line in lines:
        for old, new in replacements:
            line = line.replace(old, new)
        converted.append(line)
    return converted
786,458
Copies a POSIX timestamp from a date and time string. Args: time_string (str): date and time value formatted as: YYYY-MM-DD hh:mm:ss.######[+-]##:## Where # are numeric digits ranging from 0 to 9 and the seconds fraction can be either 3 or 6 digits. The time of day, seconds fraction and time zone offset are optional. The default time zone is UTC.
def CopyFromDateTimeString(self, time_string):
    """Copies a POSIX timestamp from a date and time string.

    Args:
        time_string (str): date and time value formatted as:
            YYYY-MM-DD hh:mm:ss.######[+-]##:##
            Where # are numeric digits ranging from 0 to 9 and the
            seconds fraction can be either 3 or 6 digits. The time of
            day, seconds fraction and time zone offset are optional.
            The default time zone is UTC.
    """
    values = self._CopyDateTimeFromString(time_string)
    # Seconds precision: any fractional part is ignored here.
    self._timestamp = self._GetNumberOfSecondsFromElements(
        values.get('year', 0), values.get('month', 0),
        values.get('day_of_month', 0), values.get('hours', 0),
        values.get('minutes', 0), values.get('seconds', 0))
    self.is_local_time = False
786,503
Copies a POSIX timestamp from a date and time string. Args: time_string (str): date and time value formatted as: YYYY-MM-DD hh:mm:ss.######[+-]##:## Where # are numeric digits ranging from 0 to 9 and the seconds fraction can be either 3 or 6 digits. The time of day, seconds fraction and time zone offset are optional. The default time zone is UTC.
def CopyFromDateTimeString(self, time_string):
    """Copies a POSIX timestamp in milliseconds from a date/time string.

    Args:
        time_string (str): date and time value formatted as:
            YYYY-MM-DD hh:mm:ss.######[+-]##:##
            Where # are numeric digits ranging from 0 to 9 and the
            seconds fraction can be either 3 or 6 digits. The time of
            day, seconds fraction and time zone offset are optional.
            The default time zone is UTC.
    """
    values = self._CopyDateTimeFromString(time_string)
    seconds = self._GetNumberOfSecondsFromElements(
        values.get('year', 0), values.get('month', 0),
        values.get('day_of_month', 0), values.get('hours', 0),
        values.get('minutes', 0), values.get('seconds', 0))
    timestamp = seconds * definitions.MILLISECONDS_PER_SECOND
    microseconds = values.get('microseconds', 0)
    if microseconds:
        # µs -> ms needs a divisor of 1000; MILLISECONDS_PER_SECOND holds
        # that value, though the name reads oddly in this context.
        milliseconds, _ = divmod(
            microseconds, definitions.MILLISECONDS_PER_SECOND)
        timestamp += milliseconds
    self._timestamp = timestamp
    self.is_local_time = False
786,506
Initializes a POSIX timestamp in nanoseconds. Args: timestamp (Optional[int]): POSIX timestamp in nanoseconds.
def __init__(self, timestamp=None):
    """Initializes a POSIX timestamp in nanoseconds.

    Args:
        timestamp (Optional[int]): POSIX timestamp in nanoseconds.
    """
    super(PosixTimeInNanoseconds, self).__init__()
    self._timestamp = timestamp
    self._precision = definitions.PRECISION_1_NANOSECOND
786,508
Copies a POSIX timestamp from a date and time string. Args: time_string (str): date and time value formatted as: YYYY-MM-DD hh:mm:ss.######[+-]##:## Where # are numeric digits ranging from 0 to 9 and the seconds fraction can be either 3 or 6 digits. The time of day, seconds fraction and time zone offset are optional. The default time zone is UTC.
def _CopyFromDateTimeString(self, time_string):
    """Copies a POSIX timestamp in nanoseconds from a date/time string.

    Args:
        time_string (str): date and time value formatted as:
            YYYY-MM-DD hh:mm:ss.######[+-]##:##
            Where # are numeric digits ranging from 0 to 9 and the
            seconds fraction can be either 3 or 6 digits. The time of
            day, seconds fraction and time zone offset are optional.
            The default time zone is UTC.
    """
    values = self._CopyDateTimeFromString(time_string)
    timestamp = self._GetNumberOfSecondsFromElements(
        values.get('year', 0), values.get('month', 0),
        values.get('day_of_month', 0), values.get('hours', 0),
        values.get('minutes', 0), values.get('seconds', 0))
    timestamp *= definitions.NANOSECONDS_PER_SECOND
    microseconds = values.get('microseconds', None)
    if microseconds:
        # µs -> ns needs a factor of 1000; MILLISECONDS_PER_SECOND holds
        # that value, though the name reads oddly in this context.
        timestamp += microseconds * definitions.MILLISECONDS_PER_SECOND
    self._normalized_timestamp = None
    self._timestamp = timestamp
786,510
Initializes a Delphi TDateTime timestamp. Args: timestamp (Optional[float]): Delphi TDateTime timestamp.
def __init__(self, timestamp=None):
    """Initializes a Delphi TDateTime timestamp.

    Args:
        timestamp (Optional[float]): Delphi TDateTime timestamp.
    """
    super(DelphiDateTime, self).__init__()
    self._timestamp = timestamp
    self._precision = definitions.PRECISION_1_MILLISECOND
786,512
Copies a Delphi TDateTime timestamp from a string. Args: time_string (str): date and time value formatted as: YYYY-MM-DD hh:mm:ss.######[+-]##:## Where # are numeric digits ranging from 0 to 9 and the seconds fraction can be either 3 or 6 digits. The time of day, seconds fraction and time zone offset are optional. The default time zone is UTC. Raises: ValueError: if the time string is invalid or not supported.
def CopyFromDateTimeString(self, time_string):
    """Copies a Delphi TDateTime timestamp from a string.

    Args:
        time_string (str): date and time value formatted as:
            YYYY-MM-DD hh:mm:ss.######[+-]##:##
            Where # are numeric digits ranging from 0 to 9 and the
            seconds fraction can be either 3 or 6 digits. The time of
            day, seconds fraction and time zone offset are optional.
            The default time zone is UTC.

    Raises:
        ValueError: if the time string is invalid or not supported.
    """
    values = self._CopyDateTimeFromString(time_string)
    year = values.get('year', 0)
    if year > 9999:
        raise ValueError('Unsupported year value: {0:d}.'.format(year))
    seconds = self._GetNumberOfSecondsFromElements(
        year, values.get('month', 0), values.get('day_of_month', 0),
        values.get('hours', 0), values.get('minutes', 0),
        values.get('seconds', 0))
    # Convert POSIX seconds to fractional days, then shift onto the
    # Delphi serial-date base.
    timestamp = float(seconds) / definitions.SECONDS_PER_DAY
    timestamp += self._DELPHI_TO_POSIX_BASE
    microseconds = values.get('microseconds', None)
    if microseconds is not None:
        timestamp += float(microseconds) / definitions.MICROSECONDS_PER_DAY
    self._normalized_timestamp = None
    self._timestamp = timestamp
    self.is_local_time = False
786,514
Initializes a date time epoch. Args: year (int): year that is the start of the epoch e.g. 1970. month (int): month that is the start of the epoch, where 1 represents January. day_of_month (int): day of the month that is the start of the epoch, where 1 represents the first day.
def __init__(self, year, month, day_of_month):
    """Initializes a date time epoch.

    Args:
        year (int): year the epoch starts in, e.g. 1970.
        month (int): month the epoch starts in, where 1 is January.
        day_of_month (int): day of the month the epoch starts on,
            where 1 is the first day.
    """
    super(DateTimeEpoch, self).__init__()
    self.year = year
    self.month = month
    self.day_of_month = day_of_month
786,528
Determines if the date time values are equal to other. Args: other (DateTimeValues): date time values to compare against. Returns: bool: True if the date time values are equal to other.
def __eq__(self, other):
    """Determines if the date time values are equal to other.

    Args:
        other (DateTimeValues): date time values to compare against.

    Returns:
        bool: True if the date time values are equal to other.
    """
    if not isinstance(other, DateTimeValues):
        return False
    ours = self._GetNormalizedTimestamp()
    theirs = other._GetNormalizedTimestamp()  # pylint: disable=protected-access
    # Exactly one side missing its normalized timestamp: unequal.
    if (ours is None) != (theirs is None):
        return False
    return ours == theirs
786,530
Determines if the date time values are greater than or equal to other. Args: other (DateTimeValues): date time values to compare against. Returns: bool: True if the date time values are greater than or equal to other. Raises: ValueError: if other is not an instance of DateTimeValues.
def __ge__(self, other):
    """Determines if the date time values are greater than or equal to other.

    Args:
        other (DateTimeValues): date time values to compare against.

    Returns:
        bool: True if the date time values are greater than or equal to other.

    Raises:
        ValueError: if other is not an instance of DateTimeValues.
    """
    if not isinstance(other, DateTimeValues):
        raise ValueError('Other not an instance of DateTimeValues')
    ours = self._GetNormalizedTimestamp()
    theirs = other._GetNormalizedTimestamp()  # pylint: disable=protected-access
    # A missing normalized timestamp sorts before every concrete value.
    if ours is None:
        return theirs is None
    return theirs is None or ours >= theirs
786,531
Determines if the date time values are greater than other. Args: other (DateTimeValues): date time values to compare against. Returns: bool: True if the date time values are greater than other. Raises: ValueError: if other is not an instance of DateTimeValues.
def __gt__(self, other):
    """Determines if the date time values are greater than other.

    Args:
        other (DateTimeValues): date time values to compare against.

    Returns:
        bool: True if the date time values are greater than other.

    Raises:
        ValueError: if other is not an instance of DateTimeValues.
    """
    if not isinstance(other, DateTimeValues):
        raise ValueError('Other not an instance of DateTimeValues')
    ours = self._GetNormalizedTimestamp()
    theirs = other._GetNormalizedTimestamp()  # pylint: disable=protected-access
    # A missing normalized timestamp sorts before every concrete value.
    if ours is None:
        return False
    return theirs is None or ours > theirs
786,532
Determines if the date time values are less than other. Args: other (DateTimeValues): date time values to compare against. Returns: bool: True if the date time values are less than other. Raises: ValueError: if other is not an instance of DateTimeValues.
def __lt__(self, other):
    """Determines if the date time values are less than other.

    Args:
        other (DateTimeValues): date time values to compare against.

    Returns:
        bool: True if the date time values are less than other.

    Raises:
        ValueError: if other is not an instance of DateTimeValues.
    """
    if not isinstance(other, DateTimeValues):
        raise ValueError('Other not an instance of DateTimeValues')
    ours = self._GetNormalizedTimestamp()
    theirs = other._GetNormalizedTimestamp()  # pylint: disable=protected-access
    # A missing normalized timestamp sorts before every concrete value.
    if ours is None:
        return theirs is not None
    return theirs is not None and ours < theirs
786,533
Determines if the date time values are not equal to other. Args: other (DateTimeValues): date time values to compare against. Returns: bool: True if the date time values are not equal to other.
def __ne__(self, other):
    """Determines if the date time values are not equal to other.

    Args:
        other (DateTimeValues): date time values to compare against.

    Returns:
        bool: True if the date time values are not equal to other.
    """
    if not isinstance(other, DateTimeValues):
        return True
    ours = self._GetNormalizedTimestamp()
    theirs = other._GetNormalizedTimestamp()  # pylint: disable=protected-access
    # Exactly one side missing its normalized timestamp: unequal.
    if (ours is None) != (theirs is None):
        return True
    return ours != theirs
786,534
Copies a date from a string. Args: date_string (str): date value formatted as: YYYY-MM-DD Returns: tuple[int, int, int]: year, month, day of month. Raises: ValueError: if the date string is invalid or not supported.
def _CopyDateFromString(self, date_string):
    """Copies a date from a string.

    Args:
        date_string (str): date value formatted as: YYYY-MM-DD

    Returns:
        tuple[int, int, int]: year, month, day of month.

    Raises:
        ValueError: if the date string is invalid or not supported.
    """
    # The literal 'YYYY-MM-DD' needs at least 10 characters.
    if len(date_string) < 10:
        raise ValueError('Date string too short.')
    if date_string[4] != '-' or date_string[7] != '-':
        raise ValueError('Invalid date string.')
    try:
        year = int(date_string[0:4], 10)
    except ValueError:
        raise ValueError('Unable to parse year.')
    try:
        month = int(date_string[5:7], 10)
    except ValueError:
        raise ValueError('Unable to parse month.')
    try:
        day_of_month = int(date_string[8:10], 10)
    except ValueError:
        raise ValueError('Unable to parse day of month.')
    # _GetDaysPerMonth validates the month value as well.
    if not 1 <= day_of_month <= self._GetDaysPerMonth(year, month):
        raise ValueError('Day of month value out of bounds.')
    return year, month, day_of_month
786,536
Determines date values. Args: number_of_days (int): number of days since epoch. date_time_epoch (DateTimeEpoch): date and time of the epoch. Returns: tuple[int, int, int]: year, month, day of month.
def _GetDateValuesWithEpoch(self, number_of_days, date_time_epoch):
    """Determines date values relative to a specific epoch.

    Args:
        number_of_days (int): number of days since the epoch.
        date_time_epoch (DateTimeEpoch): date and time of the epoch.

    Returns:
        tuple[int, int, int]: year, month, day of month.
    """
    epoch = date_time_epoch
    return self._GetDateValues(
        number_of_days, epoch.year, epoch.month, epoch.day_of_month)
786,539
Retrieves the day of the year for a specific day of a month in a year. Args: year (int): year e.g. 1970. month (int): month, where 1 represents January. day_of_month (int): day of the month, where 1 represents the first day. Returns: int: day of year. Raises: ValueError: if the month or day of month value is out of bounds.
def _GetDayOfYear(self, year, month, day_of_month):
    """Retrieves the day of the year for a given date.

    Args:
        year (int): year e.g. 1970.
        month (int): month, where 1 represents January.
        day_of_month (int): day of the month, where 1 is the first day.

    Returns:
        int: day of year.

    Raises:
        ValueError: if the month or day of month value is out of bounds.
    """
    if month not in range(1, 13):
        raise ValueError('Month value out of bounds.')
    days_per_month = self._GetDaysPerMonth(year, month)
    if day_of_month < 1 or day_of_month > days_per_month:
        raise ValueError('Day of month value out of bounds.')
    # Day within the month plus all full months that precede it.
    return day_of_month + sum(
        self._GetDaysPerMonth(year, earlier) for earlier in range(1, month))
786,540
Retrieves the number of days in a month of a specific year. Args: year (int): year e.g. 1970. month (int): month, where 1 represents January. Returns: int: number of days in the month. Raises: ValueError: if the month value is out of bounds.
def _GetDaysPerMonth(self, year, month):
    """Retrieves the number of days in a month of a specific year.

    Args:
        year (int): year e.g. 1970.
        month (int): month, where 1 represents January.

    Returns:
        int: number of days in the month.

    Raises:
        ValueError: if the month value is out of bounds.
    """
    if month not in range(1, 13):
        raise ValueError('Month value out of bounds.')
    days = self._DAYS_PER_MONTH[month - 1]
    # February gains a day in leap years.
    if month == 2 and self._IsLeapYear(year):
        return days + 1
    return days
786,541
Retrieves the number of days in a century. Args: year (int): year in the century e.g. 1970. Returns: int: number of (remaining) days in the century. Raises: ValueError: if the year value is out of bounds.
def _GetNumberOfDaysInCentury(self, year):
    """Retrieves the number of days in the century containing *year*.

    Args:
        year (int): year in the century e.g. 1970.

    Returns:
        int: number of (remaining) days in the century: 36525 when the
            century number passes the leap-year test, otherwise 36524.

    Raises:
        ValueError: if the year value is out of bounds.
    """
    if year < 0:
        raise ValueError('Year value out of bounds.')
    # The leap test is applied to the century number (year // 100).
    century = year // 100
    return 36525 if self._IsLeapYear(century) else 36524
786,542
Determines time values. Args: number_of_seconds (int|decimal.Decimal): number of seconds. Returns: tuple[int, int, int, int]: days, hours, minutes, seconds.
def _GetTimeValues(self, number_of_seconds):
    """Splits a number of seconds into days, hours, minutes and seconds.

    Args:
        number_of_seconds (int|decimal.Decimal): number of seconds.

    Returns:
        tuple[int, int, int, int]: days, hours, minutes, seconds.
    """
    # Truncate any fractional part before decomposing.
    total_seconds = int(number_of_seconds)
    total_minutes, seconds = divmod(total_seconds, 60)
    total_hours, minutes = divmod(total_minutes, 60)
    days, hours = divmod(total_hours, 24)
    return days, hours, minutes, seconds
786,544
Copies a fake timestamp from a date and time string. Args: time_string (str): date and time value formatted as: YYYY-MM-DD hh:mm:ss.######[+-]##:## Where # are numeric digits ranging from 0 to 9 and the seconds fraction can be either 3 or 6 digits. The time of day, seconds fraction and time zone offset are optional. The default time zone is UTC.
def CopyFromDateTimeString(self, time_string):
    """Copies a fake timestamp from a date and time string.

    Args:
        time_string (str): date and time value formatted as:
            YYYY-MM-DD hh:mm:ss.######[+-]##:##
            Where # are numeric digits ranging from 0 to 9 and the
            seconds fraction can be either 3 or 6 digits. The time of
            day, seconds fraction and time zone offset are optional.
            The default time zone is UTC.
    """
    values = self._CopyDateTimeFromString(time_string)
    # Invalidate any cached normalized timestamp before updating.
    self._normalized_timestamp = None
    self._number_of_seconds = self._GetNumberOfSecondsFromElements(
        values.get('year', 0), values.get('month', 0),
        values.get('day_of_month', 0), values.get('hours', 0),
        values.get('minutes', 0), values.get('seconds', 0))
    self._microseconds = values.get('microseconds', None)
    self.is_local_time = False
786,553
Initializes a RFC2579 date-time. Args: rfc2579_date_time_tuple: (Optional[tuple[int, int, int, int, int, int, int]]): RFC2579 date-time time, contains year, month, day of month, hours, minutes, seconds and deciseconds. Raises: ValueError: if the system time is invalid.
def __init__(self, rfc2579_date_time_tuple=None):
    """Initializes a RFC2579 date-time.

    Args:
        rfc2579_date_time_tuple:
            (Optional[tuple[int, int, int, int, int, int, int, str, int, int]]):
            RFC2579 date-time: year, month, day of month, hours, minutes,
            seconds, deciseconds, direction from UTC ('+' or '-'), hours
            from UTC and minutes from UTC.

    Raises:
        ValueError: if the system time is invalid.
    """
    super(RFC2579DateTime, self).__init__()
    self._number_of_seconds = None
    self._precision = definitions.PRECISION_100_MILLISECONDS
    self.day_of_month = None
    self.deciseconds = None
    self.hours = None
    self.minutes = None
    self.month = None
    self.seconds = None
    self.year = None

    if rfc2579_date_time_tuple:
        if len(rfc2579_date_time_tuple) < 10:
            raise ValueError(
                'Invalid RFC2579 date-time tuple 10 elements required.')

        (year, month, day_of_month, hours, minutes, seconds, deciseconds,
         direction, utc_hours, utc_minutes) = rfc2579_date_time_tuple[:10]

        # The year is stored in two octets, so 65535 is the maximum; the
        # previous bound of 65536 was off by one.
        if year < 0 or year > 65535:
            raise ValueError('Year value out of bounds.')

        if month not in range(1, 13):
            raise ValueError('Month value out of bounds.')

        days_per_month = self._GetDaysPerMonth(year, month)
        if day_of_month < 1 or day_of_month > days_per_month:
            raise ValueError('Day of month value out of bounds.')

        if hours not in range(0, 24):
            raise ValueError('Hours value out of bounds.')

        if minutes not in range(0, 60):
            raise ValueError('Minutes value out of bounds.')

        # TODO: support a leap second?
        if seconds not in range(0, 60):
            raise ValueError('Seconds value out of bounds.')

        if deciseconds < 0 or deciseconds > 9:
            raise ValueError('Deciseconds value out of bounds.')

        if direction not in ('+', '-'):
            raise ValueError('Direction from UTC value out of bounds.')

        if utc_hours not in range(0, 14):
            raise ValueError('Hours from UTC value out of bounds.')

        if utc_minutes not in range(0, 60):
            raise ValueError('Minutes from UTC value out of bounds.')

        time_zone_offset = (utc_hours * 60) + utc_minutes
        # Note that when the sign of the time zone offset is negative
        # the difference needs to be added. We do so by flipping the sign.
        if direction != '-':
            time_zone_offset = -time_zone_offset

        self.year, self.month, self.day_of_month, self.hours, self.minutes = (
            self._AdjustForTimeZoneOffset(
                year, month, day_of_month, hours, minutes, time_zone_offset))
        self.seconds = seconds
        self.deciseconds = deciseconds

        self._number_of_seconds = self._GetNumberOfSecondsFromElements(
            self.year, self.month, self.day_of_month, self.hours,
            self.minutes, self.seconds)
786,555
Initializes a semantic time. Args: string (str): semantic representation of the time, such as: "Never", "Not set".
def __init__(self, string=None):
    """Initializes a semantic time.

    Args:
        string (str): semantic description of the time, for example
            "Never" or "Not set".
    """
    super(SemanticTime, self).__init__()
    self._string = string
786,565
Determines if the date time values are equal to other. Args: other (DateTimeValues): date time values to compare against. Returns: bool: True if the date time values are equal to other.
def __eq__(self, other):
    """Determines if the date time values are equal to other.

    Args:
        other (DateTimeValues): date time values to compare against.

    Returns:
        bool: True if the date time values are equal to other.
    """
    # Semantic times compare solely by their fixed sort order.
    return (isinstance(other, SemanticTime) and
            self._SORT_ORDER == other._SORT_ORDER)
786,566
Determines if the date time values are greater than or equal to other. Args: other (DateTimeValues): date time values to compare against. Returns: bool: True if the date time values are greater than or equal to other. Raises: ValueError: if other is not an instance of DateTimeValues.
def __ge__(self, other):
    """Determines if the date time values are greater than or equal to other.

    Args:
        other (DateTimeValues): date time values to compare against.

    Returns:
        bool: True if the date time values are greater than or equal to other.

    Raises:
        ValueError: if other is not an instance of DateTimeValues.
    """
    if not isinstance(other, interface.DateTimeValues):
        raise ValueError('Other not an instance of DateTimeValues')
    if isinstance(other, SemanticTime):
        return self._SORT_ORDER >= other._SORT_ORDER
    # Concrete date time values always sort after semantic ones.
    return False
786,567
Determines if the date time values are less than other. Args: other (DateTimeValues): date time values to compare against. Returns: bool: True if the date time values are less than other. Raises: ValueError: if other is not an instance of DateTimeValues.
def __lt__(self, other):
    """Determines if the date time values are less than other.

    Args:
        other (DateTimeValues): date time values to compare against.

    Returns:
        bool: True if the date time values are less than other.

    Raises:
        ValueError: if other is not an instance of DateTimeValues.
    """
    if not isinstance(other, interface.DateTimeValues):
        raise ValueError('Other not an instance of DateTimeValues')
    if isinstance(other, SemanticTime):
        return self._SORT_ORDER < other._SORT_ORDER
    # A semantic time precedes every concrete date time value.
    return True
786,568
Determines if the date time values are not equal to other. Args: other (DateTimeValues): date time values to compare against. Returns: bool: True if the date time values are not equal to other.
def __ne__(self, other):
    """Determines if the date time values are not equal to other.

    Args:
        other (DateTimeValues): date time values to compare against.

    Returns:
        bool: True if the date time values are not equal to other.
    """
    # Equal only to another semantic time with the same sort order.
    return (not isinstance(other, SemanticTime) or
            self._SORT_ORDER != other._SORT_ORDER)
786,569
Determines if the date time values are greater than other. Args: other (DateTimeValues): date time values to compare against. Returns: bool: True if the date time values are greater than other. Raises: ValueError: if other is not an instance of DateTimeValues.
def __gt__(self, other):
    """Determines if the date time values are greater than other.

    Args:
        other (DateTimeValues): date time values to compare against.

    Returns:
        bool: True if the date time values are greater than other.

    Raises:
        ValueError: if other is not an instance of DateTimeValues.
    """
    if isinstance(other, interface.DateTimeValues):
        # "Never" sorts after everything except another "Never".
        return not isinstance(other, Never)
    raise ValueError('Other not an instance of DateTimeValues')
786,570
Determines if the date time values are less than or equal to other. Args: other (DateTimeValues): date time values to compare against. Returns: bool: True if the date time values are less than or equal to other. Raises: ValueError: if other is not an instance of DateTimeValues.
def __le__(self, other):
    """Determines if the date time values are less than or equal to other.

    Args:
        other (DateTimeValues): date time values to compare against.

    Returns:
        bool: True if the date time values are less than or equal to other.

    Raises:
        ValueError: if other is not an instance of DateTimeValues.
    """
    if isinstance(other, interface.DateTimeValues):
        # "Never" is only less-than-or-equal to another "Never".
        return isinstance(other, Never)
    raise ValueError('Other not an instance of DateTimeValues')
786,571
Initializes a FAT date time. Args: fat_date_time (Optional[int]): FAT date time.
def __init__(self, fat_date_time=None):
    """Initializes a FAT date time.

    Args:
        fat_date_time (Optional[int]): FAT date time.
    """
    # Decode before calling the base initializer, matching the original
    # statement order.
    if fat_date_time is None:
        number_of_seconds = None
    else:
        number_of_seconds = self._GetNumberOfSeconds(fat_date_time)
    super(FATDateTime, self).__init__()
    self._number_of_seconds = number_of_seconds
    self._precision = definitions.PRECISION_2_SECONDS
786,577