code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def vector_norm(data, axis=None, out=None):
    """Return the Euclidean norm of ``data`` along ``axis``.

    When ``out`` is supplied the result is written into it in place and
    nothing is returned; otherwise the norm is returned (a scalar for a
    1-D input, an array otherwise).
    """
    arr = np.array(data, dtype=np.float64, copy=True)
    if out is not None:
        # In-place variant: square, reduce, then sqrt into the caller's buffer.
        arr *= arr
        np.sum(arr, axis=axis, out=out)
        np.sqrt(out, out)
        return None
    if arr.ndim == 1:
        # Fast scalar path for flat vectors.
        return math.sqrt(np.dot(arr, arr))
    arr *= arr
    result = np.atleast_1d(arr.sum(axis=axis))
    np.sqrt(result, result)
    return result
Return length, i.e. Euclidean norm, of ndarray along axis. >>> v = np.random.random(3) >>> n = vector_norm(v) >>> np.allclose(n, np.linalg.norm(v)) True >>> v = np.random.rand(6, 5, 3) >>> n = vector_norm(v, axis=-1) >>> np.allclose(n, np.sqrt(np.sum(v*v, axis=2))) True >>> n = vector_norm(v, axis=1) >>> np.allclose(n, np.sqrt(np.sum(v*v, axis=1))) True >>> v = np.random.rand(5, 4, 3) >>> n = np.empty((5, 3)) >>> vector_norm(v, axis=1, out=n) >>> np.allclose(n, np.sqrt(np.sum(v*v, axis=1))) True >>> vector_norm([]) 0.0 >>> vector_norm([1]) 1.0
def hdc_disk_interface(self, hdc_disk_interface):
    """Set the hdc disk interface for this QEMU VM.

    :param hdc_disk_interface: QEMU hdc disk interface
    """
    self._hdc_disk_interface = hdc_disk_interface
    log.info('QEMU VM "{name}" [{id}] has set the QEMU hdc disk interface to {interface}'.format(
        name=self._name,
        id=self._id,
        interface=self._hdc_disk_interface))
Sets the hdc disk interface for this QEMU VM. :param hdc_disk_interface: QEMU hdc disk interface
def switchport(self, **kwargs):
    """Set interface switchport status.

    Args:
        int_type (str): Type of interface (gigabitethernet, etc).
        name (str): Name of interface (e.g. 1/0/5).
        enabled (bool): Whether the switchport is enabled.
        get (bool): Get config instead of editing config.
        callback (function): Called with the ``ElementTree`` config.

    Returns:
        Return value of `callback`.

    Raises:
        KeyError: if `int_type` or `name` is not specified.
        ValueError: if `name` or `int_type` is not a valid value.
    """
    int_type = kwargs.pop('int_type').lower()
    name = kwargs.pop('name')
    enabled = kwargs.pop('enabled', True)
    callback = kwargs.pop('callback', self._callback)
    valid_types = ['gigabitethernet', 'tengigabitethernet',
                   'fortygigabitethernet', 'hundredgigabitethernet',
                   'port_channel', 'vlan']
    if int_type not in valid_types:
        raise ValueError("`int_type` must be one of: %s" % repr(valid_types))
    if not pynos.utilities.valid_interface(int_type, name):
        raise ValueError('`name` must be in the format of x/y/z for '
                         'physical interfaces or x for port channel.')
    switchport_args = dict(name=name)
    switchport = getattr(self._interface,
                         'interface_%s_switchport_basic_basic' % int_type)
    config = switchport(**switchport_args)
    if not enabled:
        config.find('.//*switchport-basic').set('operation', 'delete')
    if kwargs.pop('get', False):
        return callback(config, handler='get_config')
    return callback(config)
Set interface switchport status. Args: int_type (str): Type of interface. (gigabitethernet, tengigabitethernet, etc) name (str): Name of interface. (1/0/5, 1/0/10, etc) enabled (bool): Is the interface enabled? (True, False) get (bool): Get config instead of editing config. (True, False) callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if `int_type` or `name` is not specified. ValueError: if `name` or `int_type` is not a valid value. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.203'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.interface.switchport(name='225/0/19', ... int_type='tengigabitethernet') ... output = dev.interface.switchport(name='225/0/19', ... int_type='tengigabitethernet', enabled=False) ... dev.interface.switchport() ... # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): KeyError
def get_true_false_both(query_params, field_name, default):
    """Return a validated 'true'/'false'/'both' value from the query string.

    Falls back to *default* when the field is absent; raises a
    serializers.ValidationError for any other value.
    """
    allowed = ('true', 'false', 'both')
    value = query_params.get(field_name, default).lower()
    if value in allowed:
        return value
    choices = ', '.join(sorted(allowed))
    raise serializers.ValidationError({
        field_name: ['Must be one of [%s]' % choices],
    })
Tries to get and return a valid value of true, false, or both from the field name in the query string; raises a ValidationError for invalid values.
def installStatsLoop(statsFile, statsDelay):
    """Install an interval loop that dumps stats to *statsFile* every *statsDelay* seconds."""
    def dumpStats():
        # Dump, then reschedule ourselves.
        scales.dumpStatsTo(statsFile)
        reactor.callLater(statsDelay, dumpStats)

    def startStats():
        reactor.callLater(statsDelay, dumpStats)

    reactor.callWhenRunning(startStats)
Installs an interval loop that dumps stats to a file.
def add_customers(self, service_desk_id, list_of_usernames):
    """Add one or more existing customers to the given service desk.

    :param service_desk_id: str
    :param list_of_usernames: list
    :return: the customers added to the service desk
    """
    url = 'rest/servicedeskapi/servicedesk/{}/customer'.format(service_desk_id)
    payload = {'usernames': list_of_usernames}
    return self.post(url, headers=self.experimental_headers, data=payload)
Adds one or more existing customers to the given service desk. If you need to create a customer, see Create customer method. Administer project permission is required, or agents if public signups and invites are enabled for the Service Desk project. :param service_desk_id: str :param list_of_usernames: list :return: the customers added to the service desk
def allRnaQuantifications(self):
    """Yield every RNA quantification across all datasets and sets."""
    for dataset in self.getDatasets():
        for quant_set in dataset.getRnaQuantificationSets():
            for quantification in quant_set.getRnaQuantifications():
                yield quantification
Return an iterator over all rna quantifications
def wait_for_job(self, job, interval=5, timeout=60):
    """Poll BigQuery until *job* is done, has failed, or *timeout* elapses.

    *job* may be a job-resource dict or a bare job id (str/bytes/int).
    Returns the final job resource; raises BigQueryTimeoutException on
    timeout and an executing exception if the job reports an error.
    """
    # Accept either a job resource dict or a bare job id.
    if isinstance(job, (six.binary_type, six.text_type, int)):
        job_id = str(job)
    else:
        job_id = str(job['jobReference']['jobId'])
    job_resource = None
    complete = False
    start_time = time()
    elapsed_time = 0
    while not (complete or elapsed_time > timeout):
        sleep(interval)
        request = self.bigquery.jobs().get(projectId=self.project_id,
                                           jobId=job_id)
        job_resource = request.execute(num_retries=self.num_retries)
        self._raise_executing_exception_if_error(job_resource)
        complete = job_resource.get('status').get('state') == u'DONE'
        elapsed_time = time() - start_time
    if not complete:
        logger.error('BigQuery job %s timeout' % job_id)
        raise BigQueryTimeoutException()
    return job_resource
Waits until the job indicated by job_resource is done or has failed Parameters ---------- job : Union[dict, str] ``dict`` representing a BigQuery job resource, or a ``str`` representing the BigQuery job id interval : float, optional Polling interval in seconds, default = 5 timeout : float, optional Timeout in seconds, default = 60 Returns ------- dict Final state of the job resouce, as described here: https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/python/latest/bigquery_v2.jobs.html#get Raises ------ Union[JobExecutingException, BigQueryTimeoutException] On http/auth failures or timeout
def avail(search=None, verbose=False):
    """Return a dict of available (public, enabled) images keyed by uuid.

    search : string — optional keyword filter on the image name.
    verbose : boolean — toggle verbose output.
    """
    ret = {}
    res = __salt__['cmd.run_all']('imgadm avail -j')
    retcode = res['retcode']
    if retcode != 0:
        ret['Error'] = _exit_status(retcode)
        return ret
    for image in salt.utils.json.loads(res['stdout']):
        manifest = image['manifest']
        # Skip disabled or non-public images.
        if manifest['disabled'] or not manifest['public']:
            continue
        if search and search not in manifest['name']:
            continue
        data = _parse_image_meta(image, verbose)
        if data:
            ret[manifest['uuid']] = data
    return ret
Return a list of available images search : string search keyword verbose : boolean (False) toggle verbose output CLI Example: .. code-block:: bash salt '*' imgadm.avail [percona] salt '*' imgadm.avail verbose=True
def get_cached_token(self):
    """Return the cached auth token (refreshed if expired), or None."""
    token_info = None
    if self.cache_path:
        try:
            with open(self.cache_path) as handle:
                token_info = json.loads(handle.read())
            if self.is_token_expired(token_info):
                token_info = self.refresh_access_token(
                    token_info['refresh_token'])
        except IOError:
            # Missing/unreadable cache file: behave as if nothing is cached.
            pass
    return token_info
Gets a cached auth token
def verify(self, dataset, publication_date, source, refernce_url):
    """Verify a dataset by its id; raises ValueError on verification errors.

    NOTE(review): the parameter name `refernce_url` is misspelled but is part
    of the public signature (callers may pass it by keyword), so it is kept.
    """
    path = '/api/1.0/meta/verifydataset'
    request = definition.DatasetVerifyRequest(
        dataset, publication_date, source, refernce_url)
    result = self._api_post(definition.DatasetVerifyResponse, path, request)
    if result.status == 'failed':
        ver_err = '\r\n'.join(result.errors)
        msg = 'Dataset has not been verified, because of the following error(s): {}'.format(ver_err)
        raise ValueError(msg)
Verifies the dataset by its id.
def spin_z_op(param, oper):
    """Populate *oper* with the Sz operators for the given slave setup."""
    slaves = param['slaves']
    dim = 2**slaves
    oper['Sz'] = np.array([spin_z(slaves, spin) for spin in range(slaves)])
    oper['Sz+1/2'] = oper['Sz'] + 0.5 * np.eye(dim)
    oper['sumSz2'] = oper['Sz'].sum(axis=0)**2
    # Regroup the Sz matrices by (orbital, spin) to build partial sums.
    sz_by_orbital = oper['Sz'].reshape(param['orbitals'], 2, dim, dim)
    oper['sumSz-sp2'] = (sz_by_orbital.sum(axis=1)**2).sum(axis=0)
    oper['sumSz-or2'] = (sz_by_orbital.sum(axis=0)**2).sum(axis=0)
Generates the required Sz operators, given the system parameter setup and the operator dictionary
def history(self):
    """Return the changesets in which this file was changed."""
    if self.changeset is None:
        raise NodeError('Unable to get changeset for this FileNode')
    return self.changeset.get_file_history(self.path)
Returns a list of changesets for this file in which the file was changed
def add_url(self, post_data):
    """Add the posted URL as an entity and render the entity view."""
    img_desc = post_data['desc']
    img_path = post_data['file1']
    kind = post_data['kind'] if 'kind' in post_data else '3'
    cur_uid = tools.get_uudd(4)
    # Re-roll until we find an unused uid.
    while MEntity.get_by_uid(cur_uid):
        cur_uid = tools.get_uudd(4)
    MEntity.create_entity(cur_uid, img_path, img_desc, kind=kind)
    kwd = {'kind': kind}
    self.render('misc/entity/entity_view.html',
                filename=img_path,
                cfg=config.CMS_CFG,
                kwd=kwd,
                userinfo=self.userinfo)
Adding the URL as entity.
def GetMethodConfig(self, method):
    """Return the service's (cached) method config for *method*.

    Raises KeyError if the method does not exist or carries no config.
    """
    cached = self._method_configs.get(method)
    if cached:
        return cached
    func = getattr(self, method, None)
    if func is None:
        raise KeyError(method)
    method_config = getattr(func, 'method_config', None)
    if method_config is None:
        raise KeyError(method)
    # Build the config once and cache it for subsequent lookups.
    self._method_configs[method] = config = method_config()
    return config
Returns service cached method config for given method.
def _backward_delete_char(text, pos): if pos == 0: return text, pos return text[:pos - 1] + text[pos:], pos - 1
Delete the character behind pos.
def _frank_help(alpha, tau):
    """Compute the first-order Debye function to estimate theta."""
    def debye(t):
        return t / (np.exp(t) - 1)

    # Mean value of the Debye integrand over (EPSILON, alpha).
    mean_debye = integrate.quad(debye, EPSILON, alpha)[0] / alpha
    return 4 * (mean_debye - 1) / alpha + 1 - tau
Compute first order debye function to estimate theta.
def recv(request_context=None, non_blocking=False):
    """Receive data from the websocket.

    :param request_context:
    :param bool non_blocking:
    :rtype: bytes|str
    :raises IOError: If unable to receive a message.
    """
    receiver = uwsgi.websocket_recv_nb if non_blocking else uwsgi.websocket_recv
    return receiver(request_context)
Receives data from websocket. :param request_context: :param bool non_blocking: :rtype: bytes|str :raises IOError: If unable to receive a message.
def CreateApproval(self, reason=None, notified_users=None,
                   email_cc_addresses=None):
    """Create a new approval for the current user to access this hunt."""
    if not reason:
        raise ValueError("reason can't be empty")
    if not notified_users:
        raise ValueError("notified_users list can't be empty.")
    approval = user_pb2.ApiHuntApproval(
        reason=reason,
        notified_users=notified_users,
        email_cc_addresses=email_cc_addresses or [])
    args = user_pb2.ApiCreateHuntApprovalArgs(
        hunt_id=self.hunt_id, approval=approval)
    data = self._context.SendRequest("CreateHuntApproval", args)
    return HuntApproval(data=data,
                        username=self._context.username,
                        context=self._context)
Create a new approval for the current user to access this hunt.
def signature(cls):
    """Return kwargs to construct a `TaskRule` building the target Optionable.

    TODO: This indirection avoids a cycle between this module and the
    `rules` module.
    """
    snake_scope = cls.options_scope.replace('-', '_')
    construct_fn = functools.partial(_construct_optionable, cls)
    construct_fn.__name__ = 'construct_scope_{}'.format(snake_scope)
    return dict(
        output_type=cls.optionable_cls,
        input_selectors=tuple(),
        func=construct_fn,
        input_gets=(Get.create_statically_for_rule_graph(ScopedOptions, Scope),),
        dependency_optionables=(cls.optionable_cls,),
    )
Returns kwargs to construct a `TaskRule` that will construct the target Optionable. TODO: This indirection avoids a cycle between this module and the `rules` module.
def findLabel(self, query, create=False):
    """Find a label by name (case-insensitive str) or regex pattern.

    If no label matches and *create* is True (str query only), a new
    label is created. Returns the label or None.
    """
    if isinstance(query, six.string_types):
        query = query.lower()
    for label in self._labels.values():
        str_match = (isinstance(query, six.string_types)
                     and query == label.name.lower())
        regex_match = isinstance(query, Pattern) and query.search(label.name)
        if str_match or regex_match:
            return label
    if create and isinstance(query, six.string_types):
        return self.createLabel(query)
    return None
Find a label with the given name. Args: name (Union[_sre.SRE_Pattern, str]): A str or regular expression to match against the name. create (bool): Whether to create the label if it doesn't exist (only if name is a str). Returns: Union[gkeepapi.node.Label, None]: The label.
def _in_list(self, original_list, item): for item_list in original_list: if item is item_list: return True return False
Check whether an item is contained in a list. :param original_list: The list. :type original_list: list(object) :param item: The item. :type item: hatemile.util.html.htmldomelement.HTMLDOMElement :return: True if the item is contained in the list, False otherwise. :rtype: bool
def _GetTempOutputFileHandles(self, value_type): try: return self.temp_output_trackers[value_type], False except KeyError: return self._CreateOutputFileHandles(value_type), True
Returns the tracker for a given value type.
def handle_new_user(self, provider, access, info):
    "Create a shell auth.User and redirect."
    user = self.get_or_create_user(provider, access, info)
    access.user = user
    # Persist the link without touching other AccountAccess fields.
    AccountAccess.objects.filter(pk=access.pk).update(user=user)
    # Re-authenticate so the session backend is set correctly.
    user = authenticate(provider=access.provider,
                        identifier=access.identifier)
    login(self.request, user)
    return redirect(self.get_login_redirect(provider, user, access, True))
Create a shell auth.User and redirect.
def object_upload(self, bucket, key, content, content_type):
    """Write text *content* to object *key* in *bucket* via a media upload.

    Raises an exception if the object could not be written.
    """
    args = {'uploadType': 'media', 'name': key}
    headers = {'Content-Type': content_type}
    url = Api._UPLOAD_ENDPOINT + (Api._OBJECT_PATH % (bucket, ''))
    return google.datalab.utils.Http.request(
        url, args=args, data=content, headers=headers,
        credentials=self._credentials, raw_response=True)
Writes text content to the object. Args: bucket: the name of the bucket containing the object. key: the key of the object to be written. content: the text content to be written. content_type: the type of text content. Raises: Exception if the object could not be written to.
def on_click(self, event):
    """Control moc with mouse clicks."""
    pressed = event["button"]
    if pressed == self.button_pause:
        # From a stopped state start playback, otherwise toggle pause.
        if self.state == "STOP":
            self.py3.command_run("mocp --play")
        else:
            self.py3.command_run("mocp --toggle-pause")
    elif pressed == self.button_stop:
        self.py3.command_run("mocp --stop")
    elif pressed == self.button_next:
        self.py3.command_run("mocp --next")
    elif pressed == self.button_previous:
        self.py3.command_run("mocp --prev")
    else:
        self.py3.prevent_refresh()
Control moc with mouse clicks.
def _process_binary_trigger(trigger_value, condition):
    """Decode *condition* into an InputTrigger (or TrueTrigger) object."""
    ops = {0: ">", 1: "<", 2: ">=", 3: "<=", 4: "==", 5: 'always'}
    sources = {0: 'value', 1: 'count'}
    encoded_source = condition & 0b1  # low bit selects the source
    encoded_op = condition >> 1       # remaining bits select the operator
    oper = ops.get(encoded_op)
    source = sources.get(encoded_source)
    if oper is None:
        raise ArgumentError("Unknown operation in binary trigger",
                            condition=condition, operation=encoded_op,
                            known_ops=ops)
    if source is None:
        raise ArgumentError("Unknown value source in binary trigger",
                            source=source, known_sources=sources)
    if oper == 'always':
        return TrueTrigger()
    return InputTrigger(source, oper, trigger_value)
Create an InputTrigger object.
def _days_in_month(date): if date.month == 12: reference = type(date)(date.year + 1, 1, 1) else: reference = type(date)(date.year, date.month + 1, 1) return (reference - timedelta(days=1)).day
The number of days in the month of the given date
def __run_post_all(self):
    """Execute post-all.py and post-all.sql in each directory, if present."""
    for directory in self.dirs:
        post_all_py = os.path.join(directory, 'post-all.py')
        if os.path.isfile(post_all_py):
            print(' Applying post-all.py...', end=' ')
            self.__run_py_file(post_all_py, 'post-all')
            print('OK')
        post_all_sql = os.path.join(directory, 'post-all.sql')
        if os.path.isfile(post_all_sql):
            print(' Applying post-all.sql...', end=' ')
            self.__run_sql_file(post_all_sql)
            print('OK')
Execute the post-all.py and post-all.sql files if they exist
def OnSpellCheckToggle(self, event):
    """Spell checking toggle event handler."""
    spelltoolid = self.main_window.main_toolbar.label2id["CheckSpelling"]
    self.main_window.main_toolbar.ToggleTool(spelltoolid,
                                             not config["check_spelling"])
    # config stores values as their repr strings.
    config["check_spelling"] = repr(not config["check_spelling"])
    # Invalidate rendered cells so the new setting takes effect visually.
    self.main_window.grid.grid_renderer.cell_cache.clear()
    self.main_window.grid.ForceRefresh()
Spell checking toggle event handler
def install_dependencies(plugins_directory, ostream=sys.stdout): plugin_directories = plugins_directory.realpath().dirs() print >> ostream, 50 * '*' print >> ostream, 'Processing plugins:' print >> ostream, '\n'.join([' - {}'.format(p) for p in plugin_directories]) print >> ostream, '\n' + 50 * '-' + '\n' for plugin_dir_i in plugin_directories: try: on_plugin_install(plugin_dir_i, ostream=ostream) except RuntimeError, exception: print exception print >> ostream, '\n' + 50 * '-' + '\n'
Run ``on_plugin_install`` script for each plugin directory found in specified plugins directory. Parameters ---------- plugins_directory : str File system path to directory containing zero or more plugin subdirectories. ostream : file-like Output stream for status messages (default: ``sys.stdout``).
def get_term_by_year_and_quarter(year, quarter):
    """Return a uw_sws.models.Term for the passed year and quarter."""
    url = "{}/{},{}.json".format(term_res_url_prefix, year, quarter.lower())
    return _json_to_term_model(get_resource(url))
Returns a uw_sws.models.Term object, for the passed year and quarter.
def parse_net_kwargs(kwargs):
    """Parse estimator kwargs, resolving dotted names to objects.

    Empty/None input is returned unchanged.
    """
    if not kwargs:
        return kwargs
    return {key: _resolve_dotted_name(value) for key, value in kwargs.items()}
Parse arguments for the estimator. Resolves dotted names and instantiated classes. Examples -------- >>> kwargs = {'lr': 0.1, 'module__nonlin': 'torch.nn.Hardtanh(-2, max_val=3)'} >>> parse_net_kwargs(kwargs) {'lr': 0.1, 'module__nonlin': Hardtanh(min_val=-2, max_val=3)}
def get_table_width(self):
    """Return the table width in characters (set column widths first)."""
    if self.column_count == 0:
        return 0
    separators = (self._column_count - 1) * termwidth(self.column_separator_char)
    return (sum(self._column_widths)
            + separators
            + termwidth(self.left_border_char)
            + termwidth(self.right_border_char))
Get the width of the table as number of characters. Column width should be set prior to calling this method. Returns ------- int Width of the table as number of characters.
def resnet18(pretrained=False, **kwargs):
    """Construct a ResNet-18 model.

    Args:
        pretrained (bool): If True, load ImageNet-pretrained weights.
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
    return model
Constructs a ResNet-18 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
def _check_for_batch_clashes(xs):
    """Raise ValueError if any batch name overlaps a sample description."""
    names = {x["description"] for x in xs}
    dups = set()
    for x in xs:
        batches = tz.get_in(("metadata", "batch"), x)
        if not batches:
            continue
        if not isinstance(batches, (list, tuple)):
            batches = [batches]
        dups.update(batch for batch in batches if batch in names)
    if dups:
        raise ValueError("Batch names must be unique from sample descriptions.\n"
                         "Clashing batch names: %s" % sorted(list(dups)))
Check that batch names do not overlap with sample names.
def _sanitize_and_check(indexes):
    """Verify index types, converting lists to Index when types are mixed.

    Returns ``(indexes, kind)`` where kind is 'list', 'special' or 'array'.
    """
    kinds = list({type(index) for index in indexes})
    if list in kinds:
        if len(kinds) > 1:
            # Mixed lists and Index objects: lift every plain list to Index.
            indexes = [x if isinstance(x, Index) else Index(com.try_sort(x))
                       for x in indexes]
            kinds.remove(list)
        else:
            return indexes, 'list'
    if len(kinds) > 1 or Index not in kinds:
        return indexes, 'special'
    return indexes, 'array'
Verify the type of indexes and convert lists to Index. Cases: - [list, list, ...]: Return ([list, list, ...], 'list') - [list, Index, ...]: Return _sanitize_and_check([Index, Index, ...]) Lists are sorted and converted to Index. - [Index, Index, ...]: Return ([Index, Index, ...], TYPE) TYPE = 'special' if at least one special type, 'array' otherwise. Parameters ---------- indexes : list of Index or list objects Returns ------- sanitized_indexes : list of Index or list objects type : {'list', 'array', 'special'}
def use_federated_vault_view(self):
    """Pass through to provider AuthorizationLookupSession.use_federated_vault_view."""
    self._vault_view = FEDERATED
    for session in self._get_provider_sessions():
        try:
            session.use_federated_vault_view()
        except AttributeError:
            # Not every provider session supports vault views.
            pass
Pass through to provider AuthorizationLookupSession.use_federated_vault_view
def on_demand_annotation(twitter_app_key, twitter_app_secret, user_twitter_id):
    """Leverage twitter lists for on-demand annotation of popular users.

    TODO: Do this.
    """
    twitter = login(twitter_app_key, twitter_app_secret)
    memberships = twitter.get_list_memberships(user_id=user_twitter_id,
                                               count=1000)
    for twitter_list in memberships:
        print(twitter_list)
    return memberships
A service that leverages twitter lists for on-demand annotation of popular users. TODO: Do this.
def close(self):
    """Release system resources held by this HID device."""
    if not self.is_opened():
        return
    self.__open_status = False
    # Stop the raw-report reader first so no new events arrive.
    if self.__reading_thread and self.__reading_thread.is_alive():
        self.__reading_thread.abort()
    # Wake any consumer blocked on the input queue.
    if self._input_report_queue:
        self._input_report_queue.release_events()
    if self.__input_processing_thread and \
            self.__input_processing_thread.is_alive():
        self.__input_processing_thread.abort()
    # Free preparsed data, clearing the pointer before the API call.
    if self.ptr_preparsed_data:
        ptr_preparsed_data = self.ptr_preparsed_data
        self.ptr_preparsed_data = None
        hid_dll.HidD_FreePreparsedData(ptr_preparsed_data)
    if self.__reading_thread:
        self.__reading_thread.join()
    if self.hid_handle:
        winapi.CloseHandle(self.hid_handle)
    if self.__input_processing_thread:
        self.__input_processing_thread.join()
    # Drain cached button caps after resetting instance state.
    button_caps_storage = self.__button_caps_storage
    self.__reset_vars()
    while button_caps_storage:
        item = button_caps_storage.pop()
        del item
Release system resources
def spy(self):
    """Attach a new spy to this stub and return it."""
    new_spy = Spy(self)
    self._expectations.append(new_spy)
    return new_spy
Add a spy to this stub. Return the spy.
def conflicting_deps(tree):
    """Return a dict of package -> list of its conflicting requirements.

    :param tree: the requirements tree (dict)
    :rtype: dict of DistPackage -> list of unsatisfied/unknown ReqPackage
    """
    conflicting = defaultdict(list)
    for package, requirements in tree.items():
        bad = [req for req in requirements if req.is_conflicting()]
        if bad:
            conflicting[package].extend(bad)
    return conflicting
Returns dependencies which are not present or conflict with the requirements of other packages. e.g. will warn if pkg1 requires pkg2==2.0 and pkg2==1.0 is installed :param tree: the requirements tree (dict) :returns: dict of DistPackage -> list of unsatisfied/unknown ReqPackage :rtype: dict
def _data_to_json(data): if type(data) not in [str, unicode]: data = json.dumps(data) return data
Convert to json if it isn't already a string. Args: data (str): data to convert to json
def unassign(self):
    """Unassign the registered object from this PID.

    :returns: `True` if the PID is successfully unassigned.
    """
    if self.object_uuid is None and self.object_type is None:
        return True
    try:
        with db.session.begin_nested():
            if self.is_redirected():
                db.session.delete(Redirect.query.get(self.object_uuid))
                # Only registered PIDs can be redirected, so restore
                # the registered status.
                self.status = PIDStatus.REGISTERED
            self.object_type = None
            self.object_uuid = None
            db.session.add(self)
    except SQLAlchemyError:
        logger.exception("Failed to unassign object.".format(self),
                         extra=dict(pid=self))
        raise
    logger.info("Unassigned object from {0}.".format(self),
                extra=dict(pid=self))
    return True
Unassign the registered object. Note: Only registered PIDs can be redirected so we set it back to registered. :returns: `True` if the PID is successfully unassigned.
def check_for_missing_options(config):
    """Raise MissingRequiredOption if any required option is still unset.

    Returns the config unchanged when everything required has a value.
    """
    for section_name, section in config:
        for option_name, option in section:
            if option.required and option.value is None:
                raise exc.MissingRequiredOption(
                    "Option {0} in namespace {1} is required.".format(
                        option_name,
                        section_name,
                    )
                )
    return config
Iter over a config and raise if a required option is still not set. Args: config (confpy.core.config.Configuration): The configuration object to validate. Raises: MissingRequiredOption: If any required options are not set in the configuration object. Required options with default values are considered set and will not cause this function to raise.
def evaluate(self, x):
    r"""Evaluate the kernels at the given graph frequencies.

    Returns an ndarray of shape ``(Nf,) + x.shape`` holding each
    kernel's frequency response.
    """
    x = np.asanyarray(x)
    # Preallocate one row per filter, then fill row-by-row.
    responses = np.empty([self.Nf] + list(x.shape))
    for row, kernel in enumerate(self._kernels):
        responses[row] = kernel(x)
    return responses
r"""Evaluate the kernels at given frequencies. Parameters ---------- x : array_like Graph frequencies at which to evaluate the filter. Returns ------- y : ndarray Frequency response of the filters. Shape ``(g.Nf, len(x))``. Examples -------- Frequency response of a low-pass filter: >>> import matplotlib.pyplot as plt >>> G = graphs.Logo() >>> G.compute_fourier_basis() >>> f = filters.Expwin(G) >>> G.compute_fourier_basis() >>> y = f.evaluate(G.e) >>> plt.plot(G.e, y[0]) # doctest: +ELLIPSIS [<matplotlib.lines.Line2D object at ...>]
def generate_folds(node_label_matrix, labelled_node_indices,
                   number_of_categories, percentage, number_of_folds=10):
    """Form seed-node train/test folds; returns a generator of (train, test).

    *percentage* is the share of labelled samples used for training.
    """
    number_of_labeled_nodes = labelled_node_indices.size
    training_set_size = int(np.ceil(percentage * number_of_labeled_nodes / 100))
    train_list = []
    test_list = []
    for trial in np.arange(number_of_folds):
        train, test = valid_train_test(
            node_label_matrix[labelled_node_indices, :],
            training_set_size,
            number_of_categories,
            trial)
        # Map fold-local positions back to global node indices.
        train_list.append(labelled_node_indices[train])
        test_list.append(labelled_node_indices[test])
    return ((train, test) for train, test in zip(train_list, test_list))
Form the seed nodes for training and testing. Inputs: - node_label_matrix: The node-label ground truth in a SciPy sparse matrix format. - labelled_node_indices: A NumPy array containing the labelled node indices. - number_of_categories: The number of categories/classes in the learning. - percentage: The percentage of labelled samples that will be used for training. Output: - folds: A generator containing train/test set folds.
def to_json(self):
    """Return the JSON representation of the API key."""
    result = super(ApiKey, self).to_json()
    result.update({
        'name': self.name,
        'description': self.description,
        'accessToken': self.access_token,
        'environments': [env.to_json() for env in self.environments],
    })
    return result
Returns the JSON representation of the API key.
def _make_info(self, name, stat_result, namespaces):
    """Create an `Info` object from a stat result.

    Only the namespaces requested in *namespaces* are populated.
    """
    info = {
        'basic': {
            'name': name,
            'is_dir': stat.S_ISDIR(stat_result.st_mode),
        }
    }
    if 'details' in namespaces:
        info['details'] = self._make_details_from_stat(stat_result)
    if 'stat' in namespaces:
        # Copy every raw st_* field from the stat result.
        info['stat'] = {
            key: getattr(stat_result, key)
            for key in dir(stat_result) if key.startswith('st_')
        }
    if 'access' in namespaces:
        info['access'] = self._make_access_from_stat(stat_result)
    return Info(info)
Create an `Info` object from a stat result.
def Open(self, filename, read_only=False):
    """Open the SQLite database file.

    Returns True on success; raises RuntimeError if already opened.
    Note: sqlite3 has no real read-only mode, so *read_only* is only
    recorded on the instance.
    """
    if self._connection:
        raise RuntimeError('Cannot open database already opened.')
    self.filename = filename
    self.read_only = read_only
    try:
        self._connection = sqlite3.connect(filename)
    except sqlite3.OperationalError:
        return False
    if not self._connection:
        return False
    self._cursor = self._connection.cursor()
    return bool(self._cursor)
Opens the database file. Args: filename (str): filename of the database. read_only (Optional[bool]): True if the database should be opened in read-only mode. Since sqlite3 does not support a real read-only mode we fake it by only permitting SELECT queries. Returns: bool: True if successful. Raises: RuntimeError: if the database is already opened.
def register_elastic_task(self, *args, **kwargs):
    """Register an elastic task (forces ``task_class`` to ElasticTask)."""
    kwargs["task_class"] = ElasticTask
    return self.register_task(*args, **kwargs)
Register an elastic task.
def yaml_tag_constructor(loader, tag, node):
    """Convert a shorthand intrinsic-function tag to its full name."""
    def _resolve(loader, tag, node):
        if tag == '!GetAtt':
            # '!GetAtt a.b' is shorthand for the ['a', 'b'] form.
            return node.value.split('.')
        if type(node) == yaml.SequenceNode:
            return loader.construct_sequence(node)
        return node.value

    key = 'Ref' if tag == '!Ref' else 'Fn::{}'.format(tag[1:])
    return {key: _resolve(loader, tag, node)}
convert shorthand intrinsic function to full name
def get_id(date=None, project: str = 'sip', instance_id: int = None) -> str:
    """Return a Scheduling Block Instance (SBI) identifier.

    Args:
        date (str or datetime.datetime, optional): UTC date of the SBI.
        project (str, optional): Project name.
        instance_id (int, optional): SBI instance identifier; random if omitted.
    """
    if date is None:
        date = datetime.datetime.utcnow()
    if isinstance(date, datetime.datetime):
        date = date.strftime('%Y%m%d')
    if instance_id is None:
        # Random 4-digit instance id when none supplied.
        instance_id = randint(0, 9999)
    return 'SBI-{}-{}-{:04d}'.format(date, project, instance_id)
Get a SBI Identifier. Args: date (str or datetime.datetime, optional): UTC date of the SBI project (str, optional ): Project Name instance_id (int, optional): SBI instance identifier Returns: str, Scheduling Block Instance (SBI) ID.
def strip_files(files, argv_max=(256 * 1024)):
    """Strip a list of files, batching paths to stay under the argv limit."""
    tostrip = [(fn, flipwritable(fn)) for fn in files]
    while tostrip:
        cmd = list(STRIPCMD)
        flips = []
        pathlen = sum(len(s) + 1 for s in cmd)
        while pathlen < argv_max:
            if not tostrip:
                break
            added, flip = tostrip.pop()
            pathlen += len(added) + 1
            cmd.append(added)
            flips.append((added, flip))
        else:
            # Overshot argv_max: push the last path back for the next batch.
            cmd.pop()
            tostrip.append(flips.pop())
        os.spawnv(os.P_WAIT, cmd[0], cmd)
        # Restore the original writable flags for this batch.
        for args in flips:
            flipwritable(*args)
Strip a list of files
def get_field_mapping(self, using=None, **kwargs):
    """Retrieve the mapping definition of a specific field.

    Extra keyword arguments are passed straight to
    ``Elasticsearch.indices.get_field_mapping``.
    """
    connection = self._get_connection(using)
    return connection.indices.get_field_mapping(index=self._name, **kwargs)
Retrieve mapping definition of a specific field. Any additional keyword arguments will be passed to ``Elasticsearch.indices.get_field_mapping`` unchanged.
def get_target_list(module, action_parameter=None):
    """List available targets for the given eselect module, or None."""
    output = exec_action(module, 'list', action_parameter=action_parameter)
    if not output:
        return None
    if not isinstance(output, list):
        return None
    # First whitespace-separated token of each line is the target name.
    return [line.split(None, 1)[0] for line in output]
List available targets for the given module. module name of the module to be queried for its targets action_parameter additional params passed to the defined action .. versionadded:: 2016.11.0 CLI Example: .. code-block:: bash salt '*' eselect.get_target_list kernel
def execute(self):
    """Stop the named cluster if it's running."""
    cluster_name = self.params.cluster
    creator = make_creator(self.params.config,
                           storage_path=self.params.storage)
    try:
        cluster = creator.load_cluster(cluster_name)
    except (ClusterNotFound, ConfigurationError) as err:
        log.error("Cannot stop cluster `%s`: %s", cluster_name, err)
        return os.EX_NOINPUT
    if not self.params.yes:
        confirm_or_abort(
            "Do you want really want to stop cluster `{cluster_name}`?"
            .format(cluster_name=cluster_name),
            msg="Aborting upon user request.")
    print("Destroying cluster `%s` ..." % cluster_name)
    cluster.stop(force=self.params.force, wait=self.params.wait)
Stops the cluster if it's running.
def copy(self):
    """Create a deep copy of this sequence.

    Returns:
        :obj:`.FileSequence`:
    """
    duplicate = self.__class__.__new__(self.__class__)
    duplicate.__dict__ = self.__dict__.copy()
    # The frame set is the only mutable member that needs a deep copy.
    duplicate._frameSet = None
    if self._frameSet is not None:
        duplicate._frameSet = self._frameSet.copy()
    return duplicate
Create a deep copy of this sequence Returns: :obj:`.FileSequence`:
def parse_url(url):
    """Render *url*, substituting mustache placeholders from the environment.

    :param basestring url:
    :rtype: basestring
    :raise: KeyError if an environment variable is needed but not found.
    """
    try:
        url = unicode(url)
    except NameError:
        # Python 3: str is already unicode.
        pass
    parsed = pystache.parse(url)
    variables = (element.key for element in parsed._parse_tree
                 if isinstance(element, _EscapeNode))
    return pystache.render(url, {var: os.environ[var] for var in variables})
Parse the given url and update it with environment value if required. :param basestring url: :rtype: basestring :raise: KeyError if environment variable is needed but not found.
def html(self, data='', py=True):
    """Convert data to html value format."""
    value = self.to_html(data) if py else data
    if self.static:
        return str('<span class="value">%s</span>' % safe_str(value))
    # Hidden fields render through the Hidden builder instead.
    build = Hidden if self.hidden else self.build
    self._get_http_attrs()
    return str(build(name=self.name, value=value, id=self.id,
                     **self.html_attrs))
Convert data to html value format.
def gfdist(target, abcorr, obsrvr, relate, refval, adjust, step, nintvls,
           cnfine, result=None):
    """Return the time window over which a constraint on observer-target
    distance is met.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfdist_c.html
    """
    assert isinstance(cnfine, stypes.SpiceCell)
    assert cnfine.is_double()
    if result is None:
        result = stypes.SPICEDOUBLE_CELL(2000)
    else:
        assert isinstance(result, stypes.SpiceCell)
        assert result.is_double()
    target = stypes.stringToCharP(target)
    abcorr = stypes.stringToCharP(abcorr)
    obsrvr = stypes.stringToCharP(obsrvr)
    relate = stypes.stringToCharP(relate)
    refval = ctypes.c_double(refval)
    adjust = ctypes.c_double(adjust)
    step = ctypes.c_double(step)
    nintvls = ctypes.c_int(nintvls)
    libspice.gfdist_c(target, abcorr, obsrvr, relate, refval, adjust,
                      step, nintvls, ctypes.byref(cnfine),
                      ctypes.byref(result))
    return result
Return the time window over which a specified constraint on observer-target distance is met. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfdist_c.html :param target: Name of the target body. :type target: str :param abcorr: Aberration correction flag. :type abcorr: str :param obsrvr: Name of the observing body. :type obsrvr: str :param relate: Relational operator. :type relate: str :param refval: Reference value. :type refval: float :param adjust: Adjustment value for absolute extrema searches. :type adjust: float :param step: Step size used for locating extrema and roots. :type step: float :param nintvls: Workspace window interval count. :type nintvls: int :param cnfine: SPICE window to which the search is confined. :type cnfine: spiceypy.utils.support_types.SpiceCell :param result: Optional SPICE window containing results. :type result: spiceypy.utils.support_types.SpiceCell
def expires_on(self):
    """The timestamp for when the table will expire, or None if unknown."""
    raw = self._info.get('expirationTime')
    if raw is None:
        return None
    return _parser.Parser.parse_timestamp(raw)
The timestamp for when the table will expire, or None if unknown.
def DetectGce():
    """Determine whether or not we're running on GCE.

    This is based on:
      https://cloud.google.com/compute/docs/metadata#runninggce

    Returns:
      True iff we're running on a GCE instance.
    """
    host = os.environ.get('GCE_METADATA_ROOT', 'metadata.google.internal')
    request = urllib_request.Request(
        'http://{}'.format(host), headers={'Metadata-Flavor': 'Google'})
    # Bypass any configured proxy: the metadata server is link-local.
    opener = urllib_request.build_opener(urllib_request.ProxyHandler({}))
    try:
        response = opener.open(request)
    except urllib_error.URLError:
        return False
    return (response.getcode() == http_client.OK and
            response.headers.get('metadata-flavor') == 'Google')
Determine whether or not we're running on GCE. This is based on: https://cloud.google.com/compute/docs/metadata#runninggce Returns: True iff we're running on a GCE instance.
def aggregate_returns(returns, convert_to):
    """Aggregates returns by week, month, or year.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
    convert_to : str
        Can be 'weekly', 'monthly', or 'yearly'.

    Returns
    -------
    aggregated_returns : pd.Series
    """
    def last_cumulative(chunk):
        # Total (compounded) return of the chunk.
        return cum_returns(chunk).iloc[-1]

    groupings = {
        WEEKLY: [lambda x: x.year, lambda x: x.isocalendar()[1]],
        MONTHLY: [lambda x: x.year, lambda x: x.month],
        YEARLY: [lambda x: x.year],
    }
    if convert_to not in groupings:
        raise ValueError(
            'convert_to must be {}, {} or {}'.format(WEEKLY, MONTHLY, YEARLY)
        )
    return returns.groupby(groupings[convert_to]).apply(last_cumulative)
Aggregates returns by week, month, or year. Parameters ---------- returns : pd.Series Daily returns of the strategy, noncumulative. - See full explanation in :func:`~empyrical.stats.cum_returns`. convert_to : str Can be 'weekly', 'monthly', or 'yearly'. Returns ------- aggregated_returns : pd.Series
def parse(self):
    """Parse messages from the internal buffer and put them on the
    receive queue (or hand them to the registered callback).

    Returns the INCOMPLETE status once the buffer no longer holds a
    full packet; otherwise keeps consuming packets in a loop.
    """
    while True:
        status, self._buffer, packet = Packet.parse_msg(self._buffer)
        if status == PARSE_RESULT.INCOMPLETE:
            # Not enough bytes for a complete packet; wait for more data.
            return status
        if status == PARSE_RESULT.OK and packet:
            packet.received = datetime.datetime.now()
            if isinstance(packet, UTETeachInPacket) and self.teach_in:
                # Automatically acknowledge UTE teach-in requests.
                response_packet = packet.create_response_packet(self.base_id)
                self.logger.info('Sending response to UTE teach-in.')
                self.send(response_packet)
            if self.__callback is None:
                self.receive.put(packet)
            else:
                self.__callback(packet)
            self.logger.debug(packet)
Parses messages and puts them to receive queue
def inject_dll(self):
    """Inject the RLBot core DLL into Rocket League (no GUI).

    The injector's exit code (0-5) indexes ``injector_codes``; only
    INJECTION_SUCCESSFUL and RLBOT_DLL_ALREADY_INJECTED are treated as
    success. On failure the process exits. If Rocket League is not
    running, the injector stays hidden, waits for it to open, and
    injects as soon as it does.

    :return: the successful injection status string.
    :raises FileNotFoundError: if a required injector/DLL file is missing.
    """
    self.logger.info('Injecting DLL')
    injector_dir = os.path.join(get_dll_directory(), 'RLBot_Injector.exe')
    # Fail early with a clear message if any required file is missing
    # (these are commonly quarantined by antivirus software).
    for file in ['RLBot_Injector.exe', 'RLBot_Core.dll', 'RLBot_Core_Interface.dll', 'RLBot_Core_Interface_32.dll']:
        file_path = os.path.join(get_dll_directory(), file)
        if not os.path.isfile(file_path):
            raise FileNotFoundError(f'{file} was not found in {get_dll_directory()}. '
                                    'Please check that the file exists and your antivirus '
                                    'is not removing it. See https://github.com/RLBot/RLBot/wiki/Antivirus-Notes')
    # 'hidden' runs the injector without a GUI; its exit code encodes status.
    incode = subprocess.call([injector_dir, 'hidden'])
    injector_codes = ['INJECTION_SUCCESSFUL',
                      'INJECTION_FAILED',
                      'MULTIPLE_ROCKET_LEAGUE_PROCESSES_FOUND',
                      'RLBOT_DLL_ALREADY_INJECTED',
                      'RLBOT_DLL_NOT_FOUND',
                      'MULTIPLE_RLBOT_DLL_FILES_FOUND']
    injector_valid_codes = ['INJECTION_SUCCESSFUL', 'RLBOT_DLL_ALREADY_INJECTED']
    injection_status = injector_codes[incode]
    if injection_status in injector_valid_codes:
        self.logger.info('Finished Injecting DLL')
        return injection_status
    else:
        self.logger.error('Failed to inject DLL: ' + injection_status)
        sys.exit()
Calling this function will inject the DLL without GUI DLL will return status codes from 0 to 5 which correspond to injector_codes DLL injection is only valid if codes are 0->'INJECTION_SUCCESSFUL' or 3->'RLBOT_DLL_ALREADY_INJECTED' It will print the output code and if it's not valid it will kill runner.py If RL isn't running the Injector will stay hidden waiting for RL to open and inject as soon as it does
def get_selected_text_metrics(self):
    """Returns current document selected text metrics.

    :return: ``(text, line, start_column)`` or an empty tuple when
        nothing is selected.
    :rtype: tuple
    """
    text = self.get_selected_text()
    if not text:
        return tuple()
    # Cursor sits at the selection end; subtract the length to get the start.
    start_column = self.get_cursor_column() - len(text)
    return (text, self.get_cursor_line(), start_column)
Returns current document selected text metrics. :return: Selected text metrics. :rtype: tuple
def dependency_list(self):
    r"""Return model dependencies ordered so each model is calculated
    before any model that asks for its data.

    Raises an exception if the dependency graph has cycles, meaning the
    dependencies are unresolvable (there is no call order that works);
    inspect visually with ``dependency_graph`` in that case.

    See Also
    --------
    dependency_graph
    dependency_map
    """
    graph = self.dependency_graph()
    cycles = list(nx.simple_cycles(graph))
    if cycles:
        # Close the loop for a readable a -> b -> a message.
        loop = cycles[0] + [cycles[0][0]]
        raise Exception('Cyclic dependency found: ' + ' -> '.join(loop))
    order = nx.algorithms.dag.lexicographical_topological_sort(graph, sorted)
    return list(order)
r''' Returns a list of dependencies in the order with which they should be called to ensure data is calculated by one model before it's asked for by another. Notes ----- This raises an exception if the graph has cycles which means the dependencies are unresolvable (i.e. there is no order which the models can be called that will work). In this case it is possible to visually inspect the graph using ``dependency_graph``. See Also -------- dependency_graph dependency_map
def splitterfields(data, commdct):
    """Get the Connector:Splitter fields needed to diagram it."""
    objkey = "Connector:Splitter".upper()
    lists = splittermixerfieldlists(data, commdct, objkey)
    return extractfields(data, commdct, objkey, lists)
get splitter fields to diagram it
def preallocate_memory(self, capacity):
    """Preallocate memory to store audio samples, avoiding repeated
    reallocations/copies across consecutive append operations.

    If ``self.__samples`` is not initialized, it becomes an array of
    ``capacity`` zeros. Otherwise it is resized to ``capacity`` samples
    (shrinking retains the first ``capacity`` values).

    :param int capacity: the new capacity, in number of samples
    :raises ValueError: if ``capacity`` is negative
    """
    if capacity < 0:
        raise ValueError(u"The capacity value cannot be negative")
    if self.__samples is None:
        self.log(u"Not initialized")
        self.__samples = numpy.zeros(capacity)
        self.__samples_length = 0
    else:
        self.log([u"Previous sample length was (samples): %d", self.__samples_length])
        self.log([u"Previous sample capacity was (samples): %d", self.__samples_capacity])
        # NOTE(review): numpy.resize pads growth with repeated copies of
        # the data, not zeros -- confirm appends always overwrite the
        # region past __samples_length before it is read.
        self.__samples = numpy.resize(self.__samples, capacity)
        self.__samples_length = min(self.__samples_length, capacity)
    # Record the new capacity (applies to both branches above).
    self.__samples_capacity = capacity
    self.log([u"Current sample capacity is (samples): %d", self.__samples_capacity])
Preallocate memory to store audio samples, to avoid repeated new allocations and copies while performing several consecutive append operations. If ``self.__samples`` is not initialized, it will become an array of ``capacity`` zeros. If ``capacity`` is larger than the current capacity, the current ``self.__samples`` will be extended with zeros. If ``capacity`` is smaller than the current capacity, the first ``capacity`` values of ``self.__samples`` will be retained. :param int capacity: the new capacity, in number of samples :raises: ValueError: if ``capacity`` is negative .. versionadded:: 1.5.0
def auto_display_limits(self):
    """Calculate best display limits and set them."""
    calculated = self.get_calculated_display_values(True).display_data_and_metadata
    data = calculated.data if calculated else None
    if data is not None:
        # Use nan-aware extrema so NaNs in the data don't poison the range.
        self.display_limits = numpy.nanmin(data), numpy.nanmax(data)
Calculate best display limits and set them.
def get_length(self, y):
    """Get true length of each padded row in ``y``.

    Args:
        y (list): padded list.

    Returns:
        list: true length of each row.
    """
    return [self.find_pad_index(row) for row in y]
Get true length of y. Args: y (list): padded list. Returns: lens: true length of y. Examples: >>> y = [[1, 0, 0], [1, 1, 0], [1, 1, 1]] >>> self.get_length(y) [1, 2, 3]
def _replace_oov(original_vocab, line): return u" ".join( [word if word in original_vocab else u"UNK" for word in line.split()])
Replace out-of-vocab words with "UNK". This maintains compatibility with published results. Args: original_vocab: a set of strings (The standard vocabulary for the dataset) line: a unicode string - a space-delimited sequence of words. Returns: a unicode string - a space-delimited sequence of words.
def aa3_to_aa1(seq):
    """Convert a string of 3-letter amino acids to 1-letter amino acids.

    >>> aa3_to_aa1("CysAlaThrSerAlaArgGluLeuAlaMetGlu")
    'CATSARELAME'

    >>> aa3_to_aa1(None)

    """
    if seq is None:
        return None
    # Walk the sequence three characters at a time and map each triplet.
    triplets = (seq[i:i + 3] for i in range(0, len(seq), 3))
    return "".join(aa3_to_aa1_lut[code] for code in triplets)
convert string of 3-letter amino acids to 1-letter amino acids >>> aa3_to_aa1("CysAlaThrSerAlaArgGluLeuAlaMetGlu") 'CATSARELAME' >>> aa3_to_aa1(None)
def intersect(lst1, lst2):
    """Return the intersection of two sequences.

    Hashable inputs (e.g. tuples) use a fast set intersection and return
    a set; otherwise order-preserving membership filtering via ``unique``
    is used.

    .. code-block:: jinja

        {{ my_list | intersect([2, 4, 6]) }}
    """
    # BUGFIX: collections.Hashable was removed in Python 3.10; the ABC
    # has lived in collections.abc since Python 3.3.
    if isinstance(lst1, collections.abc.Hashable) \
            and isinstance(lst2, collections.abc.Hashable):
        return set(lst1) & set(lst2)
    return unique([ele for ele in lst1 if ele in lst2])
Returns the intersection of two lists. .. code-block:: jinja {%- set my_list = [1, 2, 3, 4] -%} {{ my_list | intersect([2, 4, 6]) }} will be rendered as: .. code-block:: text [2, 4]
def append(self, data):
    """Appends the given data to the buffer, and triggers all connected
    monitors, if any of them match the buffer content.

    :type data: str
    :param data: The data that is appended.
    """
    self.io.write(data)
    if not self.monitors:
        return
    buf = str(self)
    for item in self.monitors:
        regex_list, callback, bytepos, limit = item
        # Never search further back than `limit` bytes from the end.
        bytepos = max(bytepos, len(buf) - limit)
        for i, regex in enumerate(regex_list):
            match = regex.search(buf, bytepos)
            if match is not None:
                # Persist where this monitor's match ended so the next
                # append resumes searching there (item is a mutable list).
                item[2] = match.end()
                callback(i, match)
Appends the given data to the buffer, and triggers all connected monitors, if any of them match the buffer content. :type data: str :param data: The data that is appended.
def inner_join(df, other, **kwargs):
    """Joins on values present in both DataFrames.

    Args:
        df (pandas.DataFrame): Left DataFrame (passed in via pipe).
        other (pandas.DataFrame): Right DataFrame.

    Kwargs:
        by (str or list): Columns to join on.
        suffixes (list): Suffixes appended to overlapping column names.

    Returns:
        pandas.DataFrame: the inner-joined frame.
    """
    left_on, right_on, suffixes = get_join_parameters(kwargs)
    return df.merge(other, how='inner', left_on=left_on,
                    right_on=right_on, suffixes=suffixes)
Joins on values present in both DataFrames. Args: df (pandas.DataFrame): Left DataFrame (passed in via pipe) other (pandas.DataFrame): Right DataFrame Kwargs: by (str or list): Columns to join on. If a single string, will join on that column. If a list of lists which contain strings or integers, the right/left columns to join on. suffixes (list): String suffixes to append to column names in left and right DataFrames. Example: a >> inner_join(b, by='x1') x1 x2 x3 0 A 1 True 1 B 2 False
def _get_id(self, player): name_tag = player('td[data-stat="player"] a') name = re.sub(r'.*/players/./', '', str(name_tag)) return re.sub(r'\.shtml.*', '', name)
Parse the player ID. Given a PyQuery object representing a single player on the team roster, parse the player ID and return it as a string. Parameters ---------- player : PyQuery object A PyQuery object representing the player information from the roster table. Returns ------- string Returns a string of the player ID.
def rewire_inputs(data_list):
    """Rewire inputs of the copied data objects in ``data_list``.

    ``data_list`` pairs originals with copies:
    ``[{'original': original, 'copy': copy}]``. Wherever a copy's input
    references one of the originals (by id), the reference is replaced
    with the id of the corresponding copy, and the mutated copy is saved.

    :param data_list: list of {'original': ..., 'copy': ...} bundles.
    :return: the same list, with copies' inputs rewired in place.
    """
    if len(data_list) < 2:
        # With fewer than two objects there is nothing to cross-reference.
        return data_list
    # Map: original object id -> copied object id.
    mapped_ids = {bundle['original'].id: bundle['copy'].id for bundle in data_list}
    for bundle in data_list:
        updated = False
        copy = bundle['copy']
        for field_schema, fields in iterate_fields(copy.input, copy.process.input_schema):
            name = field_schema['name']
            value = fields[name]
            if field_schema['type'].startswith('data:') and value in mapped_ids:
                # Single data reference: swap in the copied object's id.
                fields[name] = mapped_ids[value]
                updated = True
            elif field_schema['type'].startswith('list:data:') and any([id_ in mapped_ids for id_ in value]):
                # List of references: swap each mapped id, keep the rest.
                fields[name] = [mapped_ids[id_] if id_ in mapped_ids else id_ for id_ in value]
                updated = True
        if updated:
            copy.save()
    return data_list
Rewire inputs of provided data objects. Input parameter is a list of original and copied data object model instances: ``[{'original': original, 'copy': copy}]``. This function finds which objects reference other objects (in the list) on the input and replaces original objects with the copies (mutates copies' inputs).
def remove_from(self, target, ctx=None):
    """Remove self annotation from target annotations.

    :param target: target from where remove self annotation.
    :param ctx: target ctx.
    :raises TypeError: if ``target`` is not hashable.
    """
    annotations_key = Annotation.__ANNOTATIONS_KEY__
    try:
        local_annotations = get_local_property(
            target, annotations_key, ctx=ctx
        )
    except TypeError:
        raise TypeError('target {0} must be hashable'.format(target))
    if local_annotations is not None:
        if target in self.targets:
            self.targets.remove(target)
        # The annotation may appear several times; remove every occurrence.
        while self in local_annotations:
            local_annotations.remove(self)
        if not local_annotations:
            # No annotations left on target: drop the property entirely.
            del_properties(target, annotations_key)
Remove self annotation from target annotations. :param target: target from where remove self annotation. :param ctx: target ctx.
def save_sentences(twg, stmts, filename, agent_limit=300):
    """Write evidence sentences for stmts with ungrounded agents to a CSV.

    Parameters
    ----------
    twg : list of tuple
        Ungrounded agent texts with mention counts, sorted descending.
    stmts : list of indra.statements.Statement
    filename : str
        Path to output file.
    agent_limit : Optional[int]
        Number of top agents (by count) to include.
    """
    sentences = []
    unmapped_texts = [entry[0] for entry in twg]
    logger.info('Getting sentences for top %d unmapped agent texts.' %
                agent_limit)
    for counter, text in enumerate(unmapped_texts, start=1):
        agent_sentences = get_sentences_for_agent(text, stmts)
        sentences.extend((text,) + tup for tup in agent_sentences)
        if counter >= agent_limit:
            break
    write_unicode_csv(filename, sentences, delimiter=',', quotechar='"',
                      quoting=csv.QUOTE_MINIMAL, lineterminator='\r\n')
Write evidence sentences for stmts with ungrounded agents to csv file. Parameters ---------- twg: list of tuple list of tuples of ungrounded agent_texts with counts of the number of times they are mentioned in the list of statements. Should be sorted in descending order by the counts. This is of the form output by the function ungrounded texts. stmts: list of :py:class:`indra.statements.Statement` filename : str Path to output file agent_limit : Optional[int] Number of agents to include in output file. Takes the top agents by count.
def assign(self, V, py):
    """Store python value in Value.

    String values are matched against the available choices and stored
    as an index; anything else is stored directly.
    """
    if isinstance(py, (bytes, unicode)):
        choices = V['value.choices'] or self._choices
        for index, choice in enumerate(choices):
            if py == choice:
                V['value.index'] = index
                return
    # Not a string, or no matching choice found: store as-is.
    V['value.index'] = py
Store python value in Value
def get_stored_metadata(self, temp_ver):
    """Retrieve the metadata for the given template version from the store.

    Args:
        temp_ver (TemplateVersion): template version to retrieve the
            metadata for.

    Returns:
        dict: the metadata of the given template version.
    """
    path = self._prefixed('%s.metadata' % temp_ver.name)
    with open(path) as metadata_file:
        return json.load(metadata_file)
Retrieves the metadata for the given template version from the store Args: temp_ver (TemplateVersion): template version to retrieve the metadata for Returns: dict: the metadata of the given template version
def run_action(self, event):
    """Executes the breakpoint action callback, if any was set.

    @type event: L{Event}
    @param event: Debug event triggered by the breakpoint.

    @rtype: bool
    @return: True when there is no action or it succeeds (truth value of
        its result); False when the callback raises.
    """
    action = self.get_action()
    if action is None:
        return True
    try:
        return bool(action(event))
    except Exception:
        # BUGFIX: traceback.format_exc() takes an optional *limit* int,
        # not an exception instance; the old format_exc(e) passed the
        # exception object as the limit, which is incorrect.
        msg = ("Breakpoint action callback %r"
               " raised an exception: %s")
        msg = msg % (action, traceback.format_exc())
        warnings.warn(msg, BreakpointCallbackWarning)
        return False
Executes the breakpoint action callback, if any was set. @type event: L{Event} @param event: Debug event triggered by the breakpoint.
def reduce(self, func):
    """Return a new DStream in which each RDD has a single element
    generated by reducing each RDD of this DStream.
    """
    # Key every element under None so reduceByKey folds the whole RDD
    # into one value, then strip the dummy key.
    keyed = self.map(lambda x: (None, x))
    reduced = keyed.reduceByKey(func, 1)
    return reduced.map(lambda kv: kv[1])
Return a new DStream in which each RDD has a single element generated by reducing each RDD of this DStream.
def safe_values(self, value):
    """Parse non-string values that will not serialize.

    Dates become formatted strings, bytes are decoded to UTF-8, and
    Decimals become floats; everything else passes through unchanged.
    """
    if isinstance(value, datetime.date):
        try:
            cfg = current_app.config['DATETIME']
            fmt = '{0}{1}{2}'.format(cfg['DATE_FORMAT'],
                                     cfg['SEPARATOR'],
                                     cfg['TIME_FORMAT'])
            return value.strftime(fmt)
        except RuntimeError:
            # Outside an application context: fall back to ISO-ish format.
            return value.strftime('%Y-%m-%d %H:%M:%S')
    if isinstance(value, bytes):
        return value.decode('utf-8')
    if isinstance(value, decimal.Decimal):
        return float(value)
    return value
Parse non-string values that will not serialize
def data(self):
    """Return File Occurrence data."""
    if self._children:
        # Collect child data under the 'children' key (created on demand).
        children = self._action_data.setdefault('children', [])
        children.extend(child.data for child in self._children)
    return self._action_data
Return File Occurrence data.
def keys_of_type_exist(self, *keys):
    """Check if keys exist in context and if their types are as expected.

    Args:
        *keys: tuples of (key, expected_type).

    Returns:
        Tuple of ContextItemInfo namedtuples, one per input key, in the
        same order. With a single key, unpack with a trailing comma:
        ``a, = context.keys_of_type_exist(('a', str))``.
    """
    infos = []
    for key, expected_type in keys:
        present = key in self.keys()
        value = self[key] if present else None
        infos.append(ContextItemInfo(
            key=key,
            key_in_context=present,
            expected_type=expected_type,
            is_expected_type=isinstance(value, expected_type) if present else None,
            has_value=present and value is not None,
        ))
    return tuple(infos)
Check if keys exist in context and if types are as expected. Args: *keys: *args for keys to check in context. Each arg is a tuple (str, type) Returns: Tuple of namedtuple ContextItemInfo, same order as *keys. ContextItemInfo(key, key_in_context, expected_type, is_expected_type) Remember if there is only one key in keys, the return assignment needs an extra comma to remind python that it's a tuple: # one a, = context.keys_of_type_exist('a') # > 1 a, b = context.keys_of_type_exist('a', 'b')
def getObjectList(IDs, date, pos):
    """Returns a list of objects, one per ID, for the given date/position."""
    return ObjectList([getObject(ID, date, pos) for ID in IDs])
Returns a list of objects.
def select(self, pyliste):
    """Build sub-pdf with page numbers in 'list'.

    :param pyliste: sequence of page numbers to retain.
    :raises ValueError: if the document is closed or encrypted.
    :return: the result of the underlying MuPDF select call.
    """
    if self.isClosed or self.isEncrypted:
        raise ValueError("operation illegal for closed / encrypted doc")
    val = _fitz.Document_select(self, pyliste)
    # The page tree changed: drop cached page references and reload
    # document data so stale pages are not used.
    self._reset_page_refs()
    self.initData()
    return val
Build sub-pdf with page numbers in 'list'.
def _next_non_masked_element(a, idx): try: next_idx = idx + a[idx:].mask.argmin() if ma.is_masked(a[next_idx]): return None, None else: return next_idx, a[next_idx] except (AttributeError, TypeError, IndexError): return idx, a[idx]
Return the next non masked element of a masked array. If an array is masked, return the next non-masked element (if the given index is masked). If no other unmasked points are after the given masked point, returns none. Parameters ---------- a : array-like 1-dimensional array of numeric values idx : integer index of requested element Returns ------- Index of next non-masked element and next non-masked element
def detach(self):
    """Detaches this Volume if it is attached."""
    url = '{}/detach'.format(Volume.api_endpoint)
    self._client.post(url, model=self)
    return True
Detaches this Volume if it is attached
def delete(key, host=DEFAULT_HOST, port=DEFAULT_PORT, time=DEFAULT_TIME):
    """Delete a key from memcache server.

    CLI Example:

    .. code-block:: bash

        salt '*' memcached.delete <key>

    :param key: key to delete.
    :param host: memcached host.
    :param port: memcached port.
    :param time: delay (seconds) before the delete takes effect; must be
        an integer.
    :raises SaltInvocationError: if ``time`` is not an integer.
    :return: True if the key was deleted.
    """
    if not isinstance(time, six.integer_types):
        raise SaltInvocationError('\'time\' must be an integer')
    conn = _connect(host, port)
    # Raises if the server is unreachable before attempting the delete.
    _check_stats(conn)
    return bool(conn.delete(key, time))
Delete a key from memcache server CLI Example: .. code-block:: bash salt '*' memcached.delete <key>
def lstlti(x, n, array):
    """Given a number x and an array of non-decreasing ints, find the index
    of the largest array element less than x.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/lstlti_c.html

    :param x: Value to search against.
    :param n: Number of elements in array.
    :param array: Array of possible lower bounds.
    :return: index of the last element of array that is less than x.
    :rtype: int
    """
    array_p = stypes.toIntVector(array)
    return libspice.lstlti_c(ctypes.c_int(x), ctypes.c_int(n), array_p)
Given a number x and an array of non-decreasing int, find the index of the largest array element less than x. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/lstlti_c.html :param x: Value to search against :type x: int :param n: Number elements in array :type n: int :param array: Array of possible lower bounds :type array: list :return: index of the last element of array that is less than x. :rtype: int
def emit(self, **kwargs):
    """Emit signal by calling all connected slots.

    The arguments supplied have to match the signal definition.

    Args:
        kwargs: Keyword arguments to be passed to connected slots.

    Raises:
        InvalidEmit: If arguments don't match signal specification.
    """
    # Validate the kwargs against the signal specification first.
    self._ensure_emit_kwargs(kwargs)
    for receiver in self.slots:
        receiver(**kwargs)
Emit signal by calling all connected slots. The arguments supplied have to match the signal definition. Args: kwargs: Keyword arguments to be passed to connected slots. Raises: :exc:`InvalidEmit`: If arguments don't match signal specification.
def service_create(image=str, name=str, command=str, hostname=str, replicas=int, target_port=int, published_port=int):
    # NOTE(review): the defaults here are the *type objects* (str/int),
    # not sensible values -- looks like a misunderstanding of annotations;
    # the except TypeError below papers over it. Confirm before changing.
    """Create a Docker Swarm service.

    :param image: the Docker image.
    :param name: the service name.
    :param command: the command to run in the container at launch.
    :param hostname: the hostname of the containers.
    :param replicas: how many replicas to run in the swarm.
    :param target_port: the target port on the container.
    :param published_port: the port published on the host/OS.
    :return: dict describing the created service, or an Error entry.
    """
    try:
        salt_return = {}
        replica_mode = docker.types.ServiceMode('replicated', replicas=replicas)
        ports = docker.types.EndpointSpec(ports={target_port: published_port})
        __context__['client'].services.create(name=name,
                                              image=image,
                                              command=command,
                                              mode=replica_mode,
                                              endpoint_spec=ports)
        echoback = __context__['server_name'] + ' has a Docker Swarm Service running named ' + name
        salt_return.update({'Info': echoback,
                            'Minion': __context__['server_name'],
                            'Name': name,
                            'Image': image,
                            'Command': command,
                            'Hostname': hostname,
                            'Replicas': replicas,
                            'Target_Port': target_port,
                            'Published_Port': published_port})
    except TypeError:
        salt_return = {}
        salt_return.update({'Error': 'Please make sure you are passing arguments correctly '
                                     '[image, name, command, hostname, replicas, target_port and published_port]'})
    return salt_return
Create a Docker Swarm service. image The Docker image name The name of the service command The docker command to run in the container at launch hostname The hostname of the containers replicas How many replicas you want running in the swarm target_port The target port on the container published_port The port that is published on the host/OS CLI Example: .. code-block:: bash salt '*' swarm.service_create image=httpd name=Test_Service \ command=None hostname=salthttpd replicas=6 target_port=80 published_port=80
def sequence_type(seq):
    """Validates a coral.sequence data type.

    :param seq: input sequence object.
    :returns: The material - 'dna', 'rna', or 'peptide'.
    :rtype: str
    :raises ValueError: if ``seq`` is not a recognized coral sequence.
    """
    if isinstance(seq, coral.DNA):
        return 'dna'
    if isinstance(seq, coral.RNA):
        return 'rna'
    if isinstance(seq, coral.Peptide):
        return 'peptide'
    raise ValueError('Input was not a recognized coral.sequence object.')
Validates a coral.sequence data type. :param sequence_in: input DNA sequence. :type sequence_in: any :returns: The material - 'dna', 'rna', or 'peptide'. :rtype: str :raises: ValueError
def ParseMultiple(self, result_dicts):
    """Parse WMI output dicts into HardwareInfo records (generator)."""
    for entry in result_dicts:
        yield rdf_client.HardwareInfo(
            serial_number=entry["IdentifyingNumber"],
            system_manufacturer=entry["Vendor"])
Parse the WMI output to get Identifying Number.
def _ReadTable(self, tables, file_object, table_offset):
    """Reads the table.

    Args:
      tables (dict[int, KeychainDatabaseTable]): tables per identifier.
      file_object (file): file-like object.
      table_offset (int): offset of the table relative to the start of
          the file.

    Raises:
      ParseError: if the table cannot be read.
    """
    table_header = self._ReadTableHeader(file_object, table_offset)
    for record_offset in table_header.record_offsets:
        if record_offset == 0:
            # Zero entries are unused record slots.
            continue
        # Record offsets are relative to the table start.
        record_offset += table_offset
        # Dispatch schema records to their dedicated readers; everything
        # else is read as a generic record of the table's record type.
        if table_header.record_type == self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_INFO:
            self._ReadRecordSchemaInformation(tables, file_object, record_offset)
        elif table_header.record_type == (
                self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_INDEXES):
            self._ReadRecordSchemaIndexes(tables, file_object, record_offset)
        elif table_header.record_type == (
                self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_ATTRIBUTES):
            self._ReadRecordSchemaAttributes(tables, file_object, record_offset)
        else:
            self._ReadRecord(
                tables, file_object, record_offset, table_header.record_type)
Reads the table. Args: tables (dict[int, KeychainDatabaseTable]): tables per identifier. file_object (file): file-like object. table_offset (int): offset of the table relative to the start of the file. Raises: ParseError: if the table cannot be read.
def get_base(self, option):
    """Parse the base command option.

    Can be supplied as a 3-character currency code or the name of an
    UPPERCASE settings variable. When absent, falls back to the
    CURRENCIES_BASE / SHOP_DEFAULT_CURRENCY settings, then 'USD'.

    :return: (code, explicit) where explicit is False only for the
        final 'USD' fallback.
    :raises ImproperlyConfigured: for an invalid currency code.
    """
    if option:
        if option.isupper():
            if len(option) > 3:
                # Longer uppercase names are settings variable names.
                return getattr(settings, option), True
            if len(option) == 3:
                return option, True
        raise ImproperlyConfigured("Invalid currency code found: %s" % option)
    for attr in ('CURRENCIES_BASE', 'SHOP_DEFAULT_CURRENCY'):
        try:
            return getattr(settings, attr), True
        except AttributeError:
            continue
    return 'USD', False
Parse the base command option. Can be supplied as a 3 character code or a settings variable name If base is not supplied, looks for settings CURRENCIES_BASE and SHOP_DEFAULT_CURRENCY