code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def update_dns_ha_resource_params(resources, resource_params, relation_id=None, crm_ocf='ocf:maas:dns'):
    """Configure DNS-HA resources and update the resource dicts for the HA relation.

    @param resources: dict of resources (usually instantiated in ha_joined());
        updated in place.
    @param resource_params: dict of resource parameters (usually instantiated
        in ha_joined()); updated in place.
    @param relation_id: Relation ID of the ha relation.
    @param crm_ocf: Corosync Open Cluster Framework resource agent to use for
        DNS HA.
    """
    _relation_data = {'resources': {}, 'resource_params': {}}
    update_hacluster_dns_ha(charm_name(), _relation_data, crm_ocf)
    resources.update(_relation_data['resources'])
    resource_params.update(_relation_data['resource_params'])
    # NOTE(review): 'groups' is not initialised above — presumably added by
    # update_hacluster_dns_ha(); confirm, otherwise this raises KeyError.
    relation_set(relation_id=relation_id, groups=_relation_data['groups'])
Configure DNS-HA resources based on provided configuration and update resource dictionaries for the HA relation. @param resources: Pointer to dictionary of resources. Usually instantiated in ha_joined(). @param resource_params: Pointer to dictionary of resource parameters. Usually instantiated in ha_joined() @param relation_id: Relation ID of the ha relation @param crm_ocf: Corosync Open Cluster Framework resource agent to use for DNS HA
async def create(cls, block_device: BlockDevice, size: int):
    """Create a partition on a block device.

    :param block_device: BlockDevice to create the partition on.
    :type block_device: `BlockDevice`
    :param size: The size of the partition in bytes; must be positive.
    :type size: `int`
    :raises TypeError: if *block_device* is not a BlockDevice.
    :raises ValueError: if *size* is missing, zero, or negative.
    """
    if not isinstance(block_device, BlockDevice):
        raise TypeError(
            'block_device must be a BlockDevice, not %s' % (
                type(block_device).__name__))
    params = {
        'system_id': block_device.node.system_id,
        'device_id': block_device.id,
    }
    # Reject zero/None and also negative sizes; the original falsy check
    # (`if not size`) silently accepted negative values.
    if not size or size <= 0:
        raise ValueError("size must be provided and greater than zero.")
    params['size'] = size
    return cls._object(await cls._handler.create(**params))
Create a partition on a block device. :param block_device: BlockDevice to create the paritition on. :type block_device: `BlockDevice` :param size: The size of the partition in bytes. :type size: `int`
def createSomeItems(store, itemType, values, counter):
    """Create one instance of ``itemType`` in ``store`` per element of *counter*.

    :param store: store passed to each constructor as ``store=``.
    :param itemType: item class (or factory) to instantiate.
    :param values: keyword attributes applied to every created item.
    :param counter: any iterable; its length determines how many items are made.
    """
    for _ in counter:
        itemType(store=store, **values)
Create some instances of a particular type in a store.
def user_info(self, kv):
    """Set a single entry of the private user-info dict from a (key, value) tuple.

    :param kv: two-element tuple ``(key, value)``.
    """
    entry_key, entry_value = kv
    self.__user_info[entry_key] = entry_value
Sets user_info dict entry through a tuple.
def process_items(r, keys, timeout, limit=0, log_every=1000, wait=.1):
    """Process items from a redis queue until *limit* items are handled.

    Parameters
    ----------
    r : Redis
        Redis connection instance.
    keys : list
        List of keys to BLPOP the items from.
    timeout : int
        BLPOP read timeout in seconds.
    limit : int
        Stop after this many items; 0 means run forever.
    log_every : int
        Log progress every this many processed items.
    wait : float
        Seconds to sleep when BLPOP returns nothing.
    """
    limit = limit or float('inf')  # 0 -> unbounded
    processed = 0
    while processed < limit:
        ret = r.blpop(keys, timeout)
        if ret is None:
            # Queue empty: back off briefly before polling again.
            time.sleep(wait)
            continue
        source, data = ret
        try:
            item = json.loads(data)
        except Exception:
            logger.exception("Failed to load item:\n%r", pprint.pformat(data))
            continue
        try:
            name = item.get('name') or item.get('title')
            url = item.get('url') or item.get('link')
            logger.debug("[%s] Processing item: %s <%s>", source, name, url)
        except KeyError:
            # NOTE(review): dict.get() never raises KeyError, so this handler
            # looks unreachable unless `item` is a custom mapping — confirm.
            logger.exception("[%s] Failed to process item:\n%r", source, pprint.pformat(item))
            continue
        processed += 1
        if processed % log_every == 0:
            logger.info("Processed %s items", processed)
Process items from a redis queue. Parameters ---------- r : Redis Redis connection instance. keys : list List of keys to read the items from. timeout: int Read timeout.
def is_str(string):
    """Python 2 and 3 compatible string checker.

    Args:
        string: object to test.

    Returns:
        bool: True if *string* is a str (py3) or basestring (py2).
    """
    if sys.version_info[:2] < (3, 0):
        return isinstance(string, basestring)
    return isinstance(string, str)
Python 2 and 3 compatible string checker. Args: string (str | basestring): the string to check Returns: bool: True or False
def chunker(iterable, size=5, fill=''):
    """Yield successive chunks of *iterable*, padding the last with *fill*.

    Parameters
    ----------
    iterable
        A list (must support len() and slicing).
    size
        The size of each chunk.
    fill
        Padding value used when the final chunk is shorter than *size*.

    Yields
    ------
    chunk
        A chunk of length *size*.
    """
    total_slices = len(iterable) // size + 1
    for slice_idx in range(total_slices):
        chunk = iterable[slice_idx * size:(slice_idx + 1) * size]
        if not chunk:
            break
        shortfall = size - len(chunk)
        if shortfall:
            yield chunk + [fill] * shortfall
        else:
            yield chunk
Chunk the iterable. Parameters ---------- iterable A list. size The size of the chunks. fill Fill value if the chunk is not of length 'size'. Yields ------- chunk A chunk of length 'size'. Examples ------- >>> l = list(range(6)) >>> chunks = list(chunker(l, size=4, fill='')) >>> chunks == [[0, 1, 2, 3], [4, 5, '', '']] True
def assert_keys_have_values(self, caller, *keys):
    """Check that all *keys* are in context and each has a value.

    Args:
        caller: string, calling function name — used for informational messages.
        *keys: each of these keys is checked in context.

    Raises:
        Whatever ``assert_key_has_value`` raises for a missing/empty key.
    """
    for single_key in keys:
        self.assert_key_has_value(single_key, caller)
Check that keys list are all in context and all have values. Args: *keys: Will check each of these keys in context caller: string. Calling function name - just used for informational messages Raises: KeyNotInContextError: Key doesn't exist KeyInContextHasNoValueError: context[key] is None AssertionError: if *keys is None
def _find_gvcf_blocks(vcf_file, region, tmp_dir):
    """Retrieve gVCF blocks within the current evaluation region.

    gvcfgenotyper does not support calling larger regions with individual
    coverage blocks, so the big region is split into potentially multiple
    smaller ones.

    :param vcf_file: gVCF input file path.
    :param region: region string 'chrom:start-end'.
    :param tmp_dir: scratch directory for intermediate BED files.
    :returns: list of region strings 'chrom:start-end'.
    """
    # Write the target region as a single-line BED file.
    region_file = os.path.join(tmp_dir, "cur_region.bed")
    with open(region_file, "w") as out_handle:
        chrom, coords = region.split(":")
        start, end = coords.split("-")
        out_handle.write("\t".join([chrom, start, end]) + "\n")
    final_file = os.path.join(tmp_dir, "split_regions.bed")
    # Intersect the gVCF coverage blocks with the target region via bedtools.
    cmd = "gvcf_regions.py {vcf_file} | bedtools intersect -a - -b {region_file} > {final_file}"
    do.run(cmd.format(**locals()))
    regions = []
    with open(final_file) as in_handle:
        for line in in_handle:
            chrom, start, end = line.strip().split("\t")
            regions.append("%s:%s-%s" % (chrom, start, end))
    return regions
Retrieve gVCF blocks within our current evaluation region. gvcfgenotyper does not support calling larger regions with individual coverage blocks, so we split our big region into potentially multiple.
def load_from_dict(self, conf_dict=None):
    """Load the configuration from a dictionary.

    Resets to defaults first, then merges *conf_dict* on top and refreshes
    the python paths.

    Args:
        conf_dict (dict): Dictionary with the configuration.
    """
    self.set_to_default()
    self._update_dict(self._config, conf_dict)
    self._update_python_paths()
Load the configuration from a dictionary. Args: conf_dict (dict): Dictionary with the configuration.
def make_headers(context: TraceContext) -> Headers:
    """Create a dict with zipkin propagation headers from the trace context.

    The parent-id header is included only when the context has a parent.
    """
    sampled_flag = '1' if context.sampled else '0'
    headers = {
        TRACE_ID_HEADER: context.trace_id,
        SPAN_ID_HEADER: context.span_id,
        FLAGS_HEADER: '0',
        SAMPLED_ID_HEADER: sampled_flag,
    }
    parent = context.parent_id
    if parent is not None:
        headers[PARENT_ID_HEADER] = parent
    return headers
Creates dict with zipkin headers from supplied trace context.
def validate(self):
    """Ensure that the Backtrack block is valid.

    Raises:
        TypeError: if ``self.optional`` is not a bool.
        Whatever ``validate_marked_location`` raises for a bad location.
    """
    validate_marked_location(self.location)
    if not isinstance(self.optional, bool):
        raise TypeError(u'Expected bool optional, got: {} {}'.format(
            type(self.optional).__name__, self.optional))
Ensure that the Backtrack block is valid.
def grepPDF(self, path):
    """Parse a PDF file's text content for keyword matches.

    Args:
        path: PDF file path.

    Returns:
        set: unique, lower-cased occurrences of every keyword match.
    """
    with open(path, 'rb') as pdf_file_obj:
        match = set()
        text = ''
        pdf_reader = PyPDF2.PdfFileReader(pdf_file_obj)
        pages = pdf_reader.numPages
        # Accumulate the text of every page, then scan it in one pass.
        for page in range(pages):
            page_obj = pdf_reader.getPage(page)
            text += '\n' + page_obj.extractText()
        # self._keywords is presumably a regex pattern — confirm against the class.
        match.update(set(x.lower() for x in re.findall(
            self._keywords, text, re.IGNORECASE)))
    return match
Parse PDF files text content for keywords. Args: path: PDF file path. Returns: match: set of unique occurrences of every match.
def rename_file(self, old_path, new_path):
    """Rename a file or directory.

    NOTE: This method is unfortunately named on the base class — it actually
    moves a file or a directory.
    """
    self.log.debug("S3contents.GenericManager: Init rename of '%s' to '%s'", old_path, new_path)
    if self.file_exists(new_path) or self.dir_exists(new_path):
        # Destination already taken — delegate to the error helper.
        self.already_exists(new_path)
    elif self.file_exists(old_path) or self.dir_exists(old_path):
        self.log.debug("S3contents.GenericManager: Actually renaming '%s' to '%s'", old_path, new_path)
        self.fs.mv(old_path, new_path)
    else:
        # Source does not exist at all.
        self.no_such_entity(old_path)
Rename a file or directory. NOTE: This method is unfortunately named on the base class. It actually moves a file or a directory.
def prepare_env(org):
    """Example: configure an organization's environment from scratch.

    Adds keystore and workflow services, a VM-provisioning policy, and an
    aws-ec2 provider to the 'default' environment.

    :returns: the organization id.
    """
    key_service = org.service(type='builtin:cobalt_secure_store', name='Keystore')
    wf_service = org.service(type='builtin:workflow_service', name='Workflow', parameters='{}')
    env = org.environment(name='default')
    env.clean()
    env.add_service(key_service)
    env.add_service(wf_service)
    env.add_policy(
        {"action": "provisionVms",
         "parameter": "publicKeyId",
         "value": key_service.regenerate()['id']})
    # KEY / SECRET_KEY are module-level AWS credentials — presumably loaded
    # from the environment; confirm before running.
    access = {
        "provider": "aws-ec2",
        "usedEnvironments": [],
        "ec2SecurityGroup": "default",
        "providerCopy": "aws-ec2",
        "name": "test-provider",
        "jcloudsIdentity": KEY,
        "jcloudsCredential": SECRET_KEY,
        "jcloudsRegions": "us-east-1"
    }
    prov = org.provider(access)
    env.add_provider(prov)
    return org.organizationId
Example shows how to configure environment from scratch
def process_metadata(pkg_name, metadata_lines):
    """Create a dictionary containing the relevant TPIP fields for a package.

    :param str pkg_name: name of the package
    :param metadata_lines: metadata resource as a list of non-blank,
        non-comment lines
    :returns: dictionary of package fields
    :rtype: Dict[str, str]
    """
    tpip_pkg = dict(
        PkgName=pkg_name,
        PkgType='python package',
        PkgMgrURL='https://pypi.org/project/%s/' % pkg_name,
    )
    for line in metadata_lines:
        get_package_info_from_line(tpip_pkg, line)
    # Fold the author e-mail into the originator field: "Name <email>".
    if 'PkgAuthorEmail' in tpip_pkg:
        tpip_pkg['PkgOriginator'] = '%s <%s>' % (
            tpip_pkg['PkgOriginator'],
            tpip_pkg.pop('PkgAuthorEmail')
        )
    # Prefer an explicitly declared license; otherwise join the unique
    # classifier-derived candidates.
    explicit_license = license_cleanup(tpip_pkg.get('PkgLicense'))
    license_candidates = tpip_pkg.pop('PkgLicenses', [])
    if explicit_license:
        tpip_pkg['PkgLicense'] = explicit_license
    else:
        tpip_pkg['PkgLicense'] = ' '.join(set(license_candidates))
    return tpip_pkg
Create a dictionary containing the relevant fields. The following is an example of the generated dictionary: :Example: { 'name': 'six', 'version': '1.11.0', 'repository': 'pypi.python.org/pypi/six', 'licence': 'MIT', 'classifier': 'MIT License' } :param str pkg_name: name of the package :param metadata_lines: metadata resource as list of non-blank non-comment lines :returns: Dictionary of each of the fields :rtype: Dict[str, str]
def make_prototype_request(*args, **kwargs):
    """Make a prototype Request for a Matcher.

    Accepts one of: a Request subclass followed by its constructor args, an
    existing Request instance (no further args allowed), or plain Request
    constructor arguments.
    """
    if args and inspect.isclass(args[0]) and issubclass(args[0], Request):
        request_cls, arg_list = args[0], args[1:]
        return request_cls(*arg_list, **kwargs)
    if args and isinstance(args[0], Request):
        # An instance must be passed alone — extra args are ambiguous.
        if args[1:] or kwargs:
            raise_args_err("can't interpret args")
        return args[0]
    return Request(*args, **kwargs)
Make a prototype Request for a Matcher.
def inverse(self):
    """Return the inverse operator, i.e. scaling by ``1 / scalar``.

    Raises:
        ZeroDivisionError: if ``self.scalar`` is 0 (not invertible).
    """
    if self.scalar == 0.0:
        raise ZeroDivisionError('scaling operator not invertible for '
                                'scalar==0')
    return ScalingOperator(self.domain, 1.0 / self.scalar)
Return the inverse operator. Examples -------- >>> r3 = odl.rn(3) >>> vec = r3.element([1, 2, 3]) >>> op = ScalingOperator(r3, 2.0) >>> inv = op.inverse >>> inv(op(vec)) == vec True >>> op(inv(vec)) == vec True
def check(self, user, provider, permission, **kwargs):
    """Check a permission for a user against a social-auth provider backend.

    :param user: django User or UserSocialAuth instance
    :param provider: name of publisher provider
    :param permission: backend-specific permission (e.g. vk: binary mask int,
        facebook: scope string)
    :returns: bool — False when the social user is missing, otherwise the
        backend's verdict.
    """
    try:
        social_user = self._get_social_user(user, provider)
        if not social_user:
            return False
    except SocialUserDoesNotExist:
        return False
    backend = self.get_backend(social_user, provider, context=kwargs)
    return backend.check(permission)
user - django User or UserSocialAuth instance provider - name of publisher provider permission - if backend maintains check permissions vk - binary mask in int format facebook - scope string
def get_param(self, name):
    """Get a WinDivert parameter (see pydivert.Param for the list).

    Wraps ``WinDivertGetParam(handle, param, &value)``.

    :return: the parameter value as an int.
    """
    value = c_uint64(0)
    windivert_dll.WinDivertGetParam(self._handle, name, byref(value))
    return value.value
Get a WinDivert parameter. See pydivert.Param for the list of parameters. The remapped function is WinDivertGetParam:: BOOL WinDivertGetParam( __in HANDLE handle, __in WINDIVERT_PARAM param, __out UINT64 *pValue ); For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_get_param :return: The parameter value.
def _get_handler_set(cls, request, fail_enum, header_proto=None): added = set() handlers = [] for controls in request.sorting: control_bytes = controls.SerializeToString() if control_bytes not in added: added.add(control_bytes) handlers.append( cls._ValueHandler(controls, fail_enum, header_proto)) return handlers
Goes through the list of ClientSortControls and returns a list of unique _ValueHandlers. Maintains order, but drops ClientSortControls that have already appeared to help prevent spamming.
def open_recruitment(self, n=1):
    """Return the initial experiment URL list, recruiting across sub-recruiters.

    Distributes *n* participants over ``self.recruiters(n)``. The first call
    to a given recruiter uses ``open_recruitment()`` (which also supplies a
    message); subsequent calls use ``recruit()``.

    :returns: dict with 'items' (all recruitment URLs) and 'message'
        (joined per-recruiter messages).
    """
    logger.info("Multi recruitment running for {} participants".format(n))
    recruitments = []
    messages = {}
    remaining = n
    for recruiter, count in self.recruiters(n):
        if not count:
            break
        if recruiter.nickname in messages:
            # Already opened for this recruiter — just recruit more.
            result = recruiter.recruit(count)
            recruitments.extend(result)
        else:
            result = recruiter.open_recruitment(count)
            recruitments.extend(result["items"])
            messages[recruiter.nickname] = result["message"]
        remaining -= count
        if remaining <= 0:
            break
    logger.info(
        (
            "Multi-recruited {} out of {} participants, "
            "using {} recruiters."
        ).format(n - remaining, n, len(messages))
    )
    return {"items": recruitments, "message": "\n".join(messages.values())}
Return initial experiment URL list.
def load_tree(self):
    """Load jobs newer than synergy_start_timeperiod into this timetable.

    Iterates through the job collections for each time qualifier and builds
    the corresponding tree level.
    """
    timeperiod = settings.settings['synergy_start_timeperiod']
    # Cast the start timeperiod into each qualifier's granularity.
    yearly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_YEARLY, timeperiod)
    monthly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_MONTHLY, timeperiod)
    daily_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_DAILY, timeperiod)
    hourly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_HOURLY, timeperiod)
    self._build_tree_by_level(QUALIFIER_HOURLY, COLLECTION_JOB_HOURLY, since=hourly_timeperiod)
    self._build_tree_by_level(QUALIFIER_DAILY, COLLECTION_JOB_DAILY, since=daily_timeperiod)
    self._build_tree_by_level(QUALIFIER_MONTHLY, COLLECTION_JOB_MONTHLY, since=monthly_timeperiod)
    self._build_tree_by_level(QUALIFIER_YEARLY, COLLECTION_JOB_YEARLY, since=yearly_timeperiod)
method iterates thru all objects older than synergy_start_timeperiod parameter in job collections and loads them into this timetable
def mark_read(self):
    """Mark notifications as read.

    CURRENTLY UNSUPPORTED by the Kippt API; see
    https://github.com/kippt/api-documentation/blob/master/endpoints/notifications/POST_notifications.md

    Raises:
        NotImplementedError: always, until the API gains support.
    """
    # The original body that POSTed {"action": "mark_seen"} to
    # https://kippt.com/api/notifications sat unreachable after this raise
    # and has been removed; restore it once the API supports the call.
    raise NotImplementedError(
        "The Kippt API does not yet support marking notifications as read."
    )
Mark notifications as read. CURRENT UNSUPPORTED: https://github.com/kippt/api-documentation/blob/master/endpoints/notifications/POST_notifications.md
def remove_variants(self, variants):
    """Remove a list of variants from the positions we are scanning.

    :param variants: iterable of variant objects with a ``chrom`` attribute.
    """
    chroms = set([i.chrom for i in variants])
    for chrom in chroms:
        if self.append_chromosome:
            # Position keys carry a 'chr' prefix in this mode.
            chrom = 'chr%s' % chrom
        # NOTE(review): this tests position keys for membership in the
        # *variants* sequence itself — presumably variant objects compare
        # equal to position keys; confirm, otherwise nothing is deleted.
        to_delete = [pos for pos in self.positions[chrom] if pos in variants]
        for pos in to_delete:
            del self.positions[chrom][pos]
Remove a list of variants from the positions we are scanning
def transliterate(table, text):
    """Transliterate *text* according to the named table.

    *table* looks like a language code but comes from a restricted set:

    - 'sr-Latn': convert Serbian (which may be in Cyrillic) into Latin.
    - 'az-Latn': the same for Azerbaijani Cyrillic to Latin.

    Raises:
        ValueError: for any other table name.
    """
    if table == 'sr-Latn':
        mapping = SR_LATN_TABLE
    elif table == 'az-Latn':
        mapping = AZ_LATN_TABLE
    else:
        raise ValueError("Unknown transliteration table: {!r}".format(table))
    return text.translate(mapping)
Transliterate text according to one of the tables above. `table` chooses the table. It looks like a language code but comes from a very restricted set: - 'sr-Latn' means to convert Serbian, which may be in Cyrillic, into the Latin alphabet. - 'az-Latn' means the same for Azerbaijani Cyrillic to Latin.
def vm_info(name, quiet=False):
    """Return the information on the named VM.

    :param name: VM name to look up.
    :param quiet: forwarded to _find_vm to suppress output.
    """
    data = query(quiet=True)
    return _find_vm(name, data, quiet)
Return the information on the named VM
def getatom(self, atomends=None):
    """Parse an RFC 2822 atom starting at the current position.

    Optional *atomends* specifies a different set of end-token delimiters
    (the default is self.atomends).  This is used e.g. in getphraselist()
    since phrase endings must not include the `.' (which is legal in
    phrases).
    """
    atomlist = ['']
    if atomends is None:
        atomends = self.atomends
    while self.pos < len(self.field):
        if self.field[self.pos] in atomends:
            break
        else:
            atomlist.append(self.field[self.pos])
        self.pos += 1
    return EMPTYSTRING.join(atomlist)
Parse an RFC 2822 atom. Optional atomends specifies a different set of end token delimiters (the default is to use self.atomends). This is used e.g. in getphraselist() since phrase endings must not include the `.' (which is legal in phrases).
def configure_visual_directories(cls, driver_info):
    """Configure screenshots, videos, logs and visual-test directories.

    Runs only once (when screenshots_directory is unset); folder names are a
    timestamp optionally suffixed with the driver info.

    :param driver_info: driver property value used to name the folders.
    """
    if cls.screenshots_directory is None:
        date = datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')
        folder_name = '%s_%s' % (date, driver_info) if driver_info else date
        folder_name = get_valid_filename(folder_name)
        cls.screenshots_directory = os.path.join(cls.output_directory, 'screenshots', folder_name)
        cls.screenshots_number = 1
        cls.videos_directory = os.path.join(cls.output_directory, 'videos', folder_name)
        cls.logs_directory = os.path.join(cls.output_directory, 'logs', folder_name)
        cls.videos_number = 1
        cls.visual_output_directory = os.path.join(cls.output_directory, 'visualtests', folder_name)
        cls.visual_number = 1
Configure screenshots, videos and visual directories :param driver_info: driver property value to name folders
def parse_line(self, line):
    """Parse a debugging-shell line into (cmd, [rest-as-one-string]).

    Everything after the first token is treated as one literal entity; the
    whitespace between the first token and the next non-whitespace character
    is preserved.  E.g. '  foo dicj didiw ' -> ('foo', [' dicj didiw ']).

    Returns:
        tuple: (cmd, args) where args is a one-element list holding
        everything after cmd, verbatim.
    """
    stripped = line.lstrip()
    first_token = shlex.split(stripped)[0]
    remainder = stripped[len(first_token):]
    return first_token, [remainder]
Parser for the debugging shell. Treat everything after the first token as one literal entity. Whitespace characters between the first token and the next first non-whitespace character are preserved. For example, ' foo dicj didiw ' is parsed as ( 'foo', ' dicj didiw ' ) Returns: A tuple (cmd, args), where the args is a list that consists of one and only one string containing everything after the cmd as is.
def _InitializeGraph(self, os_name, artifact_list):
    """Create the nodes and directed edges of the dependency graph.

    Args:
        os_name: String specifying the OS name.
        artifact_list: List of requested artifact names.
    """
    dependencies = artifact_registry.REGISTRY.SearchDependencies(
        os_name, artifact_list)
    artifact_names, attribute_names = dependencies
    self._AddAttributeNodes(attribute_names)
    self._AddArtifactNodesAndEdges(artifact_names)
Creates the nodes and directed edges of the dependency graph. Args: os_name: String specifying the OS name. artifact_list: List of requested artifact names.
def set_as_data(self, as_data):
    """Set the as-data flag and notify listeners.

    :param as_data: presumably a bool controlling data-type conversion —
        confirm against the callers; emitted via the asDataChanged signal.
    """
    self._as_data = as_data
    self.asDataChanged.emit(as_data)
Set whether data-type conversion is applied, and emit the change signal.
def make_cookie(name, load, seed, expire=0, domain="", path="", timestamp=""):
    """Create and return a signed cookie as a (header-name, header-value) tuple.

    :param name: Cookie name
    :param load: Cookie load (payload)
    :param seed: A seed for the HMAC function
    :param expire: Number of minutes before this cookie goes stale
    :param domain: The domain of the cookie
    :param path: The path specification for the cookie
    :param timestamp: Override the timestamp (defaults to now, UTC)
    :return: A tuple to be added to headers
    """
    cookie = SimpleCookie()
    if not timestamp:
        timestamp = str(int(time.mktime(time.gmtime())))
    signature = cookie_signature(seed, load, timestamp)
    # Cookie value layout: payload|timestamp|signature
    cookie[name] = "|".join([load, timestamp, signature])
    if path:
        cookie[name]["path"] = path
    if domain:
        cookie[name]["domain"] = domain
    if expire:
        cookie[name]["expires"] = _expiration(expire, "%a, %d-%b-%Y %H:%M:%S GMT")
    return tuple(cookie.output().split(": ", 1))
Create and return a cookie :param name: Cookie name :param load: Cookie load :param seed: A seed for the HMAC function :param expire: Number of minutes before this cookie goes stale :param domain: The domain of the cookie :param path: The path specification for the cookie :return: A tuple to be added to headers
def plat_specific_errors(*errnames):
    """Return error numbers for all errors in *errnames* on this platform.

    The 'errno' module contains different global constants depending on the
    specific platform (OS); names missing here are silently skipped.
    """
    numbers = {getattr(errno, name, None) for name in errnames}
    numbers.discard(None)
    return list(numbers)
Return error numbers for all errors in errnames on this platform. The 'errno' module contains different global constants depending on the specific platform (OS). This function will return the list of numeric values for a given list of potential names.
def from_other(cls, item):
    """Factory: return *item* converted into a new instance of *cls*.

    May be called from any bitmath class without instantiating it first.

    :param item: a bitmath.Bitmath subclass instance.
    :raises ValueError: if item is not a Bitmath instance.
    """
    if isinstance(item, Bitmath):
        # Conversion goes through the common 'bits' representation.
        return cls(bits=item.bits)
    else:
        raise ValueError("The provided items must be a valid bitmath class: %s" % str(item.__class__))
Factory function to return instances of `item` converted into a new instance of ``cls``. Because this is a class method, it may be called from any bitmath class object without the need to explicitly instantiate the class ahead of time. *Implicit Parameter:* * ``cls`` A bitmath class, implicitly set to the class of the instance object it is called on *User Supplied Parameter:* * ``item`` A :class:`bitmath.Bitmath` subclass instance *Example:* >>> import bitmath >>> kib = bitmath.KiB.from_other(bitmath.MiB(1)) >>> print kib KiB(1024.0)
def key(
    seq: Sequence,
    tooth: Callable[[Sequence], str] = (
        lambda seq: str(random.SystemRandom().choice(seq)).strip()
    ),
    nteeth: int = 6,
    delimiter: str = ' ',
) -> str:
    """Concatenate *nteeth* strings generated by the *tooth* function.

    By default each tooth is a cryptographically random choice from *seq*.
    """
    teeth = [tooth(seq) for _ in range(nteeth)]
    return delimiter.join(teeth)
Concatenate strings generated by the tooth function.
def load_df(self, df):
    """Load a Pandas DataFrame into this network object."""
    self.reset()
    df_dict = {}
    # Deep-copy so downstream processing does not mutate the caller's frame.
    df_dict['mat'] = deepcopy(df)
    data_formats.df_to_dat(self, df_dict, define_cat_colors=True)
Load Pandas DataFrame.
def _infer_map(node, context):
    """Infer all values based on Dict.items, expanding **-unpacked dicts.

    Raises:
        exceptions.InferenceError: when a key, value, or double-starred
            target cannot be inferred, or the unpacked target is not a Dict.
    """
    values = {}
    for name, value in node.items:
        if isinstance(name, nodes.DictUnpack):
            # {**other} entry: recurse into the unpacked dict.
            double_starred = helpers.safe_infer(value, context)
            if not double_starred:
                raise exceptions.InferenceError
            if not isinstance(double_starred, nodes.Dict):
                raise exceptions.InferenceError(node=node, context=context)
            unpack_items = _infer_map(double_starred, context)
            values = _update_with_replacement(values, unpack_items)
        else:
            key = helpers.safe_infer(name, context=context)
            value = helpers.safe_infer(value, context=context)
            if any(not elem for elem in (key, value)):
                raise exceptions.InferenceError(node=node, context=context)
            values = _update_with_replacement(values, {key: value})
    return values
Infer all values based on Dict.items
def is_data_dependent(fmto, data):
    """Check whether a formatoption is data dependent.

    Parameters
    ----------
    fmto: Formatoption
        The instance to check.
    data: xarray.DataArray
        Passed to ``fmto.data_dependent`` when that attribute is callable.

    Returns
    -------
    bool
        True if the formatoption depends on the data.
    """
    dependent = fmto.data_dependent
    return dependent(data) if callable(dependent) else dependent
Check whether a formatoption is data dependent Parameters ---------- fmto: Formatoption The :class:`Formatoption` instance to check data: xarray.DataArray The data array to use if the :attr:`~Formatoption.data_dependent` attribute is a callable Returns ------- bool True, if the formatoption depends on the data
def get_woeid(lat, lon):
    """Ask Yahoo! for the WOEID of a GPS position.

    :returns: the woeid, or None (with an error logged) when the query
        yields nothing.
    """
    yql = _YQL_WOEID.format(lat, lon)
    tmpData = _yql_query(yql)
    if tmpData is None:
        _LOGGER.error("No woid is received!")
        return None
    return tmpData.get("place", {}).get("woeid", None)
Ask Yahoo! who is the woeid from GPS position.
def mesh(**kwargs):
    """Create parameters for a new mesh dataset.

    Generally used as input to the kind argument in
    :meth:`phoebe.frontend.bundle.Bundle.add_dataset`.

    :parameter **kwargs: defaults for the values of any of the parameters
    :return: (ParameterSet of all newly created Parameters, constraints)
    """
    obs_params = []
    syn_params, constraints = mesh_syn(syn=False, **kwargs)
    obs_params += syn_params.to_list()
    obs_params += [SelectParameter(qualifier='include_times',
                                   value=kwargs.get('include_times', []),
                                   description='append to times from the following datasets/time standards',
                                   choices=['t0@system'])]
    obs_params += [SelectParameter(qualifier='columns',
                                   value=kwargs.get('columns', []),
                                   description='columns to expose within the mesh',
                                   choices=_mesh_columns)]
    return ParameterSet(obs_params), constraints
Create parameters for a new mesh dataset. Generally, this will be used as an input to the kind argument in :meth:`phoebe.frontend.bundle.Bundle.add_dataset` :parameter **kwargs: defaults for the values of any of the parameters :return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly created :class:`phoebe.parameters.parameters.Parameter`s
def has_ended(self):
    """Tests if this assessment has ended.

    return: (boolean) - ``true`` if the assessment has ended, ``false``
        otherwise
    *compliance: mandatory -- This method must be implemented.*
    """
    assessment_offered = self.get_assessment_offered()
    now = DateTime.utcnow()
    if self._my_map['completionTime'] is not None:
        # A completed assessment has always ended.
        return True
    elif assessment_offered.has_deadline() and assessment_offered.has_duration():
        if self._my_map['actualStartTime'] is None:
            # Not started yet: only the deadline matters.
            return now >= assessment_offered.get_deadline()
        else:
            # Both the deadline and the per-taker duration must have passed.
            return (now >= assessment_offered.get_deadline() and
                    now >= self._my_map['actualStartTime'] + assessment_offered.get_duration())
    elif assessment_offered.has_deadline():
        return now >= assessment_offered.get_deadline()
    elif assessment_offered.has_duration() and self._my_map['actualStartTime'] is not None:
        return now >= self._my_map['actualStartTime'] + assessment_offered.get_duration()
    else:
        return False
Tests if this assessment has ended. return: (boolean) - ``true`` if the assessment has ended, ``false`` otherwise *compliance: mandatory -- This method must be implemented.*
def add_distances(self, indices, periodic=True, indices2=None):
    r"""Add the distances between atoms to the feature list.

    Parameters
    ----------
    indices : ndarray((n, 2), dtype=int) of atom pairs, or an iterable of
        single atom indices (combined pairwise, optionally against indices2).
    periodic : bool, default True
        Use the minimum image convention when unitcell info is present.
    indices2 : iterable of int, optional
        Only used when *indices* is an iterable of single indices.
    """
    from .distances import DistanceFeature
    atom_pairs = _parse_pairwise_input(
        indices, indices2, self.logger, fname='add_distances()')
    atom_pairs = self._check_indices(atom_pairs)
    f = DistanceFeature(self.topology, atom_pairs, periodic=periodic)
    self.__add_feature(f)
r""" Adds the distances between atoms to the feature list. Parameters ---------- indices : can be of two types: ndarray((n, 2), dtype=int): n x 2 array with the pairs of atoms between which the distances shall be computed iterable of integers (either list or ndarray(n, dtype=int)): indices (not pairs of indices) of the atoms between which the distances shall be computed. periodic : optional, boolean, default is True If periodic is True and the trajectory contains unitcell information, distances will be computed under the minimum image convention. indices2: iterable of integers (either list or ndarray(n, dtype=int)), optional: Only has effect if :py:obj:`indices` is an iterable of integers. Instead of the above behaviour, only the distances between the atoms in :py:obj:`indices` and :py:obj:`indices2` will be computed. .. note:: When using the iterable of integers input, :py:obj:`indices` and :py:obj:`indices2` will be sorted numerically and made unique before converting them to a pairlist. Please look carefully at the output of :py:func:`describe()` to see what features exactly have been added.
def save_network_to_file(self, filename="network0.pkl"):
    """Pickle the current network parameters to *filename* for persistence.

    When the default name is used and a file already exists, the trailing
    number is incremented (network0.pkl -> network1.pkl -> ...) so that
    existing files are never overwritten.
    """
    import cPickle, os, re
    if filename == "network0.pkl":
        # Bump the last digit run until we find an unused file name.
        while os.path.exists(os.path.join(os.getcwd(), filename)):
            # raw string: '\d' in a plain literal is an invalid escape in py3
            filename = re.sub(r'\d(?!\d)', lambda x: str(int(x.group(0)) + 1), filename)
    with open(filename, 'wb') as out_file:  # renamed: don't shadow builtin 'file'
        store_dict = {
            "n_inputs": self.n_inputs,
            "layers": self.layers,
            "n_weights": self.n_weights,
            "weights": self.weights,
        }
        cPickle.dump(store_dict, out_file, 2)
This save method pickles the parameters of the current network into a binary file for persistant storage.
async def render_template_string(source: str, **context: Any) -> str:
    """Render the template source with the context given.

    Arguments:
        source: The template source code.
        context: The variables to pass to the template.
    """
    # Merge in app-level context processors before rendering.
    await current_app.update_template_context(context)
    template = current_app.jinja_env.from_string(source)
    return await _render(template, context)
Render the template source with the context given. Arguments: source: The template source code. context: The variables to pass to the template.
def _url_base64_encode(msg): msg_base64 = base64.b64encode(msg) msg_base64 = msg_base64.replace('+', '-') msg_base64 = msg_base64.replace('=', '_') msg_base64 = msg_base64.replace('/', '~') return msg_base64
Base64 encodes a string using the URL-safe characters specified by Amazon.
def get_source_event_declaration(self, event):
    """Return the source mapping where the event is declared.

    Args:
        event (str): event name.

    Returns:
        (dict): sourceMapping of the first matching event.

    Raises:
        StopIteration: if no event with that name exists.
    """
    matches = (candidate.source_mapping
               for candidate in self.events
               if candidate.name == event)
    return next(matches)
Return the source mapping where the event is declared Args: event (str): event name Returns: (dict): sourceMapping
def solve(self, value, filter_):
    """Solve *filter_* against *value* via the registered source.

    Arguments
    ---------
    value : ?
        A value to solve in combination with the given filter.
    filter_ : dataql.resource.Filter
        The filter to apply.

    Returns
    -------
    Depending on the source: an attribute of the value, or the result of
    calling a function with the value as first argument.
    """
    args, kwargs = filter_.get_args_and_kwargs()
    return self.registry[value].solve(value, filter_.name, args, kwargs)
Returns the value of an attribute of the value, or the result of a call to a function. Arguments --------- value : ? A value to solve in combination with the given filter. filter_ : dataql.resource.Filter An instance of ``Filter`` to solve with the given value. Returns ------- Depending on the source, the filter may ask for an attribute of the value, or for the result of a call to a standalone function taking the value as first argument. This method returns this attribute or result. Example ------- >>> from dataql.solvers.registry import Registry >>> registry = Registry() >>> from datetime import date >>> registry.register(date, ['day', 'strftime']) >>> solver = FilterSolver(registry) >>> solver.solve(date(2015, 6, 1), Filter(name='day')) 1 >>> from dataql.resources import PosArg >>> solver.solve(date(2015, 6, 1), Filter(name='strftime', args=[PosArg('%F')])) '2015-06-01'
def _check_valid_data(self, data): if data.dtype.type is not np.uint8: raise ValueError( 'Illegal data type. Color images only support uint8 arrays') if len(data.shape) != 3 or data.shape[2] != 3: raise ValueError( 'Illegal data type. Color images only support three channels')
Checks that the given data is a uint8 array with one or three channels. Parameters ---------- data : :obj:`numpy.ndarray` The data to check. Raises ------ ValueError If the data is invalid.
def get_assignment_by_name(self, assignment_name, assignments=None):
    """Get an assignment by name.

    Retrieves all assignments from the service (unless *assignments* is
    supplied) and returns the first one whose name matches.

    Args:
        assignment_name (str): name of assignment.
        assignments (list): assignments to search; default None fetches all.

    Returns:
        tuple: (assignment id, assignment dict), or (None, None) when no
        assignment matches.
    """
    candidates = self.get_assignments() if assignments is None else assignments
    match = next(
        (entry for entry in candidates if entry['name'] == assignment_name),
        None)
    if match is None:
        return None, None
    return match['assignmentId'], match
Get assignment by name. Get an assignment by name. It works by retrieving all assignments and returning the first assignment with a matching name. If the optional parameter ``assignments`` is provided, it uses this collection rather than retrieving all assignments from the service. Args: assignment_name (str): name of assignment assignments (list): assignments to search, default: None When ``assignments`` is unspecified, all assignments are retrieved from the service. Raises: requests.RequestException: Exception connection error ValueError: Unable to decode response content Returns: tuple: tuple of assignment id and assignment dictionary .. code-block:: python ( 16708850, { u'assignmentId': 16708850, u'categoryId': 1293820, u'description': u'', u'dueDate': 1383541200000, u'dueDateString': u'11-04-2013', u'gradebookId': 1293808, u'graderVisible': False, u'gradingSchemeId': 16708851, u'gradingSchemeType': u'NUMERIC', u'isComposite': False, u'isHomework': False, u'maxPointsTotal': 100.0, u'name': u'midterm1', u'shortName': u'mid1', u'userDeleted': False, u'weight': 1.0 } )
def WriteUInt256(self, value):
    """Write a UInt256 type to the stream.

    Args:
        value (UInt256): the value to serialize.

    Raises:
        Exception: when `value` is not of neocore.UInt256 type.
    """
    # isinstance instead of the original exact `type(...) is` check so that
    # UInt256 subclasses serialize too; non-instances still raise.
    if isinstance(value, UInt256):
        value.Serialize(self)
    else:
        raise Exception("Cannot write value that is not UInt256")
Write a UInt256 type to the stream. Args: value (UInt256): Raises: Exception: when `value` is not of neocore.UInt256 type.
def get_magnitude_depth_distribution(self, magnitude_bins, depth_bins, normalisation=False, bootstrap=None):
    """Return a 2-D magnitude-depth histogram for the catalogue.

    :param numpy.ndarray magnitude_bins: bin edges for the magnitudes
    :param numpy.ndarray depth_bins: bin edges for the depths
    :param bool normalisation: normalise so total contributions sum to 1.0
    :param int bootstrap: number of bootstrap samples
    :returns: 2D histogram of events in magnitude-depth bins
    :raises ValueError: if the catalogue has no depth values
    """
    if len(self.data['depth']) == 0:
        raise ValueError('Depths missing in catalogue')
    # Missing uncertainties default to zero (no bootstrap perturbation).
    if len(self.data['depthError']) == 0:
        self.data['depthError'] = np.zeros(self.get_number_events(), dtype=float)
    if len(self.data['sigmaMagnitude']) == 0:
        self.data['sigmaMagnitude'] = np.zeros(self.get_number_events(), dtype=float)
    return bootstrap_histogram_2D(self.data['magnitude'],
                                  self.data['depth'],
                                  magnitude_bins,
                                  depth_bins,
                                  boundaries=[(0., None), (None, None)],
                                  xsigma=self.data['sigmaMagnitude'],
                                  ysigma=self.data['depthError'],
                                  normalisation=normalisation,
                                  number_bootstraps=bootstrap)
Returns a 2-D magnitude-depth histogram for the catalogue :param numpy.ndarray magnitude_bins: Bin edges for the magnitudes :param numpy.ndarray depth_bins: Bin edges for the depths :param bool normalisation: Choose to normalise the results such that the total contributions sum to 1.0 (True) or not (False) :param int bootstrap: Number of bootstrap samples :returns: 2D histogram of events in magnitude-depth bins
def stopall(self, sudo=False, quiet=True):
    """Stop ALL Singularity instances.

    Only added to the command group, as it doesn't make sense to call from a
    single instance.

    Parameters
    ==========
    sudo: run the command with sudo (exposes a different set of instances)
    quiet: suppress command output
    """
    from spython.utils import run_command, check_install
    check_install()
    subgroup = 'instance.stop'
    # Singularity 3.x takes subcommand lists rather than dotted names.
    if 'version 3' in self.version():
        subgroup = ["instance", "stop"]
    cmd = self._init_command(subgroup)
    cmd = cmd + ['--all']
    output = run_command(cmd, sudo=sudo, quiet=quiet)
    if output['return_code'] != 0:
        message = '%s : return code %s' % (output['message'], output['return_code'])
        bot.error(message)
        return output['return_code']
    return output['return_code']
stop ALL instances. This command is only added to the command group as it doesn't make sense to call from a single instance Parameters ========== sudo: if the command should be done with sudo (exposes different set of instances)
def _create_array(self, format, args):
    """Handle the case where the outermost type of *format* is an array.

    Returns a tuple (variant, remaining_format, remaining_args).
    """
    builder = None
    if args is None or not args[0]:
        # Empty array: derive the element type from the format string so the
        # resulting variant is still properly typed.
        rest_format = self._create(format[1:], None)[1]
        element_type = format[:len(format) - len(rest_format)]
        builder = GLib.VariantBuilder.new(variant_type_from_string(element_type))
    else:
        builder = GLib.VariantBuilder.new(variant_type_from_string('a*'))
        for i in range(len(args[0])):
            (v, rest_format, _) = self._create(format[1:], args[0][i:])
            builder.add_value(v)
    if args is not None:
        args = args[1:]
    return (builder.end(), rest_format, args)
Handle the case where the outermost type of format is an array.
def add_output(self, key, value, variable_type):
    """Dynamically add output to the output_data dict, written to the DB later.

    Lets an App store output inline as it is generated (under a
    '<key>-<type>' index) instead of collecting everything up front; array
    types accumulate values across calls. A None value leaves an empty
    placeholder entry.

    Args:
        key (string): The variable name to write to storage.
        value (any): The value to write to storage.
        variable_type (string): The variable type being written.
    """
    scalar_types = ['String', 'Binary', 'KeyValue', 'TCEntity', 'TCEnhancedEntity']
    array_types = [
        'StringArray',
        'BinaryArray',
        'KeyValueArray',
        'TCEntityArray',
        'TCEnhancedEntityArray',
    ]
    index = '{}-{}'.format(key, variable_type)
    self.output_data.setdefault(index, {})
    if value is None:
        return
    if variable_type in scalar_types:
        self.output_data[index] = {'key': key, 'type': variable_type, 'value': value}
    elif variable_type in array_types:
        entry = self.output_data[index]
        entry.setdefault('key', key)
        entry.setdefault('type', variable_type)
        collected = entry.setdefault('value', [])
        if isinstance(value, list):
            collected.extend(value)
        else:
            collected.append(value)
Dynamically add output to output_data dictionary to be written to DB later. This method provides an alternative and more dynamic way to create output variables in an App. Instead of storing the output data manually and writing all at once the data can be stored inline, when it is generated and then written before the App completes. .. code-block:: python :linenos: :lineno-start: 1 for color in ['blue', 'red', 'yellow']: tcex.playbook.add_output('app.colors', color, 'StringArray') tcex.playbook.write_output() # writes the output stored in output_data .. code-block:: json :linenos: :lineno-start: 1 { "my_color-String": { "key": "my_color", "type": "String", "value": "blue" }, "my_numbers-String": { "key": "my_numbers", "type": "String", "value": "seven" }, "my_numbers-StringArray": { "key": "my_numbers", "type": "StringArray", "value": ["seven", "five"] } } Args: key (string): The variable name to write to storage. value (any): The value to write to storage. variable_type (string): The variable type being written.
def release(self, conn):
    """Return *conn* to the pool, or close it if its reader is dead.

    :param conn: ``namedtuple`` of (reader, writer) previously taken
        from this pool.
    """
    self._in_use.remove(conn)
    reader = conn.reader
    broken = reader.at_eof() or reader.exception()
    if broken:
        # The peer hung up or errored; dispose instead of recycling.
        self._do_close(conn)
    else:
        self._pool.put_nowait(conn)
Releases connection back to the pool. :param conn: ``namedtuple`` (reader, writer)
def _prepare_request_file_vs_dir(self, request: Request) -> bool:
    """Determine whether *request* targets a file or a directory, appending
    a trailing slash to the request URL for directories.

    Coroutine.

    :param request: the request to classify; modified in place when the
        target turns out to be a directory.
    :return: True if the target is (assumed to be) a file, False otherwise.
    """
    if self._item_session.url_record.link_type:
        # The URL record already tells us the link type.
        is_file = self._item_session.url_record.link_type == LinkType.file
    elif request.url_info.path.endswith('/'):
        is_file = False
    else:
        is_file = 'unknown'
    if is_file == 'unknown':
        # Consult the parent directory listing to classify the target.
        files = yield from self._fetch_parent_path(request)
        if not files:
            # No listing available; fall back to treating it as a file.
            return True
        filename = posixpath.basename(request.file_path)
        for file_entry in files:
            if file_entry.name == filename:
                _logger.debug('Found entry in parent. Type {}', file_entry.type)
                is_file = file_entry.type != 'dir'
                break
        else:
            _logger.debug('Did not find entry. Assume file.')
            return True
    if not is_file:
        # Directory URLs must end with a slash before fetching.
        request.url = append_slash_to_path_url(request.url_info)
        _logger.debug('Request URL changed to {}. Path={}.',
                      request.url, request.file_path)
    return is_file
Check if file, modify request, and return whether is a file. Coroutine.
def confirm_email_with_link(self, link):
    """Confirm a user's email address via a confirmation link.

    A universal method used both for initial confirmation and when the
    email is changed.

    :param link: the email confirmation link token.
    :return: False if no user matches the link, True if the email is
        already confirmed, otherwise the freshly confirmed user.
    :raises x.EmailLinkExpired: if the link has expired.
    """
    user = self.first(email_link=link)
    if not user:
        return False
    if user.email_confirmed:
        # Nothing to do; the address was already confirmed.
        return True
    if user.email_link_expired():
        raise x.EmailLinkExpired('Link expired, generate a new one')

    user.confirm_email()
    db.session.add(user)
    db.session.commit()
    events.email_confirmed_event.send(user)
    return user
Confirm email with link A universal method to confirm email. Used for both initial confirmation and when the email is changed.
def flipped(self):
    """Return a new direction with the forward/reverse pair swapped."""
    fwd, rev = self.value
    return type(self)((rev, fwd))
Return the flipped version of this direction.
def _generate_rsa_key(key_length):
    """Generate a new RSA private key.

    :param int key_length: required key length in bits
    :returns: DER-encoded private key bytes, private-key identifier,
        and DER encoding identifier
    :rtype: tuple(bytes, EncryptionKeyType, KeyEncodingType)
    """
    private_key = rsa.generate_private_key(public_exponent=65537, key_size=key_length, backend=default_backend())
    # Serialize as unencrypted PKCS#8 DER.
    key_bytes = private_key.private_bytes(
        encoding=serialization.Encoding.DER,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption(),
    )
    return key_bytes, EncryptionKeyType.PRIVATE, KeyEncodingType.DER
Generate a new RSA private key. :param int key_length: Required key length in bits :returns: DER-encoded private key, private key identifier, and DER encoding identifier :rtype: tuple(bytes, :class:`EncryptionKeyType`, :class:`KeyEncodingType`)
def _instantiate_layers(self):
    """Instantiate all the linear modules used in the network.

    Layers are instantiated in the constructor (rather than at build time)
    because MLP implements the Transposable interface and transpose() can
    be called before the module is connected to the graph.
    """
    with self._enter_variable_scope(check_same_graph=False):
        # One Linear module per declared output size, sharing the module's
        # initializers/partitioners/regularizers configuration.
        self._layers = [basic.Linear(self._output_sizes[i],
                                     name="linear_{}".format(i),
                                     initializers=self._initializers,
                                     partitioners=self._partitioners,
                                     regularizers=self._regularizers,
                                     use_bias=self.use_bias)
                        for i in xrange(self._num_layers)]
Instantiates all the linear modules used in the network. Layers are instantiated in the constructor, as opposed to the build function, because MLP implements the Transposable interface, and the transpose function can be called before the module is actually connected to the graph and build is called. Notice that this is safe since layers in the transposed module are instantiated using a lambda returning input_size of the mlp layers, and this doesn't have to return sensible values until the original module is connected to the graph.
def has_free_parameters(self):
    """Return whether any parameter in this source is free.

    Checks the shape parameters of every component, then the position
    parameters.

    :return: True if at least one parameter is free, otherwise False.
    """
    shape_parameters = (
        par
        for component in self._components.values()
        for par in component.shape.parameters.values()
    )
    if any(par.free for par in shape_parameters):
        return True
    return any(par.free for par in self.position.parameters.values())
Returns True or False whether there is any parameter in this source :return:
def a_configuration_inconsistency(ctx):
    """Raise the configuration inconsistency error.

    Records an explanatory message on the context, emits an error on the
    device connection, marks the context finished, and raises.

    :param ctx: handler context carrying the device and message state.
    :raises ConfigurationErrors: always.
    """
    ctx.msg = "This SDR's running configuration is inconsistent with persistent configuration. " \
              "No configuration commits for this SDR will be allowed until a 'clear configuration inconsistency' " \
              "command is performed."
    ctx.device.chain.connection.emit_message("Configuration inconsistency.", log_level=logging.ERROR)
    ctx.finished = True
    raise ConfigurationErrors("Configuration inconsistency.")
Raise the configuration inconsistency error.
def all_nodes_that_receive(service, service_configuration=None, run_only=False, deploy_to_only=False):
    """Return the sorted nodes that receive *service*.

    If run_only, returns only the nodes in the runs_on list. If
    deploy_to_only, returns only the nodes in the deployed_to list. If
    neither, both are returned with duplicates stripped. Results are
    always sorted.
    """
    assert not (run_only and deploy_to_only)
    if service_configuration is None:
        service_configuration = read_services_configuration()
    entry = service_configuration[service]
    runs_on = entry['runs_on']
    deployed_to = entry.get('deployed_to')
    if deployed_to is None:
        deployed_to = []

    if run_only:
        receivers = runs_on
    elif deploy_to_only:
        receivers = deployed_to
    else:
        receivers = set(runs_on) | set(deployed_to)
    return sorted(receivers)
If run_only, returns only the nodes in the runs_on list. If deploy_to_only, returns only the nodes in the deployed_to list. If neither, both are returned, duplicates stripped. Results are always sorted.
def Clone(self):
    """Return a copy of this account state.

    Returns:
        AccountState: a new instance carrying the same script hash,
        frozen flag, votes and balances.
    """
    return AccountState(self.ScriptHash,
                        self.IsFrozen,
                        self.Votes,
                        self.Balances)
Clone self. Returns: AccountState:
def _get_svc_list(name='*', status=None):
    """Return the sorted list of service names matching *name* and *status*.

    name
        a glob for the service name; default is '*'.
    status
        None for all services (no filter), 'DISABLED' for available but
        not enabled, 'ENABLED' for enabled services.
    """
    basenames = (os.path.basename(path) for path in _get_svc_path(name, status))
    return sorted(basenames)
Return list of services that have the specified service ``status`` name a glob for service name. default is '*' status None : all services (no filter, default choice) 'DISABLED' : available service that is not enabled 'ENABLED' : enabled service (whether started on boot or not)
def wrongstatus(data, sb, msb, lsb):
    """Check whether the status bit and its value field are consistent.

    Used for checking BDS code versions: a field whose status bit says
    "invalid" must be all zeros.

    :param data: bit string to inspect.
    :param sb: 1-based position of the status bit.
    :param msb: 1-based position of the field's most significant bit.
    :param lsb: 1-based position of the field's least significant bit.
    :return: True when the field is inconsistent with its status bit.
    """
    status = int(data[sb - 1])
    value = bin2int(data[msb - 1:lsb])
    # Inconsistent only when status says "invalid" but the field is non-zero.
    return (not status) and value != 0
Check whether the status bit and the field bits are consistent. This function is used for checking BDS code versions.
def to_fits(self, filename, wavelengths=None, **kwargs):
    """Write the reddening law to a FITS file.

    The R(V) column is automatically named 'Av/E(B-V)'.

    Parameters
    ----------
    filename : str
        Output filename.
    wavelengths : array-like, Quantity, or None
        Wavelength values for sampling. If None, ``self.waveset`` is used.
    kwargs : dict
        Keywords accepted by :func:`synphot.specio.write_fits_spec`.
    """
    w, y = self._get_arrays(wavelengths)
    kwargs['flux_col'] = 'Av/E(B-V)'
    kwargs['flux_unit'] = self._internal_flux_unit
    # Default to no zero-padding/trimming unless the caller chose explicitly.
    if 'pad_zero_ends' not in kwargs:
        kwargs['pad_zero_ends'] = False
    if 'trim_zero' not in kwargs:
        kwargs['trim_zero'] = False
    # Extra header keywords merged into any caller-supplied ext_header.
    bkeys = {'tdisp1': 'G15.7', 'tdisp2': 'G15.7'}
    if 'expr' in self.meta:
        bkeys['expr'] = (self.meta['expr'], 'synphot expression')
    if 'ext_header' in kwargs:
        kwargs['ext_header'].update(bkeys)
    else:
        kwargs['ext_header'] = bkeys
    specio.write_fits_spec(filename, w, y, **kwargs)
Write the reddening law to a FITS file. :math:`R(V)` column is automatically named 'Av/E(B-V)'. Parameters ---------- filename : str Output filename. wavelengths : array-like, `~astropy.units.quantity.Quantity`, or `None` Wavelength values for sampling. If not a Quantity, assumed to be in Angstrom. If `None`, ``self.waveset`` is used. kwargs : dict Keywords accepted by :func:`~synphot.specio.write_fits_spec`.
def __create(self, opcode):
    """Instantiate and return the packet class registered for *opcode*.

    :param opcode: TFTP opcode to look up in ``self.classes``.
    :raises: via tftpassert when the opcode is not supported.
    """
    tftpassert(opcode in self.classes,
               "Unsupported opcode: %d" % opcode)
    return self.classes[opcode]()
This method returns the appropriate class object corresponding to the passed opcode.
def emit_children(self, node):
    """Emit every child of *node* and concatenate the results."""
    parts = (self.emit_node(child) for child in node.children)
    return "".join(parts)
Emit all the children of a node.
def process_frames(self):
    """Dispatch all frames currently queued in the input buffer.

    While an emergency close is pending, every frame is dropped except the
    channel close/close-ok handshake methods. Any unexpected dispatch
    failure marks an emergency close, attempts a best-effort channel
    close, and re-raises the original exception.
    """
    while len(self._frame_buffer):
        frame = self._frame_buffer.popleft()

        if self._emergency_close_pending:
            # Only the channel close handshake may pass; drop the rest.
            if (not isinstance(frame, MethodFrame) or
                    frame.class_id != self.channel.CLASS_ID or
                    frame.method_id not in (self.channel.CLOSE_METHOD_ID,
                                            self.channel.CLOSE_OK_METHOD_ID)):
                self.logger.warn("Emergency channel close: dropping input "
                                 "frame %.255s", frame)
                continue
        try:
            self.dispatch(frame)
        except ProtocolClass.FrameUnderflow:
            # Not enough data yet; wait for more frames to arrive.
            return
        except (ConnectionClosed, ChannelClosed):
            # Close conditions propagate untouched.
            raise
        except Exception:
            self.logger.exception(
                "Closing on failed dispatch of frame %.255s", frame)
            self._emergency_close_pending = True
            try:
                raise
            finally:
                # Best-effort close; the original exception still propagates.
                try:
                    self.close(500, "Failed to dispatch %s" % (str(frame)))
                except Exception:
                    self.logger.exception("Channel close failed")
                    pass
Process the input buffer.
def launch_configuration_exists(name, region=None, key=None, keyid=None, profile=None):
    """Check for a launch configuration's existence.

    Retries up to 30 times (5 seconds apart) when throttled by the AWS
    API; any other API error returns False.

    CLI example::

        salt myminion boto_asg.launch_configuration_exists mylc
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    retries = 30
    while True:
        try:
            lc = conn.get_all_launch_configurations(names=[name])
            if lc:
                return True
            else:
                msg = 'The launch configuration does not exist in region {0}'.format(region)
                log.debug(msg)
                return False
        except boto.exception.BotoServerError as e:
            # Back off on throttling until the retry budget is spent.
            if retries and e.code == 'Throttling':
                log.debug('Throttled by AWS API, retrying in 5 seconds...')
                time.sleep(5)
                retries -= 1
                continue
            log.error(e)
            return False
Check for a launch configuration's existence. CLI example:: salt myminion boto_asg.launch_configuration_exists mylc
def parse_uci(self, uci: str) -> Move:
    """Parse the given move in UCI notation.

    Supports both Chess960 and standard UCI notation. The returned move
    is guaranteed to be either legal or a null move.

    :raises ValueError: if the move is invalid or illegal in the current
        position (but not a null move).
    """
    move = Move.from_uci(uci)

    if not move:
        # Null move: returned as-is without legality checks.
        return move

    # Normalize through Chess960 representation to this board's convention.
    move = self._to_chess960(move)
    move = self._from_chess960(self.chess960, move.from_square, move.to_square, move.promotion, move.drop)

    if not self.is_legal(move):
        raise ValueError("illegal uci: {!r} in {}".format(uci, self.fen()))

    return move
Parses the given move in UCI notation. Supports both Chess960 and standard UCI notation. The returned move is guaranteed to be either legal or a null move. :raises: :exc:`ValueError` if the move is invalid or illegal in the current position (but not a null move).
def sample_distinct(self, n_to_sample, **kwargs):
    """Sample items from the pool until *n_to_sample* distinct items have
    been queried.

    Parameters
    ----------
    n_to_sample : int
        number of distinct items to sample. If sampling with replacement,
        this is not necessarily the number of iterations. Capped (with a
        warning) at the number of not-yet-sampled items.
    """
    # Items never queried still have NaN cached labels.
    n_notsampled = np.sum(np.isnan(self.cached_labels_))

    if n_notsampled == 0:
        raise Exception("All distinct items have already been sampled.")

    if n_to_sample > n_notsampled:
        warnings.warn("Only {} distinct item(s) have not yet been sampled."
                      " Setting n_to_sample = {}.".format(n_notsampled, \
                      n_notsampled))
        n_to_sample = n_notsampled

    n_sampled = 0
    while n_sampled < n_to_sample:
        self.sample(1,**kwargs)
        # presumably _queried_oracle[t-1] flags whether the last draw hit a
        # new distinct item — confirm against sample()'s implementation.
        n_sampled += self._queried_oracle[self.t_ - 1]*1
Sample a sequence of items from the pool until a minimum number of distinct items are queried Parameters ---------- n_to_sample : int number of distinct items to sample. If sampling with replacement, this number is not necessarily the same as the number of iterations.
def get_all_items_of_offer(self, offer_id):
    """Get all items of an offer.

    Iterates over all pages until every element is fetched, so if the
    rate limit is exceeded an exception is thrown and nothing is
    returned.

    :param offer_id: the offer id
    :return: list
    """
    return self._iterate_through_pages(
        get_function=self.get_items_of_offer_per_page,
        resource=OFFER_ITEMS,
        offer_id=offer_id,
    )
Get all items of offer This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param offer_id: the offer id :return: list
def get_default_value_by_type(type_, state=None):
    """Return a (symbolic) Java default value for the given type name.

    Java specifies default values for primitive and reference types; this
    returns the corresponding symbolic value for *type_*.

    :param str type_: name of the type.
    :param state: state used to allocate reference-type defaults; when
        None, reference types fall back to SootNullConstant.
    :return: default value for this type.
    """
    if type_ in ['byte', 'char', 'short', 'int', 'boolean']:
        # Sub-int primitives are all modeled as 32-bit bitvectors.
        return BVS('default_value_{}'.format(type_), 32)
    elif type_ == "long":
        return BVS('default_value_{}'.format(type_), 64)
    elif type_ == 'float':
        return FPS('default_value_{}'.format(type_), FSORT_FLOAT)
    elif type_ == 'double':
        return FPS('default_value_{}'.format(type_), FSORT_DOUBLE)
    elif state is not None:
        if type_ == 'java.lang.String':
            return SimSootValue_StringRef.new_string(state, StringS('default_value_{}'.format(type_), 1000))
        if type_.endswith('[][]'):
            # Multi-dimensional arrays are not supported.
            raise NotImplementedError
        elif type_.endswith('[]'):
            array = SimSootExpr_NewArray.new_array(state, type_[:-2], BVV(2, 32))
            return array
        else:
            # Other reference type: allocate a fresh symbolic object.
            return SimSootValue_ThisRef.new_object(state, type_, symbolic=True, init_object=False)
    else:
        return SootNullConstant()
Java specify defaults values for primitive and reference types. This method returns the default value for a given type. :param str type_: Name of type. :return: Default value for this type.
def _filter_from_dict(current: Dict[str, Any]) -> Dict[str, Any]: filter_ = dict() for k, v in current.items(): if isinstance(v, dict): for sub, v2 in _filter_from_dict(v).items(): filter_[f'{k}.{sub}'] = v2 else: filter_[k] = v return filter_
Takes in a nested dictionary as a filter and returns a flattened filter dictionary
def _path_hash(path, transform, kwargs):
    """Generate a hash of source file path + transform + kwargs.

    kwargs are sorted by key so the digest is deterministic regardless of
    dict ordering; values include both repr and type to distinguish e.g.
    1 from '1'.
    """
    sortedargs = ["%s:%r:%s" % (key, value, type(value))
                  for key, value in sorted(iteritems(kwargs))]
    srcinfo = "{path}:{transform}:{{{kwargs}}}".format(path=os.path.abspath(path),
                                                       transform=transform,
                                                       kwargs=",".join(sortedargs))
    return digest_string(srcinfo)
Generate a hash of source file path + transform + args
def _replace_variable_with_pattern(match):
    """Replace a template-variable match with a validation pattern.

    Args:
        match: a regular expression match over a path template variable.

    Returns:
        str: a regex pattern usable to validate the variable in an
        expanded path.

    Raises:
        ValueError: if an unexpected template expression is encountered.
    """
    positional = match.group("positional")
    name = match.group("name")
    template = match.group("template")
    if name is not None:
        # Named variable: pattern depends on its (optional) sub-template.
        if not template:
            return _SINGLE_SEGMENT_PATTERN.format(name)
        if template == "**":
            return _MULTI_SEGMENT_PATTERN.format(name)
        return _generate_pattern_for_template(template)
    if positional == "*":
        return _SINGLE_SEGMENT_PATTERN
    if positional == "**":
        return _MULTI_SEGMENT_PATTERN
    raise ValueError("Unknown template expression {}".format(match.group(0)))
Replace a variable match with a pattern that can be used to validate it. Args: match (re.Match): A regular expression match Returns: str: A regular expression pattern that can be used to validate the variable in an expanded path. Raises: ValueError: If an unexpected template expression is encountered.
def summarize(self, host):
    """Return per-category task counts for a particular host."""
    counters = (
        ('ok', self.ok),
        ('failures', self.failures),
        ('unreachable', self.dark),
        ('changed', self.changed),
        ('skipped', self.skipped),
    )
    return {label: counts.get(host, 0) for label, counts in counters}
return information about a particular host
def __update_mouse(self, milliseconds):
    """Use the mouse to control selection of the buttons.

    Moves the current selection to a button when the pointer newly enters
    it, and fires the hovered button's action when the select button is
    clicked.

    :param milliseconds: elapsed time forwarded to each button's update.
    """
    for button in self.gui_buttons:
        was_hovering = button.is_mouse_hovering
        button.update(milliseconds)
        # Idiom fix: compare truthiness instead of `== False`.
        if not was_hovering and button.is_mouse_hovering:
            # The mouse just entered this button: move the selection to it.
            old_index = self.current_index
            self.current_index = self.gui_buttons.index(button)
            self.__handle_selections(old_index, self.current_index)
        elif Ragnarok.get_world().Mouse.is_clicked(self.mouse_select_button) and button.is_mouse_hovering:
            button.clicked_action()
Use the mouse to control selection of the buttons.
def evaluate(self, genomes, config):
    """Evaluate the genomes by distributing them to secondary nodes.

    Splits the (genome_id, genome, config) tasks into chunks, pushes them
    onto the input queue, polls the output queue for results, and writes
    each returned fitness back onto its genome.

    :raises ModeError: if the DistributedEvaluator is not in primary mode.
    """
    if self.mode != MODE_PRIMARY:
        raise ModeError("Not in primary mode!")
    tasks = [(genome_id, genome, config) for genome_id, genome in genomes]
    id2genome = {genome_id: genome for genome_id, genome in genomes}
    tasks = chunked(tasks, self.secondary_chunksize)
    n_tasks = len(tasks)
    for task in tasks:
        self.inqueue.put(task)
    tresults = []
    while len(tresults) < n_tasks:
        try:
            sr = self.outqueue.get(block=True, timeout=0.2)
        except (queue.Empty, managers.RemoteError):
            # Keep polling until every chunk has reported back.
            continue
        tresults.append(sr)
    results = []
    for sr in tresults:
        results += sr
    for genome_id, fitness in results:
        genome = id2genome[genome_id]
        genome.fitness = fitness
Evaluates the genomes. This method raises a ModeError if the DistributedEvaluator is not in primary mode.
def validate_row(self, row):
    """Coerce each element in *row* to match the schema and return the
    cleaned row as a dict.

    Accepts either a sequence (matched positionally against
    ``self.header_order``) or a mapping of attribute name to value.
    Discrete attributes are cast to int, continuous to float; any other
    type is passed through unchanged.

    NOTE(review): validation uses ``assert``, which is stripped under
    ``-O``; raising ValueError would be safer — confirm before changing.
    """
    clean_row = {}
    if isinstance(row, (tuple, list)):
        assert self.header_order, "No attribute order specified."
        assert len(row) == len(self.header_order), \
            "Row length does not match header length."
        itr = zip(self.header_order, row)
    else:
        assert isinstance(row, dict)
        itr = iteritems(row)
    for el_name, el_value in itr:
        if self.header_types[el_name] == ATTR_TYPE_DISCRETE:
            clean_row[el_name] = int(el_value)
        elif self.header_types[el_name] == ATTR_TYPE_CONTINUOUS:
            clean_row[el_name] = float(el_value)
        else:
            clean_row[el_name] = el_value
    return clean_row
Ensure each element in the row matches the schema.
def sample(self, sampling_period, start=None, end=None, interpolate='previous'):
    """Sample the series at regular time periods.

    :param sampling_period: spacing between consecutive sample times.
    :param start: first sample time; defaults per _check_boundaries.
    :param end: last possible sample time; defaults per _check_boundaries.
    :param interpolate: interpolation mode forwarded to ``self.get``.
    :return: list of (time, value) pairs.
    """
    start, end, mask = self._check_boundaries(start, end)
    sampling_period = self._check_regularization(start, end, sampling_period)

    samples = []
    t = start
    while t <= end:
        samples.append((t, self.get(t, interpolate=interpolate)))
        t += sampling_period
    return samples
Sampling at regular time periods.
def mock_chroot(self, release, cmd, **kwargs):
    """Run a command inside the mock chroot for *release*.

    :param release: the mock configuration/release to target.
    :param cmd: command to execute in the chroot.
    :param kwargs: extra options forwarded to ``mock_cmd``.
    """
    chroot_args = ('--chroot', cmd)
    return self.mock_cmd(release, *chroot_args, **kwargs)
Run a command in the mock container for a release
def is_intent_name(name):
    """Build a predicate matching intent requests with the given name.

    The returned function can be applied to a HandlerInput to check that
    the incoming request is an IntentRequest whose intent name equals
    *name*.

    :param name: name to match against the intent request's name.
    :type name: str
    :return: predicate over a HandlerInput.
    :rtype: Callable[[HandlerInput], bool]
    """
    def can_handle_wrapper(handler_input):
        request = handler_input.request_envelope.request
        return (isinstance(request, IntentRequest) and
                request.intent.name == name)
    return can_handle_wrapper
A predicate function returning a boolean, when name matches the name in Intent Request. The function can be applied on a :py:class:`ask_sdk_core.handler_input.HandlerInput`, to check if the input is of :py:class:`ask_sdk_model.intent_request.IntentRequest` type and if the name of the request matches with the passed name. :param name: Name to be matched with the Intent Request Name :type name: str :return: Predicate function that can be used to check name of the request :rtype: Callable[[HandlerInput], bool]
def mail(ui, repo, *pats, **opts):
    """mail a change for review

    Uploads a patch to the code review server and then sends mail to the
    reviewer and CC list asking for a review.
    """
    if codereview_disabled:
        raise hg_util.Abort(codereview_disabled)

    cl, err = CommandLineCL(ui, repo, pats, opts, op="mail", defaultcc=defaultcc)
    if err != "":
        raise hg_util.Abort(err)
    cl.Upload(ui, repo, gofmt_just_warn=True)
    if not cl.reviewer:
        # No reviewer listed: fall back to the default CC list as reviewer.
        if not defaultcc:
            raise hg_util.Abort("no reviewers listed in CL")
        # presumably Sub removes the defaultcc members from the CC list so
        # they are not both reviewer and CC — confirm Sub's semantics.
        cl.cc = Sub(cl.cc, defaultcc)
        cl.reviewer = defaultcc
        cl.Flush(ui, repo)
    if cl.files == []:
        raise hg_util.Abort("no changed files, not sending mail")
    cl.Mail(ui, repo)
mail a change for review Uploads a patch to the code review server and then sends mail to the reviewer and CC list asking for a review.
def _make_cookie(self):
    """Return a string encoding the IDs of the process, instance and thread.

    This disambiguates legitimate wake-ups, accidental writes to the FD,
    and buggy internal FD sharing.
    """
    # NOTE(review): `thread` here looks like the Python 2 low-level
    # threading module — confirm against the file's imports.
    return struct.pack(self.COOKIE_FMT, self.COOKIE_MAGIC,
                       os.getpid(), id(self), thread.get_ident())
Return a string encoding the ID of the process, instance and thread. This disambiguates legitimate wake-ups, accidental writes to the FD, and buggy internal FD sharing.
def buckets_insert(self, bucket, project_id=None):
    """Issue a request to create a new bucket.

    Args:
        bucket: the name of the bucket.
        project_id: the project to use when inserting the bucket; defaults
            to this API object's project.
    Returns:
        A parsed bucket information dictionary.
    Raises:
        Exception if there is an error performing the operation.
    """
    args = {'project': project_id if project_id else self._project_id}
    data = {'name': bucket}
    url = Api._ENDPOINT + (Api._BUCKET_PATH % '')
    return datalab.utils.Http.request(url, args=args, data=data, credentials=self._credentials)
Issues a request to create a new bucket. Args: bucket: the name of the bucket. project_id: the project to use when inserting the bucket. Returns: A parsed bucket information dictionary. Raises: Exception if there is an error performing the operation.
def eigenvalues_(self):
    """Return the eigenvalues associated with each principal component.

    Requires the estimator to be fitted (``s_`` present).
    """
    utils.validation.check_is_fitted(self, 's_')
    # Eigenvalues are the squared singular values.
    singular_values = self.s_
    return np.square(singular_values).tolist()
The eigenvalues associated with each principal component.
def parse_duration_with_start(start, duration):
    """Parse an ISO 8601 duration relative to a start datetime.

    Year and month components are applied calendar-aware via
    ``start.replace``; the remaining components are applied as a plain
    timedelta.

    :param start: a ``datetime`` the duration is anchored to.
    :param duration: an ISO 8601 formatted duration string.
    :return: tuple of (start, ``datetime.timedelta`` from start to end).
    """
    elements = _parse_duration_string(_clean(duration))
    year, month = _year_month_delta_from_elements(elements)
    # NOTE(review): assumes the resulting month stays within 1-12 — confirm
    # _year_month_delta_from_elements normalizes overflow.
    end = start.replace(
        year=start.year + year,
        month=start.month + month
    )
    # Years/months were consumed above; the rest becomes a timedelta.
    del elements['years']
    del elements['months']
    end += _timedelta_from_elements(elements)
    return start, end - start
Attempt to parse an ISO 8601 formatted duration relative to a start datetime. Accepts a ``duration`` and a start ``datetime``. ``duration`` must be an ISO 8601 formatted string. Returns a tuple of the ``start`` datetime and the ``datetime.timedelta`` between start and the computed end.
def sort_schemas(cls, schemas_list):
    """Sort the provided list of schemas by priority, descending.

    Markers are supported as well: among markers, ordering additionally
    follows the priority of the wrapped key schema.

    :type schemas_list: list[CompiledSchema]
    :rtype: list[CompiledSchema]
    """
    return sorted(schemas_list,
                  key=lambda x: (
                      x.priority,
                      # Markers get a secondary key from their wrapped schema.
                      x.compiled.key_schema.priority
                      if x.compiled_type == const.COMPILED_TYPE.MARKER
                      else 0
                  ),
                  reverse=True)
Sort the provided list of schemas according to their priority. This also supports markers, and markers of a single type are also sorted according to the priority of the wrapped schema. :type schemas_list: list[CompiledSchema] :rtype: list[CompiledSchema]
def _socket_close(self):
    """Clean up after the socket is closed by the other end.

    Fails any pending callback with InterfaceError, then — even if that
    callback raises — drops queued jobs, marks the connection dead, and
    returns this object to the pool's cache.
    """
    callback = self.__callback
    self.__callback = None
    try:
        if callback:
            callback(None, InterfaceError('connection closed'))
    finally:
        # State reset and recycling must happen regardless of callback errors.
        self.__job_queue = []
        self.__alive = False
        self.__pool.cache(self)
cleanup after the socket is closed by the other end
def write_branch_data(self, file):
    """Write branch data to a "Branches" worksheet in the workbook.

    One row per branch in the case, one column per attribute listed in
    BRANCH_ATTRS.
    """
    sheet = self.book.add_sheet("Branches")
    for row, branch in enumerate(self.case.branches):
        for col, attr in enumerate(BRANCH_ATTRS):
            sheet.write(row, col, getattr(branch, attr))
Writes branch data to an Excel spreadsheet.
def _combine_msd_quan(msd, quan): dim1 = msd.shape n_par, _, n_chains = dim1 ll = [] for i in range(n_chains): a1 = msd[:, :, i] a2 = quan[:, :, i] ll.append(np.column_stack([a1, a2])) msdquan = np.dstack(ll) return msdquan
Combine msd and quantiles in chain summary

Parameters
----------
msd : array of shape (num_params, 2, num_chains)
    mean and sd for chains
quan : array of shape (num_params, num_quan, num_chains)
    quantiles for chains

Returns
-------
msdquan : array of shape (num_params, 2 + num_quan, num_chains)
def SInt64(value, min_value=None, max_value=None, encoder=ENC_INT_DEFAULT, fuzzable=True, name=None, full_range=False):
    """Create a signed 64-bit field (convenience wrapper around BitField)."""
    return BitField(
        value,
        64,
        signed=True,
        min_value=min_value,
        max_value=max_value,
        encoder=encoder,
        fuzzable=fuzzable,
        name=name,
        full_range=full_range,
    )
Signed 64-bit field
def get_collection(self, collection_id):
    """Fetch a single collection row by id and return it as a dict.

    :param collection_id: identifier of the collection to fetch.
    """
    # NOTE(review): the SQL statement literal appears to be missing here —
    # `sql` is used on the right-hand side before being given a query
    # string, which will raise NameError at runtime. Restore the original
    # SELECT statement before shipping.
    sql = cursor = self._execute(sql, (collection_id, collection_id))
    sql_result = cursor.fetchone()
    return {
        "collection_id": sql_result[0],
        "type": sql_result[1],
        "name": sql_result[2],
        "path": sql_result[3],
        "doc": sql_result[4],
        "version": sql_result[5],
        "scope": sql_result[6],
        "namedargs": sql_result[7],
        "doc_format": sql_result[8]
    }
    # NOTE(review): unreachable — the dict above is always returned first.
    return sql_result
Get a specific collection
def set_logger(self):
    """Prepare ``self.logger`` from ``self.logger_name`` and
    ``self.logger_level``."""
    logger = logging.getLogger(self.logger_name)
    logger.setLevel(self.logger_level)
    self.logger = logger
Prepare the logger, using self.logger_name and self.logger_level
async def set_pairwise_metadata(wallet_handle: int, their_did: str, metadata: Optional[str]) -> None:
    """Save some data in the wallet for the pairwise associated with a DID.

    :param wallet_handle: wallet handle (created by open_wallet).
    :param their_did: encoded DID.
    :param metadata: some extra information for the pairwise (may be None).
    :return: None; errors surface via do_call.
    """
    logger = logging.getLogger(__name__)
    logger.debug("set_pairwise_metadata: >>> wallet_handle: %r, their_did: %r, metadata: %r",
                 wallet_handle, their_did, metadata)

    if not hasattr(set_pairwise_metadata, "cb"):
        logger.debug("set_pairwise_metadata: Creating callback")
        # The ctypes callback is cached on the function object so it is
        # created only once and kept alive for the library's lifetime.
        set_pairwise_metadata.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))

    c_wallet_handle = c_int32(wallet_handle)
    c_their_did = c_char_p(their_did.encode('utf-8'))
    c_metadata = c_char_p(metadata.encode('utf-8')) if metadata is not None else None

    await do_call('indy_set_pairwise_metadata',
                  c_wallet_handle,
                  c_their_did,
                  c_metadata,
                  set_pairwise_metadata.cb)

    logger.debug("set_pairwise_metadata: <<<")
Save some data in the Wallet for pairwise associated with Did. :param wallet_handle: wallet handler (created by open_wallet). :param their_did: encoded DID :param metadata: some extra information for pairwise :return: Error code
def gen_challenge(self, state):
    """Generate a challenge for the given state.

    The state is decrypted in place with this object's key (it may arrive
    encrypted from the server), and a fresh 32-byte random value is used
    as the challenge key.

    :param state: the (possibly encrypted) state received from the server.
    :return: the generated Challenge.
    """
    state.decrypt(self.key)
    chal = Challenge(state.chunks, self.prime, Random.new().read(32))
    return chal
This function generates a challenge for given state. It selects a random number and sets that as the challenge key. By default, v_max is set to the prime, and the number of chunks to challenge is the number of chunks in the file. (this doesn't guarantee that the whole file will be checked since some chunks could be selected twice and some selected none. :param state: the state to use. it can be encrypted, as it will have just been received from the server