code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def halt(self):
    """Switch the card state to HALT and drop authentication.

    Sends the HALT command frame (act_end, 0) to the card, clears the
    crypto-related bits in register 0x08, and marks the session as no
    longer authenticated.
    """
    buf = []
    buf.append(self.act_end)
    buf.append(0)
    # NOTE(review): crc is computed but never appended to buf before the
    # write -- presumably calculate_crc has side effects or the CRC is
    # handled by card_write; confirm against the reader driver.
    crc = self.calculate_crc(buf)
    self.clear_bitmask(0x08, 0x80)
    self.card_write(self.mode_transrec, buf)
    self.clear_bitmask(0x08, 0x08)
    # Session is no longer authenticated after HALT.
    self.authed = False
Switch state to HALT
def set_description(self, name, action, seqno, value=None, default=False, disable=False):
    """Configure the description for a routemap clause.

    Args:
        name (string): The full name of the routemap.
        action (string): The action to take for this routemap clause.
        seqno (integer): The sequence number for the routemap clause.
        value (string): The description to configure.
        default (bool): Specifies to default the routemap description value.
        disable (bool): Specifies to negate the routemap description.

    Returns:
        True if the operation succeeds otherwise False is returned.
    """
    cmds = ['route-map %s %s %s' % (name, action, seqno)]
    if value is not None:
        # Clear any existing description before applying the new one.
        cmds.append(self.command_builder('description', disable=True))
    cmds.append(self.command_builder('description', value=value,
                                     default=default, disable=disable))
    return self.configure(cmds)
Configures the routemap description Args: name (string): The full name of the routemap. action (string): The action to take for this routemap clause. seqno (integer): The sequence number for the routemap clause. value (string): The value to configure for the routemap description default (bool): Specifies to default the routemap description value disable (bool): Specifies to negate the routemap description Returns: True if the operation succeeds otherwise False is returned
def _require_homogeneous_roots(self, accept_predicate, reject_predicate):
    """Ensure there is no ambiguity in the context per the given predicates.

    If any expanded targets satisfy accept_predicate and none satisfy
    reject_predicate, returns the accepted targets. If none satisfy
    accept_predicate, returns None. Otherwise raises
    IncompatibleActivationsError.
    """
    if len(self.context.target_roots) == 0:
        raise self.NoActivationsError('No target specified.')

    def resolve(targets):
        # Recursively expand plain `Target` aggregates into their
        # dependencies; note the exact-type check -- subclasses of Target
        # are yielded as-is.
        for t in targets:
            if type(t) == Target:
                for r in resolve(t.dependencies):
                    yield r
            else:
                yield t

    expanded_roots = list(resolve(self.context.target_roots))
    accepted = list(filter(accept_predicate, expanded_roots))
    rejected = list(filter(reject_predicate, expanded_roots))
    if len(accepted) == 0:
        return None
    elif len(rejected) == 0:
        return accepted
    else:
        def render_target(target):
            # e.g. "src/foo:bar (a java_library)"
            return '{} (a {})'.format(target.address.reference(), target.type_alias)
        raise self.IncompatibleActivationsError(
            'Mutually incompatible targets specified: {} vs {} '
            '(and {} others)'
            .format(render_target(accepted[0]), render_target(rejected[0]),
                    len(accepted) + len(rejected) - 2))
Ensures that there is no ambiguity in the context according to the given predicates. If any targets in the context satisfy the accept_predicate, and no targets satisfy the reject_predicate, returns the accepted targets. If no targets satisfy the accept_predicate, returns None. Otherwise throws TaskError.
def reinstall_ruby(ruby, runas=None, env=None):
    """Reinstall a ruby implementation.

    ruby
        The version of ruby to reinstall.
    runas
        The user under which to run rvm. If not specified, rvm will be
        run as the user under which Salt is running.

    CLI Example:

    .. code-block:: bash

        salt '*' rvm.reinstall_ruby 1.9.3-p385
    """
    command = ['reinstall', ruby]
    return _rvm(command, runas=runas, env=env)
Reinstall a ruby implementation ruby The version of ruby to reinstall runas The user under which to run rvm. If not specified, then rvm will be run as the user under which Salt is running. CLI Example: .. code-block:: bash salt '*' rvm.reinstall_ruby 1.9.3-p385
def db(self):
    """Return (lazily creating) the key/value store for this execution.

    Selects the backend from tc_playbook_db_type: 'Redis' or
    'TCKeyValueAPI'.

    :raises RuntimeError: on any other db type value.
    """
    if self._db is None:
        if self.tcex.default_args.tc_playbook_db_type == 'Redis':
            # Imported lazily so the redis dependency is only required
            # when this backend is actually selected.
            from .tcex_redis import TcExRedis

            self._db = TcExRedis(
                self.tcex.default_args.tc_playbook_db_path,
                self.tcex.default_args.tc_playbook_db_port,
                self.tcex.default_args.tc_playbook_db_context,
            )
        elif self.tcex.default_args.tc_playbook_db_type == 'TCKeyValueAPI':
            from .tcex_key_value import TcExKeyValue

            self._db = TcExKeyValue(self.tcex)
        else:
            err = u'Invalid DB Type: ({})'.format(self.tcex.default_args.tc_playbook_db_type)
            raise RuntimeError(err)
    return self._db
Return the correct KV store for this execution.
def delMargin(self, name):
    """Delete the margin with the given name.

    Returns True if a margin was deleted and False otherwise.
    """
    for pos, candidate in enumerate(self._margins):
        if candidate.getName() != name:
            continue
        was_visible = candidate.isVisible()
        candidate.clear()
        candidate.deleteLater()
        del self._margins[pos]
        if was_visible:
            # Only repaint when the removed margin was actually shown.
            self.updateViewport()
        return True
    return False
Deletes a margin. Returns True if the margin was deleted and False otherwise.
def table_path(cls, project, instance, table):
    """Return a fully-qualified table resource name string."""
    template = "projects/{project}/instances/{instance}/tables/{table}"
    return google.api_core.path_template.expand(
        template,
        project=project,
        instance=instance,
        table=table,
    )
Return a fully-qualified table string.
def _validate_paths(self, settings, name, value):
    """Apply ``SettingsPostProcessor._validate_path`` to each list element.

    Args:
        settings (dict): Current settings.
        name (str): Setting name.
        value (list): List of paths to patch.

    Raises:
        boussole.exceptions.SettingsInvalidError: Once a path does not exist.

    Returns:
        list: Validated paths.
    """
    validated = []
    for entry in value:
        validated.append(self._validate_path(settings, name, entry))
    return validated
Apply ``SettingsPostProcessor._validate_path`` to each element in list. Args: settings (dict): Current settings. name (str): Setting name. value (list): List of paths to patch. Raises: boussole.exceptions.SettingsInvalidError: Once a path does not exists. Returns: list: Validated paths.
def update_cookies(self, cookies: Optional[LooseCookies]) -> None:
    """Update the request Cookie header, merging with existing cookies."""
    if not cookies:
        return

    c = SimpleCookie()
    if hdrs.COOKIE in self.headers:
        # Fold the already-set header into the jar; it is rebuilt below.
        c.load(self.headers.get(hdrs.COOKIE, ''))
        del self.headers[hdrs.COOKIE]

    if isinstance(cookies, Mapping):
        iter_cookies = cookies.items()
    else:
        iter_cookies = cookies
    for name, value in iter_cookies:
        if isinstance(value, Morsel):
            # Preserve the coded (already-encoded) value of the Morsel.
            mrsl_val = value.get(value.key, Morsel())
            mrsl_val.set(value.key, value.value, value.coded_value)
            c[name] = mrsl_val
        else:
            c[name] = value

    self.headers[hdrs.COOKIE] = c.output(header='', sep=';').strip()
Update request cookies header.
def postman(host, port=587, auth=(None, None), force_tls=False, options=None):
    """Create a Postman object with TLS and Auth middleware.

    TLS is placed before authentication because usually authentication
    happens, and is accepted, only after TLS is enabled.

    :param host: SMTP server host.
    :param port: SMTP server port (defaults to 587).
    :param auth: Tuple of (username, password) to be used to ``login``
        to the server.
    :param force_tls: Whether TLS should be forced.
    :param options: Dictionary of keyword arguments to be used when the
        SMTP class is called. May be None.
    """
    # Bug fix: ``**options`` raised TypeError whenever options was left
    # at its default of None; guard with an empty dict.
    return Postman(
        host=host,
        port=port,
        middlewares=[
            middleware.tls(force=force_tls),
            middleware.auth(*auth),
        ],
        **(options or {})
    )
Creates a Postman object with TLS and Auth middleware. TLS is placed before authentication because usually authentication happens and is accepted only after TLS is enabled. :param auth: Tuple of (username, password) to be used to ``login`` to the server. :param force_tls: Whether TLS should be forced. :param options: Dictionary of keyword arguments to be used when the SMTP class is called.
def close(self):
    """Close resources associated with this request object.

    Explicitly closes all uploaded file handles. The request object can
    also be used in a ``with`` statement, which closes it automatically.

    .. versionadded:: 0.9
    """
    uploads = self.__dict__.get("files")
    for _name, handle in iter_multi_items(uploads or ()):
        handle.close()
Closes associated resources of this request object. This closes all file handles explicitly. You can also use the request object in a with statement which will automatically close it. .. versionadded:: 0.9
def DeregisterFormatter(cls, formatter_class):
    """Deregister a formatter class.

    Formatter classes are identified by their lower-case data type.

    Args:
        formatter_class (type): class of the formatter.

    Raises:
        KeyError: if no formatter class is set for the data type.
    """
    data_type_key = formatter_class.DATA_TYPE.lower()
    if data_type_key not in cls._formatter_classes:
        raise KeyError(
            'Formatter class not set for data type: {0:s}.'.format(
                formatter_class.DATA_TYPE))

    del cls._formatter_classes[data_type_key]
Deregisters a formatter class. The formatter classes are identified based on their lower case data type. Args: formatter_class (type): class of the formatter. Raises: KeyError: if formatter class is not set for the corresponding data type.
def _find_valid_index(self, how):
    """Retrieve the index label of the first or last valid (non-NA) value.

    Parameters
    ----------
    how : {'first', 'last'}
        Which end of the object to search from.

    Returns
    -------
    The index label of the first/last valid value, or None when the
    object is empty or entirely NA.
    """
    assert how in ['first', 'last']

    if len(self) == 0:
        return None
    is_valid = ~self.isna()

    if self.ndim == 2:
        # For 2-D data a row is valid if any of its cells is valid.
        is_valid = is_valid.any(1)

    if how == 'first':
        idxpos = is_valid.values[::].argmax()

    if how == 'last':
        # argmax on the reversed array finds the last True position.
        idxpos = len(self) - 1 - is_valid.values[::-1].argmax()

    chk_notna = is_valid.iat[idxpos]
    idx = self.index[idxpos]

    # argmax returns 0 for all-False input, so re-check the hit.
    if not chk_notna:
        return None
    return idx
Retrieves the index of the first or last valid value, depending on `how`. Parameters ---------- how : {'first', 'last'} Use this parameter to change between the first or last valid index. Returns ------- idx_first_valid : type of index
def define(rest):
    "Define a word"
    word = rest.strip()
    res = util.lookup(word)
    # NOTE: the format string is filled from locals(), so the local names
    # `res` and `lookup` below must match the placeholders exactly.
    fmt = (
        '{lookup.provider} says: {res}' if res
        else "{lookup.provider} does not have a definition for that.")
    return fmt.format(**dict(locals(), lookup=util.lookup))
Define a word
def alter_zero_tip_allowed_states(tree, feature):
    """Alter the allowed-state arrays for zero-distance tips so that they do
    not contradict their zero-distance tip siblings.

    :param tree: ete3.Tree, the tree of interest
    :param feature: str, character for which the allowed states are altered
    :return: void, modifies the allowed-state feature on zero-distance tips
    """
    zero_parent2tips = defaultdict(list)

    allowed_state_feature = get_personalized_feature_name(feature, ALLOWED_STATES)

    # Group zero-branch-length tips that carry a known state by parent.
    for tip in tree:
        if tip.dist == 0:
            state = getattr(tip, feature, None)
            if state is not None and state != '':
                zero_parent2tips[tip.up].append(tip)

    for parent, zero_tips in zero_parent2tips.items():
        # If every allowed state is shared by all the zero tips (max count
        # equals the number of tips) nothing needs fixing.
        counts = None
        for tip in zero_tips:
            if counts is None:
                counts = getattr(tip, allowed_state_feature).copy()
            else:
                counts += getattr(tip, allowed_state_feature)
        if counts.max() == len(zero_tips):
            continue
        # Otherwise give every sibling the union of their allowed states.
        # The same array object is assigned to all tips and mutated in
        # place, so after the loop they all share the full union.
        allowed_states = None
        for tip in zero_tips:
            if allowed_states is None:
                allowed_states = getattr(tip, allowed_state_feature).copy()
            else:
                tip_allowed_states = getattr(tip, allowed_state_feature)
                allowed_states[np.nonzero(tip_allowed_states)] = 1
            tip.add_feature(allowed_state_feature, allowed_states)
Alters the bottom-up likelihood arrays for zero-distance tips to make sure they do not contradict with other zero-distance tip siblings. :param tree: ete3.Tree, the tree of interest :param feature: str, character for which the likelihood is altered :return: void, modifies the get_personalised_feature_name(feature, BU_LH) feature to zero-distance tips.
def bump(self, level='patch', label=None):
    """Bump the version following semantic-versioning rules.

    :param level: which component to bump; 'pre' uses the pre-release path.
    :param label: optional label forwarded to the bump implementation.
    """
    if level == 'pre':
        self._bump_pre(level, label)
    else:
        self._bump(level, label)
Bump version following semantic versioning rules.
def __get_hosts(self, pattern):
    """Find hosts that positively match a particular pattern.

    Negative matches are not taken into account here.
    """
    (name, enumeration_details) = self._enumeration_info(pattern)
    unsorted_hosts = self._hosts_in_unenumerated_pattern(name)
    ordered_hosts = sorted(unsorted_hosts, key=lambda host: host.name)
    return set(self._apply_ranges(pattern, ordered_hosts))
finds hosts that positively match a particular pattern. Does not take into account negative matches.
def _node_participation_settings(self):
    """Return the node_participation_settings record, creating it on first
    access if it does not exist.

    usage:
        node = Node.objects.get(pk=1)
        node.participation_settings
    """
    try:
        return self.node_participation_settings
    except ObjectDoesNotExist:
        # Lazily create and persist the related record.
        node_participation_settings = NodeParticipationSettings(node=self)
        node_participation_settings.save()
        return node_participation_settings
Return node_participation_settings record or create it if it does not exist usage: node = Node.objects.get(pk=1) node.participation_settings
def uploadFile(uploadfunc, fileindex, existing, uf, skip_broken=False):
    """Update a file object so its location is a reference to the Toil file
    store, writing it to the store if necessary.

    :param uploadfunc: callable used to write the file into the store
    :param fileindex: dict mapping original locations to store references
    :param existing: mapping of already-uploaded files (passed through to
        write_file)
    :param uf: the CWL file object (dict) updated in place
    :param skip_broken: if True, silently skip missing local files instead
        of raising WorkflowException
    """
    # Already in the store, or an abstract (content-less) location.
    if uf["location"].startswith("toilfs:") or uf["location"].startswith("_:"):
        return
    # Seen earlier in this run: reuse the recorded store reference.
    if uf["location"] in fileindex:
        uf["location"] = fileindex[uf["location"]]
        return
    if not uf["location"] and uf["path"]:
        uf["location"] = schema_salad.ref_resolver.file_uri(uf["path"])
    # Local file URI whose target is missing on disk.
    if uf["location"].startswith("file://") and not os.path.isfile(uf["location"][7:]):
        if skip_broken:
            return
        else:
            raise cwltool.errors.WorkflowException(
                "File is missing: %s" % uf["location"])
    uf["location"] = write_file(
        uploadfunc, fileindex, existing, uf["location"])
Update a file object so that the location is a reference to the toil file store, writing it to the file store if necessary.
def create_authz_decision_query(self, destination, action,
                                evidence=None, resource=None, subject=None,
                                message_id=0, consent=None, extensions=None,
                                sign=None, sign_alg=None, digest_alg=None,
                                **kwargs):
    """Create an authz decision query.

    :param destination: The IdP endpoint
    :param action: The action you want to perform (has to be at least one)
    :param evidence: Why you should be able to perform the action
    :param resource: The resource you want to perform the action on
    :param subject: Who wants to do the thing
    :param message_id: Message identifier
    :param consent: If the principal gave her consent to this request
    :param extensions: Possible request extensions
    :param sign: Whether the request should be signed or not.
    :return: AuthzDecisionQuery instance
    """
    return self._message(
        AuthzDecisionQuery, destination, message_id, consent, extensions,
        sign, action=action, evidence=evidence, resource=resource,
        subject=subject, sign_alg=sign_alg, digest_alg=digest_alg, **kwargs)
Creates an authz decision query. :param destination: The IdP endpoint :param action: The action you want to perform (has to be at least one) :param evidence: Why you should be able to perform the action :param resource: The resource you want to perform the action on :param subject: Who wants to do the thing :param message_id: Message identifier :param consent: If the principal gave her consent to this request :param extensions: Possible request extensions :param sign: Whether the request should be signed or not. :return: AuthzDecisionQuery instance
def delete_port(self, port):
    """Delete the specified port.

    Returns the backend response when truthy, otherwise True
    (a falsy response is treated as success).
    """
    resolved_id = self._find_port_id(port)
    response = self.network_conn.delete_port(port=resolved_id)
    if response:
        return response
    return True
Deletes the specified port
def identify(self, text, **kwargs):
    """Identify language.

    Identifies the language of the input text.

    :param str text: Input text in UTF-8 format.
    :param dict headers: A `dict` containing the request headers.
    :return: A `DetailedResponse` containing the result, headers and HTTP
        status code.
    :rtype: DetailedResponse
    """
    if text is None:
        raise ValueError('text must be provided')

    headers = {}
    if 'headers' in kwargs:
        headers.update(kwargs.get('headers'))
    # SDK headers override user headers; content-type is always forced.
    headers.update(get_sdk_headers('language_translator', 'V3', 'identify'))
    headers['content-type'] = 'text/plain'

    return self.request(
        method='POST',
        url='/v3/identify',
        headers=headers,
        params={'version': self.version},
        data=text,
        accept_json=True)
Identify language. Identifies the language of the input text. :param str text: Input text in UTF-8 format. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse
def _adjust(a, a_offset, b):
    """Add ``b`` plus one into ``a`` in place, starting at ``a_offset``.

    Bytes are treated with index 0 most significant; the carry propagates
    from the last byte of ``b`` towards index 0 and any final carry out of
    the top byte is discarded.

    a = bytearray
    a_offset = int
    b = bytearray
    """
    last = a_offset + len(b) - 1
    carry = (b[-1] & 0xFF) + (a[last] & 0xFF) + 1
    a[last] = carry & 0xFF
    carry >>= 8
    for i in reversed(range(len(b) - 1)):
        carry += (b[i] & 0xFF) + (a[a_offset + i] & 0xFF)
        a[a_offset + i] = carry & 0xFF
        carry >>= 8
a = bytearray a_offset = int b = bytearray
def check_unassigned(chars, bad_tables=None):
    """Check that `chars` does not contain any unassigned code points as per
    the given list of `bad_tables`.

    Operates on a list of unicode code points provided in `chars`.

    :param chars: the characters to check.
    :param bad_tables: stringprep membership predicates identifying
        unassigned code points; defaults to table A.1.
    :raises ValueError: if any character is in one of the tables.
    """
    if bad_tables is None:
        bad_tables = (
            stringprep.in_table_a1,)

    # Bug fix: the parameter used to be unconditionally overwritten with
    # table A.1, silently ignoring caller-supplied tables.
    violator = check_against_tables(chars, bad_tables)
    if violator is not None:
        raise ValueError("Input contains unassigned code point: "
                         "U+{:04x}".format(ord(violator)))
Check that `chars` does not contain any unassigned code points as per the given list of `bad_tables`. Operates on a list of unicode code points provided in `chars`.
def get(self, resource_manager, identities):
    """Get resources by identities.

    Uses a server-side filter when the resource metadata declares one
    (list or scalar filter type); otherwise filters client-side.
    """
    m = self.resolve(resource_manager.resource_type)
    params = {}
    client_filter = False
    if m.filter_name:
        if m.filter_type == 'list':
            params[m.filter_name] = identities
        elif m.filter_type == 'scalar':
            # A scalar filter can only express a single identity.
            assert len(identities) == 1, "Scalar server side filter"
            params[m.filter_name] = identities[0]
    else:
        client_filter = True
    resources = self.filter(resource_manager, **params)
    if client_filter:
        # The API may return plain id strings or dicts keyed by m.id.
        if all(map(lambda r: isinstance(r, six.string_types), resources)):
            resources = [r for r in resources if r in identities]
        else:
            resources = [r for r in resources if r[m.id] in identities]
    return resources
Get resources by identities
def create_contentkey_authorization_policy_options(access_token, key_delivery_type="2", \
        name="HLS Open Authorization Policy", key_restriction_type="0"):
    """Create Media Service Content Key Authorization Policy Options.

    Args:
        access_token (str): A valid Azure authentication token.
        key_delivery_type (str): A Media Service Content Key Authorization
            Policy Delivery Type.
        name (str): A Media Service Content Key Authorization Policy Name.
        key_restriction_type (str): A Media Service Content Key
            Restriction Type.

    Returns:
        HTTP response. JSON body.
    """
    path = '/ContentKeyAuthorizationPolicyOptions'
    endpoint = ''.join([ams_rest_endpoint, path])
    # NOTE(review): the option "Name" is hard-coded to "policy"; the `name`
    # argument only names the restriction entry -- confirm this is intended.
    body = '{ \
		"Name":"policy",\
		"KeyDeliveryType":"' + key_delivery_type + '", \
		"KeyDeliveryConfiguration":"", \
			"Restrictions":[{ \
			"Name":"' + name + '", \
			"KeyRestrictionType":"' + key_restriction_type + '", \
			"Requirements":null \
		}] \
	}'
    return do_ams_post(endpoint, path, body, access_token, "json_only")
Create Media Service Content Key Authorization Policy Options. Args: access_token (str): A valid Azure authentication token. key_delivery_type (str): A Media Service Content Key Authorization Policy Delivery Type. name (str): A Media Service Content Key Authorization Policy Name. key_restriction_type (str): A Media Service Content Key Restriction Type. Returns: HTTP response. JSON body.
def api_send_mail(request, key=None, hproPk=None):
    """Send an email. POST parameters are used.

    Expected POST data: sender (optional), dests (list), subject, message,
    html_message (optional; the string 'false' disables HTML),
    response_id (optional; used to build a deduplication key).

    :param request: the incoming request.
    :param key: API key, validated via check_api_key.
    :param hproPk: project identifier (or None when stand-alone).
    :return: HttpResponse with an empty JSON object, or
        HttpResponseForbidden when the API key check fails.
    """
    if not check_api_key(request, key, hproPk):
        # Bug fix: the class itself was returned instead of an instance
        # (missing call parentheses), so no real 403 response was produced.
        return HttpResponseForbidden()

    sender = request.POST.get('sender', settings.MAIL_SENDER)
    dests = request.POST.getlist('dests')
    subject = request.POST['subject']
    message = request.POST['message']

    html_message = request.POST.get('html_message')
    if html_message and html_message.lower() == 'false':
        html_message = False

    if 'response_id' in request.POST:
        key = hproPk + ':' + request.POST['response_id']
    else:
        key = None

    generic_send_mail(sender, dests, subject, message, key,
                      'PlugIt API (%s)' % (hproPk or 'StandAlone',),
                      html_message)

    return HttpResponse(json.dumps({}), content_type="application/json")
Send a email. Posts parameters are used
def ffmpeg_works():
    """Try to encode a tiny image sequence with ffmpeg to check it works."""
    dummy_frames = np.zeros((2, 32, 32, 3), dtype=np.uint8)
    try:
        _encode_gif(dummy_frames, 2)
    except (IOError, OSError):
        return False
    return True
Tries to encode images with ffmpeg to check if it works.
def _get_first_urn(self, urn):
    """Provisional route for GetFirstUrn request.

    :param urn: URN to filter the resource
    :return: (GetFirstUrn XML response body, 200, headers) tuple
    """
    urn = URN(urn)
    subreference = None
    # Strip any passage component to obtain the bare text identifier.
    textId = urn.upTo(URN.NO_PASSAGE)
    if urn.reference is not None:
        subreference = str(urn.reference)
    firstId = self.resolver.getTextualNode(textId=textId, subreference=subreference).firstId
    r = render_template(
        "cts/GetFirstUrn.xml",
        firstId=firstId,
        full_urn=textId,
        request_urn=str(urn)
    )
    return r, 200, {"content-type": "application/xml"}
Provisional route for GetFirstUrn request :param urn: URN to filter the resource :param inv: Inventory Identifier :return: GetFirstUrn response
def get_variable_str(self):
    """Return the variable value, or 'var_name=value' when the name is set.

    Values whose string form is empty render as "''"; values longer than
    __max_str_length_displayed__ are omitted entirely.

    :return: display string for the variable.
    """
    prefix = '' if self.var_name is None else self.var_name

    suffix = str(self.var_value)
    if not suffix:
        suffix = "''"
    elif len(suffix) > self.__max_str_length_displayed__:
        # Large representations are not printed at all.
        suffix = ''

    if prefix and suffix:
        return '%s=%s' % (prefix, suffix)
    return prefix + suffix
Utility method to get the variable value or 'var_name=value' if name is not None. Note that values with large string representations will not get printed :return:
def on_expired(self):
    """Handle device authentication expiry by waking all waiters."""
    print('Authentication expired')
    condition = self.is_authenticating
    condition.acquire()
    condition.notify_all()
    condition.release()
Device authentication expired.
def block_to_svg(block=None):
    """Return an SVG rendering of the block.

    :param block: the block to render; defaults to the working block.
    :raises PyrtlError: if graphviz is not installed.
    """
    block = working_block(block)
    try:
        from graphviz import Source
        # Bug fix: the resolved `block` is now passed through; previously
        # block_to_graphviz_string() was called with no argument, so the
        # `block` parameter was silently ignored.
        return Source(block_to_graphviz_string(block))._repr_svg_()
    except ImportError:
        raise PyrtlError('need graphviz installed (try "pip install graphviz")')
Return an SVG for the block.
def object_absent(container, name, profile):
    """Ensure an object is absent from a container.

    :param container: Container name
    :type container: ``str``

    :param name: Object name in cloud
    :type name: ``str``

    :param profile: The profile key
    :type profile: ``str``
    """
    found = __salt__['libcloud_storage.get_container_object'](container, name, profile)
    if found is None:
        return state_result(True, "Object already absent", name, {})
    deleted = __salt__['libcloud_storage.delete_object'](container, name, profile)
    return state_result(deleted, "Deleted object", name, {})
Ensures a object is absent. :param container: Container name :type container: ``str`` :param name: Object name in cloud :type name: ``str`` :param profile: The profile key :type profile: ``str``
def token(cls: Type[CSVType], time: int) -> CSVType:
    """Return a CSV instance stamped with the given time.

    :param time: Timestamp
    :return: new instance of ``cls`` with ``time`` set to the
        stringified value.
    """
    instance = cls()
    instance.time = str(time)
    return instance
Return CSV instance from time :param time: Timestamp :return:
def mask(self, masklist):
    """Return a new DataTable keeping only the rows where `masklist` is
    True (or equivalent).

    `masklist` is an array of Bools or equivalent, one entry per row.
    """
    if not hasattr(masklist, '__len__'):
        # Materialize generators so the length check below works.
        masklist = tuple(masklist)
    if len(masklist) != len(self):
        raise Exception("Masklist length (%s) must match length "
                        "of DataTable (%s)" % (len(masklist), len(self)))

    filtered = DataTable()
    for column in self.fields:
        filtered[column] = list(compress(self[column], masklist))
    return filtered
`masklist` is an array of Bools or equivalent. This returns a new DataTable using only the rows that were True (or equivalent) in the mask.
def register_rpc(name=None):
    """Decorator. Allows registering a function for RPC.

    * http://uwsgi.readthedocs.io/en/latest/RPC.html

    Example:

    .. code-block:: python

        @register_rpc()
        def expose_me():
            do()

    :param str|unicode name: RPC function name to associate with the
        decorated function. Defaults to the function's own name.

    :rtype: callable
    """
    def wrapper(func):
        alias = name if name else func.__name__
        uwsgi.register_rpc(alias, func)
        _LOG.debug("Registering '%s' for RPC under '%s' alias ...", func.__name__, alias)
        return func
    return wrapper
Decorator. Allows registering a function for RPC. * http://uwsgi.readthedocs.io/en/latest/RPC.html Example: .. code-block:: python @register_rpc() def expose_me(): do() :param str|unicode name: RPC function name to associate with decorated function. :rtype: callable
def index(self, values=None, only_index=None):
    """Index all values stored in the field, or only given ones if any.

    :param values: iterable of values to index; defaults to the values
        currently stored in the field (via proxy_get).
    :param only_index: restrict indexing to this index (class or instance).
    """
    assert self.indexable, "Field not indexable"
    assert not only_index or self.has_index(only_index), "Invalid index"
    if only_index:
        # Normalize to an index class for the isinstance check below.
        only_index = only_index if isclass(only_index) else only_index.__class__

    if values is None:
        values = self.proxy_get()

    for value in values:
        if value is not None:
            # Uniqueness only needs to be verified once per value, by the
            # first index able to handle it.
            needs_to_check_uniqueness = bool(self.unique)
            for index in self._indexes:
                if only_index and not isinstance(index, only_index):
                    continue
                index.add(value, check_uniqueness=needs_to_check_uniqueness and index.handle_uniqueness)
                if needs_to_check_uniqueness and index.handle_uniqueness:
                    # Done for this value; later indexes skip the check.
                    needs_to_check_uniqueness = False
Index all values stored in the field, or only given ones if any.
def set_to_cache(self):
    """Add the widget's attributes to Django's cache.

    The QuerySet is split into none() + query so the result set itself
    is never pickled.
    """
    queryset = self.get_queryset()
    payload = {
        'queryset': [queryset.none(), queryset.query],
        'cls': self.__class__,
        'search_fields': tuple(self.search_fields),
        'max_results': int(self.max_results),
        'url': str(self.get_url()),
        'dependent_fields': dict(self.dependent_fields),
    }
    cache.set(self._get_cache_key(), payload)
Add widget's attributes to Django's cache. Split the QuerySet, to not pickle the result set.
def get_app_name(mod_name):
    """Retrieve the application name from a models.py module path.

    >>> get_app_name('testapp.models.foo')
    'testapp'

    'testapp' instead of 'some.testapp' for compatibility:

    >>> get_app_name('some.testapp.models.foo')
    'testapp'
    >>> get_app_name('some.models.testapp.models.foo')
    'testapp'
    >>> get_app_name('testapp.foo')
    'testapp'
    >>> get_app_name('some.testapp.foo')
    'testapp'
    """
    parts = list(reversed(mod_name.split('.')))
    try:
        try:
            # The segment right after the last 'models' segment.
            return parts[parts.index(MODELS_MODULE_NAME) + 1]
        except ValueError:
            # No 'models' segment: take the second-to-last segment.
            return parts[1]
    except IndexError:
        # Single-segment path: the module name itself.
        return mod_name
Retrieve application name from models.py module path >>> get_app_name('testapp.models.foo') 'testapp' 'testapp' instead of 'some.testapp' for compatibility: >>> get_app_name('some.testapp.models.foo') 'testapp' >>> get_app_name('some.models.testapp.models.foo') 'testapp' >>> get_app_name('testapp.foo') 'testapp' >>> get_app_name('some.testapp.foo') 'testapp'
def remove_files(self, *filenames):
    """Remove all files matching `filenames` anywhere under workdir."""
    filenames = list_strings(filenames)

    for dirpath, dirnames, fnames in os.walk(self.workdir):
        for fname in fnames:
            if fname not in filenames:
                continue
            os.remove(os.path.join(dirpath, fname))
Remove all the files listed in filenames.
def add_reading(self, reading):
    """Add an IOTileReading, marking stream breaks and UTC anchors."""
    is_break = reading.stream in self._break_streams

    utc = None
    if reading.stream in self._anchor_streams:
        # Anchor streams carry a converter from reading to UTC time.
        utc = self._anchor_streams[reading.stream](reading)

    self.add_point(reading.reading_id, reading.raw_time, utc, is_break=is_break)
Add an IOTileReading.
def _upload(auth_http, project_id, bucket_name, file_path, object_name, acl):
    """Uploads a file to Google Cloud Storage.

    Args:
        auth_http: An authorized httplib2.Http instance.
        project_id: The project to upload to.
        bucket_name: The bucket to upload to.
        file_path: Path to the file to upload.
        object_name: The name within the bucket to upload to.
        acl: The ACL to assign to the uploaded file.

    Returns:
        The response body on success.

    Raises:
        Error: if the server cannot be found or returns a >=300 status.
    """
    with open(file_path, 'rb') as f:
        data = f.read()
    content_type, content_encoding = mimetypes.guess_type(file_path)

    headers = {
        'x-goog-project-id': project_id,
        'x-goog-api-version': API_VERSION,
        'x-goog-acl': acl,
        'Content-Length': '%d' % len(data)
    }
    if content_type:
        headers['Content-Type'] = content_type
    # Bug fix: this used to test content_type again, which could set a
    # Content-Encoding header of None (and skip real encodings).
    if content_encoding:
        headers['Content-Encoding'] = content_encoding

    try:
        response, content = auth_http.request(
            'http://%s.storage.googleapis.com/%s' % (bucket_name, object_name),
            method='PUT',
            headers=headers,
            body=data)
    except httplib2.ServerNotFoundError as se:
        # Modernized from the Python-2-only "except X, e" syntax.
        raise Error(404, 'Server not found.')

    if response.status >= 300:
        raise Error(response.status, response.reason)

    return content
Uploads a file to Google Cloud Storage. Args: auth_http: An authorized httplib2.Http instance. project_id: The project to upload to. bucket_name: The bucket to upload to. file_path: Path to the file to upload. object_name: The name within the bucket to upload to. acl: The ACL to assign to the uploaded file.
def show_letter(
    self,
    s,
    text_colour=[255, 255, 255],
    back_colour=[0, 0, 0]
):
    """Display a single text character on the LED matrix using the
    specified colours.

    :param s: a one-character string.
    :param text_colour: RGB triple used for lit glyph pixels.
    :param back_colour: RGB triple used for everything else.
    :raises ValueError: if `s` is longer than one character.
    """
    if len(s) > 1:
        raise ValueError('Only one character may be passed into this method')
    # Temporarily shift the rotation by -90 degrees (restored afterwards).
    saved_rotation = self._rotation
    self._rotation -= 90
    if self._rotation < 0:
        self._rotation = 270
    blank = [None, None, None]
    # Pad the 8-pixel glyph column to a full 32-entry strip.
    strip = [blank] * 8 + list(self._get_char_pixels(s)) + [blank] * 16
    rendered = []
    for pixel in strip:
        rendered.append(text_colour if pixel == [255, 255, 255] else back_colour)
    self.set_pixels(rendered)
    self._rotation = saved_rotation
Displays a single text character on the LED matrix using the specified colours
def request_password_reset(self, user_id):
    """Request a password reset for the specified user."""
    endpoint = "/user/%s/password/request_reset" % (user_id)
    content = self._fetch(endpoint, method="POST")
    return FastlyUser(self, content)
Requests a password reset for the specified user.
def as_knock(self, created=False):
    """Return a dictionary with the knock data built from _knocker_data."""
    if not self.should_knock(created):
        return {}
    return {
        field: data
        for field, data in self._retrieve_data(None, self._knocker_data)
    }
Returns a dictionary with the knock data built from _knocker_data
def as_dictionary(self):
    """Return the parameter as a dictionary.

    bytes32 values have their 0x prefix removed.

    :return: dict
    """
    if self.type == 'bytes32':
        value = remove_0x_prefix(self.value)
    else:
        value = self.value
    return {
        "name": self.name,
        "type": self.type,
        "value": value
    }
Return the parameter as a dictionary. :return: dict
def create_blueprint(state):
    """Create blueprint serving JSON schemas.

    :param state: :class:`invenio_jsonschemas.ext.InvenioJSONSchemasState`
        instance used to retrieve the schemas.
    """
    blueprint = Blueprint(
        'invenio_jsonschemas',
        __name__,
    )

    @blueprint.route('/<path:schema_path>')
    def get_schema(schema_path):
        # Serve a schema file, optionally resolving JSON references.
        try:
            schema_dir = state.get_schema_dir(schema_path)
        except JSONSchemaNotFound:
            abort(404)
        # Query args override application defaults; requesting `resolved`
        # implies replacing $refs as well.
        resolved = request.args.get(
            'resolved',
            current_app.config.get('JSONSCHEMAS_RESOLVE_SCHEMA'),
            type=int
        )
        with_refs = request.args.get(
            'refs',
            current_app.config.get('JSONSCHEMAS_REPLACE_REFS'),
            type=int
        ) or resolved
        if resolved or with_refs:
            schema = state.get_schema(
                schema_path,
                with_refs=with_refs,
                resolved=resolved
            )
            return jsonify(schema)
        else:
            # No processing requested: serve the raw file from disk.
            return send_from_directory(schema_dir, schema_path)

    return blueprint
Create blueprint serving JSON schemas. :param state: :class:`invenio_jsonschemas.ext.InvenioJSONSchemasState` instance used to retrieve the schemas.
def post_multipart(self, url, params, files):
    """Generate and issue a multipart request for data files.

    :param url: a string, the url you are requesting
    :param params: a dict, a key-value of all the parameters
    :param files: a dict, matching the form '{name: file descriptor}'
    :returns: a dict parsed from the JSON response
    """
    response = requests.post(
        url,
        data=params,
        params=params,
        files=files,
        headers=self.headers,
        allow_redirects=False,
        auth=self.oauth
    )
    return self.json_parse(response)
Generates and issues a multipart request for data files :param url: a string, the url you are requesting :param params: a dict, a key-value of all the parameters :param files: a dict, matching the form '{name: file descriptor}' :returns: a dict parsed from the JSON response
def get_vm_extension(access_token, subscription_id, resource_group, vm_name,
                     extension_name):
    """Get details about a VM extension.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.
        vm_name (str): Name of the virtual machine.
        extension_name (str): VM extension name.

    Returns:
        HTTP response. JSON body of VM extension properties.
    """
    endpoint = ''.join([
        get_rm_endpoint(),
        '/subscriptions/', subscription_id,
        '/resourceGroups/', resource_group,
        '/providers/Microsoft.Compute/virtualMachines/', vm_name,
        '/extensions/', extension_name,
        '?api-version=', COMP_API])
    return do_get(endpoint, access_token)
Get details about a VM extension. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vm_name (str): Name of the virtual machine. extension_name (str): VM extension name. Returns: HTTP response. JSON body of VM extension properties.
def cached(func):
    """Cache return values for multiple executions of func + args.

    For example::

        @cached
        def unit_get(attribute):
            pass

        unit_get('test')

    will cache the result of unit_get + 'test' for future calls.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        global cache
        # default=str makes the function object and odd values serializable.
        key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
        if key not in cache:
            cache[key] = func(*args, **kwargs)
        return cache[key]
    wrapper._wrapped = func
    return wrapper
Cache return values for multiple executions of func + args For example:: @cached def unit_get(attribute): pass unit_get('test') will cache the result of unit_get + 'test' for future calls.
def instantiate_for_read_and_search(handle_server_url, reverselookup_username,
                                    reverselookup_password, **config):
    """Initialize client with read access and with search function.

    :param handle_server_url: The URL of the Handle Server. May be None
        (then, the default 'https://hdl.handle.net' is used).
    :param reverselookup_username: The username to authenticate at the
        reverse lookup servlet.
    :param reverselookup_password: The password to authenticate at the
        reverse lookup servlet.
    :param \\**config: More key-value pairs may be passed that will be
        passed on to the constructor as config. Config options from the
        credentials object are overwritten by this.
    :raises TypeError: if neither handle_server_url nor a
        'reverselookup_baseuri' config entry is provided.
    :return: An instance of the client.
    """
    if handle_server_url is None and 'reverselookup_baseuri' not in config.keys():
        raise TypeError('You must specify either "handle_server_url" or "reverselookup_baseuri".' + \
                        ' Searching not possible without the URL of a search servlet.')
    return EUDATHandleClient(
        handle_server_url,
        reverselookup_username=reverselookup_username,
        reverselookup_password=reverselookup_password,
        **config
    )
Initialize client with read access and with search function. :param handle_server_url: The URL of the Handle Server. May be None (then, the default 'https://hdl.handle.net' is used). :param reverselookup_username: The username to authenticate at the reverse lookup servlet. :param reverselookup_password: The password to authenticate at the reverse lookup servlet. :param \**config: More key-value pairs may be passed that will be passed on to the constructor as config. Config options from the credentials object are overwritten by this. :return: An instance of the client.
def set(cls, **kwargs):
    """Set configuration values for the Scout agent.

    Values set here will not override values set in ENV.
    """
    global SCOUT_PYTHON_VALUES
    SCOUT_PYTHON_VALUES.update(kwargs)
Sets a configuration value for the Scout agent. Values set here will not override values set in ENV.
def post(self, path, payload=None, headers=None):
    """HTTP POST operation.

    :param path: URI Path
    :param payload: HTTP Body
    :param headers: HTTP Headers
    :raises ApiError: Raises if the remote server encountered an error.
    :raises ApiConnectionError: Raises if there was a connectivity issue.
    :return: Response
    """
    method = 'post'
    return self._request(method, path, payload, headers)
HTTP POST operation. :param path: URI Path :param payload: HTTP Body :param headers: HTTP Headers :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :return: Response
def unicode_props(self, props, value, in_group=False, negate=False):
    """Insert Unicode properties.

    Unicode properties are very forgiving: case doesn't matter and
    `[ -_]` will be stripped out.
    """
    category = None
    # A leading `^` on the property name is an alternate negation syntax.
    if props.startswith("^"):
        negate = not negate
    if props.startswith("^"):
        props = props[1:]
    if value:
        if _uniprops.is_enum(props):
            # `props` names an enum; the requested property is its value.
            category = props
            props = value
        elif value in ('y', 'yes', 't', 'true'):
            category = 'binary'
        elif value in ('n', 'no', 'f', 'false'):
            # Negative binary values flip the negation.
            category = 'binary'
            negate = not negate
        else:
            raise ValueError('Invalid Unicode property!')

    v = _uniprops.get_unicode_property(("^" if negate else "") + props, category, self.is_bytes)
    if not in_group:
        if not v:
            # Empty result: build a match-nothing class (negated full range).
            v = '^%s' % ('\x00-\xff' if self.is_bytes else _uniprops.UNICODE_RANGE)
        v = "[%s]" % v
    properties = [v]

    return properties
Insert Unicode properties. Unicode properties are very forgiving. Case doesn't matter and `[ -_]` will be stripped out.
def secret_file(filename):
    """Check the permissions of things which really should be secret files.

    :raises aomi.exceptions.AomiFile: if the path is not a regular file or
        symlink, or (outside Windows) if it is readable/writable by
        group or other.
    """
    filestat = os.stat(abspath(filename))
    if stat.S_ISREG(filestat.st_mode) == 0 and \
       stat.S_ISLNK(filestat.st_mode) == 0:
        e_msg = "Secret file %s must be a real file or symlink" % filename
        raise aomi.exceptions.AomiFile(e_msg)
    # POSIX-style mode bits are meaningless on Windows, so skip there.
    if platform.system() != "Windows":
        # World-readable, world-writable or group-writable is too loose.
        if filestat.st_mode & stat.S_IROTH or \
           filestat.st_mode & stat.S_IWOTH or \
           filestat.st_mode & stat.S_IWGRP:
            e_msg = "Secret file %s has too loose permissions" % filename
            raise aomi.exceptions.AomiFile(e_msg)
Will check the permissions of things which really should be secret files
def asfreq(self, freq, method=None, how=None, normalize=False,
           fill_value=None):
    """Convert TimeSeries to specified frequency.

    Optionally provide a filling method to pad/backfill missing values.
    See :func:`pandas.core.resample.asfreq` for full parameter semantics.

    :param freq: DateOffset object or frequency string.
    :param method: {'backfill'/'bfill', 'pad'/'ffill'}, fill method for
        holes in the reindexed data.
    :param how: {'start', 'end'}; for PeriodIndex only.
    :param normalize: whether to reset the output index to midnight.
    :param fill_value: value used for missing values during upsampling.
    :return: the data conformed to the new index (same type as caller).
    """
    # Local import keeps pandas.core.resample loading deferred.
    from pandas.core.resample import asfreq as _asfreq
    return _asfreq(self, freq, method=method, how=how,
                   normalize=normalize, fill_value=fill_value)
Convert TimeSeries to specified frequency. Optionally provide filling method to pad/backfill missing values. Returns the original data conformed to a new index with the specified frequency. ``resample`` is more appropriate if an operation, such as summarization, is necessary to represent the data at the new frequency. Parameters ---------- freq : DateOffset object, or string method : {'backfill'/'bfill', 'pad'/'ffill'}, default None Method to use for filling holes in reindexed Series (note this does not fill NaNs that already were present): * 'pad' / 'ffill': propagate last valid observation forward to next valid * 'backfill' / 'bfill': use NEXT valid observation to fill how : {'start', 'end'}, default end For PeriodIndex only, see PeriodIndex.asfreq normalize : bool, default False Whether to reset output index to midnight fill_value : scalar, optional Value to use for missing values, applied during upsampling (note this does not fill NaNs that already were present). .. versionadded:: 0.20.0 Returns ------- converted : same type as caller See Also -------- reindex Notes ----- To learn more about the frequency strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. Examples -------- Start by creating a series with 4 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=4, freq='T') >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index) >>> df = pd.DataFrame({'s':series}) >>> df s 2000-01-01 00:00:00 0.0 2000-01-01 00:01:00 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:03:00 3.0 Upsample the series into 30 second bins. >>> df.asfreq(freq='30S') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 NaN 2000-01-01 00:03:00 3.0 Upsample again, providing a ``fill value``. 
>>> df.asfreq(freq='30S', fill_value=9.0) s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 9.0 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 9.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 9.0 2000-01-01 00:03:00 3.0 Upsample again, providing a ``method``. >>> df.asfreq(freq='30S', method='bfill') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 2.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 3.0 2000-01-01 00:03:00 3.0
def preprocess_files(self, prefix): if prefix is None: return files = ("bin/yang2dsdl", "man/man1/yang2dsdl.1", "pyang/plugins/jsonxsl.py") regex = re.compile("^(.*)/usr/local(.*)$") for f in files: inf = open(f) cnt = inf.readlines() inf.close() ouf = open(f,"w") for line in cnt: mo = regex.search(line) if mo is None: ouf.write(line) else: ouf.write(mo.group(1) + prefix + mo.group(2) + "\n") ouf.close()
Change the installation prefix where necessary.
def convert_vec2_to_vec4(scale, data): it = iter(data) while True: yield next(it) * scale yield next(it) * scale yield 0.0 yield 1.0
transforms an array of 2d coords into 4d
def get_tree_collection_strings(self, scale=1, guide_tree=None):
    """Return tree_collection input strings scraped from the selected records.

    NOTE(review): *scale* and *guide_tree* are currently unused here —
    confirm whether they are consumed by an overriding implementation.
    """
    selected = [self.collection[ix] for ix in self.indices]
    return TreeCollectionTaskInterface().scrape_args(selected)
Get input strings for tree_collection. tree_collection needs distvar, genome_map and labels; these are returned in that order.
def set_all_xlims(self, xlim, dx, xscale, fontsize=None):
    """Set x-axis limits, tick spacing, and scale for the whole figure.

    Args:
        xlim: len-2 sequence with the axis limits.
        dx: Increment between ticks.
        xscale: Axis scale, ``'log'`` or ``'lin'``.
        fontsize: Optional tick-label font size.
    """
    # Delegate to the shared helper with the axis name fixed to 'x'.
    self._set_all_lims('x', xlim, dx, xscale, fontsize)
Set limits and ticks for x axis for whole figure. This will set x axis limits and tick marks for the entire figure. It can be overridden in the SinglePlot class. Args: xlim (len-2 list of floats): The limits for the axis. dx (float): Amount to increment by between the limits. xscale (str): Scale of the axis. Either `log` or `lin`. fontsize (int, optional): Set fontsize for x axis tick marks. Default is None.
def set_account_password(self, account, raw_password):
    """Store a new raw password for the given account in the LDAP database."""
    ldap_user = self._get_account(account.username)
    save(changeset(ldap_user, {'password': raw_password}),
         database=self._database)
Set the account's password in the directory database.
def sample(self, count, return_index=False):
    """Return *count* points sampled on the surface of the mesh.

    When *return_index* is True, also return the index of the face each
    sample was taken from.
    """
    points, face_index = sample.sample_surface(self, count)
    if return_index:
        return points, face_index
    return points
Return random samples distributed normally across the surface of the mesh Parameters --------- count : int Number of points to sample return_index : bool If True will also return the index of which face each sample was taken from. Returns --------- samples : (count, 3) float Points on surface of mesh face_index : (count, ) int Index of self.faces
def delete_where_unique(cls, ip, object_id, location):
    """Delete the record matching ip/object_id/location.

    Returns True when a record was found and deleted, None otherwise.
    """
    record = cls.where_unique(ip, object_id, location)
    if record is None:
        return None
    record.delete()
    return True
Delete the record matching the given ip, object id and location. Returns True on success, or None if no matching record exists.
def get_enum_key(key, choices):
    """Return the choice equal to *key*, or the unique choice it prefixes.

    Returns None when no choice matches or the prefix is ambiguous.
    """
    if key in choices:
        return key
    candidates = [c for c in choices if c.startswith(key)]
    return candidates[0] if len(candidates) == 1 else None
Get an enum by prefix or equality
def get_Tuple_params(tpl):
    """Return the parameters of a ``typing.Tuple`` across Python versions.

    Omits a trailing Ellipsis argument if present (use is_Tuple_ellipsis
    to detect it).  Returns None when no parameters can be extracted.
    """
    # Pre-3.5.3 typing stored params on __tuple_params__.
    try:
        return tpl.__tuple_params__
    except AttributeError:
        pass
    try:
        args = tpl.__args__
    except AttributeError:
        return None
    if args is None:
        return None
    if args[0] == ():
        # Tuple[()] — the empty tuple type.
        return ()
    if args[-1] is Ellipsis:
        return args[:-1] if len(args) > 1 else None
    return args
Python version independent function to obtain the parameters of a typing.Tuple object. Omits the ellipsis argument if present. Use is_Tuple_ellipsis for that. Tested with CPython 2.7, 3.5, 3.6 and Jython 2.7.1.
def has_required_params(self):
    """Check that all parameters required for a tool launch are set.

    Returns the first falsy attribute (or the last one when all are
    truthy), so callers should test the result's truthiness.
    """
    return (self.consumer_key and self.consumer_secret
            and self.resource_link_id and self.launch_url)
Check if required parameters for a tool launch are set.
def render(self, data, accepted_media_type=None, renderer_context=None):
    """Render *data* as JSONP: JSON wrapped in a callback invocation.

    Clients may choose the callback name via a URL query parameter,
    e.g. ``?callback=exampleCallbackName``.
    """
    context = renderer_context if renderer_context is not None else {}
    callback = self.get_callback(context)
    payload = super(JSONPRenderer, self).render(
        data, accepted_media_type, context)
    return b''.join([callback.encode(self.charset), b'(', payload, b');'])
Renders into jsonp, wrapping the json output in a callback function. Clients may set the callback function name using a query parameter on the URL, for example: ?callback=exampleCallbackName
def secret_write(backend, entry):
    """Write a secret given as ``PATH=VALUE``.

    A VALUE starting with ``@`` is treated as a file path whose contents
    become the secret value.
    """
    # Split on the first '=' only, so values containing '=' survive intact.
    path, value = entry.split('=', 1)
    if value.startswith('@'):
        with open(value[1:]) as vfile:
            value = vfile.read()
    click.echo(click.style('%s - Writing secret' % get_datetime(), fg='green'))
    check_and_print(
        DKCloudCommandRunner.secret_write(backend.dki, path, value))
Write a secret
def parse_subargs(module, parser, method, opts):
    """Attach module-specific options to *parser* and parse *opts*.

    Args:
        module: Module providing a ``cli_args(parser)`` hook.
        parser: argparse parser to extend.
        method: Method name (e.g. 'morris', 'sobol').  NOTE(review):
            currently unused here — the module defines its own args.
        opts: List of argument strings to parse.

    Returns:
        The parsed argparse namespace.
    """
    module.cli_args(parser)
    return parser.parse_args(opts)
Attach argument parser for action specific options. Arguments --------- module : module name of module to extract action from parser : argparser argparser object to attach additional arguments to method : str name of method (morris, sobol, etc). Must match one of the available submodules opts : list A list of argument options to parse Returns --------- subargs : argparser namespace object
def schedule_deleted(sender, instance, **kwargs):
    """Queue the celery task that deactivates the deleted schedule.

    Args:
        sender: The model class (always Schedule).
        instance: The Schedule instance being deleted.
    """
    from contentstore.tasks import deactivate_schedule
    schedule_id = str(instance.scheduler_schedule_id)
    deactivate_schedule.delay(schedule_id)
Fires off the celery task to ensure that this schedule is deactivated Arguments: sender {class} -- The model class, always Schedule instance {Schedule} -- The instance of the schedule that we want to deactivate
def delete_tag_from_job(user, job_id, tag_id):
    """Remove the association between a tag and a job.

    Verifies that the job exists and that *user* belongs to the job's
    team before deleting the row from the jobs/tags join table.

    Returns:
        An empty HTTP 204 response on success.

    Raises:
        dci_exc.Unauthorized: If the user is not in the job's team.
    """
    _JJT = models.JOIN_JOBS_TAGS
    # Raises if the job does not exist; also supplies team_id for the check.
    job = v1_utils.verify_existence_and_get(job_id, _TABLE)
    if not user.is_in_team(job['team_id']):
        raise dci_exc.Unauthorized()
    # Ensure the tag itself exists before touching the join table.
    v1_utils.verify_existence_and_get(tag_id, models.TAGS)
    query = _JJT.delete().where(sql.and_(_JJT.c.tag_id == tag_id,
                                         _JJT.c.job_id == job_id))
    try:
        flask.g.db_conn.execute(query)
    except sa_exc.IntegrityError:
        # NOTE(review): raising a *creation* conflict from a delete looks
        # odd — confirm this error mapping is intentional.
        raise dci_exc.DCICreationConflict('tag', 'tag_id')
    return flask.Response(None, 204, content_type='application/json')
Delete a tag from a job.
def load(self, **kwargs):
    """Load the resource, working around a TMOS 11.6.0 Final quirk.

    On 11.6.0 non-existing objects would load as True, so a dedicated
    code path is used for that exact version.
    """
    if LooseVersion(self.tmos_ver) == LooseVersion('11.6.0'):
        return self._load_11_6(**kwargs)
    return super(Rule, self)._load(**kwargs)
Custom load method to address issue in 11.6.0 Final, where non existing objects would be True.
def complete_media(self, text, line, begidx, endidx):
    """Provide tab completion for the media command."""
    arg_choices = {
        'actor': query_actors,
        'director': TabCompleteExample.static_list_directors,
        'movie_file': (self.path_complete,),
    }
    completer = argparse_completer.AutoCompleter(
        TabCompleteExample.media_parser, self, arg_choices=arg_choices)
    tokens, _ = self.tokens_for_completion(line, begidx, endidx)
    return completer.complete_command(tokens, text, line, begidx, endidx)
Adds tab completion to media
def match(Class, path, pattern, flags=re.I, sortkey=None, ext=None):
    """Return Class instances for files under *path* matching *pattern*.

    Basenames starting with '~' are skipped; results are sorted by
    *sortkey*.
    """
    matched = []
    for fn in rglob(path, "*%s" % (ext or '')):
        base = os.path.basename(fn)
        if re.search(pattern, base, flags=flags) is not None and base[0] != '~':
            matched.append(Class(fn=fn))
    return sorted(matched, key=sortkey)
for a given path and regexp pattern, return the files that match
def chunked(iterable, n):
    """Yield successive n-length tuples from *iterable*.

    The final tuple is shorter than n when the input length is not a
    multiple of n.
    """
    it = iter(iterable)
    # iter() with a sentinel: stop as soon as islice yields an empty tuple.
    for piece in iter(lambda: tuple(islice(it, n)), ()):
        yield piece
Yields chunks of length n from iterable. If len(iterable) % n != 0, then the last chunk will have length less than n. Example: >>> list(chunked([1, 2, 3, 4, 5], 2)) [(1, 2), (3, 4), (5,)]
def get_datanode_fp_meta(fp):
    """Extract metadata from a CMIP5 datanode-style file path.

    Combines metadata parsed from the directory components (per the DRS
    layout) with metadata parsed from the CMOR filename.
    """
    meta = get_dir_meta(fp, list(CMIP5_DATANODE_FP_ATTS))
    meta.update(get_cmor_fname_meta(fp))
    return meta
Processes a datanode style file path. Section 3.2 of the `Data Reference Syntax`_ details: It is recommended that ESGF data nodes should layout datasets on disk mapping DRS components to directories as: <activity>/<product>/<institute>/<model>/<experiment>/ <frequency>/<modeling_realm>/<mip_table>/<ensemble_member>/ <version_number>/<variable_name>/<CMOR filename>.nc Arguments: fp (str): A file path conforming to DRS spec. Returns: dict: Metadata as extracted from the file path. .. _Data Reference Syntax: http://cmip-pcmdi.llnl.gov/cmip5/docs/cmip5_data_reference_syntax.pdf
def exp10(x, context=None):
    """Return ten raised to the power *x*."""
    operand = BigFloat._implicit_convert(x)
    return _apply_function_in_current_context(
        BigFloat, mpfr.mpfr_exp10, (operand,), context)
Return ten raised to the power x.
def install_program(self):
    """Render and install the supervisor program config file.

    Returns:
        A single-element list with the installed file path.
    """
    rendered = templ_program.render(**self.options)
    conf = Configuration(self.buildout, self.program + '.conf', {
        'deployment': self.deployment_name,
        'directory': os.path.join(self.options['etc-directory'], 'conf.d'),
        'text': rendered,
    })
    return [conf.install()]
install supervisor program config file
def set_shellwidget(self, shellwidget):
    """Bind the shellwidget instance to this figure browser."""
    self.shellwidget = shellwidget
    # Tell the widget about us, then listen for new inline figures.
    shellwidget.set_figurebrowser(self)
    shellwidget.sig_new_inline_figure.connect(self._handle_new_figure)
Bind the shellwidget instance to the figure browser
def define_wide_deep_flags():
    """Register command-line flags for the wide-deep model.

    Adds the shared base/benchmark/performance flags from ``flags_core``
    plus two model-specific flags: ``--model_type`` and
    ``--download_if_missing``.
    """
    flags_core.define_base()
    flags_core.define_benchmark()
    # Only the inter/intra-op parallelism knobs are enabled; the other
    # performance flags do not apply to this model.
    flags_core.define_performance(
        num_parallel_calls=False, inter_op=True, intra_op=True,
        synthetic_data=False, max_train_steps=False, dtype=False,
        all_reduce_alg=False)
    # Re-export flags_core's key flags on this module.
    flags.adopt_module_key_flags(flags_core)

    flags.DEFINE_enum(
        name="model_type", short_name="mt", default="wide_deep",
        enum_values=['wide', 'deep', 'wide_deep'],
        help="Select model topology.")
    flags.DEFINE_boolean(
        name="download_if_missing", default=True,
        help=flags_core.help_wrap(
            "Download data to data_dir if it is not already present."))
Add supervised learning flags, as well as wide-deep model type.
def sendCommand(self, command):
    """Send a RAPI command through the charger's web interface.

    Returns:
        The response payload split into a list of string tokens.

    Raises:
        AttributeError: If neither response pattern matches the page.
    """
    data = {'rapi': command}
    full_url = self.url + urllib.parse.urlencode(data)
    # Read the body exactly once: the old code called .read() twice, and
    # the second read of an exhausted response returned b'', so the
    # fallback regex could never match.
    body = urllib.request.urlopen(full_url).read().decode('utf-8')
    response = re.search('\<p>&gt;\$(.+)\<script', body)
    if response is None:
        response = re.search('\>\>\$(.+)\<p>', body)
    return response.group(1).split()
Sends a command through the web interface of the charger and parses the response
def _get_mean_and_median(hist: Hist) -> Tuple[float, float]:
    """Retrieve the mean and median of a ROOT histogram.

    The median is the 0.5 quantile as computed by ROOT, i.e. the bin
    values weighted by bin content.

    Args:
        hist: Histogram from which the values will be extracted.

    Returns:
        (mean, median) of the histogram.
    """
    median = ctypes.c_double(0)
    probability = ctypes.c_double(0.5)
    # ROOT requires the integral before quantiles can be computed.
    hist.ComputeIntegral()
    hist.GetQuantiles(1, median, probability)
    return (hist.GetMean(), median.value)
Retrieve the mean and median from a ROOT histogram. Note: These values are not so trivial to calculate without ROOT, as they are the bin values weighted by the bin content. Args: hist: Histogram from which the values will be extract. Returns: mean, median of the histogram.
def byaxis(self):
    """Return an indexer yielding subspaces along one or several axes.

    ``space.byaxis[i]`` or ``space.byaxis[list_of_axes]`` builds a new
    space whose shape is taken from the selected axes; array weightings
    are sliced to match.
    """
    # Captured by the helper class below so __getitem__ can close over it.
    space = self

    class NpyTensorSpacebyaxis(object):

        def __getitem__(self, indices):
            try:
                iter(indices)
            except TypeError:
                # Single integer or slice: index the shape tuple directly.
                newshape = space.shape[indices]
            else:
                # Sequence of axes: may repeat or reorder dimensions.
                newshape = tuple(space.shape[i] for i in indices)
            if isinstance(space.weighting, ArrayWeighting):
                # Slice the weighting array to match the selected axes,
                # keeping the same exponent.
                new_array = np.asarray(space.weighting.array[indices])
                weighting = NumpyTensorSpaceArrayWeighting(
                    new_array, space.weighting.exponent)
            else:
                weighting = space.weighting
            return type(space)(newshape, space.dtype, weighting=weighting)

        def __repr__(self):
            return repr(space) + '.byaxis'

    return NpyTensorSpacebyaxis()
Return the subspace defined along one or several dimensions. Examples -------- Indexing with integers or slices: >>> space = odl.rn((2, 3, 4)) >>> space.byaxis[0] rn(2) >>> space.byaxis[1:] rn((3, 4)) Lists can be used to stack spaces arbitrarily: >>> space.byaxis[[2, 1, 2]] rn((4, 3, 4))
def check_boundary_lines_similar(l_1, l_2):
    """Compare two lists of strings to see if they are roughly the same.

    Two elements match when both are digit strings, or when their
    lowercased first and last characters agree.  Returns 1 when at
    least 90% of the elements match, else 0.

    @param l_1: (list) of strings.
    @param l_2: (list) of strings.
    @return: (int) 1/0.
    """
    if not isinstance(l_1, list) or not isinstance(l_2, list) \
            or len(l_1) != len(l_2):
        return 0
    if not l_1:
        # Empty lists never compared as similar in the original ratio check.
        return 0
    num_matches = 0
    # range() replaces the Python-2-only xrange(); zip pairs elements.
    for a, b in zip(l_1, l_2):
        if a.isdigit() and b.isdigit():
            num_matches += 1
        else:
            a_low = a.lower()
            b_low = b.lower()
            # Guard against empty strings, which previously raised IndexError.
            if a_low and b_low and a_low[0] == b_low[0] \
                    and a_low[-1] == b_low[-1]:
                num_matches += 1
    return 1 if num_matches / len(l_1) >= 0.9 else 0
Compare two lists to see if their elements are roughly the same. @param l_1: (list) of strings. @param l_2: (list) of strings. @return: (int) 1/0.
def get(self, value):
    """Return the VRF configuration for *value* as a resource dict.

    Args:
        value (string): The vrf name to look up in the running config.

    Returns:
        A dict of VRF attributes, or None when the VRF does not exist.
    """
    config = self.get_block('vrf definition %s' % value)
    if not config:
        return None
    response = dict(vrf_name=value)
    response.update(self._parse_rd(config))
    response.update(self._parse_description(config))
    # A "no ... routing" block means routing is disabled for that family.
    response['ipv4_routing'] = not self.get_block(
        'no ip routing vrf %s' % value)
    response['ipv6_routing'] = not self.get_block(
        'no ipv6 unicast-routing vrf %s' % value)
    return response
Returns the VRF configuration as a resource dict. Args: value (string): The vrf name to retrieve from the running configuration. Returns: A Python dict object containing the VRF attributes as key/value pairs.
def find_faderport_output_name(number=0):
    """Return the MIDI output name of the nth attached FaderPort.

    :param number: 0 for the first FaderPort, 1 for the second, etc.
    :return: Port name, or None when *number* is out of range.
    """
    names = [name for name in mido.get_output_names()
             if name.lower().startswith('faderport')]
    return names[number] if 0 <= number < len(names) else None
Find the MIDI output name for a connected FaderPort. NOTE! Untested for more than one FaderPort attached. :param number: 0 unless you've got more than one FaderPort attached. In which case 0 is the first, 1 is the second etc :return: Port name or None
def plot_delta_m(fignum, B, DM, Bcr, s):
    """Plot a Delta-M curve with a half-height marker at Bcr.

    Parameters
    ----------
    fignum : matplotlib figure number
    B : array of field values
    DM : array of differences between top and bottom hysteresis curves
    Bcr : coercivity of remanence
    s : specimen name (used as the title)
    """
    plt.figure(num=fignum)
    plt.clf()
    if not isServer:
        plt.figtext(.02, .01, version_num)
    plt.plot(B, DM, 'b')
    plt.xlabel('B (T)')
    plt.ylabel('Delta M')
    # Red marker: horizontal line at half the initial Delta-M, dropping
    # to zero at Bcr.
    half = old_div(DM[0], 2.)
    plt.plot([0, Bcr, Bcr], [half, half, 0], 'r')
    plt.title(s)
function to plot Delta M curves Parameters __________ fignum : matplotlib figure number B : array of field values DM : array of difference between top and bottom curves in hysteresis loop Bcr : coercivity of remanence s : specimen name
def _formatOntologyTermObject(self, terms, element_type):
    """Build a SPARQL filter clause matching any of the given ontology terms.

    Args:
        terms: A single term object or an iterable of term objects.  Each
            term supplies either ``term_id`` (used directly as a URI) or
            ``term`` (resolved via ``_toNamespaceURL``).
        element_type: SPARQL variable name to constrain.

    Returns:
        A clause string of the form ``(?var = <uri>  || ...)``.
    """
    # collections.Iterable was removed in Python 3.10; the abc module
    # is the correct home for the ABC.
    if not isinstance(terms, collections.abc.Iterable):
        terms = [terms]
    elements = []
    for term in terms:
        if term.term_id:
            uri = term.term_id
        else:
            uri = self._toNamespaceURL(term.term)
        elements.append('?{} = <{}> '.format(element_type, uri))
    return "({})".format(" || ".join(elements))
Formats the ontology term object for query
def authenticate(api_key, api_url, **kwargs):
    """Return a Muddle instance authenticated with the given key and url."""
    instance = Muddle(**kwargs)
    instance.authenticate(api_key, api_url)
    return instance
Returns a muddle instance, with API key and url set for requests.
def _transition_loop(self):
    """Execute all queued transitions step by step until all finish.

    Each pass steps every pending transition once, removes the finished
    ones, and sleeps whatever is left of MIN_STEP_TIME.
    """
    while self._transitions:
        start = time.time()
        # Iterate over a snapshot: the old code removed items from the
        # list it was iterating, which skipped the next transition.
        for transition in list(self._transitions):
            transition.step()
            if transition.finished:
                self._transitions.remove(transition)
        elapsed = time.time() - start
        time.sleep(max(0, self.MIN_STEP_TIME - elapsed))
Execute all queued transitions step by step.
def get_user(self, username):
    """Return the user account for the verified *username*.

    Raises:
        ActivationError: With code 'bad_username' when no such account
            exists, or 'already_activated' when the account is active.
    """
    User = get_user_model()
    lookup = {User.USERNAME_FIELD: username}
    try:
        user = User.objects.get(**lookup)
    except User.DoesNotExist:
        raise ActivationError(self.BAD_USERNAME_MESSAGE,
                              code='bad_username')
    if user.is_active:
        raise ActivationError(self.ALREADY_ACTIVATED_MESSAGE,
                              code='already_activated')
    return user
Given the verified username, look up and return the corresponding user account if it exists, or raising ``ActivationError`` if it doesn't.
def sendExact( signal=Any, sender=Anonymous, *arguments, **named ):
    """Send *signal* only to receivers registered for this exact message.

    Unlike broadcast sends, this skips Any/Anonymous handlers, reaching
    only receivers explicitly registered for this signal on this sender.

    Returns:
        List of (receiver, response) pairs.
    """
    return [
        (receiver,
         robustapply.robustApply(receiver, signal=signal, sender=sender,
                                 *arguments, **named))
        for receiver in liveReceivers(getReceivers(sender, signal))
    ]
Send signal only to those receivers registered for exact message sendExact allows for avoiding Any/Anonymous registered handlers, sending only to those receivers explicitly registered for a particular signal on a particular sender.
def write_warc(self, resources=None, dumpfile=None):
    """Write a WARC dump file of the given resources.

    WARC support is not part of ResourceSync v1.0 (Z39.99 2014) but is
    kept in this library for experimentation.

    Raises:
        DumpError: If the warc library cannot be imported.
    """
    try:
        from warc import WARCFile, WARCHeader, WARCRecord
    except ImportError as exc:
        # Narrowed from a bare except: only a missing library is expected,
        # and chaining preserves the underlying cause.
        raise DumpError("Failed to load WARC library") from exc
    wf = WARCFile(dumpfile, mode="w", compress=self.compress)
    for resource in resources:
        wh = WARCHeader({})
        wh.url = resource.uri
        wh.ip_address = None
        wh.date = resource.lastmod
        wh.content_type = 'text/plain'
        wh.result_code = 200
        wh.checksum = 'aabbcc'  # placeholder value, not computed
        wh.location = self.archive_path(resource.path)
        wf.write_record(WARCRecord(header=wh, payload=resource.path))
    wf.close()
    warcsize = os.path.getsize(dumpfile)
    self.logging.info(
        "Wrote WARC file dump %s with size %d bytes" % (dumpfile, warcsize))
Write a WARC dump file. WARC support is not part of ResourceSync v1.0 (Z39.99 2014) but is left in this library for experimentation.
def normalize(seq):
    """Scale each number in *seq* so the values sum to 1."""
    total = float(sum(seq))
    return [value / total for value in seq]
Scales each number in the sequence so that the sum of all numbers equals 1.
def GetUserInfo(knowledge_base, user):
    """Return the first matching User entry from the knowledge base.

    Args:
        knowledge_base: Object with a ``users`` list of user entries.
        user: Username as a string; may include a domain as DOMAIN\\user.

    Returns:
        The first matching user entry, or None when there is no match.
    """
    if "\\" in user:
        domain, user = user.split("\\", 1)
        matches = (u for u in knowledge_base.users
                   if u.username == user and u.userdomain == domain)
    else:
        matches = (u for u in knowledge_base.users if u.username == user)
    return next(matches, None)
Get a User protobuf for a specific user. Args: knowledge_base: An rdf_client.KnowledgeBase object. user: Username as string. May contain domain like DOMAIN\\user. Returns: A User rdfvalue or None
def monday_of_week(year, week):
    """Return a tz-aware datetime for the Monday of the given week.

    Uses strptime's %W week numbering, then shifts back one week when
    January 4 falls after Thursday so the result lines up with ISO weeks.
    """
    parsed = time.strptime('{0} {1} 1'.format(year, week), '%Y %W %w')
    monday = timezone.datetime(year=parsed.tm_year, month=parsed.tm_mon,
                               day=parsed.tm_mday, tzinfo=timezone.utc)
    if timezone.datetime(year, 1, 4).isoweekday() > 4:
        monday -= timezone.timedelta(days=7)
    return monday
Returns a datetime for the monday of the given week of the given year.
def done(self):
    """Return True when the future is done, False otherwise.

    Also True in failure cases; check :meth:`result` or
    :meth:`exception` to distinguish success from failure.
    """
    pending = self._SENTINEL
    return self._exception != pending or self._result != pending
Return True the future is done, False otherwise. This still returns True in failure cases; checking :meth:`result` or :meth:`exception` is the canonical way to assess success or failure.
def load_table(self, table):
    """Load the resources described by *table* into our db.

    Raises:
        QueryError: When the resource has no such collection.
    """
    region = table.database or self.default_region
    resource_name, collection_name = table.table.split('_', 1)
    # boto regions use hyphens where our table names use underscores.
    resource = self.boto3_session.resource(
        resource_name, region_name=region.replace('_', '-'))
    if not hasattr(resource, collection_name):
        raise QueryError(
            'Unknown collection <{0}> of resource <{1}>'.format(
                collection_name, resource_name))
    self.attach_region(region)
    self.refresh_table(region, table.table, resource,
                       getattr(resource, collection_name))
Load resources as specified by given table into our db.
def _parse_normalization(normalization):
    """Parse one normalization item.

    A one-entry dict maps to a ``(name, options)`` tuple when options is
    a non-empty dict, otherwise just the name.  A string is returned
    as-is; anything else yields None.
    """
    if isinstance(normalization, dict):
        if len(normalization) == 1:
            name, options = next(iter(normalization.items()))
            if options and isinstance(options, dict):
                return (name, options)
            return name
        return None
    if isinstance(normalization, STR_TYPE):
        return normalization
    return None
Parse a normalization item. Transform dicts into a tuple containing the normalization options. If a string is found, the actual value is used. Args: normalization: Normalization to parse. Returns: Tuple or string containing the parsed normalization.
def add(self, **kwargs):
    """Append a new element initialized from *kwargs* and return it."""
    element = self._message_descriptor._concrete_class(**kwargs)
    element._SetListener(self._message_listener)
    self._values.append(element)
    # Notify the listener once on the transition from clean to dirty.
    if not self._message_listener.dirty:
        self._message_listener.Modified()
    return element
Adds a new element at the end of the list and returns it. Keyword arguments may be used to initialize the element.