code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def auth_token(cls, token):
    """Callback for the OAuth 2.0 bearer token middleware.

    Looks up a login by bearer token in the session store and raises
    AuthRejected when the token is unknown (possibly revoked) or the
    account is locked; otherwise runs the login's post-authentication
    hook.
    """
    store = goldman.sess.store
    login = store.find(cls.RTYPE, 'token', token)
    if not login:
        msg = 'No login found with that token. It may have been revoked.'
        raise AuthRejected(**{'detail': msg})
    elif login.locked:
        msg = 'The login account is currently locked out.'
        raise AuthRejected(**{'detail': msg})
    else:
        login.post_authenticate()
Callback method for OAuth 2.0 bearer token middleware
def set_exception(self, exception):
    """Mark this future as finished, with *exception* as its outcome.

    Should only be used by Task and unit tests.
    """
    msg = "%r should be an Exception" % exception
    assert isinstance(exception, Exception), msg
    self._exception = exception
    self._state = self.FINISHED
Sets the result of the future as being the given exception. Should only be used by Task and unit tests.
def kth_to_last_dict(head, k):
    """Brute-force kth-to-last lookup using a dict of node positions.

    Stores every node of the linked list keyed by its index, then looks
    up index ``len(list) - k``. Returns that node, or False when the
    input is invalid or the index is absent (the ``and`` short-circuits
    on a missing key).
    """
    if not head or k <= -1:
        return False
    nodes = {}
    index = 0
    node = head
    while node:
        nodes[index] = node
        node = node.next
        index += 1
    target = len(nodes) - k
    return target in nodes and nodes[target]
This is a brute force method where we keep a dict the size of the list. Then we check it for the value we need. If the key is not in the dict, our and statement will short circuit and return False
def _load_data():
    """Load the transcription mapping data into dictionaries.

    Reads 'transcriptions.csv' (one Pinyin,Zhuyin,IPA triple per line)
    and builds three cross-reference maps, one keyed by each system.
    """
    lines = dragonmapper.data.load_data_file('transcriptions.csv')
    pinyin_map, zhuyin_map, ipa_map = {}, {}, {}
    for line in lines:
        p, z, i = line.split(',')
        pinyin_map[p] = {'Zhuyin': z, 'IPA': i}
        zhuyin_map[z] = {'Pinyin': p, 'IPA': i}
        ipa_map[i] = {'Pinyin': p, 'Zhuyin': z}
    return pinyin_map, zhuyin_map, ipa_map
Load the transcription mapping data into a dictionary.
def _recursively_lookup_complex(self, complex_id):
    """Expand a complex into its non-complex constituent agent strings.

    Constituents that are themselves complexes are expanded
    breadth-first until only plain agent strings remain.
    """
    assert complex_id in self.complex_map
    leaves = []
    queue = [complex_id]
    while queue:
        current = queue.pop(0)
        assert current in self.complex_map
        for member in self.complex_map[current]:
            if member in self.complex_map:
                queue.append(member)
            else:
                leaves.append(member)
    return leaves
Looks up the constituents of a complex. If any constituent is itself a complex, recursively expands until all constituents are not complexes.
def _escapify(label):
    """Escape the characters in label which need it.

    Characters in ``_escaped`` get a backslash prefix, printable ASCII
    (excluding space) passes through, and everything else becomes a
    ``\\DDD`` decimal escape.

    @returns: the escaped string
    @rtype: string
    """
    pieces = []
    for ch in label:
        code = ord(ch)
        if ch in _escaped:
            pieces.append('\\' + ch)
        elif 0x20 < code < 0x7F:
            pieces.append(ch)
        else:
            pieces.append('\\%03d' % code)
    return ''.join(pieces)
Escape the characters in label which need it. @returns: the escaped string @rtype: string
def snap(self, *args):
    """Record two to six lock-in values in a single query.

    Issues the ``SNAP?`` command with the given parameter names
    ('x', 'y', 'r', 'theta', 'aux1'..'aux4', 'frequency',
    'trace1'..'trace4') and returns the corresponding floats.

    :raises TypeError: if fewer than 2 or more than 6 names are given.
    """
    length = len(args)
    if not 2 <= length <= 6:
        msg = 'snap takes 2 to 6 arguments, {0} given.'.format(length)
        raise TypeError(msg)
    param = Enum(
        'x', 'y', 'r', 'theta', 'aux1', 'aux2', 'aux3', 'aux4',
        'frequency', 'trace1', 'trace2', 'trace3', 'trace4'
    )
    cmd = 'SNAP?', (Float,) * length, (param,) * length
    return self._ask(cmd, *args)
Records multiple values at once. It takes two to six arguments specifying which values should be recorded together. Valid arguments are 'x', 'y', 'r', 'theta', 'aux1', 'aux2', 'aux3', 'aux4', 'frequency', 'trace1', 'trace2', 'trace3' and 'trace4'. snap is faster since it avoids communication overhead. 'x' and 'y' are recorded together, as well as 'r' and 'theta'. Between these pairs, there is a delay of approximately 10 us. 'aux1', 'aux2', 'aux3' and 'aux4' have an uncertainty of up to 32 us. It takes at least 40 ms or a period to calculate the frequency. E.g.:: lockin.snap('x', 'theta', 'trace3')
def _input_as_multiline_string(self, data):
    """Write a multiline string to a temp .fasta file and return its path.

    :param data: a multiline string to be written to a file.
    :returns: the temp file path (also stored on ``self._input_filename``).
    """
    self._input_filename = self.getTmpFilename(
        self.WorkingDir, suffix='.fasta')
    with open(self._input_filename, 'w') as f:
        f.write(data)
    return self._input_filename
Write multiline string to temp file, return filename data: a multiline string to be written to a file.
def _unquote(self, val):
    """Strip one layer of matching quotes from *val*, if present.

    Quotes are removed only when the string is at least two characters
    long and starts and ends with the same single or double quote.
    """
    if len(val) < 2:
        return val
    first, last = val[0], val[-1]
    if first in ("'", '"') and first == last:
        return val[1:-1]
    return val
Unquote a value if necessary.
def get_pushes(self, project, **params):
    """Get pushes from *project*, filtered by keyword parameters.

    :param project: project (repository name) to query data for
    :param params: keyword arguments to filter results
    """
    return self._get_json_list(self.PUSH_ENDPOINT, project, **params)
Gets pushes from project, filtered by parameters By default this method will just return the latest 10 pushes (if they exist) :param project: project (repository name) to query data for :param params: keyword arguments to filter results
async def delete(self, iden):
    """Delete the appointment with id *iden*.

    Removes it from the in-memory heap (restoring the heap invariant
    when the removed entry was not the last element) and from the
    persisted hive dict.

    :raises s_exc.NoSuchIden: if no appointment has that id.
    """
    appt = self.appts.get(iden)
    if appt is None:
        raise s_exc.NoSuchIden()
    try:
        heappos = self.apptheap.index(appt)
    except ValueError:
        pass  # not in the heap, but still tracked in self.appts
    else:
        if heappos == len(self.apptheap) - 1:
            del self.apptheap[heappos]
        else:
            # swap the last element into the hole, then re-heapify
            self.apptheap[heappos] = self.apptheap.pop()
            heapq.heapify(self.apptheap)
    del self.appts[iden]
    await self._hivedict.pop(iden)
Delete an appointment
def list(gandi, domain, limit):
    """List mail forwards for a domain.

    Fetches up to *limit* forwards, prints each via ``output_forward``,
    and returns the raw result list.
    """
    options = {'items_per_page': limit}
    result = gandi.forward.list(domain, options)
    for forward in result:
        output_forward(gandi, domain, forward)
    return result
List mail forwards for a domain.
def _get_pooling_layers(self, start_node_id, end_node_id):
    """Given two node IDs, return all the pooling layers between them.

    Runs a depth-first search from start to end to collect the layer ids
    on the path, then keeps Pooling layers and strided Conv layers
    (stride != 1), both of which downsample.
    """
    layer_list = []
    node_list = [start_node_id]
    # Call the DFS outside of ``assert`` so the search (which fills
    # layer_list/node_list as a side effect) still runs under ``-O``,
    # where assert statements are stripped.
    found = self._depth_first_search(end_node_id, layer_list, node_list)
    assert found
    ret = []
    for layer_id in layer_list:
        layer = self.layer_list[layer_id]
        if is_layer(layer, "Pooling"):
            ret.append(layer)
        elif is_layer(layer, "Conv") and layer.stride != 1:
            ret.append(layer)
    return ret
Given two node IDs, return all the pooling layers between them.
def votes(self):
    """Return all the votes related to this topic poll.

    Concatenates the votes of every option into a single list.
    """
    votes = []
    for option in self.options.all():
        votes += option.votes.all()
    return votes
Returns all the votes related to this topic poll.
def has_annotation(self, annotation: str) -> bool:
    """Check whether *annotation* is defined in any supported form.

    True if it is an enumerated, regex, or local annotation; the checks
    short-circuit in that order.
    """
    checks = (
        self.has_enumerated_annotation,
        self.has_regex_annotation,
        self.has_local_annotation,
    )
    return any(check(annotation) for check in checks)
Check if this annotation is defined.
def triad(note, key):
    """Return the triad on *note* in *key* as a list.

    The result is [root, third, fifth], with the third and fifth taken
    from *key* via the intervals module.

    Examples:
    >>> triad('E', 'C')
    ['E', 'G', 'B']
    >>> triad('E', 'B')
    ['E', 'G#', 'B']
    """
    return [note, intervals.third(note, key), intervals.fifth(note, key)]
Return the triad on note in key as a list. Examples: >>> triad('E', 'C') ['E', 'G', 'B'] >>> triad('E', 'B') ['E', 'G#', 'B']
def date_from_string(string, format_string=None):
    """Coerce *string* into a ``datetime.date``.

    Runs through a few common formats and attempts to parse the string.
    Alternatively, *format_string* can provide either a single format
    string to attempt or an iterable of format strings.

    :raises ValueError: if no format matches.
    """
    if isinstance(format_string, str):
        return datetime.datetime.strptime(string, format_string).date()
    if format_string is None:
        format_string = [
            "%Y-%m-%d",
            "%m-%d-%Y",
            "%m/%d/%Y",
            "%d/%m/%Y",
        ]
    # `fmt` rather than `format`, so the builtin is not shadowed.
    for fmt in format_string:
        try:
            return datetime.datetime.strptime(string, fmt).date()
        except ValueError:
            continue
    raise ValueError("Could not produce date from string: {}".format(string))
Runs through a few common string formats for datetimes, and attempts to coerce them into a datetime. Alternatively, format_string can provide either a single string to attempt or an iterable of strings to attempt.
def __init_defaults(self, config):
    """Initialize the default connection settings for the provider.

    Fills missing entries in *config* with provider-specific defaults;
    all non-sqlite providers additionally get host/user/password/dbname
    defaults.

    :raises ValueError: for an unsupported provider.
    """
    per_provider = {
        'sqlite': {'dbname': ':memory:', 'create_db': True},
        'mysql': {'port': 3306, 'charset': 'utf8'},
        'postgres': {'port': 5432},
        'oracle': {'port': 1521},
    }
    provider = self.__provider
    if provider not in per_provider:
        raise ValueError('Unsupported provider "{}"'.format(provider))
    for key, value in per_provider[provider].items():
        config.setdefault(key, value)
    if provider != 'sqlite':
        config.setdefault('host', 'localhost')
        config.setdefault('user', None)
        config.setdefault('password', None)
        config.setdefault('dbname', None)
Initializes the default connection settings.
def _add_opt_argument(self, opt_args, arg_parser):
    """Add an argument to an instantiated parser.

    Pops any 'groups' / 'mutually_exclusive' sub-structures and
    registers them as argument groups first, then adds the remaining
    arguments individually.

    :param opt_args: ``dict``
    :param arg_parser: ``object``
    """
    option_args = opt_args.copy()
    groups = option_args.pop('groups', None)
    if groups:
        self._add_group(
            parser=arg_parser, groups=groups, option_args=option_args
        )
    exclusive_args = option_args.pop('mutually_exclusive', None)
    if exclusive_args:
        self._add_mutually_exclusive_group(
            parser=arg_parser, groups=exclusive_args,
            option_args=option_args
        )
    # only the value dicts are used; the keys are argument names
    for k, v in option_args.items():
        self._add_arg(parser=arg_parser, value_dict=v)
Add an argument to an instantiated parser. :param opt_args: ``dict`` :param arg_parser: ``object``
def prettylist(list_):
    """Filter out duplicate values while keeping order, then format.

    Returns '' for an empty input, the sole value when only one unique
    entry remains, and '[a; b; ...]' otherwise.
    """
    if not list_:
        return ''
    seen = set()
    unique = []
    for entry in list_:
        if entry not in seen:  # idiomatic 'not in' (was 'not entry in')
            seen.add(entry)
            unique.append(entry)
    if len(unique) == 1:
        return unique[0]
    return '[' + '; '.join(unique) + ']'
Filter out duplicate values while keeping order.
def ecef2enuv(u: float, v: float, w: float,
              lat0: float, lon0: float,
              deg: bool = True) -> Tuple[float, float, float]:
    """Rotate an ECEF vector (observer -> target) into local ENU axes.

    :param u, v, w: target ECEF vector components (meters).
    :param lat0: observer geodetic latitude.
    :param lon0: observer geodetic longitude.
    :param deg: angles in degrees when True (default), radians otherwise.
    :return: (east, north, up) vector components (meters).
    """
    if deg:
        lat0 = radians(lat0)
        lon0 = radians(lon0)
    sin_lon, cos_lon = sin(lon0), cos(lon0)
    sin_lat, cos_lat = sin(lat0), cos(lat0)
    t = cos_lon * u + sin_lon * v
    east = -sin_lon * u + cos_lon * v
    up = cos_lat * t + sin_lat * w
    north = -sin_lat * t + cos_lat * w
    return east, north, up
VECTOR from observer to target ECEF => ENU Parameters ---------- u : float or numpy.ndarray of float target x ECEF coordinate (meters) v : float or numpy.ndarray of float target y ECEF coordinate (meters) w : float or numpy.ndarray of float target z ECEF coordinate (meters) lat0 : float Observer geodetic latitude lon0 : float Observer geodetic longitude deg : bool, optional degrees input/output (False: radians in/out) Returns ------- uEast : float or numpy.ndarray of float target east ENU coordinate (meters) vNorth : float or numpy.ndarray of float target north ENU coordinate (meters) wUp : float or numpy.ndarray of float target up ENU coordinate (meters)
def get_unique_figname(dirname, root, ext):
    """Append a counter to *root* to form a filename not in *dirname*.

    Tries root_1ext, root_2ext, ... and returns the full path of the
    first candidate that does not already exist.
    """
    counter = 1
    while True:
        candidate = osp.join(dirname, '%s_%d%s' % (root, counter, ext))
        if not osp.exists(candidate):
            return candidate
        counter += 1
Append a number to "root" to form a filename that does not already exist in "dirname".
def log_power_spectrum(frames, fft_points=512, normalize=True):
    """Log power spectrum of each frame in frames.

    Args:
        frames (array): Frame array; each row is a frame.
        fft_points (int): FFT length; shorter frames are zero-padded by
            the underlying power_spectrum call.
        normalize (bool): If True, subtract the global maximum so the
            peak sits at 0 dB.

    Returns:
        array: The log power spectrum in dB, same shape as the power
        spectrum (num_frames x fft_length).
    """
    power_spec = power_spectrum(frames, fft_points)
    # floor tiny values to avoid log10(0)
    power_spec[power_spec <= 1e-20] = 1e-20
    log_power_spec = 10 * np.log10(power_spec)
    if normalize:
        return log_power_spec - np.max(log_power_spec)
    else:
        return log_power_spec
Log power spectrum of each frame in frames. Args: frames (array): The frame array in which each row is a frame. fft_points (int): The length of FFT. If fft_length is greater than frame_len, the frames will be zero-padded. normalize (bool): If normalize=True, the log power spectrum will be normalized. Returns: array: The log power spectrum - If frames is an num_frames x sample_per_frame matrix, output will be num_frames x fft_length.
def _receive_all(self):
    """Receive from the socket until a CRLF-terminated chunk arrives.

    Loops on recv while the connection is running; stops the connection
    and raises SocketError on socket errors or when the server closes
    the connection. Returns the accumulated, decoded data (or None when
    a socket error occurs after the connection was already stopped).
    """
    (data, part) = ('', '')
    # CRLF must be compared as bytes on Python 3
    if is_py3:
        crlf_bytes = bytes(self.__CRLF, encoding=self.__encoding)
    else:
        crlf_bytes = self.__CRLF
    while self._running and part[-2:] != crlf_bytes:
        try:
            part = self._socket.recv(self.buffer_size)
        except (socket.timeout, socket.error) as e:
            if self._running:
                self.stop()
                raise SocketError('[Connect: %s]: Socket %s' % (self._unique_id, e))
            else:
                return
        # empty recv means the peer closed the connection
        if len(part) == 0:
            self.stop()
            raise SocketError('Connection closed by server')
        data += part.decode(self.__encoding)
    return data
While the socket is running, receives data from the socket until CRLF is detected.
def gen_tensor_data():
    """Generate the pygam tensor-interaction demo figure.

    Fits a LinearGAM with a tensor term to toy interaction data, plots
    the partial dependence surface in 3D, and saves it to
    'imgs/pygam_tensor.png'.
    """
    X, y = toy_interaction(return_X_y=True, n=10000)
    gam = LinearGAM(te(0, 1, lam=0.1)).fit(X, y)
    XX = gam.generate_X_grid(term=0, meshgrid=True)
    Z = gam.partial_dependence(term=0, meshgrid=True)
    fig = plt.figure(figsize=(9, 6))
    ax = plt.axes(projection='3d')
    ax.dist = 7.5
    ax.plot_surface(XX[0], XX[1], Z, cmap='viridis')
    ax.set_axis_off()
    fig.tight_layout()
    plt.savefig('imgs/pygam_tensor.png', transparent=True, dpi=300)
toy interaction data
def encode(self, tags, encoding, values_to_sub):
    """Apply per-tag encodings to the substitution values.

    Reads the encoding type for each tag from the event mapping and,
    when it is "url" or "base64", replaces the corresponding entry in
    *values_to_sub* with its encoded form. The literal string "None"
    means no encoding is applied.

    Returns the (mutated) values_to_sub dict.
    """
    for tag, props in tags.items():
        kind = props.get(encoding)
        if kind == "None":
            continue
        if kind == "url":
            values_to_sub[tag] = self.url_encode(values_to_sub[tag])
        if kind == "base64":
            values_to_sub[tag] = self.base64_utf_encode(values_to_sub[tag])
    return values_to_sub
reads the encoding type from the event-mapping.json and determines whether a value needs encoding Parameters ---------- tags: dict the values of a particular event that can be substituted within the event json encoding: string string that helps navigate to the encoding field of the json values_to_sub: dict key/value pairs that will be substituted into the json Returns ------- values_to_sub: dict the encoded (if need be) values to substitute into the json.
def load(self, local='localsettings.py', default='settings.py'):
    """Load the settings dict.

    Loads the defaults first, then overlays local customizations.

    @param local: The local settings filename to use
    @param default: The default settings module to read
    @return: A dict of the loaded settings
    """
    self._load_defaults(default)
    self._load_custom(local)
    return self.settings()
Load the settings dict @param local: The local settings filename to use @param default: The default settings module to read @return: A dict of the loaded settings
def _print_breakdown(cls, savedir, fname, data):
    """Write generated model fixture *data* to savedir/fname.

    Creates *savedir* if needed, tolerating a concurrent creation (the
    old exists()/makedirs() pair had a check-then-create race).
    """
    try:
        os.makedirs(savedir)
    except OSError:
        # the directory may have appeared since (or already existed);
        # re-raise only if it still is not a directory
        if not os.path.isdir(savedir):
            raise
    with open(os.path.join(savedir, fname), 'w') as fout:
        fout.write(data)
Function to print model fixtures into generated file
def get_data_source(self):
    """Determine the data source from the product ID.

    The second '_'-separated token of the product ID selects between
    Sentinel-2 L1C and L2A.

    :return: Data source of the product
    :rtype: DataSource
    :raises ValueError: for an unrecognized product type
    """
    product_type = self.product_id.split('_')[1]
    if product_type.endswith('L1C') or product_type == 'OPER':
        return DataSource.SENTINEL2_L1C
    if product_type.endswith('L2A') or product_type == 'USER':
        return DataSource.SENTINEL2_L2A
    raise ValueError('Unknown data source of product {}'.format(self.product_id))
The method determines data source from product ID. :return: Data source of the product :rtype: DataSource :raises: ValueError
def load_buildfile(self, target):
    """Pull a build file from git.

    Looks up the build file for *target* in its repo; git failures are
    logged and re-raised as BrokenGraph.
    """
    log.info('Loading: %s', target)
    filepath = os.path.join(target.path, app.get_options().buildfile_name)
    try:
        repo = self.repo_state.GetRepo(target.repo)
        return repo.get_file(filepath)
    except gitrepo.GitError as err:
        log.error('Failed loading %s: %s', target, err)
        raise error.BrokenGraph('Sadface.')
Pull a build file from git.
def get_node(service_name, host_name):
    """Generate a Node message from params and system information.

    Falls back to the local hostname when *host_name* is None, and
    stamps the current pid, start time, library and service info.
    """
    return common_pb2.Node(
        identifier=common_pb2.ProcessIdentifier(
            host_name=socket.gethostname() if host_name is None else host_name,
            pid=os.getpid(),
            start_timestamp=proto_ts_from_datetime(
                datetime.datetime.utcnow())),
        library_info=common_pb2.LibraryInfo(
            language=common_pb2.LibraryInfo.Language.Value('PYTHON'),
            exporter_version=EXPORTER_VERSION,
            core_library_version=opencensus_version),
        service_info=common_pb2.ServiceInfo(name=service_name))
Generates Node message from params and system information.
def schema_class(self, object_schema, model_name, classes=False):
    """Create an object-class based on *object_schema*.

    Uses the "python-jsonschema-objects" ClassBuilder; the generated
    class can create and validate instances.

    :param object_schema: the JSON schema that defines the object.
    :param model_name: the name given to the new class.
    :param classes: when True, return the dict of all resolved
        object-classes instead of just the model class.
    """
    cls_bldr = ClassBuilder(self.resolver)
    model_cls = cls_bldr.construct(model_name, object_schema)
    model_cls.proptype = SchemaObjectFactory.proptype
    # index 0 -> the class itself, index 1 (classes=True) -> all resolved
    return [model_cls, cls_bldr.resolved][classes]
Create a object-class based on the object_schema. Use this class to create specific instances, and validate the data values. See the "python-jsonschema-objects" package for details on further usage. Parameters ---------- object_schema : dict The JSON-schema that defines the object model_name : str if provided, the name given to the new class. if not provided, then the name will be determined by one of the following schema values, in this order: ['x-model', 'title', 'id'] classes : bool When `True`, this method will return the complete dictionary of all resolved object-classes built from the object_schema. This can be helpful when a deeply nested object_schema is provided; but generally not necessary. You can then create a :class:`Namespace` instance using this dict. See the 'python-jschonschema-objects.utls' package for further details. When `False` (default), return only the object-class Returns ------- - new class for given object_schema (default) - dict of all classes when :param:`classes` is True
def prepare_value(self, value):
    """Pick the sole template choice as the initial value when required.

    Avoids evaluating the lazy choices more than necessary: only when no
    value is set, the field is required, and there is exactly one
    choice is that choice forced as the default.
    """
    if value is None and self.required:
        choices = list(self.choices)
        if len(choices) == 1:
            value = choices[0][0]
    return super(TemplateChoiceField, self).prepare_value(value)
To avoid evaluating the lazysorted callable more than necessary to establish a potential initial value for the field, we do it here. If there's - only one template choice, and - the field is required, and - there's no prior initial set (either by being bound or by being set higher up the stack), then forcibly select the only "good" value as the default.
def genome_alignment_iterator(fn, reference_species, index_friendly=False,
                              verbose=False):
    """Build an iterator over an MAF file of genome alignment blocks.

    :param fn: filename or stream-like object to iterate over.
    :param reference_species: which species in the alignment should be
        treated as the reference.
    :param index_friendly: if True, buffering is disabled to support
        using the iterator to build an index.
    :param verbose: passed through to the underlying MAF iterator.
    :return: an iterator that yields GenomeAlignmentBlock objects.
    """
    kw_args = {"reference_species": reference_species}
    for e in maf.maf_iterator(fn, index_friendly=index_friendly,
                              yield_class=GenomeAlignmentBlock,
                              yield_kw_args=kw_args,
                              verbose=verbose):
        yield e
build an iterator for an MAF file of genome alignment blocks. :param fn: filename or stream-like object to iterate over. :param reference_species: which species in the alignment should be treated as the reference? :param index_friendly: if True, buffering is disabled to support using the iterator to build an index. :return an iterator that yields GenomeAlignment objects
def match(self, method, path):
    """Find a handler for (method, path) from the registered rules.

    Tries the longest matching prefix first, dropping one trailing
    segment at a time. Returns (handler, params), or (None, None) when
    nothing matches.

    Example:
        handler, params = match('GET', '/path')
    """
    segments = path.split('/')
    while len(segments):
        index = '/'.join(segments)
        if index in self.__idx__:
            handler, params = self.match_rule(method, path, self.__idx__[index])
            if handler:
                return handler, params
        segments.pop()
    return None, None
find handler from registered rules Example: handler, params = match('GET', '/path')
def date_to_json(pydate, manager):
    """Serialize a Python date for the JavaScript Date constructor.

    JavaScript months are zero-based, hence ``month - 1``. A None input
    is returned unchanged.
    """
    if pydate is None:
        return None
    return {
        'year': pydate.year,
        'month': pydate.month - 1,
        'date': pydate.day,
    }
Serialize a Python date object. Attributes of this dictionary are to be passed to the JavaScript Date constructor.
def _get_by_id(collection, id):
    """Return the single item in *collection* whose ``id`` field matches.

    :raises ValueError: when no item matches, or more than one does.
    """
    matches = [item for item in collection if item.id == id]
    if len(matches) == 1:
        return matches[0]
    if not matches:
        raise ValueError('Could not find a matching item')
    raise ValueError('The id matched {0} items, not 1'.format(len(matches)))
Get item from a list by the id field
def get_service_details(self, service_id):
    """Get the details of an individual service.

    Positional arguments:
        service_id: A Darwin LDB service id

    Returns a ServiceDetails instance; raises WebServiceError when the
    SOAP call fails.
    """
    service_query = \
        self._soap_client.service['LDBServiceSoap']['GetServiceDetails']
    try:
        soap_response = service_query(serviceID=service_id)
    except WebFault:
        raise WebServiceError
    return ServiceDetails(soap_response)
Get the details of an individual service and return a ServiceDetails instance. Positional arguments: service_id: A Darwin LDB service id
def update(self, jump):
    """Update the lattice state by accepting a specific jump.

    Moves the atom from the jump's initial site to its final site and
    accumulates its hop count, displacement and squared displacement.

    Args:
        jump (Jump): The jump that has been accepted.

    Returns:
        None.
    """
    atom = jump.initial_site.atom
    dr = jump.dr(self.cell_lengths)
    # occupy the destination site
    jump.final_site.occupation = atom.number
    jump.final_site.atom = atom
    jump.final_site.is_occupied = True
    # vacate the origin site
    jump.initial_site.occupation = 0
    jump.initial_site.atom = None
    jump.initial_site.is_occupied = False
    # update the atom's bookkeeping
    atom.site = jump.final_site
    atom.number_of_hops += 1
    atom.dr += dr
    atom.summed_dr2 += np.dot(dr, dr)
Update the lattice state by accepting a specific jump Args: jump (Jump): The jump that has been accepted. Returns: None.
def read_sha1(file_path, buf_size=None, start_byte=0, read_size=None,
              extra_hashers=None):
    """Compute the SHA-1 of (part of) a file, reading in chunks.

    Reads *read_size* bytes starting at *start_byte* (defaulting to the
    whole file) in *buf_size* chunks, so the file is never loaded fully
    into memory. Each chunk is also fed to every hasher in
    *extra_hashers*.

    Returns the hashlib sha1 object.
    """
    read_size = read_size or os.stat(file_path).st_size
    buf_size = buf_size or DEFAULT_BUFFER_SIZE
    if extra_hashers is None:  # avoid a shared mutable default argument
        extra_hashers = []
    data_read = 0
    total_sha1 = hashlib.sha1()
    # Open once instead of re-opening and seeking for every chunk.
    with open(file_path, 'rb', buffering=0) as f:
        f.seek(start_byte)
        while data_read < read_size:
            data = f.read(min(buf_size, read_size - data_read))
            assert len(data) > 0
            total_sha1.update(data)
            for hasher in extra_hashers:
                hasher.update(data)
            data_read += len(data)
    assert data_read == read_size
    return total_sha1
Determines the sha1 hash of a file in chunks, to prevent loading the entire file at once into memory
def get_user_id(self, user):
    """Get the identifier for *user*.

    Uses settings.SAML_IDP_DJANGO_USERNAME_FIELD when set; otherwise the
    USERNAME_FIELD property of the user model, defaulting to 'username'.
    """
    user_field = getattr(settings, 'SAML_IDP_DJANGO_USERNAME_FIELD', None) or \
        getattr(user, 'USERNAME_FIELD', 'username')
    return str(getattr(user, user_field))
Get identifier for a user. Take the one defined in settings.SAML_IDP_DJANGO_USERNAME_FIELD first, if not set use the USERNAME_FIELD property which is set on the user Model. This defaults to the user.username field.
def render_impl(self, template, context, **options):
    """Render the template file at *template* with *context*.

    :param template: Template file path
    :param context: A dict or dict-like object to instantiate the given
        template file
    :param options: Same options as :meth:`renders_impl`; 'safe' is
        stripped before resolving the template content.
    :return: Rendered string
    """
    # 'safe' is meaningful to renders_impl only, so drop it here
    ropts = dict((k, v) for k, v in options.items() if k != "safe")
    tmpl = anytemplate.engines.base.fallback_render(template, {}, **ropts)
    return self.renders_impl(tmpl, context, **options)
Inherited class must implement this! :param template: Template file path :param context: A dict or dict-like object to instantiate given template file :param options: Same options as :meth:`renders_impl` - at_paths: Template search paths (common option) - at_encoding: Template encoding (common option) - safe: Safely substitute parameters in templates, that is, original template content will be returned if some of template parameters are not found in given context :return: To be rendered string in inherited classes
def get_chunks(self, chunk_type):
    """Yield the sorted chunks for a certain type.

    @type chunk_type: string
    @param chunk_type: type of the chunk
    @rtype: generator of lists
    @return: the chunks for that type
    """
    matching = (nonter for nonter, label in self.label_for_nonter.items()
                if label == chunk_type)
    for nonter in matching:
        subsumed = self.terms_subsumed_by_nonter.get(nonter)
        if subsumed is not None:
            yield sorted(subsumed)
Returns the chunks for a certain type @type chunk_type: string @param chunk_type: type of the chunk @rtype: list @return: the chunks for that type
def namespace_map(self, target):
    """Return the namespace_map used for Thrift generation.

    :param target: The JavaThriftLibrary target to extract the
        namespace_map from.
    :returns: The target's namespace_map (old to new), or the configured
        default when the target does not define one.
    :rtype: dictionary
    """
    self._check_target(target)
    return target.namespace_map or self._default_namespace_map
Returns the namespace_map used for Thrift generation. :param target: The target to extract the namespace_map from. :type target: :class:`pants.backend.codegen.targets.java_thrift_library.JavaThriftLibrary` :returns: The namespaces to remap (old to new). :rtype: dictionary
def fix_header(filepath):
    """Strip per-line whitespace from a MacOS header file.

    This whitespace is causing issues with directives on some
    platforms. The file is rewritten in place only when something
    actually changed.
    """
    with open(filepath, "r+") as f:
        original = f.read()
        cleaned = "\n".join(ln.strip() for ln in original.split("\n"))
        if cleaned == original:
            return
        f.seek(0)
        f.truncate()
        f.write(cleaned)
Removes leading whitespace from a MacOS header file. This whitespace is causing issues with directives on some platforms.
def get_fact_cache(self, host):
    """Get the entire fact cache, only if fact_cache_type is 'jsonfile'.

    :returns: the parsed facts dict for *host*, or {} when no cache
        file exists.
    :raises Exception: for any other fact_cache_type.
    """
    if self.config.fact_cache_type != 'jsonfile':
        raise Exception('Unsupported fact cache type. Only "jsonfile" is supported for reading and writing facts from ansible-runner')
    fact_cache = os.path.join(self.config.fact_cache, host)
    if os.path.exists(fact_cache):
        with open(fact_cache) as f:
            return json.loads(f.read())
    return {}
Get the entire fact cache only if the fact_cache_type is 'jsonfile'
def max_pool(x_input, pool_size):
    """Downsample a feature map with max pooling.

    Uses a pool_size x pool_size window with matching stride and 'SAME'
    padding, so the spatial dimensions shrink by a factor of pool_size.
    """
    return tf.nn.max_pool(x_input, ksize=[1, pool_size, pool_size, 1],
                          strides=[1, pool_size, pool_size, 1],
                          padding='SAME')
max_pool downsamples a feature map by a factor of pool_size.
def accept_moderator_invite(self, subreddit):
    """Accept a moderator invite to the given subreddit.

    Callable upon an instance of Subreddit with no arguments.

    :returns: The json response from the server.
    """
    data = {'r': six.text_type(subreddit)}
    # the cached moderated-subreddit data is now stale
    self.user._mod_subs = None
    self.evict(self.config['my_mod_subreddits'])
    return self.request_json(self.config['accept_mod_invite'], data=data)
Accept a moderator invite to the given subreddit. Callable upon an instance of Subreddit with no arguments. :returns: The json response from the server.
def extender(path=None, cache=None):
    """Context that temporarily extends sys.path and reverts it after.

    Extends sys.path via extend() and restores the previous value when
    the context exits, even on error.
    """
    old_path = sys.path[:]
    # BUG FIX: forward the caller's cache instead of hard-coding None,
    # which silently discarded the argument.
    extend(path, cache=cache)
    try:
        yield
    finally:
        sys.path = old_path
A context that temporarily extends sys.path and reverts it after the context is complete.
def pretty_print_config_to_json(self, configs):
    """Return the directory list document for *configs* as a JSON string.

    Args:
        configs: Either a single dict or a list of dicts containing the
            service configurations to list.

    Returns:
        string: The directory list document, pretty-printed with sorted
        keys.
    """
    directory = self.get_directory_list_doc(configs)
    return json.dumps(directory, sort_keys=True, indent=2,
                      separators=(',', ': '))
JSON string description of a protorpc.remote.Service in a discovery doc. Args: configs: Either a single dict or a list of dicts containing the service configurations to list. Returns: string, The directory list document as a JSON string.
def urlread(url, encoding='utf8'):
    """Read the content of an URL.

    Parameters
    ----------
    url : str
    encoding : str
        Encoding used to decode the response body.

    Returns
    -------
    content : str
    """
    # urlopen lives in different modules on Python 3 vs Python 2
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen
    response = urlopen(url)
    content = response.read()
    content = content.decode(encoding)
    return content
Read the content of an URL. Parameters ---------- url : str Returns ------- content : str
def get_cb_plot(cb, plot=None):
    """Find the (sub)plot whose streams correspond to callback *cb*.

    For GeoOverlayPlots, recurses into the subplots and prefers one with
    a stream that is both registered on the callback and currently
    triggering; otherwise the original plot is returned.
    """
    plot = plot or cb.plot
    if isinstance(plot, GeoOverlayPlot):
        plots = [get_cb_plot(cb, p) for p in plot.subplots.values()]
        plots = [p for p in plots
                 if any(s in cb.streams and getattr(s, '_triggering', False)
                        for s in p.streams)]
        if plots:
            plot = plots[0]
    return plot
Finds the subplot with the corresponding stream.
def cmd(self, tgt, fun, arg=(), timeout=None, tgt_type='glob',
        ret='', jid='', full_return=False, kwarg=None, **kwargs):
    """Synchronously execute a command on targeted minions.

    Publishes the job, waits up to the timeout for minions to reply,
    and returns a dict of results keyed by minion ID. Minions that
    never replied are mapped to False. With full_return=True the
    complete return payload is kept instead of just its 'ret' value.
    """
    # remember whether the event pub channel was already open, so we
    # only close it if this call opened it
    was_listening = self.event.cpub
    try:
        pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, timeout,
                                jid, kwarg=kwarg, listen=True, **kwargs)
        if not pub_data:
            return pub_data
        ret = {}
        for fn_ret in self.get_cli_event_returns(
                pub_data['jid'],
                pub_data['minions'],
                self._get_timeout(timeout),
                tgt,
                tgt_type,
                **kwargs):
            if fn_ret:
                for mid, data in six.iteritems(fn_ret):
                    ret[mid] = (data if full_return else data.get('ret', {}))
        # any targeted minion that produced no return is marked False
        for failed in list(set(pub_data['minions']) - set(ret)):
            ret[failed] = False
        return ret
    finally:
        if not was_listening:
            self.event.close_pub()
Synchronously execute a command on targeted minions The cmd method will execute and wait for the timeout period for all minions to reply, then it will return all minion data at once. .. code-block:: python >>> import salt.client >>> local = salt.client.LocalClient() >>> local.cmd('*', 'cmd.run', ['whoami']) {'jerry': 'root'} With extra keyword arguments for the command function to be run: .. code-block:: python local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'}) Compound commands can be used for multiple executions in a single publish. Function names and function arguments are provided in separate lists but the index values must correlate and an empty list must be used if no arguments are required. .. code-block:: python >>> local.cmd('*', [ 'grains.items', 'sys.doc', 'cmd.run', ], [ [], [], ['uptime'], ]) :param tgt: Which minions to target for the execution. Default is shell glob. Modified by the ``tgt_type`` option. :type tgt: string or list :param fun: The module and function to call on the specified minions of the form ``module.function``. For example ``test.ping`` or ``grains.items``. Compound commands Multiple functions may be called in a single publish by passing a list of commands. This can dramatically lower overhead and speed up the application communicating with Salt. This requires that the ``arg`` param is a list of lists. The ``fun`` list and the ``arg`` list must correlate by index meaning a function that does not take arguments must still have a corresponding empty list at the expected index. :type fun: string or list of strings :param arg: A list of arguments to pass to the remote function. If the function takes no arguments ``arg`` may be omitted except when executing a compound command. :type arg: list or list-of-lists :param timeout: Seconds to wait after the last minion returns but before all minions return. :param tgt_type: The type of ``tgt``. 
Allowed values: * ``glob`` - Bash glob completion - Default * ``pcre`` - Perl style regular expression * ``list`` - Python list of hosts * ``grain`` - Match based on a grain comparison * ``grain_pcre`` - Grain comparison with a regex * ``pillar`` - Pillar data comparison * ``pillar_pcre`` - Pillar data comparison with a regex * ``nodegroup`` - Match on nodegroup * ``range`` - Use a Range server for matching * ``compound`` - Pass a compound match string * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address. .. versionchanged:: 2017.7.0 Renamed from ``expr_form`` to ``tgt_type`` :param ret: The returner to use. The value passed can be single returner, or a comma delimited list of returners to call in order on the minions :param kwarg: A dictionary with keyword arguments for the function. :param full_return: Output the job return only (default) or the full return including exit code and other job metadata. :param kwargs: Optional keyword arguments. Authentication credentials may be passed when using :conf_master:`external_auth`. For example: ``local.cmd('*', 'test.ping', username='saltdev', password='saltdev', eauth='pam')``. Or: ``local.cmd('*', 'test.ping', token='5871821ea51754fdcea8153c1c745433')`` :returns: A dictionary with the result of the execution, keyed by minion ID. A compound command will return a sub-dictionary keyed by function name.
def google_nest_count(self, style):
    """Calculate the nesting count of Google Doc lists.

    Derived from the 'margin-left' CSS value (in px) divided by the
    configured per-level indent; 0 when no margin is present.
    """
    if 'margin-left' not in style:
        return 0
    margin_px = int(style['margin-left'][:-2])
    return margin_px / self.google_list_indent
calculate the nesting count of google doc lists
def removeRouterPrefix(self, prefixEntry):
    """Remove a configured on-mesh prefix from the border router.

    Args:
        prefixEntry: an on-mesh prefix entry

    Returns:
        True: the prefix was removed and netdata re-registered
        False: the removal command failed
        None: an unexpected error occurred (logged to the debug logger)
    """
    print '%s call removeRouterPrefix' % self.port
    print prefixEntry
    prefix = self.__convertIp6PrefixStringToIp6Address(str(prefixEntry))
    try:
        # prefix length is fixed at /64 for on-mesh prefixes here
        prefixLen = 64
        cmd = 'prefix remove %s/%d' % (prefix, prefixLen)
        print cmd
        if self.__sendCommand(cmd)[0] == 'Done':
            # re-register network data so the removal propagates
            return self.__sendCommand('netdataregister')[0] == 'Done'
        else:
            return False
    except Exception, e:
        ModuleHelper.WriteIntoDebugLogger("removeRouterPrefix() Error: " + str(e))
remove the configured prefix on a border router Args: prefixEntry: an on-mesh prefix entry Returns: True: successfully removed the prefix entry from the border router False: failed to remove the prefix entry from the border router
def is_suburi(self, base, test):
    """Check whether *test* lies at or below *base* in a URI tree.

    Both args must be URIs in reduced form.
    """
    if base == test:
        return True
    if base[0] != test[0]:
        return False
    shared = posixpath.commonprefix((base[1], test[1]))
    return len(shared) == len(base[1])
Check if test is below base in a URI tree Both args must be URIs in reduced form.
def clipPolygons(self, polygons):
    """Recursively remove all polygons in `polygons` inside this BSP tree.

    Splits each polygon by this node's plane, clips the halves against
    the front/back subtrees, and returns the surviving polygons.
    Anything that reaches a missing back subtree is inside the solid
    and is dropped.
    """
    # no splitting plane: nothing to clip against at this node
    if not self.plane:
        return polygons[:]
    front = []
    back = []
    for poly in polygons:
        # coplanar polygons are routed to front/back by the plane itself
        self.plane.splitPolygon(poly, front, back, front, back)
    if self.front:
        front = self.front.clipPolygons(front)
    if self.back:
        back = self.back.clipPolygons(back)
    else:
        back = []
    front.extend(back)
    return front
Recursively remove all polygons in `polygons` that are inside this BSP tree.
def streaming_command(self, service, command='', timeout_ms=None):
    """Send service:command on a new stream and yield the response data.

    Args:
        service: The service on the device to talk to.
        command: The command to send to the service.
        timeout_ms: Timeout for the entire command, in milliseconds
            (or a PolledTimeout object).

    Yields:
        The data contained in the responses from the service.

    Raises:
        usb_exceptions.AdbStreamUnavailableError: if the device does
            not support the service.
    """
    timeout = timeouts.PolledTimeout.from_millis(timeout_ms)
    stream = self.open_stream('%s:%s' % (service, command), timeout)
    if not stream:
        raise usb_exceptions.AdbStreamUnavailableError(
            '%s does not support service: %s', self, service)
    for data in stream.read_until_close(timeout):
        yield data
One complete set of packets for a single command. Helper function to call open_stream and yield the output. Sends service:command in a new connection, reading the data for the response. All the data is held in memory, large responses will be slow and can fill up memory. Args: service: The service on the device to talk to. command: The command to send to the service. timeout_ms: Timeout for the entire command, in milliseconds (or as a PolledTimeout object). Yields: The data contained in the responses from the service.
def get_code_language(self):
    """Return [source, language] for rendering this example.

    Largely copied from bokeh.sphinxext.bokeh_plot.run: returns either
    the bare JS source, or — with the 'include_html' option set — a full
    HTML page embedding the BokehJS resources and the script.
    """
    js_source = self.get_js_source()
    if self.options.get("include_html", False):
        resources = get_sphinx_resources(include_bokehjs_api=True)
        html_source = BJS_HTML.render(
            css_files=resources.css_files,
            js_files=resources.js_files,
            bjs_script=js_source)
        return [html_source, "html"]
    else:
        return [js_source, "javascript"]
This is largely copied from bokeh.sphinxext.bokeh_plot.run
def _forward(self):
    """Advance to the next token.

    Updates ``self.current_token`` and ``self.current_pos``.

    Raises:
        MissingTokensError: when trying to advance beyond the end of
            the token stream.
    """
    try:
        token = next(self.tokens)
    except StopIteration:
        raise MissingTokensError(
            "Unexpected end of token stream at %d." % self.current_pos)
    self.current_token = token
    self.current_pos += 1
Advance to the next token. Internal methods, updates: - self.current_token - self.current_pos Raises: MissingTokensError: when trying to advance beyond the end of the token flow.
def get_args_and_values(parser, an_action):
    """Reconstruct the (args, kwargs) used to create an argparse action.

    Inspects the action's __init__ signature, collects the matching
    non-None attribute values (excluding 'self' and 'required'), maps
    the action object back to its registered action name, and pulls
    option_strings out as the positional args tuple.
    """
    args = inspect.getargspec(an_action.__class__.__init__).args
    kwargs = dict(
        (an_attr, getattr(an_action, an_attr))
        for an_attr in args
        if (
            an_attr not in ('self', 'required')
            and getattr(an_action, an_attr) is not None
        )
    )
    action_name = find_action_name_by_value(
        parser._optionals._registries, an_action
    )
    # defensive: 'required' is already filtered out above
    if 'required' in kwargs:
        del kwargs['required']
    kwargs['action'] = action_name
    if 'option_strings' in kwargs:
        args = tuple(kwargs['option_strings'])
        del kwargs['option_strings']
    else:
        args = ()
    return args, kwargs
this rountine attempts to reconstruct the kwargs that were used in the creation of an action object
def get_aggregate(config):
    """Get an aggregator instance with the given configuration.

    Wires together the URL queue, robots.txt handler, plugin manager
    and result cache that the aggregator needs.
    """
    _urlqueue = urlqueue.UrlQueue(max_allowed_urls=config["maxnumurls"])
    _robots_txt = robots_txt.RobotsTxt(config["useragent"])
    plugin_manager = plugins.PluginManager(config)
    result_cache = results.ResultCache()
    return aggregator.Aggregate(config, _urlqueue, _robots_txt,
                                plugin_manager, result_cache)
Get an aggregator instance with given configuration.
def get_current_main_assistant(self):
    """Return the main assistant of the currently selected notebook page."""
    current_page = self.notebook.get_nth_page(self.notebook.get_current_page())
    return current_page.main_assistant
Function return current assistant
def unstructure_attrs_asdict(self, obj):
    """Our version of ``attrs.asdict``, so we can call back to us."""
    dispatch = self._unstructure_func.dispatch
    res = self._dict_factory()
    for attr in obj.__class__.__attrs_attrs__:
        value = getattr(obj, attr.name)
        # Each value is unstructured by the handler registered for its class.
        res[attr.name] = dispatch(value.__class__)(value)
    return res
Our version of `attrs.asdict`, so we can call back to us.
def is_edge(obj, shape):
    """Check if a 2d object touches the edge of the array.

    Parameters
    ----------
    obj : tuple(slice, slice)
        Pair of slices (e.g. from scipy.ndimage.measurements.find_objects).
    shape : tuple(int, int)
        Array shape.

    Returns
    -------
    b : boolean
        True if the object touches any edge of the array, else False.
    """
    rows, cols = obj[0], obj[1]
    return (rows.start == 0 or cols.start == 0
            or rows.stop == shape[0] or cols.stop == shape[1])
Check if a 2d object is on the edge of the array. Parameters ---------- obj : tuple(slice, slice) Pair of slices (e.g. from scipy.ndimage.measurements.find_objects) shape : tuple(int, int) Array shape. Returns ------- b : boolean True if the object touches any edge of the array, else False.
def rekey_multi(self, keys, nonce=None, recovery_key=False):
    """Enter multiple recovery key shares to progress the rekey of the Vault.

    If the threshold number of shares is reached, Vault completes the rekey
    and the loop stops early.

    :param keys: Specifies multiple recovery share keys.
    :type keys: list
    :param nonce: Specifies the nonce of the rekey operation.
    :type nonce: str | unicode
    :param recovery_key: If true, send requests to "rekey-recovery-key"
        instead of "rekey" api path.
    :type recovery_key: bool
    :return: The last response of the rekey request (None for empty *keys*).
    :rtype: response.Request
    """
    response = None
    for share in keys:
        response = self.rekey(key=share, nonce=nonce, recovery_key=recovery_key)
        if response.get('complete'):
            break
    return response
Enter multiple recovery key shares to progress the rekey of the Vault. If the threshold number of recovery key shares is reached, Vault will complete the rekey. :param keys: Specifies multiple recovery share keys. :type keys: list :param nonce: Specifies the nonce of the rekey operation. :type nonce: str | unicode :param recovery_key: If true, send requests to "rekey-recovery-key" instead of "rekey" api path. :type recovery_key: bool :return: The last response of the rekey request. :rtype: response.Request
def change_password(self):
    """View function for a user to change their password.

    Supports html and json requests.
    """
    form = self._get_form('SECURITY_CHANGE_PASSWORD_FORM')
    if form.validate_on_submit():
        # Persist the new password; the DB commit is deferred until after
        # the response is built.
        self.security_service.change_password(
            current_user._get_current_object(), form.new_password.data)
        self.after_this_request(self._commit)
        self.flash(_('flask_unchained.bundles.security:flash.password_change'),
                   category='success')
        if request.is_json:
            # JSON clients get a fresh auth token instead of a redirect.
            return self.jsonify({'token': current_user.get_auth_token()})
        return self.redirect('SECURITY_POST_CHANGE_REDIRECT_ENDPOINT',
                             'SECURITY_POST_LOGIN_REDIRECT_ENDPOINT')
    elif form.errors and request.is_json:
        return self.errors(form.errors)
    # HTML fall-through: re-render the form (including validation errors).
    return self.render('change_password',
                       change_password_form=form,
                       **self.security.run_ctx_processor('change_password'))
View function for a user to change their password. Supports html and json requests.
def return_port(port):
    """Return a port that is no longer being used so it can be reused."""
    if port in _random_ports:
        # Randomly-found ports are simply forgotten, not recycled.
        _random_ports.remove(port)
    elif port in _owned_ports:
        # Ports handed out by the picker go back to the free pool.
        _owned_ports.remove(port)
        _free_ports.add(port)
    elif port in _free_ports:
        logging.info("Returning a port that was already returned: %s", port)
    else:
        logging.info("Returning a port that wasn't given by portpicker: %s", port)
Return a port that is no longer being used so it can be reused.
async def handle_client_new_job(self, client_addr, message: ClientNewJob):
    """Handle a ClientNewJob message.

    Adds the job to the waiting-jobs map and triggers a queue update.
    """
    self._logger.info("Adding a new job %s %s to the queue", client_addr, message.job_id)
    # Jobs are keyed by (client address, job id) so a single client may
    # have several jobs waiting at once.
    self._waiting_jobs[(client_addr, message.job_id)] = message
    await self.update_queue()
Handle a ClientNewJob message. Add the job to the queue and trigger an update.
def int2str(self, num):
    """Convert an integer into a string in this codec's base.

    :param num: A numeric value to be converted to another base as a string.
    :rtype: string
    :raise TypeError: when *num* isn't an integer
    :raise ValueError: when *num* isn't positive
    """
    if int(num) != num:
        raise TypeError('number must be an integer')
    if num < 0:
        raise ValueError('number must be positive')
    radix, alphabet = self.radix, self.alphabet
    # Fast path: octal/decimal/hex with the standard alphabet can use
    # printf-style formatting directly.
    if radix in (8, 10, 16) and \
            alphabet[:radix].lower() == BASE85[:radix].lower():
        fmt = {8: '%o', 10: '%d', 16: '%x'}[radix]
        return (fmt % num).upper()
    digits = []
    while True:
        digits.append(alphabet[num % radix])
        if num < radix:
            break
        num //= radix
    return ''.join(reversed(digits))
Converts an integer into a string. :param num: A numeric value to be converted to another base as a string. :rtype: string :raise TypeError: when *num* isn't an integer :raise ValueError: when *num* isn't positive
def mcast_sender(mcgroup=MC_GROUP):
    """Non-object interface for sending multicast messages.

    Returns a ``(socket, group)`` pair ready for ``sendto``.
    Raises IOError when *mcgroup* is not a valid multicast address.
    """
    sock = socket(AF_INET, SOCK_DGRAM)
    sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    if _is_broadcast_group(mcgroup):
        # Broadcast mode instead of multicast.
        group = '<broadcast>'
        sock.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
    elif((int(mcgroup.split(".")[0]) > 239) or
         (int(mcgroup.split(".")[0]) < 224)):
        # IPv4 multicast addresses live in 224.0.0.0 - 239.255.255.255.
        raise IOError("Invalid multicast address.")
    else:
        group = mcgroup
    ttl = struct.pack('b', TTL_LOCALNET)
    sock.setsockopt(IPPROTO_IP, IP_MULTICAST_TTL, ttl)
    return sock, group
Non-object interface for sending multicast messages.
def to_raster(self, vector):
    """Return the vector in pixel coordinates, as shapely.Geometry.

    The vector's shape is reprojected from its own CRS to this raster's
    CRS and mapped into pixel space via the inverse affine transform.
    """
    return transform(vector.get_shape(vector.crs), vector.crs, self.crs, dst_affine=~self.affine)
Return the vector in pixel coordinates, as shapely.Geometry.
def list_replications(self):
    """Retrieve all replication documents from the replication database.

    :returns: List containing replication Document objects
    """
    documents = []
    for row in self.database.all_docs(include_docs=True)['rows']:
        doc_id = row['id']
        # Design documents are not replication documents.
        if doc_id.startswith('_design/'):
            continue
        document = Document(self.database, doc_id)
        document.update(row['doc'])
        documents.append(document)
    return documents
Retrieves all replication documents from the replication database. :returns: List containing replication Document objects
def sort(self, *columns, **options):
    """Return a new query which will produce results sorted by one or
    more metrics or dimensions.

    Columns may be given as plain strings or as `Column`, `Metric` and
    `Dimension` objects.  Prefix a string with a minus (or pass
    ``descending=True``) to sort in descending order.
    """
    sorts = self.meta.setdefault('sort', [])
    for column in columns:
        # Default the direction from the keyword option so it is defined
        # for every branch.  Previously `descending` was never assigned in
        # the Column branch, raising NameError when the first column was a
        # Column object.
        descending = options.get('descending', False)
        if isinstance(column, Column):
            identifier = column.id
        elif isinstance(column, utils.basestring):
            descending = column.startswith('-') or descending
            identifier = self.api.columns[column.lstrip('-')].id
        else:
            raise ValueError("Can only sort on columns or column strings. Received: {}".format(column))
        sign = '-' if descending else ''
        sorts.append(sign + identifier)
    self.raw['sort'] = ",".join(sorts)
    return self
Return a new query which will produce results sorted by one or more metrics or dimensions. You may use plain strings for the columns, or actual `Column`, `Metric` and `Dimension` objects. Add a minus in front of the metric (either the string or the object) to sort in descending order. ```python # sort using strings query.sort('pageviews', '-device type') # alternatively, ask for a descending sort in a keyword argument query.sort('pageviews', descending=True) # sort using metric, dimension or column objects pageviews = profile.core.metrics['pageviews'] query.sort(-pageviews) ```
def _update_frames(self, written): if self.seekable(): curr = self.tell() self._info.frames = self.seek(0, SEEK_END) self.seek(curr, SEEK_SET) else: self._info.frames += written
Update self.frames after writing.
def _write(self, session, openFile, replaceParamFile): hmetRecords = self.hmetRecords for record in hmetRecords: openFile.write('%s\t%s\t%s\t%s\t%.3f\t%s\t%s\t%s\t%s\t%.2f\t%.2f\n' % ( record.hmetDateTime.year, record.hmetDateTime.month, record.hmetDateTime.day, record.hmetDateTime.hour, record.barometricPress, record.relHumidity, record.totalSkyCover, record.windSpeed, record.dryBulbTemp, record.directRad, record.globalRad))
Write HMET WES records to file.
def remove_context(self, name):
    """Remove a context from the suite.

    Args:
        name (str): Name of the context to remove.
    """
    # Resolve the context first; this validates that it exists.
    self._context(name)
    self.contexts.pop(name)
    self._flush_tools()
Remove a context from the suite. Args: name (str): Name of the context to remove.
def do_python_eval(self):
    """Python evaluation wrapper (PASCAL VOC detection metrics).

    Runs per-class ``voc_eval`` over the written result files and prints
    each class's average precision plus the mean AP.

    Returns:
    ----------
    None
    """
    annopath = os.path.join(self.data_path, 'Annotations', '{:s}.xml')
    imageset_file = os.path.join(self.data_path, 'ImageSets', 'Main', self.image_set + '.txt')
    cache_dir = os.path.join(self.cache_path, self.name)
    aps = []
    # The 11-point interpolated AP metric was used for VOC before 2010.
    use_07_metric = True if int(self.year) < 2010 else False
    print('VOC07 metric? ' + ('Y' if use_07_metric else 'No'))
    for cls_ind, cls in enumerate(self.classes):
        filename = self.get_result_file_template().format(cls)
        rec, prec, ap = voc_eval(filename, annopath, imageset_file, cls, cache_dir,
                                 ovthresh=0.5, use_07_metric=use_07_metric)
        aps += [ap]
        print('AP for {} = {:.4f}'.format(cls, ap))
    print('Mean AP = {:.4f}'.format(np.mean(aps)))
python evaluation wrapper Returns: ---------- None
def DbDeleteClassAttributeProperty(self, argin):
    """Delete class attribute properties from the database.

    :param argin: Str[0] = Tango class name
                  Str[1] = Attribute name
                  Str[2..n] = Property name(s)
    :type: tango.DevVarStringArray
    :return:
    :rtype: tango.DevVoid
    """
    self._log.debug("In DbDeleteClassAttributeProperty()")
    if len(argin) < 3:
        self.warn_stream("DataBase::db_delete_class_attribute_property(): insufficient number of arguments ")
        # NOTE(review): th_exc presumably raises a Tango exception here,
        # aborting the call — confirm; there is no explicit return.
        th_exc(DB_IncorrectArguments,
               "insufficient number of arguments to delete class attribute property",
               "DataBase::DeleteClassAttributeProperty()")
    klass_name, attr_name = argin[:2]
    # All remaining entries are property names to delete.
    for prop_name in argin[2:]:
        self.db.delete_class_attribute_property(klass_name, attr_name, prop_name)
delete class attribute properties from database :param argin: Str[0] = Tango class name Str[1] = Attribute name Str[2] = Property name Str[n] = Property name :type: tango.DevVarStringArray :return: :rtype: tango.DevVoid
def _extract_sel_info(sel):
    """Recurse down a parsed cssselect2 selector tree, returning pseudo
    class info.

    Returns a ``(steps, extras)`` pair: *steps* collects the arguments of
    ``:pass(...)`` functional pseudo-classes, *extras* records occurrences
    of the ``:deferred`` pseudo-class.
    """
    from cssselect2.parser import (CombinedSelector, CompoundSelector,
                                   PseudoClassSelector, FunctionalPseudoClassSelector)
    steps = []
    extras = []
    if isinstance(sel, CombinedSelector):
        # Combined selectors (e.g. "a b"): merge info from both sides.
        lstep, lextras = _extract_sel_info(sel.left)
        rstep, rextras = _extract_sel_info(sel.right)
        steps = lstep + rstep
        extras = lextras + rextras
    elif isinstance(sel, CompoundSelector):
        for ssel in sel.simple_selectors:
            s, e = _extract_sel_info(ssel)
            steps.extend(s)
            extras.extend(e)
    # Functional check must precede the plain PseudoClassSelector check.
    elif isinstance(sel, FunctionalPseudoClassSelector):
        if sel.name == 'pass':
            # Strip surrounding quotes from the serialized argument.
            steps.append(serialize(sel.arguments).strip('"\''))
    elif isinstance(sel, PseudoClassSelector):
        if sel.name == 'deferred':
            extras.append('deferred')
    return (steps, extras)
Recurse down parsed tree, return pseudo class info
def atlasdb_get_zonefiles_missing_count_by_name(name, max_index=None, indexes_exclude=[], con=None, path=None):
    """Get the number of missing zone files for a particular name,
    optionally up to a maximum zonefile index and optionally omitting
    particular zone files in the count.

    Returns an integer.
    """
    # NOTE: the mutable default for indexes_exclude is only read, never
    # mutated, so it is safe here.
    with AtlasDBOpen(con=con, path=path) as dbcon:
        # indexes_exclude values are coerced through int() before string
        # interpolation, so the IN(...) clause cannot carry injection.
        sql = 'SELECT COUNT(*) FROM zonefiles WHERE name = ? AND present = 0 {} {};'.format(
            'AND inv_index <= ?' if max_index is not None else '',
            'AND inv_index NOT IN ({})'.format(','.join([str(int(i)) for i in indexes_exclude])) if len(indexes_exclude) > 0 else ''
        )
        args = (name,)
        if max_index is not None:
            args += (max_index,)
        cur = dbcon.cursor()
        res = atlasdb_query_execute(cur, sql, args)
        # COUNT(*) always yields exactly one row.
        for row in res:
            return row['COUNT(*)']
Get the number of missing zone files for a particular name, optionally up to a maximum zonefile index and optionally omitting particular zone files in the count. Returns an integer
def _process_value(self, value, type):
    """Serialize a value that will be sent to the backend.

    Strings and lists pass through unchanged; everything else is
    JSON-encoded.

    :param value: the value to return
    :param type: hint for what sort of value this is (unused here)
    :type type: str
    """
    if isinstance(value, six.string_types + (list,)):
        return value
    return json.dumps(value)
Process a value that will be sent to backend :param value: the value to return :param type: hint for what sort of value this is :type type: str
def addComponentToPathway(self, component_id, pathway_id):
    """Assert that a component is directly involved in a pathway.

    If a transforming event is performed on the component first, then
    ``addGeneToPathway`` should be used instead.

    :param component_id:
    :param pathway_id:
    :return:
    """
    predicate = self.globaltt['involved in']
    self.graph.addTriple(component_id, predicate, pathway_id)
This can be used directly when the component is directly involved in the pathway. If a transforming event is performed on the component first, then the addGeneToPathway should be used instead. :param pathway_id: :param component_id: :return:
def addLoginMethod(self, localpart, domain, protocol=ANY_PROTOCOL, verified=False, internal=False):
    """Add a login method to this account, propagating up or down as
    necessary to the site store or user store to maintain consistency.

    The method is created (if absent) both on this account and on its
    peer account in the other store.
    """
    if self.store.parent is None:
        # We are in the site store; the peer account lives in the user's
        # substore.
        otherStore = self.avatars.open()
        peer = otherStore.findUnique(LoginAccount)
    else:
        # We are in a user store; the peer account lives in the parent
        # site store and references our substore item.
        otherStore = self.store.parent
        subStoreItem = self.store.parent.getItemByID(self.store.idInParent)
        peer = otherStore.findUnique(LoginAccount,
                                     LoginAccount.avatars == subStoreItem)
    # Create the login method in both stores, attached to the matching
    # account in each.
    for store, account in [(otherStore, peer), (self.store, self)]:
        store.findOrCreate(LoginMethod,
                           account=account,
                           localpart=localpart,
                           domain=domain,
                           protocol=protocol,
                           verified=verified,
                           internal=internal)
Add a login method to this account, propogating up or down as necessary to site store or user store to maintain consistency.
def get_threads(self, querystring, sort='newest_first', exclude_tags=None):
    """Asynchronously look up thread ids matching `querystring`.

    :param querystring: The query string to use for the lookup
    :type querystring: str.
    :param sort: Sort order. one of ['oldest_first', 'newest_first',
        'message_id', 'unsorted']
    :type sort: str
    :param exclude_tags: Tags to exclude by default unless included in
        the search
    :type exclude_tags: list of str
    :returns: a pipe together with the process that asynchronously
        writes to it.
    """
    assert sort in self._sort_orders
    query = self.query(querystring)
    query.set_sort(self._sort_orders[sort])
    for tag in exclude_tags or ():
        query.exclude_tag(tag)
    return self.async_(query.search_threads, (lambda a: a.get_thread_id()))
asynchronously look up thread ids matching `querystring`. :param querystring: The query string to use for the lookup :type querystring: str. :param sort: Sort order. one of ['oldest_first', 'newest_first', 'message_id', 'unsorted'] :type sort: str :param exclude_tags: Tags to exclude by default unless included in the search :type exclude_tags: list of str :returns: a pipe together with the process that asynchronously writes to it. :rtype: (:class:`multiprocessing.Pipe`, :class:`multiprocessing.Process`)
def particles_by_name(self, name):
    """Yield all Particles of the Compound with a specific name.

    Parameters
    ----------
    name : str
        Only particles with this name are returned.

    Yields
    ------
    mb.Compound
        The next Particle in the Compound with the user-specified name.
    """
    matches = (p for p in self.particles() if p.name == name)
    yield from matches
Return all Particles of the Compound with a specific name Parameters ---------- name : str Only particles with this name are returned Yields ------ mb.Compound The next Particle in the Compound with the user-specified name
def timeit(func):
    """Decorator that returns ``(result, timer)``: the wrapped function's
    result together with the Timer that measured the call."""
    @wraps(func)
    def timer_wrapper(*args, **kwargs):
        with Timer() as elapsed:
            outcome = func(*args, **kwargs)
        return outcome, elapsed
    return timer_wrapper
Returns the number of seconds that a function took along with the result
def _generate(self):
    """Generate the Python code from the .ui file.

    Writes to stdout when the output option is '-', otherwise to the
    named file (UTF-8 encoded on Python 3).
    """
    needs_close = False
    if sys.hexversion >= 0x03000000:
        if self._opts.output == '-':
            from io import TextIOWrapper
            # Wrap the raw stdout buffer so UTF-8 output is guaranteed.
            pyfile = TextIOWrapper(sys.stdout.buffer, encoding='utf8')
        else:
            pyfile = open(self._opts.output, 'wt', encoding='utf8')
            needs_close = True
    else:
        # Python 2: open() has no encoding parameter.
        if self._opts.output == '-':
            pyfile = sys.stdout
        else:
            pyfile = open(self._opts.output, 'wt')
            needs_close = True
    import_from = self._opts.import_from
    if import_from:
        from_imports = True
    elif self._opts.from_imports:
        # --from-imports without an explicit package defaults to '.'.
        from_imports = True
        import_from = '.'
    else:
        from_imports = False
    compileUi(self._ui_file, pyfile, self._opts.execute, self._opts.indent,
              from_imports, self._opts.resource_suffix, import_from)
    if needs_close:
        pyfile.close()
Generate the Python code.
def _extract_image_urls(self):
    """Retrieve image URLs from the current results page.

    Parses the rendered page source, decodes each image's JSON metadata
    and appends the original-image URLs to ``self._imageURLs``.
    """
    results_page = self._chromeDriver.page_source
    soup = BeautifulSoup(results_page, 'html.parser')
    metas = soup.find_all('div', class_='rg_meta')
    images = [json.loads(meta.contents[0]) for meta in metas]
    # Use extend instead of a list comprehension evaluated purely for its
    # append side effects (which also built a throwaway list of Nones).
    self._imageURLs.extend(image['ou'] for image in images)
    self._imageURLsExtractedCount += len(images)
Retrieves image URLs from the current page
def groupReadOnlyViews(self, person):
    """Collect all contact items for *person* from the available contact
    types, organize them by contact group name, and turn them into
    read-only views.

    @type person: L{Person}
    @param person: The person whose contact items we're interested in.

    @return: A mapping of L{ContactGroup} names to the read-only views of
        their member contact items, with C{None} being the key for
        groupless contact items.
    @rtype: C{dict} of C{str}
    """
    grouped = {}
    for contactType in self.getContactTypes():
        for contactItem in contactType.getContactItems(person):
            group = contactType.getContactGroup(contactItem)
            key = group.groupName if group is not None else None
            grouped.setdefault(key, []).append(
                contactType.getReadOnlyView(contactItem))
    return grouped
Collect all contact items from the available contact types for the given person, organize them by contact group, and turn them into read-only views. @type person: L{Person} @param person: The person whose contact items we're interested in. @return: A mapping of of L{ContactGroup} names to the read-only views of their member contact items, with C{None} being the key for groupless contact items. @rtype: C{dict} of C{str}
def GetParsersInformation(cls):
    """Retrieves the parsers information.

    Returns:
      list[tuple[str, str]]: parser names and descriptions.
    """
    return [
        (parser_class.NAME, getattr(parser_class, 'DESCRIPTION', ''))
        for _, parser_class in cls.GetParsers()]
Retrieves the parsers information. Returns: list[tuple[str, str]]: parser names and descriptions.
def load(cls, path):
    """Load a Waveform from *path*.

    Only the path is recorded here; the previous implementation also
    split the path into folder/name/extension pieces that were never
    used, so those dead locals have been removed.
    """
    assert os.path.exists(path), "No such file: %r" % path
    wave = Waveform(None)
    wave._path = path
    return wave
Load Waveform from file.
def in_same_dir(as_file, target_file):
    """Return an absolute path to a target file located in the same
    directory as *as_file*.

    Args:
        as_file: File name (including __file__); its directory is used.
        target_file: Name of the target file.
    """
    base = os.path.dirname(as_file)
    return os.path.abspath(os.path.join(base, target_file))
Return an absolute path to a target file that is located in the same directory as as_file Args: as_file: File name (including __file__) Use the directory path of this file target_file: Name of the target file
def send_scp(self, *args, **kwargs):
    """Transmit an SCP Packet and return the response.

    Thin wrapper around ``_send_scp``: the destination coordinates must
    be supplied as the keyword arguments ``x``, ``y`` and ``p``; all
    other arguments are forwarded verbatim.
    """
    dest_x = kwargs.pop("x")
    dest_y = kwargs.pop("y")
    dest_p = kwargs.pop("p")
    return self._send_scp(dest_x, dest_y, dest_p, *args, **kwargs)
Transmit an SCP Packet and return the response. This function is a thin wrapper around :py:meth:`rig.machine_control.scp_connection.SCPConnection.send_scp`. This function will attempt to use the SCP connection nearest the destination of the SCP command if multiple connections have been discovered using :py:meth:`.discover_connections`. Parameters ---------- x : int y : int p : int *args **kwargs
def make_local_dirs(dl_dir, dl_inputs, keep_subdirs):
    """Make any required local directories to prepare for downloading."""
    if not os.path.isdir(dl_dir):
        os.makedirs(dl_dir)
        print('Created local base download directory: %s' % dl_dir)
    if not keep_subdirs:
        return
    # Each download input carries its subdirectory at index 1; dedupe them.
    for subdir in {os.path.join(dl_dir, item[1]) for item in dl_inputs}:
        if not os.path.isdir(subdir):
            os.makedirs(subdir)
Make any required local directories to prepare for downloading
def publish(self, distribution, storage=""):
    """Get or create publish"""
    if distribution not in self._publishes:
        # Lazily create and cache a Publish for this distribution.
        self._publishes[distribution] = Publish(
            self.client, distribution, timestamp=self.timestamp,
            storage=(storage or self.storage))
    return self._publishes[distribution]
Get or create publish
def object(self, infotype, key):
    """Return the encoding, idletime, or refcount about the key.

    *infotype* selects the OBJECT subcommand (e.g. ENCODING, IDLETIME,
    REFCOUNT); it is also passed as a keyword so the response callback
    can parse accordingly.
    """
    return self.execute_command('OBJECT', infotype, key, infotype=infotype)
Return the encoding, idletime, or refcount about the key
def get_emitter(self, name: str) -> Callable[[Event], Event]:
    """Get an emitter for a named event.

    Parameters
    ----------
    name :
        The name of the event the requested emitter will emit.

    Returns
    -------
    An emitter for the named event, delegated to the underlying event
    manager.
    """
    return self._event_manager.get_emitter(name)
Gets and emitter for a named event. Parameters ---------- name : The name of the event he requested emitter will emit. Users may provide their own named events by requesting an emitter with this function, but should do so with caution as it makes time much more difficult to think about. Returns ------- An emitter for the named event. The emitter should be called by the requesting component at the appropriate point in the simulation lifecycle.
def redirect_output(fileobj):
    """Redirect standard out to *fileobj* for the duration of the block.

    Yields the file object; the original ``sys.stdout`` is restored even
    if the body raises.
    """
    saved = sys.stdout
    sys.stdout = fileobj
    try:
        yield fileobj
    finally:
        sys.stdout = saved
Redirect standard out to file.
def read_kioslaverc (kde_config_dir):
    """Read kioslaverc proxy settings into a data dictionary."""
    data = {}
    filename = os.path.join(kde_config_dir, "kioslaverc")
    # Track whether we are inside the "[Proxy Settings]" section.  This
    # must be initialized before the loop: previously any line appearing
    # before the first section header raised NameError.
    in_proxy_settings = False
    with open(filename) as fd:
        for line in fd:
            line = line.rstrip()
            if line.startswith('['):
                in_proxy_settings = line.startswith("[Proxy Settings]")
            elif in_proxy_settings:
                if '=' not in line:
                    continue
                key, value = line.split('=', 1)
                key = key.strip()
                value = value.strip()
                if not key:
                    continue
                # Strip locale markers from the key.
                key = loc_ro.sub("", key).strip()
                if not key:
                    continue
                add_kde_setting(key, value, data)
    resolve_kde_settings(data)
    return data
Read kioslaverc into data dictionary.