code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def patterns(self):
    """Return all patterns that were added to the entity ruler.

    RETURNS (list): The original patterns, one dictionary per pattern.
    """
    combined = []
    for label, token_pats in self.token_patterns.items():
        combined.extend({"label": label, "pattern": pat} for pat in token_pats)
    for label, phrase_pats in self.phrase_patterns.items():
        # Phrase patterns are stored as Doc objects; expose their text.
        combined.extend({"label": label, "pattern": doc.text} for doc in phrase_pats)
    return combined
Get all patterns that were added to the entity ruler. RETURNS (list): The original patterns, one dictionary per pattern. DOCS: https://spacy.io/api/entityruler#patterns
def pad_release(release_to_pad, num_sections=4):
    """Pad a release version so ``LooseVersion`` comparisons are correct.

    Zeros are inserted in front of the last section until the release has
    ``num_sections`` sections; e.g. ``pad_release("390.el6", 4)`` returns
    ``"390.0.0.el6"`` and ``pad_release("390.11.el6", 4)`` returns
    ``"390.11.0.el6"``.

    :param str release_to_pad: dot-separated release string
    :param int num_sections: desired number of sections
    :raises ValueError: if the release has more than ``num_sections`` sections
    """
    parts = release_to_pad.split('.')
    if len(parts) > num_sections:
        # Fixed: the error message previously had an unbalanced parenthesis.
        raise ValueError(
            "Too many sections encountered ({found} > {num}) in release "
            "string {rel}".format(
                found=len(parts), num=num_sections, rel=release_to_pad))
    pad_count = num_sections - len(parts)
    # Insert the zero padding just before the trailing (non-numeric) section.
    return ".".join(parts[:-1] + ['0'] * pad_count + parts[-1:])
Pad out package and kernel release versions so that ``LooseVersion`` comparisons will be correct. Release versions with less than num_sections will be padded in front of the last section with zeros. For example :: pad_release("390.el6", 4) will return ``390.0.0.el6`` and :: pad_release("390.11.el6", 4) will return ``390.11.0.el6``. If the number of sections of the release to be padded is greater than num_sections, a ``ValueError`` will be raised.
def low_frequency_cutoff_from_cli(opts):
    """Parse the low frequency cutoff from the given options.

    Returns
    -------
    dict
        Dictionary of instrument -> low frequency cutoff (empty when no
        instruments were given).
    """
    if opts.instruments is None:
        return {}
    return dict.fromkeys(opts.instruments, opts.low_frequency_cutoff)
Parses the low frequency cutoff from the given options. Returns ------- dict Dictionary of instruments -> low frequency cutoff.
def has_documented_fields(self, include_inherited_fields=False):
    """Return whether at least one field carries documentation."""
    candidates = self.all_fields if include_inherited_fields else self.fields
    return any(field.doc for field in candidates)
Returns whether at least one field is documented.
def _make_schema_patterns(self) -> None:
    """Build the schema pattern for the receiver and its data descendants."""
    self.schema_pattern = self._schema_pattern()
    # Recurse only into internal (non-leaf) data nodes.
    for dc in self.data_children():
        if isinstance(dc, InternalNode):
            dc._make_schema_patterns()
Build schema pattern for the receiver and its data descendants.
def read(self, length=-1):
    """Read ``length`` bytes from the file.

    If ``length`` is negative or omitted, read all data until EOF. The
    result is decoded when an encoding was configured on the stream.

    :type length: int
    :param length: the number of bytes to read
    :return: the chunk of data read from the file
    """
    _complain_ifclosed(self.closed)
    if length < 0:
        length = self.size
    chunks = []
    remaining = length
    while remaining > 0:
        chunk = self.f.read(min(self.buff_size, remaining))
        if chunk == b"":
            break  # EOF reached before the requested length
        chunks.append(chunk)
        remaining -= len(chunk)
    data = b"".join(chunks)
    if self.__encoding:
        return data.decode(self.__encoding, self.__errors)
    return data
Read ``length`` bytes from the file. If ``length`` is negative or omitted, read all data until EOF. :type length: int :param length: the number of bytes to read :rtype: string :return: the chunk of data read from the file
def set_idle_ttl(cls, pid, ttl):
    """Set the idle TTL for a pool, after which it will be destroyed.

    :param str pid: The pool id
    :param int ttl: The TTL for an idle pool
    """
    with cls._lock:
        cls._ensure_pool_exists(pid)
        pool = cls._pools[pid]
        pool.set_idle_ttl(ttl)
Set the idle TTL for a pool, after which it will be destroyed. :param str pid: The pool id :param int ttl: The TTL for an idle pool
def check_exports(mod, specs, renamings):
    """Raise PythranSyntaxError if specs reference an undefined function,
    or export a signature whose arity does not match the definition;
    otherwise do nothing.
    """
    # Resolve renamings so spec keys line up with the AST function names.
    functions = {renamings.get(k, k): v for k, v in specs.functions.items()}
    mod_functions = {node.name: node for node in mod.body
                     if isinstance(node, ast.FunctionDef)}
    for fname, signatures in functions.items():
        try:
            fnode = mod_functions[fname]
        except KeyError:
            raise PythranSyntaxError(
                "Invalid spec: exporting undefined function `{}`"
                .format(fname))
        for signature in signatures:
            args_count = len(fnode.args.args)
            # More actual arguments than the definition accepts.
            if len(signature) > args_count:
                raise PythranSyntaxError(
                    "Too many arguments when exporting `{}`"
                    .format(fname))
            # Fewer arguments than required once defaults are discounted.
            elif len(signature) < args_count - len(fnode.args.defaults):
                raise PythranSyntaxError(
                    "Not enough arguments when exporting `{}`"
                    .format(fname))
Does nothing but raising PythranSyntaxError if specs references an undefined global
def normalize_std_array(vector):
    """Apply a unit-mean and unit-variance normalization to an arrayset."""
    length = 1
    n_samples = len(vector)
    mean = numpy.zeros((length,), 'float64')
    std = numpy.zeros((length,), 'float64')
    # Accumulate sum and sum of squares across samples.
    for array in vector:
        values = array.astype('float64')
        mean += values
        std += values ** 2
    mean /= n_samples
    std /= n_samples
    # Population variance: E[x^2] - E[x]^2, then its square root.
    std -= mean ** 2
    std = std ** 0.5
    arrayset = numpy.empty((n_samples, mean.shape[0]), dtype=numpy.float64)
    for index in range(n_samples):
        arrayset[index, :] = (vector[index] - mean) / std
    return arrayset
Applies a unit mean and variance normalization to an arrayset
def fields(self, fields):
    """Set ``sysparm_fields`` from the given list of field names.

    :param fields: List of fields to include in the response
    :raise InvalidUsage: if fields is not a list
    """
    if not isinstance(fields, list):
        raise InvalidUsage('fields must be of type `list`')
    joined = ",".join(fields)
    self._sysparms['sysparm_fields'] = joined
Sets `sysparm_fields` after joining the given list of `fields` :param fields: List of fields to include in the response :raise: :InvalidUsage: if fields is of an unexpected type
def seek_to_beginning(self, *partitions):
    """Seek to the oldest available offset for the given partitions
    (default: all currently assigned partitions).

    Raises:
        TypeError: if any partition is not a TopicPartition namedtuple
        AssertionError: if a partition is not currently assigned, or if
            no partitions are assigned at all
    """
    if not all([isinstance(p, TopicPartition) for p in partitions]):
        raise TypeError('partitions must be TopicPartition namedtuples')
    if not partitions:
        partitions = self._subscription.assigned_partitions()
        assert partitions, 'No partitions are currently assigned'
    else:
        for p in partitions:
            assert p in self._subscription.assigned_partitions(), 'Unassigned partition'
    for tp in partitions:
        log.debug("Seeking to beginning of partition %s", tp)
        # EARLIEST triggers an offset reset to the beginning on next fetch.
        self._subscription.need_offset_reset(tp, OffsetResetStrategy.EARLIEST)
Seek to the oldest available offset for partitions. Arguments: *partitions: Optionally provide specific TopicPartitions, otherwise default to all assigned partitions. Raises: AssertionError: If any partition is not currently assigned, or if no partitions are assigned.
def _to_raw_pwm(self, values): return [self._to_single_raw_pwm(values[i]) for i in range(len(self._pins))]
Convert uniform pwm values to raw, driver-specific values. :param values: The uniform pwm values (0.0-1.0). :return: Converted, driver-specific pwm values.
def modify_classes():
    """Auto-discover each INSTALLED_APP's class_modifiers.py module and
    fail silently when it is not present. Importing the module lets it
    modify whatever classes it wants.
    """
    import copy
    from django.conf import settings
    from django.contrib.admin.sites import site
    from django.utils.importlib import import_module
    from django.utils.module_loading import module_has_submodule
    for app in settings.INSTALLED_APPS:
        mod = import_module(app)
        try:
            # Snapshot the admin registry so a failed import cannot leave
            # it half-modified.
            before_import_registry = copy.copy(site._registry)
            import_module('%s.class_modifiers' % app)
        except:
            # Restore the registry; re-raise only when the submodule
            # exists (i.e. the error came from inside it, not from the
            # module simply being absent).
            site._registry = before_import_registry
            if module_has_submodule(mod, 'class_modifiers'):
                raise
Auto-discover INSTALLED_APPS class_modifiers.py modules and fail silently when not present. This forces an import on them to modify any classes they may want.
def overwrite_workspace_config(namespace, workspace, cnamespace, configname, body):
    """Add or overwrite a method configuration in a workspace.

    Args:
        namespace (str): project to which workspace belongs
        workspace (str): Workspace name
        cnamespace (str): Configuration namespace
        configname (str): Configuration name
        body (json): new body (definition) of the method config
    """
    headers = _fiss_agent_header({"Content-type": "application/json"})
    endpoint = "workspaces/{0}/{1}/method_configs/{2}/{3}".format(
        namespace, workspace, cnamespace, configname)
    return __put(endpoint, headers=headers, json=body)
Add or overwrite method configuration in workspace. Args: namespace (str): project to which workspace belongs workspace (str): Workspace name cnamespace (str): Configuration namespace configname (str): Configuration name body (json): new body (definition) of the method config Swagger: https://api.firecloud.org/#!/Method_Configurations/overwriteWorkspaceMethodConfig
def normalize_num_type(num_type):
    """Map a dtype to the library-wide float or int type.

    32/64-bit floats map to ``settings.float_type``; 16/32/64-bit ints map
    to ``settings.int_type``; anything else raises ValueError.
    """
    if isinstance(num_type, tf.DType):
        num_type = num_type.as_numpy_dtype.type
    if num_type in (np.float32, np.float64):
        return settings.float_type
    if num_type in (np.int16, np.int32, np.int64):
        return settings.int_type
    raise ValueError('Unknown dtype "{0}" passed to normalizer.'.format(num_type))
Work out what a sensible type for the array is. if the default type is float32, downcast 64bit float to float32. For ints, assume int32
def delete_tree(self, key):
    """Delete all keys under the given root key.

    Basic implementation: list every key under the root first, then
    delete each one individually; storage providers may override this
    with something more efficient.
    """
    self.log.debug("Deleting tree: %r", key)
    # Materialize the listing before deleting so deletions cannot affect it.
    names = [entry["name"]
             for entry in self.list_path(key, with_metadata=False, deep=True)]
    for name in names:
        self.delete_key(name)
Delete all keys under given root key. Basic implementation works by just listing all available keys and deleting them individually but storage providers can implement more efficient logic.
def cleanup_dead_jobs():
    """Mark as FAILED any WooeyJob flagged RUNNING that has no active
    Celery task (e.g. jobs left in limbo by a server crash).
    """
    from .models import WooeyJob
    inspect = celery_app.control.inspect()
    # Ids of all tasks currently being executed by any worker.
    active_tasks = {task['id']
                    for worker, tasks in six.iteritems(inspect.active())
                    for task in tasks}
    active_jobs = WooeyJob.objects.filter(status=WooeyJob.RUNNING)
    to_disable = set()
    for job in active_jobs:
        if job.celery_id not in active_tasks:
            to_disable.add(job.pk)
    WooeyJob.objects.filter(pk__in=to_disable).update(status=WooeyJob.FAILED)
This cleans up jobs that have been marked as running, but are not queued in celery. It is meant to clean up jobs that have been lost due to a server crash or some other reason that leaves a job in limbo.
def install_python(python, runas=None):
    """Install a python implementation with pyenv.

    python
        The version of python to install; should match one of the
        versions listed by pyenv.list.

    Returns the build's stderr on success; on failure the broken install
    is removed and False is returned.

    CLI Example:

    .. code-block:: bash

        salt '*' pyenv.install_python 2.0.0-p0
    """
    # Accept both '2.7.18' and 'python-2.7.18' forms.
    python = re.sub(r'^python-', '', python)
    env = None
    env_list = []
    # The BSDs need GNU make for python builds.
    if __grains__['os'] in ('FreeBSD', 'NetBSD', 'OpenBSD'):
        env_list.append('MAKE=gmake')
    if __salt__['config.option']('pyenv.build_env'):
        env_list.append(__salt__['config.option']('pyenv.build_env'))
    if env_list:
        env = ' '.join(env_list)
    ret = {}
    ret = _pyenv_exec('install', python, env=env, runas=runas, ret=ret)
    if ret['retcode'] == 0:
        # Refresh shims so the new interpreter is picked up.
        rehash(runas=runas)
        return ret['stderr']
    else:
        # Clean up the partial install on failure.
        uninstall_python(python, runas=runas)
        return False
Install a python implementation. python The version of python to install, should match one of the versions listed by pyenv.list CLI Example: .. code-block:: bash salt '*' pyenv.install_python 2.0.0-p0
def on_key_down(self, event):
    """Handle keyboard input: forward the key event to the state queue,
    and map +/= (zoom in), - (zoom out), G (goto position) and
    C (clear thumbnails).
    """
    state = self.state
    if self.mouse_pos:
        # Attach the map position and objects under the cursor to the event.
        latlon = self.coordinates(self.mouse_pos.x, self.mouse_pos.y)
        selected = self.selected_objects(self.mouse_pos)
        state.event_queue.put(SlipKeyEvent(latlon, event, selected))
    c = event.GetUniChar()
    if c == ord('+') or (c == ord('=') and event.ShiftDown()):
        self.change_zoom(1.0/1.2)
        event.Skip()
    elif c == ord('-'):
        self.change_zoom(1.2)
        event.Skip()
    elif c == ord('G'):
        self.enter_position()
        event.Skip()
    elif c == ord('C'):
        self.clear_thumbnails()
        event.Skip()
handle keyboard input
def ms_contrast_restore(self, viewer, event, data_x, data_y, msg=True):
    """Interactively restore the colormap contrast settings after a warp
    operation.
    """
    should_restore = self.cancmap and event.state == 'down'
    if should_restore:
        self.restore_contrast(viewer, msg=msg)
    return True
An interactive way to restore the colormap contrast settings after a warp operation.
def _get_any_translated_model(self, meta=None):
    """Return any available translation, preferring the current language
    and its fallbacks. Returns None if there are no translations at all.
    """
    if meta is None:
        meta = self._parler_meta.root
    tr_model = meta.model
    local_cache = self._translations_cache[tr_model]
    if local_cache:
        # Prefer the current language, then the configured fallbacks.
        check_languages = [self._current_language] + self.get_fallback_languages()
        try:
            for fallback_lang in check_languages:
                trans = local_cache.get(fallback_lang, None)
                if trans and not is_missing(trans):
                    return trans
            # Otherwise any cached, non-missing translation will do.
            return next(t for t in six.itervalues(local_cache)
                        if not is_missing(t))
        except StopIteration:
            pass
    try:
        # Use prefetched translations when available to avoid a query.
        prefetch = self._get_prefetched_translations(meta=meta)
        if prefetch is not None:
            translation = prefetch[0]
        else:
            translation = self._get_translated_queryset(meta=meta)[0]
    except IndexError:
        return None
    else:
        # Cache the hit for subsequent lookups.
        local_cache[translation.language_code] = translation
        _cache_translation(translation)
        return translation
Return any available translation. Returns None if there are no translations at all.
def parse_all_arguments(func):
    """Determine all named arguments with defaults as a dict of
    name -> default value.
    """
    args = dict()
    if sys.version_info < (3, 0):
        # Python 2: pair the trailing parameters with their defaults.
        func_args = inspect.getargspec(func)
        if func_args.defaults is not None:
            count = len(func_args.defaults)
            for offset, name in enumerate(func_args.args[-count:]):
                args[name] = func_args.defaults[offset]
    else:
        # Python 3: skip the first parameter (self/cls-like).
        signature = inspect.signature(func)
        for name in list(signature.parameters)[1:]:
            param = signature.parameters[name]
            if param.default is not param.empty:
                args[param.name] = param.default
    return args
determine all positional and named arguments as a dict
def _to_json(uniq):
    """Serialize a MOC (array of uniq HEALPix cells) to the JSON format.

    Returns a dict mapping str(depth) -> list of pixel indices at that
    depth.
    """
    result_json = {}
    depth, ipix = utils.uniq2orderipix(uniq)
    # depth is assumed sorted: first/last entries bound the depth range.
    min_depth = np.min(depth[0])
    max_depth = np.max(depth[-1])
    for level in range(min_depth, max_depth + 1):
        selection = np.where(depth == level)[0]
        if selection.size:
            result_json[str(level)] = ipix[selection].tolist()
    return result_json
Serializes a MOC to the JSON format. Parameters ---------- uniq : `~numpy.ndarray` The array of HEALPix cells representing the MOC to serialize. Returns ------- result_json : {str : [int]} A dictionary of HEALPix cell lists indexed by their depth.
def permute_data(arrays, random_state=None):
    """Permute multiple numpy arrays with the same order.

    :param arrays: sequence of equal-length arrays
    :param random_state: RandomState-like object providing ``permutation``;
        defaults to ``np.random``
    :raises ValueError: if the arrays differ in length
    """
    if any(len(a) != len(arrays[0]) for a in arrays):
        raise ValueError('All arrays must be the same length.')
    # Explicit None check instead of truthiness: a supplied RandomState
    # should always be honoured.
    if random_state is None:
        random_state = np.random
    order = random_state.permutation(len(arrays[0]))
    return [a[order] for a in arrays]
Permute multiple numpy arrays with the same order.
def is_readable(path):
    """Return True if the file or directory exists and is readable by the
    current user, False otherwise.
    """
    absolute = os.path.abspath(path)
    return os.access(absolute, os.R_OK)
Returns True if provided file or directory exists and can be read with the current user. Returns False otherwise.
def h5features_convert(self, infile):
    """Convert a h5features file to the latest h5features version by
    re-reading every group and appending it through the writer.
    """
    with h5py.File(infile, 'r') as f:
        groups = list(f.keys())
        for group in groups:
            # Reader re-opens the file read-only for each group.
            self._writer.write(
                Reader(infile, group).read(),
                self.groupname, append=True)
Convert a h5features file to the latest h5features version.
def inverse_mercator(xy):
    """Given (x, y) coordinates in spherical mercator, return a
    (lon, lat) tuple in degrees.
    """
    x, y = xy[0], xy[1]
    lon = x / 20037508.34 * 180
    lat = y / 20037508.34 * 180
    # Invert the Mercator projection for the latitude component.
    lat = 180 / math.pi * (2 * math.atan(math.exp(lat * math.pi / 180)) - math.pi / 2)
    return (lon, lat)
Given coordinates in spherical mercator, return a lon,lat tuple.
def _handle_io(self, args, file, result, passphrase=False, binary=False):
    """Handle a call to GPG: pass input data, collect output data.

    Writes the passphrase (if any) followed by *file*'s contents to the
    subprocess stdin, then collects its output into *result*.
    """
    p = self._open_subprocess(args, passphrase)
    if not binary:
        # Wrap stdin so text is encoded with the configured encoding.
        stdin = codecs.getwriter(self._encoding)(p.stdin)
    else:
        stdin = p.stdin
    if passphrase:
        _util._write_passphrase(stdin, passphrase, self._encoding)
    # Copy the input in a background thread while output is collected.
    writer = _util._threaded_copy_data(file, stdin)
    self._collect_output(p, result, writer, stdin)
    return result
Handle a call to GPG - pass input data, collect output data.
def spawn_worker(params):
    """Create, start and join a single worker.

    Has to remain a module-level function (it is used as a process
    target).

    :type params: Params
    """
    setup_logging(params)
    log.info("Adding worker: idx=%s\tconcurrency=%s\tresults=%s",
             params.worker_index, params.concurrency, params.report)
    worker = Worker(params)
    worker.start()
    worker.join()
This method has to be module level function :type params: Params
def DbAddDevice(self, argin):
    """Add a Tango class device to a specific device server.

    :param argin: Str[0] = full device server process name,
        Str[1] = device name, Str[2] = Tango class name,
        Str[3] (optional) = alias
    :type argin: tango.DevVarStringArray
    :rtype: tango.DevVoid
    """
    self._log.debug("In DbAddDevice()")
    if len(argin) < 3:
        self.warn_stream("DataBase::AddDevice(): incorrect number of input arguments ")
        th_exc(DB_IncorrectArguments,
               "incorrect no. of input arguments, needs at least 3 (server,device,class)",
               "DataBase::AddDevice()")
    self.info_stream("DataBase::AddDevice(): insert %s server with device %s",
                     argin[0], argin[1])
    server_name, d_name, klass_name = argin[:3]
    if len(argin) > 3:
        alias = argin[3]
    else:
        alias = None
    # Validate the device name and split it into domain/family/member.
    ret, dev_name, dfm = check_device_name(d_name)
    if not ret:
        th_exc(DB_IncorrectDeviceName,
               "device name (" + d_name + ") syntax error (should be [tango:][//instance/]domain/family/member)",
               "DataBase::AddDevice()")
    self.db.add_device(server_name, (dev_name, dfm), klass_name, alias=alias)
Add a Tango class device to a specific device server :param argin: Str[0] = Full device server process name Str[1] = Device name Str[2] = Tango class name :type: tango.DevVarStringArray :return: :rtype: tango.DevVoid
def get_signature_params(func):
    """Get signature parameters as a list of (name, Parameter) pairs.

    Supports Cython functions by copying the relevant attributes onto a
    no-op Python function, since funcsigs cannot inspect them directly.
    This is somewhat brittle but follows the funcsigs PEP.

    Raises:
        TypeError: if a Cython function lacks the required attributes.
    """
    if is_cython(func):
        attrs = [
            "__code__", "__annotations__", "__defaults__", "__kwdefaults__"
        ]
        if all(hasattr(func, attr) for attr in attrs):
            original_func = func

            # Shadow func with a plain Python function carrying the same
            # signature-relevant attributes.
            def func():
                return
            for attr in attrs:
                setattr(func, attr, getattr(original_func, attr))
        else:
            raise TypeError("{!r} is not a Python function we can process"
                            .format(func))
    return list(funcsigs.signature(func).parameters.items())
Get signature parameters Support Cython functions by grabbing relevant attributes from the Cython function and attaching to a no-op function. This is somewhat brittle, since funcsigs may change, but given that funcsigs is written to a PEP, we hope it is relatively stable. Future versions of Python may allow overloading the inspect 'isfunction' and 'ismethod' functions / create ABC for Python functions. Until then, it appears that Cython won't do anything about compatability with the inspect module. Args: func: The function whose signature should be checked. Raises: TypeError: A type error if the signature is not supported
def _set_categories(self): for column, _ in self._categories.items(): if column in self.columns: self[column] = self[column].astype('category')
Inplace conversion from categories.
def _pack(self):
    """Pack the message fields and return the encoded byte array."""
    buffer = ByteBuffer()
    if not hasattr(self, '__fields__'):
        # Nothing to encode; return the empty buffer contents.
        return buffer.array
    for field in self.__fields__:
        field.encode(self, buffer)
    return buffer.array
Pack the message and return an array.
def _load_table(self, metadata_path, data_path):
    """Load the data table, taking the configured subset if needed.

    Parameters
    ----------
    metadata_path : str
        Path to metadata file
    data_path : str
        Path to data file, absolute or relative to metadata file

    Returns
    -------
    dataframe
        Table for analysis

    Raises
    ------
    TypeError
        If the data file extension is not csv/db/sql.
    """
    # Resolve data_path relative to the metadata file's directory.
    metadata_dir = os.path.dirname(os.path.expanduser(metadata_path))
    data_path = os.path.normpath(os.path.join(metadata_dir, data_path))
    extension = data_path.split('.')[-1]
    if extension == 'csv':
        full_table = pd.read_csv(data_path, index_col=False)
        table = _subset_table(full_table, self.subset)
        # Keep the metadata consistent with the subset of rows used.
        self.meta, _ = _subset_meta(self.meta, self.subset)
    elif extension in ['db', 'sql']:
        table = self._get_db_table(data_path, extension)
    else:
        raise TypeError('Cannot process file of type %s' % extension)
    return table
Load data table, taking subset if needed Parameters ---------- metadata_path : str Path to metadata file data_path : str Path to data file, absolute or relative to metadata file Returns ------- dataframe Table for analysis
def _add_interaction(int_type, **kwargs):
    """Add an interaction of the given type to the current (or provided)
    figure, associated with the given (or last added) marks.

    Parameters
    ----------
    int_type: type
        The type of interaction to be added.
    """
    fig = kwargs.pop('figure', current_figure())
    marks = kwargs.pop('marks', [_context['last_mark']])
    # Fill dimension-linked traits with the matching context scales.
    for name, traitlet in int_type.class_traits().items():
        dimension = traitlet.get_metadata('dimension')
        if dimension is not None:
            kwargs[name] = _get_context_scale(dimension)
    kwargs['marks'] = marks
    interaction = int_type(**kwargs)
    # Replace (and close) any existing interaction on the figure.
    if fig.interaction is not None:
        fig.interaction.close()
    fig.interaction = interaction
    return interaction
Add the interaction for the specified type. If a figure is passed using the key-word argument `figure` it is used. Else the context figure is used. If a list of marks are passed using the key-word argument `marks` it is used. Else the latest mark that is passed is used as the only mark associated with the selector. Parameters ---------- int_type: type The type of interaction to be added.
def H_donor_count(mol):
    """Return the hydrogen bond donor count of the molecule."""
    mol.require("Valence")
    count = 0
    for _, atom in mol.atoms_iter():
        if atom.H_donor:
            count += 1
    return count
Hydrogen bond donor count
def is_sparse_vector(x):
    """Return True if x is a 2D sparse matrix whose first dimension is 1."""
    if not sp.issparse(x):
        return False
    return len(x.shape) == 2 and x.shape[0] == 1
x is a 2D sparse matrix with it's first shape equal to 1.
def get_default_config_file(rootdir=None):
    """Search for a readable configuration file under *rootdir*.

    With no rootdir, return the built-in default path. Returns None when
    no candidate is found.
    """
    if rootdir is None:
        return DEFAULT_CONFIG_FILE
    for candidate in CONFIG_FILES:
        candidate = os.path.join(rootdir, candidate)
        if os.path.isfile(candidate) and os.access(candidate, os.R_OK):
            return candidate
Search for configuration file.
def ppo_opt_step(i, opt_state, ppo_opt_update, policy_net_apply,
                 old_policy_params, value_net_apply, value_net_params,
                 padded_observations, padded_actions, padded_rewards,
                 reward_mask, gamma=0.99, lambda_=0.95, epsilon=0.1):
    """Run one PPO optimizer step: differentiate ppo_loss w.r.t. the new
    policy params (argnums=1) and apply the optimizer update.
    """
    new_policy_params = trax_opt.get_params(opt_state)
    # Gradient of the PPO loss with respect to new_policy_params only.
    g = grad(
        ppo_loss, argnums=1)(
            policy_net_apply, new_policy_params, old_policy_params,
            value_net_apply, value_net_params, padded_observations,
            padded_actions, padded_rewards, reward_mask,
            gamma=gamma, lambda_=lambda_, epsilon=epsilon)
    return ppo_opt_update(i, g, opt_state)
PPO optimizer step.
def from_dict(self, d):
    """Populate this person from a dict with 'sitting' and optional 'id'.

    :param d: dictionary representing a person ('sitting'[, 'id'])
    :type d: dict
    :rtype: Person
    :raises KeyError: 'sitting' not set
    """
    self.sitting = d['sitting']
    self.id = d.get('id')
    return self
Set this person from dict :param d: Dictionary representing a person ('sitting'[, 'id']) :type d: dict :rtype: Person :raises KeyError: 'sitting' not set
def _ExportFileContent(self, aff4_object, result):
    """Add file content (and its sha256) from aff4_object to result, up
    to MAX_CONTENT_SIZE bytes. Read failures are logged, not raised.
    """
    if self.options.export_files_contents:
        try:
            result.content = aff4_object.Read(self.MAX_CONTENT_SIZE)
            result.content_sha256 = hashlib.sha256(result.content).hexdigest()
        except (IOError, AttributeError) as e:
            logging.warning("Can't read content of %s: %s", aff4_object.urn, e)
Add file content from aff4_object to result.
def cpu_frequency(self) -> str:
    """Get a random frequency of CPU.

    :return: Frequency of CPU, e.g. '4.0GHz'.
    """
    frequency = self.random.uniform(a=1.5, b=4.3, precision=1)
    return '{}GHz'.format(frequency)
Get a random frequency of CPU. :return: Frequency of CPU. :Example: 4.0 GHz.
def inspect(self, **kwargs):
    """Plot the Phonon SCF cycle results with matplotlib.

    Returns: `matplotlib` figure, or None if parsing the output failed.
    """
    scf_cycle = abiinspect.PhononScfCycle.from_file(self.output_file.path)
    if scf_cycle is None:
        return None
    if "title" not in kwargs:
        kwargs["title"] = str(self)
    return scf_cycle.plot(**kwargs)
Plot the Phonon SCF cycle results with matplotlib. Returns: `matplotlib` figure, None if some error occurred.
def debounce(self, wait, immediate=None):
    """Return a function that fires only after *wait* ms have elapsed
    without further calls (trailing edge). ``immediate`` is accepted for
    API parity but is not used by this implementation.
    """
    delay = float(wait) / 1000.0

    def debounced(*args, **kwargs):
        def fire():
            self.obj(*args, **kwargs)
        # Cancel any pending timer before scheduling a fresh one.
        try:
            debounced.t.cancel()
        except AttributeError:
            pass
        debounced.t = Timer(delay, fire)
        debounced.t.start()

    return self._wrap(debounced)
Returns a function, that, as long as it continues to be invoked, will not be triggered. The function will be called after it stops being called for N milliseconds. If `immediate` is passed, trigger the function on the leading edge, instead of the trailing.
def get_urls_from_onetab(onetab):
    """Get video urls from a link to a onetab shared page.

    Args:
        onetab (str): Link to a onetab shared page.

    Returns:
        list: List of links found on the page.
    """
    html = requests.get(onetab).text
    soup = BeautifulSoup(html, 'lxml')
    # Onetab renders each saved link inside a div with this exact style.
    divs = soup.findAll('div', {'style': 'padding-left: 24px; '
                                         'padding-top: 8px; '
                                         'position: relative; '
                                         'font-size: 13px;'})
    return [div.find('a').attrs['href'] for div in divs]
Get video urls from a link to the onetab shared page. Args: onetab (str): Link to a onetab shared page. Returns: list: List of links to the videos.
def decision(self, result, **values):
    """Find the matching row for **values and return the entries for the
    given *result* headers (a single value when one header is requested,
    otherwise a list).
    """
    row = self.__getDecision(result, **values)
    picked = [row[header] for header in result]
    if len(picked) == 1:
        return picked[0]
    return picked
The decision method with callback option. This method will find matching row, construct a dictionary and call callback with dictionary. Args: callback (function): Callback function will be called when decision will be finded. result (array of str): Array of header string **values (dict): What should finder look for, (headerString : value). Returns: Arrays of finded values strings Example: >>> table = DecisionTable(''' >>> header1 header2 >>> =============== >>> value1 value2 >>> ''') >>> >>> header1, header2 = table.decision( >>> ['header1','header2'], >>> header1='value1', >>> header2='value2' >>> ) >>> print(header1,header2) (value1 value2)
def dumps(self) -> str:
    """Dump the json content as a pretty-printed, key-sorted string."""
    payload = self.data
    return json.dumps(payload, indent=4, sort_keys=True)
Dumps the json content as a string
def clear_local_registration(self):
    """Delete registration dotfiles and the machine-id so the next run
    performs a fresh registration.
    """
    delete_registered_file()
    delete_unregistered_file()
    write_to_disk(constants.machine_id_file, delete=True)
    logger.debug('Re-register set, forcing registration.')
    # Generate (and log) a brand new machine id.
    logger.debug('New machine-id: %s', generate_machine_id(new=True))
Deletes dotfiles and machine-id for fresh registration
def callback(self):
    """Run the stored callback and record the invocation time."""
    args, kwargs = self._args, self._kwargs
    self._callback(*args, **kwargs)
    self._last_checked = time.time()
Run the callback
def model_resources(self):
    """Return a swagger-style JSON listing of all supported resources,
    with client caching disabled.
    """
    response = jsonify({
        'apiVersion': '0.1',
        'swaggerVersion': '1.1',
        'basePath': '%s%s' % (self.base_uri(), self.api.url_prefix),
        'apis': self.get_model_resources()
    })
    # Force clients to refetch the listing every time.
    response.headers.add('Cache-Control', 'max-age=0')
    return response
Listing of all supported resources.
def print_out(self, value, indent=None, format_options=None, asap=False):
    """Print out the given value.

    :param value:
    :param str|unicode indent: prefix, defaults to '> '
    :param dict|str|unicode format_options: text color (str) or a full
        formatting options dict; defaults to 'gray'
    :param bool asap: print as soon as possible
    """
    if indent is None:
        indent = '> '
    text = indent + str(value)
    if format_options is None:
        format_options = 'gray'
    if self._style_prints and format_options:
        # A bare string is shorthand for the foreground color.
        if not isinstance(format_options, dict):
            format_options = {'color_fg': format_options}
        text = format_print_text(text, **format_options)
    # 'iprint' is the immediate variant of 'print'.
    command = 'iprint' if asap else 'print'
    self._set(command, text, multi=True)
    return self
Prints out the given value. :param value: :param str|unicode indent: :param dict|str|unicode format_options: text color :param bool asap: Print as soon as possible.
def shortlink_scanned(self, data):
    """Handle a shortlink_scanned event by creating a 20.00 NOK payment
    request for the scanning customer.
    """
    self.logger.info("Received shortlink_scanned event")
    data = json.loads(data)
    customer_token = str(data['object']['id'])
    response = self.mapiclient.create_payment_request(
        customer=customer_token,
        currency="NOK",
        amount="20.00",
        allow_credit=True,
        pos_id=self._pos_id,
        pos_tid=str(uuid.uuid4()),
        action='auth',
        expires_in=90,
        callback_uri="pusher:m-winterwarming-pos_callback_chan",
        text='Have some hot chocolate!')
    # Remember the transaction id for the follow-up capture.
    self._tid = response['id']
    print(str(self._tid))
Called when a shortlink_scanned event is received
def traverse_data(obj, key_target):
    """Traverse nested lists/dicts and return the dict that contains
    *key_target* as a key, or False if it is not found.

    *obj* may also be a path to a ``.json`` file, which is loaded first.
    Exits via sys.exit on invalid input, or when the safety limit of
    1000 visited nodes is reached.
    """
    if isinstance(obj, str) and '.json' in obj:
        # Use a context manager so the file handle is closed.
        with open(obj, 'r') as fh:
            obj = json.load(fh)
    if isinstance(obj, list):
        queue = obj.copy()
    elif isinstance(obj, dict):
        queue = [obj.copy()]
    else:
        sys.exit('obj needs to be a list or dict')
    count = 0
    # BUG FIX: the loop previously ran `while not queue or count != 1000`,
    # which kept looping (and popped from an empty queue) once the queue
    # drained. Loop while there is work left and the safety limit holds.
    while queue and count < 1000:
        count += 1
        curr_obj = queue.pop()
        if isinstance(curr_obj, dict):
            for key in curr_obj:
                if key == key_target:
                    return curr_obj
                queue.append(curr_obj[key])
        elif isinstance(curr_obj, list):
            queue.extend(curr_obj)
    if count == 1000:
        sys.exit('traverse_data needs to be updated...')
    return False
will traverse nested list and dicts until key_target equals the current dict key
def add_route(enode, route, via, shell=None):
    """Add a new static route via ``ip route add``.

    :param enode: Engine node to communicate with.
    :param str route: Route to add ('192.168.20.20/24', '2001::0/24' or
        'default').
    :param str via: Gateway IP for the route.
    :param shell: Shell name to execute commands; None for the node
        default shell.
    """
    gateway = ip_address(via)
    # IPv6 when either the gateway or a non-default route is IPv6.
    use_ipv6 = gateway.version == 6 or (
        route != 'default' and ip_network(route).version == 6)
    version = '-6' if use_ipv6 else '-4'
    cmd = 'ip {version} route add {route} via {via}'.format(
        version=version, route=route, via=gateway
    )
    response = enode(cmd, shell=shell)
    assert not response
Add a new static route. :param enode: Engine node to communicate with. :type enode: topology.platforms.base.BaseNode :param str route: Route to add, an IP in the form ``'192.168.20.20/24'`` or ``'2001::0/24'`` or ``'default'``. :param str via: Via for the route as an IP in the form ``'192.168.20.20/24'`` or ``'2001::0/24'``. :param shell: Shell name to execute commands. If ``None``, use the Engine Node default shell. :type shell: str or None
def pad(text, length):
    """Pad text to the given display length, taking wide characters into
    account.
    """
    display_width = wcswidth(text)
    if display_width >= length:
        return text
    return text + ' ' * (length - display_width)
Pads text to given length, taking into account wide characters.
def update(self, id, name, incident_preference):
    """Update an alert policy's name and incident preference.

    :type id: integer
    :param id: The id of the policy
    :type name: str
    :param name: The name of the policy
    :type incident_preference: str
    :param incident_preference: PER_POLICY, PER_CONDITION or
        PER_CONDITION_AND_TARGET
    :rtype: dict
    :return: The JSON response of the API
    """
    payload = {
        "policy": {
            "name": name,
            "incident_preference": incident_preference
        }
    }
    endpoint = '{0}alerts_policies/{1}.json'.format(self.URL, id)
    return self._put(url=endpoint, headers=self.headers, data=payload)
This API endpoint allows you to update an alert policy :type id: integer :param id: The id of the policy :type name: str :param name: The name of the policy :type incident_preference: str :param incident_preference: Can be PER_POLICY, PER_CONDITION or PER_CONDITION_AND_TARGET :rtype: dict :return: The JSON response of the API :: { "policy": { "created_at": "time", "id": "integer", "incident_preference": "string", "name": "string", "updated_at": "time" } }
def _format_lon(self, lon): if self.ppd in [4, 16, 64, 128]: return None else: return map(lambda x: "{0:0>3}".format(int(x)), self._map_center('long', lon))
Return a formatted longitude string for the file name.
def update_record_field(table, sys_id, field, value):
    """Update one field of a record in a servicenow table.

    :param str table: The table name, e.g. sys_user
    :param str sys_id: The unique ID of the record
    :param str field: The field to update
    :param str value: The new value

    CLI Example:

    .. code-block:: bash

        salt myminion servicenow.update_record_field sys_user 2348234 first_name jimmy
    """
    client = _get_client()
    client.table = table
    return client.update({field: value}, sys_id)
Update the value of a record's field in a servicenow table :param table: The table name, e.g. sys_user :type table: ``str`` :param sys_id: The unique ID of the record :type sys_id: ``str`` :param field: The new value :type field: ``str`` :param value: The new value :type value: ``str`` CLI Example: .. code-block:: bash salt myminion servicenow.update_record_field sys_user 2348234 first_name jimmy
def fix_file(input_file, encoding=None, *, fix_entities='auto',
             remove_terminal_escapes=True, fix_encoding=True,
             fix_latin_ligatures=True, fix_character_width=True,
             uncurl_quotes=True, fix_line_breaks=True, fix_surrogates=True,
             remove_control_chars=True, remove_bom=True,
             normalization='NFC'):
    """Fix text found in a file, yielding one fixed line at a time.

    Bytes input is decoded with *encoding*; when no encoding is supplied
    the encoding is guessed from the first bytes line (see ``guess_bytes``
    — a few common encodings are tried, no promises).
    """
    entities = fix_entities
    for line in input_file:
        if isinstance(line, bytes):
            if encoding is None:
                # Guess once; reuse the guessed encoding for later lines.
                line, encoding = guess_bytes(line)
            else:
                line = line.decode(encoding)
        # Looks like markup: stop decoding entities from here on.
        if fix_entities == 'auto' and '<' in line and '>' in line:
            entities = False
        yield fix_text_segment(
            line,
            fix_entities=entities,
            remove_terminal_escapes=remove_terminal_escapes,
            fix_encoding=fix_encoding,
            fix_latin_ligatures=fix_latin_ligatures,
            fix_character_width=fix_character_width,
            uncurl_quotes=uncurl_quotes,
            fix_line_breaks=fix_line_breaks,
            fix_surrogates=fix_surrogates,
            remove_control_chars=remove_control_chars,
            remove_bom=remove_bom,
            normalization=normalization
        )
Fix text that is found in a file. If the file is being read as Unicode text, use that. If it's being read as bytes, then we hope an encoding was supplied. If not, unfortunately, we have to guess what encoding it is. We'll try a few common encodings, but we make no promises. See the `guess_bytes` function for how this is done. The output is a stream of fixed lines of text.
def declare_actor(self, actor):
    """Declare a new actor on this broker; declaring an actor twice
    replaces the first actor with the second by name.

    Parameters:
        actor(Actor): The actor being declared.
    """
    self.emit_before("declare_actor", actor)
    queue_name = actor.queue_name
    self.declare_queue(queue_name)
    self.actors[actor.actor_name] = actor
    self.emit_after("declare_actor", actor)
Declare a new actor on this broker. Declaring an Actor twice replaces the first actor with the second by name. Parameters: actor(Actor): The actor being declared.
def check_slice_perms(self, slice_id):
    """Check if the user can access a cached response from slice_json.

    Takes ``self`` to match the signature of the decorated method.
    Raises if the user lacks permission on the slice's datasource.
    """
    form_data, slc = get_form_data(slice_id, use_slice_data=True)
    datasource_type = slc.datasource.type
    datasource_id = slc.datasource.id
    viz_obj = get_viz(
        datasource_type=datasource_type,
        datasource_id=datasource_id,
        form_data=form_data,
        force=False,
    )
    security_manager.assert_datasource_permission(viz_obj.datasource)
Check if user can access a cached response from slice_json. This function takes `self` since it must have the same signature as the the decorated method.
def diff(self, other=diff.Diffable.Index, paths=None, create_patch=False, **kwargs):
    """Diff this index against the working copy or a Tree or Commit
    object. See Diffable.diff for parameter and return documentation.

    :note: Will only work with indices that represent the default git
        index (not initialized with a stream).
    """
    # Diffing the index against itself is always empty.
    if other is self.Index:
        return diff.DiffIndex()
    # Resolve rev strings to objects first.
    if isinstance(other, string_types):
        other = self.repo.rev_parse(other)
    if isinstance(other, Object):
        # Delegate: let the object diff against the index, flipping the
        # R (reverse) flag so the diff direction is preserved.
        cur_val = kwargs.get('R', False)
        kwargs['R'] = not cur_val
        return other.diff(self.Index, paths, create_patch, **kwargs)
    if other is not None:
        raise ValueError("other must be None, Diffable.Index, a Tree or Commit, was %r" % other)
    # other is None: diff against the working copy via the base class.
    return super(IndexFile, self).diff(other, paths, create_patch, **kwargs)
Diff this index against the working copy or a Tree or Commit object For a documentation of the parameters and return values, see Diffable.diff :note: Will only work with indices that represent the default git index as they have not been initialized with a stream.
def instruction_ASR_memory(self, opcode, ea, m):
    """Arithmetic shift memory right; returns (ea, result masked to a byte)."""
    shifted = self.ASR(m)
    return ea, shifted & 0xff
Arithmetic shift memory right
def produce_csv_output(filehandle: TextIO,
                       fields: Sequence[str],
                       values: Iterable[str]) -> None:
    """Produce CSV output without using ``csv.writer`` (poor; deprecated).

    Writes the header row, then every row of *values*.

    Args:
        filehandle: file to write to
        fields: field names
        values: values
    """
    output_csv(filehandle, fields)
    for row in values:
        output_csv(filehandle, row)
Produce CSV output, without using ``csv.writer``, so the log can be used for lots of things. - ... eh? What was I talking about? - POOR; DEPRECATED. Args: filehandle: file to write to fields: field names values: values
async def on_raw_313(self, message):
    """Record WHOIS operator info for a pending WHOIS target."""
    target, nickname = message.params[:2]
    if nickname not in self._pending['whois']:
        return
    self._whois_info[nickname].update({'oper': True})
WHOIS operator info.
def _cl_gof3r(file_info, region): command = ["gof3r", "get", "--no-md5", "-k", file_info.key, "-b", file_info.bucket] if region != "us-east-1": command += ["--endpoint=s3-%s.amazonaws.com" % region] return (command, "gof3r")
Command line required for download using gof3r.
def dragEnterEvent(self, event):
    """Accept drags that carry file URLs so the user can drop files."""
    has_urls = mimedata2url(event.mimeData())
    if has_urls:
        event.accept()
    else:
        event.ignore()
Allow user to drag files
def explain_prediction(estimator, doc, **kwargs):
    """Fallback explanation of an estimator prediction.

    :func:`explain_prediction` dispatches to concrete implementations by
    estimator type; this base implementation returns an Explanation whose
    ``error`` notes that the estimator is not supported.
    """
    message = "estimator %r is not supported" % estimator
    return Explanation(estimator=repr(estimator), error=message)
Return an explanation of an estimator prediction. :func:`explain_prediction` is not doing any work itself, it dispatches to a concrete implementation based on estimator type. Parameters ---------- estimator : object Estimator instance. This argument must be positional. doc : object Example to run estimator on. Estimator makes a prediction for this example, and :func:`explain_prediction` tries to show information about this prediction. Pass a single element, not a one-element array: if you fitted your estimator on ``X``, that would be ``X[i]`` for most containers, and ``X.iloc[i]`` for ``pandas.DataFrame``. top : int or (int, int) tuple, optional Number of features to show. When ``top`` is int, ``top`` features with a highest absolute values are shown. When it is (pos, neg) tuple, no more than ``pos`` positive features and no more than ``neg`` negative features is shown. ``None`` value means no limit (default). This argument may be supported or not, depending on estimator type. top_targets : int, optional Number of targets to show. When ``top_targets`` is provided, only specified number of targets with highest scores are shown. Negative value means targets with lowest scores are shown. Must not be given with ``targets`` argument. ``None`` value means no limit: all targets are shown (default). This argument may be supported or not, depending on estimator type. target_names : list[str] or {'old_name': 'new_name'} dict, optional Names of targets or classes. This argument can be used to provide human-readable class/target names for estimators which don't expose clss names themselves. It can be also used to rename estimator-provided classes before displaying them. This argument may be supported or not, depending on estimator type. targets : list, optional Order of class/target names to show. This argument can be also used to show information only for a subset of classes. 
It should be a list of class / target names which match either names provided by an estimator or names defined in ``target_names`` parameter. Must not be given with ``top_targets`` argument. In case of binary classification you can use this argument to set the class which probability or score should be displayed, with an appropriate explanation. By default a result for predicted class is shown. For example, you can use ``targets=[True]`` to always show result for a positive class, even if the predicted label is False. This argument may be supported or not, depending on estimator type. feature_names : list, optional A list of feature names. It allows to specify feature names when they are not provided by an estimator object. This argument may be supported or not, depending on estimator type. feature_re : str, optional Only feature names which match ``feature_re`` regex are returned (more precisely, ``re.search(feature_re, x)`` is checked). feature_filter : Callable[[str, float], bool], optional Only feature names for which ``feature_filter`` function returns True are returned. It must accept feature name and feature value. Missing features always have a NaN value. **kwargs: dict Keyword arguments. All keyword arguments are passed to concrete explain_prediction... implementations. Returns ------- Explanation :class:`~.Explanation` result. Use one of the formatting functions from :mod:`eli5.formatters` to print it in a human-readable form. Explanation instances have repr which works well with IPython notebook, but it can be a better idea to use :func:`eli5.show_prediction` instead of :func:`eli5.explain_prediction` if you work with IPython: :func:`eli5.show_prediction` allows to customize formatting without a need to import :mod:`eli5.formatters` functions.
def _document_by_attribute(self, kind, condition=None):
    """Return this object's document if it has a matching attribute.

    The document is returned only when at least one of its attributes
    is an instance of ``kind`` and, when ``condition`` is supplied,
    also satisfies ``condition(attr)``; otherwise ``None`` is returned.
    """
    doc = self.document
    if not doc:
        return None
    # Scan lazily; the first matching attribute is enough.
    hits = (attr for attr in doc.attributes
            if isinstance(attr, kind) and (not condition or condition(attr)))
    sentinel = object()
    return doc if next(hits, sentinel) is not sentinel else None
Helper method to return the document only if it has an attribute that's an instance of the given kind, and passes the condition.
def batch_write(self, tablename, return_capacity=None,
                return_item_collection_metrics=NONE):
    """Create a :class:`BatchWriter` context manager for bulk writes.

    Parameters
    ----------
    tablename : str
        Name of the table to write to
    return_capacity : {NONE, INDEXES, TOTAL}, optional
        Consumed-capacity detail to report (default from connection).
    return_item_collection_metrics : (NONE, SIZE), optional
        SIZE reports statistics about modified item collections.
    """
    capacity = self._default_capacity(return_capacity)
    writer = BatchWriter(
        self,
        tablename,
        return_capacity=capacity,
        return_item_collection_metrics=return_item_collection_metrics,
    )
    return writer
Perform a batch write on a table Parameters ---------- tablename : str Name of the table to write to return_capacity : {NONE, INDEXES, TOTAL}, optional INDEXES will return the consumed capacity for indexes, TOTAL will return the consumed capacity for the table and the indexes. (default NONE) return_item_collection_metrics : (NONE, SIZE), optional SIZE will return statistics about item collections that were modified. Examples -------- .. code-block:: python with connection.batch_write('mytable') as batch: batch.put({'id': 'id1', 'foo': 'bar'}) batch.delete({'id': 'oldid'})
def percolating_continua(target, phi_crit, tau,
                         volume_fraction='pore.volume_fraction',
                         bulk_property='pore.intrinsic_conductivity'):
    r"""Compute the effective property of a continuum via percolation
    scaling: sigma_eff = sigma * (phi - phi_crit) ** tau.

    Parameters
    ----------
    target : OpenPNM Object
        Object providing access to the required arrays.
    phi_crit : float
        Volume fraction below which percolation does NOT occur.
    tau : float
        Exponent of the percolation relationship.
    volume_fraction : string
        Dictionary key of the conducting component's volume fraction.
    bulk_property : string
        Dictionary key of the conducting component's intrinsic property.
    """
    intrinsic = target[bulk_property]
    fraction = target[volume_fraction]
    # Below the threshold the excess fraction is clamped to zero, so the
    # effective property vanishes there.
    excess = _sp.clip(fraction - phi_crit, a_min=0, a_max=_sp.inf)
    return intrinsic * excess ** tau
r''' Calculates the effective property of a continua using percolation theory Parameters ---------- target : OpenPNM Object The object for which these values are being calculated. This controls the length of the calculated array, and also provides access to other necessary thermofluid properties. volume_fraction : string The dictionary key in the Phase object containing the volume fraction of the conducting component bulk_property : string The dictionary key in the Phase object containing the intrinsic property of the conducting component phi_crit : float The volume fraction below which percolation does NOT occur tau : float The exponent of the percolation relationship Notes ----- This model uses the following standard percolation relationship: .. math:: \sigma_{effective}=\sigma_{bulk}(\phi - \phi_{critical})^\lambda
def variance_corrected_loss(loss, sigma_2=None):
    """Create a variance-corrected loss term.

    Summing several variance-corrected losses reproduces a multi-loss
    weighting (after Kendall, Gal & Cipolla, "Multi-Task Learning Using
    Uncertainty to Weigh Losses").

    :param loss: The loss tensor to be variance corrected.
    :param sigma_2: Optional variance (sigma squared) to use. If None,
        sigma is created as a trainable variable and learned.
    :return: The variance-corrected loss tensor.
    """
    with tf.variable_scope("variance_corrected_loss"):
        sigma_cost = 0
        if sigma_2 is None:
            # Learn sigma and square it so the resulting variance is
            # always non-negative.
            sigma = tf.get_variable(name="sigma", dtype=tf.float32, initializer=tf.constant(1.0), trainable=True)
            sigma_2 = tf.pow(sigma, 2)
            tf.summary.scalar("sigma2", sigma_2)
            # Regularizer that keeps sigma from growing without bound;
            # the +1.0 keeps the log finite/non-negative near zero.
            sigma_cost = tf.log(sigma_2 + 1.0)
        return 0.5 / sigma_2 * loss + sigma_cost
Create a variance corrected loss. When summing variance corrected losses you get the same as multiloss. This is especially useful for Keras, where multiple losses are summed automatically. This multi-loss implementation is inspired by the paper "Multi-Task Learning Using Uncertainty to Weight Losses for Scene Geometry and Semantics" by Kendall, Gal and Cipolla. :param loss: The loss that should be variance corrected. :param sigma_2: Optionally a variance (sigma squared) to use. If none is provided it is learned. :return: The variance corrected loss.
def url_for(**options):
    """Build the URL path for the specified options.

    The path is the encoded option segments followed by the MD5 hash of
    the source image URL.
    """
    parts = get_url_parts(**options)
    digest = hashlib.md5(b(options['image_url'])).hexdigest()
    return "/".join(parts + [digest])
Returns the url for the specified options
def visit_SetComp(self, node: AST, dfltChaining: bool = True) -> str:
    """Return `node`'s source representation as a set comprehension."""
    elt_src = self.visit(node.elt)
    gens_src = ' '.join(self.visit(gen) for gen in node.generators)
    return "{%s %s}" % (elt_src, gens_src)
Return `node`s representation as set comprehension.
def privmsg(self, text):
    """Send *text* to the DCC peer.

    For DCC CHAT sessions the text is newline-terminated before being
    encoded and sent.
    """
    payload = text + '\n' if self.dcctype == 'chat' else text
    return self.send_bytes(self.encode(payload))
Send text to DCC peer. The text will be padded with a newline if it's a DCC CHAT session.
def _ProduceContent(self, mods, showprivate=False, showinh=False):
    """Generate documentation pages for modules without nested modules.

    Each module in ``mods`` is documented on its own page via
    ``_ProduceSingleContent``. Private (``_``-prefixed) modules are
    skipped unless ``showprivate`` is set; dunder modules are always
    skipped.

    Args:
        mods: Iterable of ``(name, module)`` pairs to document.
        showprivate (bool): Whether to include private members/modules.
        showinh (bool): Whether to include inherited members.

    Returns:
        str: The file names ready to be appended to a toctree.

    Raises:
        RuntimeError: If any module does not define ``__all__``.
    """
    result = ''
    for mod in mods:
        # Every documented module must declare its public API explicitly.
        # (Checked before the private-name skip, matching prior behavior.)
        if not hasattr(mod[1], '__all__'):
            raise RuntimeError('Module (%s) MUST have `__all__` defined.' % mod[1].__name__)
        if not showprivate and mod[0][0:1] == '_':
            continue
        if mod[0][0:2] == '__':
            continue
        result += self._ProduceSingleContent(mod, showprivate, showinh)
    return result
An internal helper to create pages for several modules that do not have nested modules. This will automatically generate the needed RST to document each module and save the module to its own page appropriately. Args: mods (module): The modules to document that do not contain nested modules showprivate (bool): A flag for whether or not to display private members Returns: str: The file names ready to be appended to a toctree
def from_yaml(cls, yaml_str=None, str_or_buffer=None):
    """Create a SegmentedRegressionModel from a saved YAML configuration.

    The arguments are mutually exclusive: supply either a YAML string or
    a file name/buffer.

    Parameters
    ----------
    yaml_str : str, optional
        YAML string from which to load the model.
    str_or_buffer : str or file-like, optional
        File name or buffer from which to load YAML.

    Returns
    -------
    SegmentedRegressionModel
    """
    cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer)
    # Defaults applied to any segment model that omits its own settings.
    default_model_expr = cfg['default_config']['model_expression']
    default_ytransform = cfg['default_config']['ytransform']
    seg = cls(
        cfg['segmentation_col'], cfg['fit_filters'],
        cfg['predict_filters'], default_model_expr,
        YTRANSFORM_MAPPING[default_ytransform], cfg['min_segment_size'],
        cfg['name'])
    if "models" not in cfg:
        cfg["models"] = {}
    for name, m in cfg['models'].items():
        # Fill in per-segment defaults, then round-trip each segment
        # config through YAML to build its RegressionModel.
        m['model_expression'] = m.get(
            'model_expression', default_model_expr)
        m['ytransform'] = m.get('ytransform', default_ytransform)
        # Filters are handled at the segmented-model level, not per segment.
        m['fit_filters'] = None
        m['predict_filters'] = None
        reg = RegressionModel.from_yaml(yamlio.convert_to_yaml(m, None))
        seg._group.add_model(reg)
    logger.debug(
        'loaded segmented regression model {} from yaml'.format(seg.name))
    return seg
Create a SegmentedRegressionModel instance from a saved YAML configuration. Arguments are mutually exclusive. Parameters ---------- yaml_str : str, optional A YAML string from which to load the model. str_or_buffer : str or file like, optional File name or buffer from which to load YAML. Returns ------- SegmentedRegressionModel
def delete_ipv4_range(start_addr=None, end_addr=None, **api_opts):
    """Delete the IPv4 range bounded by ``start_addr`` and ``end_addr``.

    Returns True when no matching range exists (nothing to delete);
    otherwise returns the result of deleting the found range object.

    CLI Example:

    .. code-block:: bash

        salt-call infoblox.delete_ipv4_range start_addr=123.123.122.12
    """
    found = get_ipv4_range(start_addr, end_addr, **api_opts)
    if not found:
        # Nothing matched; treat as already deleted.
        return True
    return delete_object(found['_ref'], **api_opts)
Delete ip range. CLI Example: .. code-block:: bash salt-call infoblox.delete_ipv4_range start_addr=123.123.122.12
def mon_status(conn, logger, hostname, args, silent=False):
    """Check a remote monitor via ``ceph daemon mon.<hostname> mon_status``.

    Logs the pretty-printed status (unless ``silent``) and returns True
    when the monitor is up and reporting a rank or a state, False when
    it is not running or not yet responsive.
    """
    mon = 'mon.%s' % hostname
    try:
        status = mon_status_check(conn, logger, hostname, args)
    except RuntimeError:
        logger.info('monitor: %s is not running' % mon)
        return False
    if not status:
        logger.warning('monitor: %s, might not be running yet' % mon)
        return False
    if not silent:
        banner = '*' * 80
        logger.debug(banner)
        logger.debug('status for monitor: %s' % mon)
        for line in json.dumps(status, indent=2, sort_keys=True).split('\n'):
            logger.debug(line)
        logger.debug(banner)
    if status['rank'] >= 0:
        logger.info('monitor: %s is running' % mon)
        return True
    if status['rank'] == -1 and status['state']:
        logger.info('monitor: %s is currently at the state of %s' % (mon, status['state']))
        return True
    logger.info('monitor: %s is not running' % mon)
    return False
run ``ceph daemon mon.`hostname` mon_status`` on the remote end and provide not only the output, but be able to return a boolean status of what is going on. ``False`` represents a monitor that is not doing OK even if it is up and running, while ``True`` would mean the monitor is up and running correctly.
def rand_unicode(min_char=MIN_UNICHR, max_char=MAX_UNICHR,
                 min_len=MIN_STRLEN, max_len=MAX_STRLEN, **kwargs):
    """Generate a random string over the unicode range, regardless of
    Python version."""
    # unichr from syn.five abstracts py2/py3 differences.
    from syn.five import unichr
    raw = rand_str(min_char, max_char, min_len, max_len, unichr)
    return unicode(raw)
For values in the unicode range, regardless of Python version.
def set_group_member_orphan(self, member_id):
    """Detach a member trigger from its group, making it an orphan.

    :param member_id: Member Trigger id to be made an orphan.
    """
    path = ['triggers', 'groups', 'members', member_id, 'orphan']
    self._put(self._service_url(path), data=None, parse_json=False)
Make a non-orphan member trigger into an orphan. :param member_id: Member Trigger id to be made an orphan.
def convdicts():
    """Load the example learned convolutional dictionaries shipped with
    the package.

    Returns
    -------
    dict
        Maps description strings to dictionaries stored as ndarrays.
    """
    path = os.path.join(os.path.dirname(__file__), 'data', 'convdict.npz')
    archive = np.load(path)
    return {key: archive[key] for key in archive.keys()}
Access a set of example learned convolutional dictionaries. Returns ------- cdd : dict A dict associating description strings with dictionaries represented as ndarrays Examples -------- Print the dict keys to obtain the identifiers of the available dictionaries >>> from sporco import util >>> cd = util.convdicts() >>> print(cd.keys()) ['G:12x12x72', 'G:8x8x16,12x12x32,16x16x48', ...] Select a specific example dictionary using the corresponding identifier >>> D = cd['G:8x8x96']
def get_binds(self, app=None):
    """Return a table -> engine mapping.

    Suitable for ``sessionmaker(binds=db.get_binds(app))``.
    """
    app = self.get_app(app)
    # None stands for the default (unbound) engine.
    bind_names = [None] + list(app.config.get('SQLALCHEMY_BINDS') or ())
    mapping = {}
    for bind in bind_names:
        engine = self.get_engine(app, bind)
        for table in self.get_tables_for_bind(bind):
            mapping[table] = engine
    return mapping
Returns a dictionary with a table->engine mapping. This is suitable for use of sessionmaker(binds=db.get_binds(app)).
def get_domain_info(self, domain):
    """Fetch GoDaddy's information record for a specific domain.

    :param domain: The domain to obtain info about.
    :type domain: str
    :return: Parsed JSON describing the domain.
    """
    endpoint = self.API_TEMPLATE + self.DOMAIN_INFO.format(domain=domain)
    return self._get_json_from_response(endpoint)
Get the GoDaddy supplied information about a specific domain. :param domain: The domain to obtain info about. :type domain: str :return A JSON string representing the domain information
def clean_weights(self, cutoff=1e-4, rounding=5):
    """Clean the raw weights: zero out near-zero weights and round.

    Weights whose absolute value is below ``cutoff`` are set to zero;
    the remainder are rounded unless ``rounding`` is None.

    :param cutoff: the lower bound, defaults to 1e-4
    :type cutoff: float, optional
    :param rounding: number of decimal places to round the weights,
        defaults to 5. Set to None if rounding is not desired.
    :type rounding: int, optional
    :raises ValueError: if rounding is neither None nor a positive integer
    :return: asset weights keyed by ticker
    :rtype: dict
    """
    # BUG FIX: the docstring allows rounding=None ("not desired") but the
    # old code rejected it; validate only when rounding is requested.
    if rounding is not None:
        if not isinstance(rounding, int) or rounding < 1:
            raise ValueError("rounding must be a positive integer")
    clean_weights = self.weights.copy()
    clean_weights[np.abs(clean_weights) < cutoff] = 0
    if rounding is not None:
        clean_weights = np.round(clean_weights, rounding)
    return dict(zip(self.tickers, clean_weights))
Helper method to clean the raw weights, setting any weights whose absolute values are below the cutoff to zero, and rounding the rest. :param cutoff: the lower bound, defaults to 1e-4 :type cutoff: float, optional :param rounding: number of decimal places to round the weights, defaults to 5. Set to None if rounding is not desired. :type rounding: int, optional :return: asset weights :rtype: dict
def get_component_attribute_name(component):
    """Return the attribute name corresponding to the given Component.

    A "category.name" identifier is converted by upper-casing the first
    letter of the name part and appending it to the category; any value
    not matching that pattern is returned unchanged.

    :param component: Component to get the attribute name.
    :type component: unicode
    :return: Component attribute name.
    :rtype: object
    """
    match = re.search(r"(?P<category>\w+)\.(?P<name>\w+)", component)
    if not match:
        return component
    name_part = match.group("name")
    name = "{0}{1}{2}".format(match.group("category"),
                              name_part[0].upper(), name_part[1:])
    LOGGER.debug("> Component name: '{0}' to attribute name Active_QLabel: '{1}'.".format(component, name))
    return name
Gets given Component attribute name. Usage:: >>> Manager.get_component_attribute_name("factory.components_manager_ui") u'factoryComponentsManagerUi' :param component: Component to get the attribute name. :type component: unicode :return: Component attribute name. :rtype: object
def to_array(self, dim='variable', name=None):
    """Convert this dataset into an xarray.DataArray.

    The data variables are broadcast against each other and stacked
    along the first axis of the new array; all coordinates of this
    dataset remain coordinates.

    Parameters
    ----------
    dim : str, optional
        Name of the new dimension.
    name : str, optional
        Name of the new data array.

    Returns
    -------
    array : xarray.DataArray
    """
    from .dataarray import DataArray
    data_vars = [self.variables[k] for k in self.data_vars]
    # Broadcast all data variables so they share a common set of dims.
    broadcast_vars = broadcast_variables(*data_vars)
    data = duck_array_ops.stack([b.data for b in broadcast_vars], axis=0)
    coords = dict(self.coords)
    # The new leading dimension is labelled by the variable names.
    coords[dim] = list(self.data_vars)
    dims = (dim,) + broadcast_vars[0].dims
    return DataArray(data, coords, dims, attrs=self.attrs, name=name)
Convert this dataset into an xarray.DataArray The data variables of this dataset will be broadcast against each other and stacked along the first axis of the new array. All coordinates of this dataset will remain coordinates. Parameters ---------- dim : str, optional Name of the new dimension. name : str, optional Name of the new data array. Returns ------- array : xarray.DataArray
def load_object(obj) -> object:
    """Load an object.

    Args:
        obj (str|object): Load the indicated object if this is a string;
            otherwise, return the object as is. To load a module, pass a
            dotted path like 'package.module'; to load an object from a
            module pass a path like 'package.module:name'.

    Returns:
        object
    """
    if isinstance(obj, str):
        # BUG FIX: obj_name must be initialized; the old code left it
        # unbound for plain module paths, raising UnboundLocalError.
        obj_name = ''
        if ':' in obj:
            module_name, obj_name = obj.split(':')
            if not module_name:
                module_name = '.'
        else:
            module_name = obj
        obj = importlib.import_module(module_name)
        if obj_name:
            # Walk dotted attribute paths like 'module:attr.subattr'.
            for attr in obj_name.split('.'):
                obj = getattr(obj, attr)
    return obj
Load an object. Args: obj (str|object): Load the indicated object if this is a string; otherwise, return the object as is. To load a module, pass a dotted path like 'package.module'; to load an object from a module pass a path like 'package.module:name'. Returns: object
def opts():
    """Return the minion configuration settings when grain_opts is
    enabled in the config or pillar; otherwise an empty dict."""
    # Short-circuit on the config flag so the pillar is only inspected
    # when necessary, matching the original evaluation order.
    if __opts__.get('grain_opts', False):
        return __opts__
    if isinstance(__pillar__, dict) and __pillar__.get('grain_opts', False):
        return __opts__
    return {}
Return the minion configuration settings
def decompress(self, value):
    """Split a dict value into per-field values, falling back to each
    subfield's initial value when no value is given."""
    if not value:
        return [field.field.initial for field in self.fields]
    return [value.get(field.name, None) for field in self.fields]
Retrieve each field's value from the given dict, or provide each subfield's initial value when no value is given
def loadnetcdf(filename, copy=True):
    """Load a dataarray from a NetCDF file.

    Args:
        filename (str): Filename (*.nc).
        copy (bool): If True, dataarray is copied in memory. Default is True.

    Returns:
        dataarray (xarray.DataArray): Loaded dataarray.
    """
    filename = str(Path(filename).expanduser())
    if copy:
        dataarray = xr.open_dataarray(filename).copy()
    else:
        dataarray = xr.open_dataarray(filename, chunks={})
    if dataarray.name is None:
        # BUG FIX: str.rstrip('.nc') strips any trailing '.', 'n', 'c'
        # characters (e.g. 'scan.nc' -> 'sca'); strip the extension only.
        name = filename
        if name.endswith('.nc'):
            name = name[:-len('.nc')]
        dataarray.name = name
    for key, val in dataarray.coords.items():
        if val.dtype.kind == 'S':
            # Decode byte-string coordinates to unicode.
            dataarray[key] = val.astype('U')
        elif val.dtype == np.int32:
            # Promote 32-bit int coordinates to 64-bit.
            dataarray[key] = val.astype('i8')
    return dataarray
Load a dataarray from a NetCDF file. Args: filename (str): Filename (*.nc). copy (bool): If True, dataarray is copied in memory. Default is True. Returns: dataarray (xarray.DataArray): Loaded dataarray.
def values(self):
    """Return the hyperparameter values as a plain dict keyed by name."""
    result = {}
    for hp_name in self._hparam_types.keys():
        result[hp_name] = getattr(self, hp_name)
    return result
Return the hyperparameter values as a Python dictionary. Returns: A dictionary with hyperparameter names as keys. The values are the hyperparameter values.
def teardown_request(self, fn):
    """Register *fn* to run when each request context is popped,
    regardless of whether an exception occurred.

    Registration is deferred until the deferred callbacks are replayed
    against a concrete app; the decorated function is returned so this
    can be used as a decorator.
    """
    def register(app):
        return app.teardown_request(fn)
    self._defer(register)
    return fn
Register a function to be run at the end of each request, regardless of whether there was an exception or not. These functions are executed when the request context is popped, even if not an actual request was performed. Example:: ctx = app.test_request_context() ctx.push() ... ctx.pop() When ``ctx.pop()`` is executed in the above example, the teardown functions are called just before the request context moves from the stack of active contexts. This becomes relevant if you are using such constructs in tests. Generally teardown functions must take every necessary step to avoid that they will fail. If they do execute code that might fail they will have to surround the execution of these code by try/except statements and log occurring errors. When a teardown function was called because of an exception it will be passed an error object. The return values of teardown functions are ignored. .. admonition:: Debug Note In debug mode Flask will not tear down a request on an exception immediately. Instead it will keep it alive so that the interactive debugger can still access it. This behavior can be controlled by the ``PRESERVE_CONTEXT_ON_EXCEPTION`` configuration variable.
def new_inner_member(self, name=None, params=None):
    """Create a CheckModulation and register it with this item group.

    :param name: CheckModulation name; a unique one is generated if None
    :type name: str
    :param params: parameters used to initialise the CheckModulation
    :type params: dict
    :return: None
    """
    if params is None:
        params = {}
    if name is None:
        # Guarantee a unique, recognisable generated name.
        name = 'Generated_checkmodulation_%s' % uuid.uuid4()
    params['checkmodulation_name'] = name
    self.add_item(CheckModulation(params))
Create a CheckModulation object and add it to items :param name: CheckModulation name :type name: str :param params: parameters to init CheckModulation :type params: dict :return: None TODO: Remove this default mutable argument. Usually result in unexpected behavior
def get_user_activity(self, offset=None, limit=None):
    """Fetch the user's lifetime ride history from Uber.

    Parameters
        offset (int)
            The integer offset for activity results. Default is 0.
        limit (int)
            Integer amount of results to return. Maximum is 50.
            Default is 5.

    Returns
        (Response)
            A Response object containing ride history.
    """
    query = {'offset': offset, 'limit': limit}
    return self._api_call('GET', 'v1.2/history', args=query)
Get activity about the user's lifetime activity with Uber. Parameters offset (int) The integer offset for activity results. Default is 0. limit (int) Integer amount of results to return. Maximum is 50. Default is 5. Returns (Response) A Response object containing ride history.
def pushd(cls, new_dir):
    """Change directory, then change back on exit.

    Behaves like ``pushd new_dir; something; popd``: the previous
    working directory is restored even if the body raises.
    """
    previous_dir = os.getcwd()
    try:
        # Resolve relative targets against the directory we started in.
        target = new_dir if os.path.isabs(new_dir) \
            else os.path.join(previous_dir, new_dir)
        cls.cd(target)
        yield
    finally:
        cls.cd(previous_dir)
Change directory, and back to previous directory. It behaves like "pushd directory; something; popd".
def search_tags(self, tags):
    """Return distinct assets matching any of the given tag names,
    ordered by file.

    :param tags: list of one or more tag names to match.
    """
    return self.filter(tags__name__in=tags).order_by('file').distinct()
Search assets by passing a list of one or more tags.
def _results_dir_path(self, key, stable):
    """Build the results directory path for the given key.

    :param key: A CacheKey to generate an id for.
    :param stable: True to use the stable subdirectory name; False to
        derive a leaf from the first 12 hex chars of the key's hash.
    """
    if stable:
        leaf = self._STABLE_DIR_NAME
    else:
        leaf = sha1(key.hash.encode('utf-8')).hexdigest()[:12]
    return os.path.join(self._results_dir_prefix, key.id, leaf)
Return a results directory path for the given key. :param key: A CacheKey to generate an id for. :param stable: True to use a stable subdirectory, false to use a portion of the cache key to generate a path unique to the key.
def editor_multi_agent_example():
    """Interact with a holodeck world containing multiple agents.

    Intended for working with UE4 directly (``start_world=False``)
    rather than a prebuilt binary.
    """
    agent_definitions = [
        AgentDefinition("uav0", agents.UavAgent,
                        [Sensors.PIXEL_CAMERA, Sensors.LOCATION_SENSOR]),
        AgentDefinition("uav1", agents.UavAgent,
                        [Sensors.LOCATION_SENSOR, Sensors.VELOCITY_SENSOR]),
    ]
    env = HolodeckEnvironment(agent_definitions, start_world=False)
    command_uav0 = np.array([0, 0, -2, 10])
    command_uav1 = np.array([0, 0, 5, 10])
    for _episode in range(10):
        env.reset()
        env.act("uav0", command_uav0)
        env.act("uav1", command_uav1)
        for _tick in range(1000):
            # Per-agent sensor readings come back keyed by agent name.
            states = env.tick()
            uav0_terminal = states["uav0"][Sensors.TERMINAL]
            uav1_reward = states["uav1"][Sensors.REWARD]
This editor example shows how to interact with holodeck worlds that have multiple agents. This is specifically for when working with UE4 directly and not a prebuilt binary.
def execute ( self, conn, dataset, dataset_access_type, transaction=False ):
    """Update the access type of the given dataset, stamping the
    modifying user and timestamp.

    :param conn: database connection supplied by the caller; required.
    :param dataset: dataset whose access type is being changed.
    :param dataset_access_type: new access type value to set.
    :param transaction: transaction flag passed through to processData.
    """
    # dbsExceptionHandler raises via the supplied logger callback when
    # no connection was provided by the upper layer.
    if not conn:
        dbsExceptionHandler("dbsException-failed-connect2host", "Oracle/Dataset/UpdateType. Expects db connection from upper layer.", self.logger.exception)
    # Bind the dataset, the new access type, and the audit fields.
    binds = { "dataset" : dataset , "dataset_access_type" : dataset_access_type ,"myuser": dbsUtils().getCreateBy(), "mydate": dbsUtils().getTime() }
    # NOTE(review): result is assigned but never returned; callers appear
    # to rely on the side effect only — confirm before changing.
    result = self.dbi.processData(self.sql, binds, conn, transaction)
Update the access type of the given dataset, recording the modifying user and timestamp.