code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _cursor_up(self, value):
    """Move the text cursor up by ``value`` lines (a value of 0 means 1)."""
    steps = int(value) or 1  # 0 is promoted to a single step
    self._cursor.clearSelection()
    self._cursor.movePosition(self._cursor.Up, self._cursor.MoveAnchor, steps)
    self._last_cursor_pos = self._cursor.position()
Moves the cursor up by ``value``.
def npz_generator(npz_path):
    """Yield random ``{'X': ..., 'Y': ...}`` samples from an npz file forever."""
    archive = np.load(npz_path)
    features, targets = archive['X'], archive['Y']
    sample_count = features.shape[0]
    while True:
        idx = np.random.randint(0, sample_count)
        yield {'X': features[idx], 'Y': targets[idx]}
Generate data from an npz file.
def settrace_forked(): from _pydevd_bundle.pydevd_constants import GlobalDebuggerHolder GlobalDebuggerHolder.global_dbg = None threading.current_thread().additional_info = None from _pydevd_frame_eval.pydevd_frame_eval_main import clear_thread_local_info host, port = dispatch() import pydevd_tra...
When creating a fork from a process in the debugger, we need to reset the whole debugger environment!
def parse_pattern(format_string, env, wrapper=lambda x, y: y): formatter = Formatter() fields = [x[1] for x in formatter.parse(format_string) if x[1] is not None] prepared_env = {} for field in fields: for field_alt in (x.strip() for x in field.split('|')): if field_alt[0] in '\'"' a...
Parse the format_string and return prepared data according to the env. Pick each field found in the format_string from the env(ironment), apply the wrapper on each data and return a mapping between field-to-replace and values for each.
def requires_genesis(self): genesis_file = os.path.join(self._data_dir, 'genesis.batch') has_genesis_batches = Path(genesis_file).is_file() LOGGER.debug('genesis_batch_file: %s', genesis_file if has_genesis_batches else 'not found') chain_head = self._block_store.cha...
Determines if the system should be put in genesis mode Returns: bool: return whether or not a genesis block is required to be generated. Raises: InvalidGenesisStateError: raises this error if there is invalid combination of the following: genesis...
def execute(self, conn, acquisition_era_name,end_date, transaction = False): if not conn: dbsExceptionHandler("dbsException-failed-connect2host", "dbs/dao/Oracle/AcquisitionEra/updateEndDate expects db connection from upper layer.", self.logger.exception) binds = { "acquisition_era_name" :acquisition_era_...
for a given block_id
def _ReadPaddingDataTypeDefinition( self, definitions_registry, definition_values, definition_name, is_member=False): if not is_member: error_message = 'data type only supported as member' raise errors.DefinitionReaderError(definition_name, error_message) definition_object = self._ReadDa...
Reads a padding data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. definition_name (str): name of the definition. is_member (Optional[bool]): True if the data typ...
def to_tree(self):
    """Build and return a TreeLib tree mirroring the nodes in this collection."""
    result = TreeLibTree()
    for item in self:
        result.create_node(item, item.node_id, parent=item.parent)
    return result
returns a TreeLib tree
def _process_open(self): click.launch(self._format_issue_url()) if not click.confirm('Did it work?', default=True): click.echo() self._process_print() click.secho( '\nOpen the line manually and copy the text above\n', fg='yellow' ...
Open link in a browser.
def project_role(self, project, id):
    """Get a role Resource.

    :param project: ID or key of the project to get the role from
    :param id: ID of the role to get
    """
    role_id = "%s" % id if isinstance(id, Number) else id
    return self._find_for_resource(Role, (project, role_id))
Get a role Resource. :param project: ID or key of the project to get the role from :param id: ID of the role to get
def readSTYLECHANGERECORD(self, states, fill_bits, line_bits, level=1):
    """Read and return a SWFShapeRecordStyleChange from this stream."""
    record = SWFShapeRecordStyleChange(self, states, fill_bits, line_bits, level)
    return record
Read a SWFShapeRecordStyleChange
def get_plot_dims(signal, ann_samp): "Figure out the number of plot channels" if signal is not None: if signal.ndim == 1: sig_len = len(signal) n_sig = 1 else: sig_len = signal.shape[0] n_sig = signal.shape[1] else: sig_len = 0 ...
Figure out the number of plot channels
def update(self):
    """Drop the MA coefficients of the pure MA model and the MA/AR
    coefficients of the ARMA model, then refresh the secondary IUH
    parameters: recalculate them when every primary parameter is set,
    otherwise delete their values as well."""
    del self.ma.coefs
    del self.arma.ma_coefs
    del self.arma.ar_coefs
    if self.primary_parameters_complete:
        self.calc_secondary_parameters()
    else:
        for parameter in self._SECONDARY_PARAMETERS.values():
            parameter.__delete__(self)
Delete the coefficients of the pure MA model and also all MA and AR coefficients of the ARMA model. Also calculate or delete the values of all secondary iuh parameters, depending on the completeness of the values of the primary parameters.
def add_transition(self, source: str, dest: str):
    """Record a transition between two states.

    Args:
        source (str): name of the state where the transition starts
        dest (str): name of the state where the transition ends
    """
    destinations = self._transitions[source]
    destinations.append(dest)
Adds a transition from one state to another. Args: source (str): the name of the state from where the transition starts dest (str): the name of the state where the transition ends
def connect(self): "Connects to the Redis server if not already connected" if self._sock: return try: sock = self._connect() except socket.timeout: raise TimeoutError("Timeout connecting to server") except socket.error: e = sys.exc_...
Connects to the Redis server if not already connected
def train(input_dir, batch_size, max_steps, output_dir, checkpoint): from google.datalab.utils import LambdaJob if checkpoint is None: checkpoint = _util._DEFAULT_CHECKPOINT_GSURL labels = _util.get_labels(input_dir) model = _model.Model(labels, 0.5, checkpoint) task_data = {'type': 'master', ...
Train model locally.
def page_data(self, idx, offset): size = self.screen.page_size while offset < 0 and idx: offset += size idx -= 1 offset = max(0, offset) while offset >= size: offset -= size idx += 1 if idx == self.last_page: offset = 0 ...
Return character data for page of given index and offset. :param idx: page index. :type idx: int :param offset: scrolling region offset of current page. :type offset: int :returns: list of tuples in form of ``(ucs, name)`` :rtype: list[(unicode, unicode)]
def registrar_for_scope(cls, goal):
    """Return a subclass of this registrar bound to ``goal``.

    Allows reuse of one registrar across multiple goals while keeping task
    code decoupled from the goal(s) it is registered in.
    """
    subtype_name = '{}_{}'.format(cls.__name__, goal)
    if PY2:
        subtype_name = subtype_name.encode('utf-8')
    return type(subtype_name, (cls,), {'options_scope': goal})
Returns a subclass of this registrar suitable for registering on the specified goal. Allows reuse of the same registrar for multiple goals, and also allows us to decouple task code from knowing which goal(s) the task is to be registered in.
def get_obj_in_upper_tree(element, cls): if not hasattr(element, '_parent'): raise ValueError('The top of the tree was reached without finding a {}' .format(cls)) parent = element._parent if not isinstance(parent, cls): return get_obj_in_upper_tree(parent, cls) r...
Return the first object in the parent tree of class `cls`.
def get_file(cls, path):
    """Retrieve a file given a ``storage_name/fileid`` path."""
    depot_name, file_id = path.split('/', 1)
    storage = cls.get(depot_name)
    return storage.get(file_id)
Retrieves a file by storage name and fileid in the form of a path Path is expected to be ``storage_name/fileid``.
def set_objective(self, measured_metabolites): self.clean_objective() for k, v in measured_metabolites.items(): m = self.model.metabolites.get_by_id(k) total_stoichiometry = m.total_stoichiometry( self.without_transports) for r in m.producers(self.with...
Updates objective function for given measured metabolites. :param dict measured_metabolites: dict in which keys are metabolite names and values are float numbers represent fold changes in metabolites.
def _resolve_input(variable, variable_name, config_key, config): if variable is None: try: variable = config.get(PROFILE, config_key) except NoOptionError: raise ValueError(( 'no {} found - either provide a command line argument or ' 'set up a ...
Resolve input entered as option values with config values If option values are provided (passed in as `variable`), then they are returned unchanged. If `variable` is None, then we first look for a config value to use. If no config value is found, then raise an error. Parameters ---------- ...
def get_conf_attr(self, attr, default=None):
    """Return the configuration value for ``attr``.

    :param attr: the attribute to look up
    :param default: value returned when ``attr`` is absent
    :return: the configured value, or ``default``
    """
    if attr not in self.conf:
        return default
    return self.conf[attr]
Get the value of an attribute in the configuration :param attr: The attribute :param default: If the attribute doesn't appear in the configuration return this value :return: The value of the attribute in the configuration or the default value
def next(self): if self.iter == None: self.iter = iter(self.objs) try: return self.iter.next() except StopIteration: self.iter = None self.objs = [] if int(self.page) < int(self.total_pages): self.page += 1 ...
Special paging functionality
def labels(self):
    """Map each labeled line to its label across all trees.

    Returns:
        OrderedDict mapping line (str) -> label.
    """
    mapping = OrderedDict()
    for tree in self:
        for label, line in tree.to_labeled_lines():
            mapping[line] = label
    return mapping
Construct a dictionary of string -> labels Returns: -------- OrderedDict<str, int> : string label pairs.
def sync(self):
    """Request the custom values and their descriptions from the ElkM1."""
    message = cp_encode()
    self.elk.send(message)
    self.get_descriptions(TextDescriptions.SETTING.value)
Retrieve custom values from ElkM1
def load_layer_with_provider(layer_uri, provider, layer_name='tmp'):
    """Load a QGIS layer with a specific provider.

    :param layer_uri: URI used by QGIS to load the layer.
    :param provider: Provider/driver name to use.
    :param layer_name: Name for the loaded layer, defaults to 'tmp'.
    :returns: A raster or vector layer, or None for an unknown provider.
    """
    if provider in RASTER_DRIVERS:
        layer_class = QgsRasterLayer
    elif provider in VECTOR_DRIVERS:
        layer_class = QgsVectorLayer
    else:
        return None
    return layer_class(layer_uri, layer_name, provider)
Load a layer with a specific driver. :param layer_uri: Layer URI that will be used by QGIS to load the layer. :type layer_uri: basestring :param provider: Provider name to use. :type provider: basestring :param layer_name: Layer name to use. Default to 'tmp'. :type layer_name: basestring ...
def threadsafe_event_trigger(self, event_type): readfd, writefd = os.pipe() self.readers.append(readfd) def callback(**kwargs): self.queued_interrupting_events.append(event_type(**kwargs)) logger.warning('added event to events list %r', self.queued_interrupting_events) ...
Returns a callback that creates events, interrupting current event requests. Returned callback function will create an event of type event_type which will interrupt an event request if one is concurrently occurring, otherwise adding the event to a queue that will be checked on the next eve...
def hash(self):
    """Lazily computed hash based on the file name and a sample of its content."""
    if self._hash is None:
        components = [
            self.path.name,
            hashfile(self.path, blocksize=65536, count=20),
        ]
        self._hash = hashobj(components)
    return self._hash
Hash value based on file name and content
def classification_tikhonov(G, y, M, tau=0):
    r"""Solve a classification problem on a graph via Tikhonov minimization.

    Zeroes the entries of ``y`` outside the mask ``M`` (in place, as before),
    transforms ``y`` into logits ``Y``, then delegates to
    ``regression_tikhonov``.

    :param G: graph object passed through to ``regression_tikhonov``.
    :param y: label vector; modified in place for unmasked entries.
    :param M: mask of known labels (boolean, or 0/1 numeric).
    :param tau: regularization weight (default 0).
    """
    # `y[M == False] = 0` relied on elementwise comparison with False;
    # an explicit boolean inversion is equivalent and lint-clean.
    y[~np.asarray(M, dtype=bool)] = 0
    # `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `int` is the documented replacement and is identical here.
    Y = _to_logits(y.astype(int))
    return regression_tikhonov(G, Y, M, tau)
r"""Solve a classification problem on graph via Tikhonov minimization. The function first transforms :math:`y` in logits :math:`Y`, then solves .. math:: \operatorname*{arg min}_X \| M X - Y \|_2^2 + \tau \ tr(X^T L X) if :math:`\tau > 0`, and .. math:: \operatorname*{arg min}_X tr(X^T L X) \ \text{...
def _ps(self, search=''): if not self.available: return result = [] ps = self.adb_streaming_shell('ps') try: for bad_line in ps: for line in bad_line.splitlines(): if search in line: result.append(line.st...
Perform a ps command with optional filtering. :param search: Check for this substring. :returns: List of matching fields
def get_nowait(self): new_get = Future() with self._lock: if not self._get.done(): raise QueueEmpty get, self._get = self._get, new_get hole = get.result() if not hole.done(): new_get.set_result(hole) raise QueueEmpty ...
Returns a value from the queue without waiting. Raises ``QueueEmpty`` if no values are available right now.
def put_website(Bucket, ErrorDocument=None, IndexDocument=None, RedirectAllRequestsTo=None, RoutingRules=None, region=None, key=None, keyid=None, profile=None): try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) WebsiteConfiguration = {} for key...
Given a valid config, update the website configuration for a bucket. Returns {updated: true} if website configuration was updated and returns {updated: False} if website configuration was not updated. CLI Example: .. code-block:: bash salt myminion boto_s3_bucket.put_website my_bucket IndexD...
def spawn_reader_writer(get_data_fn, put_data_fn):
    """Start a daemon thread that pumps data from a source into a sink.

    The thread repeatedly calls ``get_data_fn`` and hands each result to
    ``put_data_fn``; it stops after forwarding the first falsey value.

    Args:
        get_data_fn: called repeatedly to fetch the next chunk.
        put_data_fn: called with every fetched chunk, including the final
            falsey one.
    Returns:
        threading.Thread: the started daemon thread.
    """
    def _pump():
        while True:
            chunk = get_data_fn()
            put_data_fn(chunk)
            if not chunk:
                return

    worker = threading.Thread(target=_pump)
    worker.daemon = True
    worker.start()
    return worker
Spawn a thread that reads from a data source and writes to a sink. The thread will terminate if it receives a Falsey value from the source. Args: get_data_fn: Data-reading function. Called repeatedly until it returns False-y to indicate that the thread should terminate. put_data_fn...
def prepareToCalcEndOfPrdvP(self): aNrmNow = np.asarray(self.aXtraGrid) + self.BoroCnstNat ShkCount = self.TranShkValsNext.size aNrm_temp = np.tile(aNrmNow,(ShkCount,1)) aNrmCount = aNrmNow.shape[0] PermShkVals_temp = (np.tile(self.PermShkValsNext,(aNrmCount,1))...
Prepare to calculate end-of-period marginal value by creating an array of market resources that the agent could have next period, considering the grid of end-of-period assets and the distribution of shocks he might experience next period. Parameters ---------- none ...
def build_interactions(self, data): interactions = _IncrementalCOOMatrix(self.interactions_shape(), np.int32) weights = _IncrementalCOOMatrix(self.interactions_shape(), np.float32) for datum in data: user_idx, item_idx, weight = self._unpack_datum(datum) interactions.appe...
Build an interaction matrix. Two matrices will be returned: a (num_users, num_items) COO matrix with interactions, and a (num_users, num_items) matrix with the corresponding interaction weights. Parameters ---------- data: iterable of (user_id, item_id) or (user_id, it...
def button_count(self): if self._buttons_count is None: if isinstance(self.reply_markup, ( types.ReplyInlineMarkup, types.ReplyKeyboardMarkup)): self._buttons_count = sum( len(row.buttons) for row in self.reply_markup.rows) else: ...
Returns the total button count.
def get_beacon():
    """Return a UDP socket configured for broadcast with address reuse."""
    beacon = _socket.socket(_socket.AF_INET, _socket.SOCK_DGRAM)
    for option in (_socket.SO_REUSEADDR, _socket.SO_BROADCAST):
        beacon.setsockopt(_socket.SOL_SOCKET, option, True)
    return beacon
Get a beacon socket
def field2write_only(self, field, **kwargs):
    """Return the OpenAPI attributes for a load_only (write-only) field.

    :param Field field: A marshmallow field.
    :rtype: dict
    """
    write_only = field.load_only and self.openapi_version.major >= 3
    return {"writeOnly": True} if write_only else {}
Return the dictionary of OpenAPI field attributes for a load_only field. :param Field field: A marshmallow field. :rtype: dict
def name(self, value):
    """Update the name of the change set.

    :type value: str
    :param value: new name for the changeset; must be a string.
    """
    # NOTE(review): the value is stored under the "id" key — presumably the
    # backend identifies changesets by name; confirm against the API.
    if isinstance(value, six.string_types):
        self._properties["id"] = value
    else:
        raise ValueError("Pass a string")
Update name of the change set. :type value: str :param value: New name for the changeset.
def _check_device_number(self, devices):
    """Validate that between 2 and 4 devices are being clustered.

    :param devices: sequence of devices to cluster
    :raises ClusterNotSupported: when the count is outside [2, 4]
    """
    if not 2 <= len(devices) <= 4:
        raise ClusterNotSupported(
            'The number of devices to cluster is not supported.')
Check if number of devices is between 2 and 4 :param kwargs: dict -- keyword args in dict
def main(): config.parse_args() if cfg.CONF.kafka_metrics.enabled: prepare_processes(cfg.CONF.kafka_metrics, cfg.CONF.repositories.metrics_driver) if cfg.CONF.kafka_alarm_history.enabled: prepare_processes(cfg.CONF.kafka_alarm_history, cfg....
Start persister.
def delete(self, key):
    """Remove ``key`` (if present) from the database and persist the change."""
    content = self._get_content()
    content.pop(key, None)
    self.write_data(self.path, content)
Removes the specified key from the database.
def get_upload_token(mail, pwd):
    """Log in and return an upload token, or None on any failure.

    :param mail: account email address
    :param pwd: account password
    :return: the token string, or None when login or parsing fails
    """
    try:
        params = urllib.urlencode({"email": mail, "password": pwd})
        response = urllib.urlopen(LOGIN_URL, params)
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
    # are no longer swallowed; any request failure still yields None.
    except Exception:
        return None
    resp = json.loads(response.read())
    if not resp or 'token' not in resp:
        return None
    return resp['token']
Get upload token
def __check_config_key(self, key): try: section, option = key.split('.') except (AttributeError, ValueError): return False if not section or not option: return False return section in Config.CONFIG_OPTIONS and\ option in Config.CONFIG_OPTIO...
Check whether the key is valid. A valid key has the schema <section>.<option>. Keys supported are listed in CONFIG_OPTIONS dict. :param key: <section>.<option> key
def _init_usrgos(self, goids): usrgos = set() goids_missing = set() _go2obj = self.gosubdag.go2obj for goid in goids: if goid in _go2obj: usrgos.add(goid) else: goids_missing.add(goid) if goids_missing: print("MI...
Return user GO IDs which have GO Terms.
def upgrade_db(conn, pkg_name='openquake.server.db.schema.upgrades', skip_versions=()): upgrader = UpgradeManager.instance(conn, pkg_name) t0 = time.time() try: versions_applied = upgrader.upgrade(conn, skip_versions) except: conn.rollback() raise else: ...
Upgrade a database by running several scripts in a single transaction. :param conn: a DB API 2 connection :param str pkg_name: the name of the package with the upgrade scripts :param list skip_versions: the versions to skip :returns: the version numbers of the new scripts applied the database
def _bind(self): credentials = pika.PlainCredentials(self.user, self.password) params = pika.ConnectionParameters(credentials=credentials, host=self.server, virtual_host=self.vhost, ...
Create socket and bind
def mock_cmd(self, release, *cmd, **kwargs):
    """Run a mock command inside the chroot of the given release.

    :param release: mapping with ``mock_cmd`` and ``mock_dir`` entries
    :param cmd: extra arguments appended to the mock invocation
    :param kwargs: ``new_chroot=True`` adds ``--new-chroot``
    """
    parts = ['{mock_cmd}']
    if kwargs.get('new_chroot') is True:
        parts.append(' --new-chroot')
    parts.append(' --configdir={mock_dir}')
    fmt = ''.join(parts)
    return self.call(fmt.format(**release).split() + list(cmd))
Run a mock command in the chroot for a given release
def parse_human_datetime(s): if not s: return None try: dttm = parse(s) except Exception: try: cal = parsedatetime.Calendar() parsed_dttm, parsed_flags = cal.parseDT(s) if parsed_flags & 2 == 0: parsed_dttm = parsed_dttm.replace(hou...
Returns ``datetime.datetime`` from human readable strings >>> from datetime import date, timedelta >>> from dateutil.relativedelta import relativedelta >>> parse_human_datetime('2015-04-03') datetime.datetime(2015, 4, 3, 0, 0) >>> parse_human_datetime('2/3/1969') datetime.datetime(1969, 2, 3, 0...
def compare_dict(da, db):
    """Return the items unique to each dict as a pair ``(only_in_da, only_in_db)``."""
    items_a = set(da.items())
    items_b = set(db.items())
    shared = items_a & items_b
    return dict(items_a - shared), dict(items_b - shared)
Compare the differences between two dicts
def dateindex(self, col: str):
    """Set a datetime index built from a column.

    :param col: column name where to index the date from
    :type col: str
    :example: ``ds.dateindex("mycol")``
    """
    indexed = self._dateindex(col)
    if indexed is None:
        self.err("Can not create date index")
        return
    self.df = indexed
    self.ok("Added a datetime index from column", col)
Set a datetime index from a column :param col: column name where to index the date from :type col: str :example: ``ds.dateindex("mycol")``
def add_reshape(self, name, input_name, output_name, target_shape, mode): spec = self.spec nn_spec = self.nn_spec spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) spec_layer_params =...
Add a reshape layer. Kindly refer to NeuralNetwork.proto for details. Parameters ---------- name: str The name of this layer. target_shape: tuple Shape of the output blob. The product of target_shape must be equal to the shape of the input blob. ...
def make_choices_tuple(choices, get_display_name):
    """Build the ``choices`` tuple for a data model field.

    :param choices: sequence of valid values for the model field
    :param get_display_name: callable returning the human-readable name
    :return: tuple of 2-tuples ``(choice, display_name)``
    """
    assert callable(get_display_name)
    display = get_display_name
    return tuple((choice, display(choice)) for choice in choices)
Make a tuple for the choices parameter for a data model field. :param choices: sequence of valid values for the model field :param get_display_name: callable that returns the human-readable name for a choice :return: A tuple of 2-tuples (choice, display_name) suitable for the choices parameter
def _execute_install_command(cmd, parse_output, errors, parsed_packages): out = __salt__['cmd.run_all']( cmd, output_loglevel='trace', python_shell=False ) if out['retcode'] != 0: if out['stderr']: errors.append(out['stderr']) else: errors.appe...
Executes a command for the install operation. If the command fails, its error output will be appended to the errors list. If the command succeeds and parse_output is true, updated packages will be appended to the parsed_packages dictionary.
def cli(obj, show_userinfo): client = obj['client'] userinfo = client.userinfo() if show_userinfo: for k, v in userinfo.items(): if isinstance(v, list): v = ', '.join(v) click.echo('{:20}: {}'.format(k, v)) else: click.echo(userinfo['preferred_user...
Display logged in user or full userinfo.
def add_system_classpath():
    """Append every entry of the CLASSPATH environment variable to the JVM's classpath."""
    classpath = os.environ.get('CLASSPATH')
    if classpath is not None:
        javabridge.JARS.extend(classpath.split(os.pathsep))
Adds the system's classpath to the JVM's classpath.
def has_translation(self, language_code=None, related_name=None): if language_code is None: language_code = self._current_language if language_code is None: raise ValueError(get_null_language_error()) meta = self._parler_meta._get_extension_by_related_name(related...
Return whether a translation for the given language exists. Defaults to the current language code. .. versionadded 1.2 Added the ``related_name`` parameter.
def get_context_data(self, **kwargs):
    """Inject the current author into the template context."""
    context = super(BaseAuthorDetail, self).get_context_data(**kwargs)
    context.update(author=self.author)
    return context
Add the current author in context.
def route_method(method_name, extra_part=False): def wrapper(callable_obj): if method_name.lower() not in DEFAULT_ROUTES: raise HandlerHTTPMethodError( 'Invalid http method in method: {}'.format(method_name) ) callable_obj.http_method = method_name.upper() ...
Custom handler routing decorator. Signs a web handler callable with the http method as attribute. Args: method_name (str): HTTP method name (i.e GET, POST) extra_part (bool): Indicates if wrapped callable name should be a part of the actual endpoint. Returns: ...
def find_ips_by_equip(self, id_equip):
    """Get the IPs related to an equipment by its identifier.

    :param id_equip: equipment identifier (integer, greater than zero)
    :return: dictionary with ``ipv4`` and ``ipv6`` lists
    """
    url = 'ip/getbyequip/{0}/'.format(id_equip)
    code, xml = self.submit(None, 'GET', url)
    return self.response(code, xml, ['ipv4', 'ipv6'])
Get Ips related to equipment by its identifier :param id_equip: Equipment identifier. Integer value and greater than zero. :return: Dictionary with the following structure: { ips: { ipv4:[ id: <id_ip4>, oct1: <oct1>, oct2: <oct2>, oct3: <oct...
def money(s, thousand_sep=".", decimal_sep=","):
    """Convert a money string to a Decimal.

    With the defaults, the expected format is ``-38.500,00``: dots separate
    thousands and a comma separates the decimals.

    Args:
        thousand_sep: separator used for thousands.
        decimal_sep: separator used for decimals.
    """
    normalized = s.replace(thousand_sep, "").replace(decimal_sep, ".")
    return Decimal(normalized)
Converts money amount in string to a Decimal object. With the default arguments, the format is expected to be ``-38.500,00``, where dots separate thousands and comma the decimals. Args: thousand_sep: Separator for thousands. decimal_sep: Separator for decimals. ...
def _package_transform(package, fqdn, start=1, *args, **kwargs):
    """Apply the package transform named by ``fqdn`` to the package.

    Args:
        package: imported package object.
        fqdn (str): fully-qualified name of the function in the package;
            set ``start=0`` when it does not include the package name.
        start (int): index in the '.'-split fqdn at which lookup begins.
    Returns:
        The transform's return value, or ``args`` when no callable is found.
    """
    target = _obj_getattr(package, fqdn, start)
    if target is None or not hasattr(target, "__call__"):
        return args
    return target(*args, **kwargs)
Applies the specified package transform with `fqdn` to the package. Args: package: imported package object. fqdn (str): fully-qualified domain name of function in the package. If it does not include the package name, then set `start=0`. start (int): in the '.'-split list of identi...
def encode(precision, with_z): logger = logging.getLogger('geobuf') stdin = click.get_text_stream('stdin') sink = click.get_binary_stream('stdout') try: data = json.load(stdin) pbf = geobuf.encode( data, precision if precision >= 0 else 6, 3 if with_z ...
Given GeoJSON on stdin, writes a geobuf file to stdout.
def interpolate_psd(psd_f, psd_amp, deltaF): new_psd_f = [] new_psd_amp = [] fcurr = psd_f[0] for i in range(len(psd_f) - 1): f_low = psd_f[i] f_high = psd_f[i+1] amp_low = psd_amp[i] amp_high = psd_amp[i+1] while(1): if fcurr > f_high: ...
Function to interpolate a PSD to a different value of deltaF. Uses linear interpolation. Parameters ---------- psd_f : numpy.array or list or similar List of the frequencies contained within the PSD. psd_amp : numpy.array or list or similar List of the PSD values at the frequencies ...
def depth(args): p = OptionParser(depth.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) readsbed, featsbed = args fp = open(featsbed) nargs = len(fp.readline().split("\t")) keepcols = ",".join(str(x) for x in range(1, nargs...
%prog depth reads.bed features.bed Calculate depth depth per feature using coverageBed.
def get_feature_variable_string(self, feature_key, variable_key, user_id, attributes=None):
    """Return the value of a string variable attached to a feature.

    Args:
        feature_key: key of the feature owning the variable.
        variable_key: key of the variable whose value is accessed.
        user_id: ID of the user.
        attributes: optional dict of user attributes.
    """
    return self._get_feature_variable_for_type(
        feature_key, variable_key, entities.Variable.Type.STRING,
        user_id, attributes)
Returns value for a certain string variable attached to a feature. Args: feature_key: Key of the feature whose variable's value is being accessed. variable_key: Key of the variable whose value is to be accessed. user_id: ID for user. attributes: Dict representing user attributes. Retur...
def write_can_msg(self, channel, can_msg):
    """Transmit one or more CAN messages through the given channel of the device.

    :param int channel: CAN channel to use (``Channel.CHANNEL_CH0`` or
        ``Channel.CHANNEL_CH1``).
    :param list(CanMsg) can_msg: CAN message structures to send.
    :return: number of messages handed to the driver, as a DWORD.
    """
    message_count = len(can_msg)
    c_buffer = (CanMsg * message_count)(*can_msg)
    c_count = DWORD(message_count)
    UcanWriteCanMsgEx(self._handle, channel, c_buffer, c_count)
    return c_count
Transmits one or more CAN messages through the specified CAN channel of the device. :param int channel: CAN channel, which is to be used (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`). :param list(CanMsg) can_msg: List of CAN message structure (see structure :class:`CanMsg`)....
def _set_state(self, new_state, diag=None): old_state = self._session_state LOG.info("[BFD][%s][STATE] State changed from %s to %s.", hex(self._local_discr), bfd.BFD_STATE_NAME[old_state], bfd.BFD_STATE_NAME[new_state]) self._session_state = new...
Set the state of the BFD session.
def mpub(self, topic, *messages):
    """Publish multiple messages to ``topic`` in a single command."""
    command = constants.MPUB + ' ' + topic
    return self.send(command, messages)
Publish multiple messages to a topic
def multi_to_dict(multi):
    """Flatten a Werkzeug MultiDict into a plain dictionary.

    Keys holding a single value map to that value; keys holding several
    values map to the list of values.
    """
    flat = {}
    for key, values in multi.to_dict(False).items():
        flat[key] = values[0] if len(values) == 1 else values
    return flat
Transform a Werkzeug multidictionary into a flat dictionary
def post_versions_list(self, updater_name=None, updater_id=None, post_id=None, start_id=None): params = { 'search[updater_name]': updater_name, 'search[updater_id]': updater_id, 'search[post_id]': post_id, 'search[start_id]': start_id ...
Get list of post versions. Parameters: updater_name (str): updater_id (int): post_id (int): start_id (int):
def get_all_events(self): self.all_events = {} events = self.tree.execute("$.events.frames") if events is None: return for e in events: event_type = e.get('type') frame_id = e.get('frame_id') try: self.all_events[event_type]...
Gather all event IDs in the REACH output by type. These IDs are stored in the self.all_events dict.
def py_func_grad(func, inp, Tout, stateful=True, name=None, grad=None): rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8)) tf.RegisterGradient(rnd_name)(grad) g = tf.get_default_graph() with g.gradient_override_map({"PyFunc": rnd_name, "PyFuncStateless": rnd_name}): re...
Custom py_func with gradient support
def _scale_back_response(bqm, response, scalar, ignored_interactions, ignored_variables, ignore_offset): if len(ignored_interactions) + len( ignored_variables) + ignore_offset == 0: response.record.energy = np.divide(response.record.energy, scalar) else: resp...
Helper function to scale back the response of sample method
def count(tex):
    """Extract all labels and map each to its \\ref occurrences in ``tex``.

    Does not follow \\includes.
    """
    soup = TexSoup(tex)
    names = {label.string for label in soup.find_all('label')}
    return {name: soup.find_all('\ref{%s}' % name) for name in names}
Extract all labels, then count the number of times each is referenced in the provided file. Does not follow \includes.
def nn_x(self, x, k=1, radius=np.inf, eps=0.0, p=2):
    """Find the k nearest neighbors of x in the observed input data.

    @see Databag.nn() for argument description
    @return distance and indexes of the found nearest neighbors.
    """
    assert len(x) == self.dim_x
    neighbor_count = min(k, self.size)
    return self._nn(DATA_X, x, k=neighbor_count, radius=radius, eps=eps, p=p)
Find the k nearest neighbors of x in the observed input data @see Databag.nn() for argument description @return distance and indexes of found nearest neighbors.
def exists_using_casper(self, filename): casper_results = casper.Casper(self.connection["jss"]) distribution_servers = casper_results.find("distributionservers") all_packages = [] for distribution_server in distribution_servers: packages = set() for package in dis...
Check for the existence of a package file. Unlike other DistributionPoint types, JDS and CDP types have no documented interface for checking whether the server and its children have a complete copy of a file. The best we can do is check for an object using the API /packages URL--JSS.Pac...
def progressbar(length, label):
    """Create a click progressbar of ``length`` steps labeled ``label``.

    Returns:
        click.progressbar
    """
    options = {'length': length, 'label': label, 'show_pos': True}
    return click.progressbar(**options)
Creates a progressbar Parameters ---------- length int Length of the ProgressBar label str Label to give to the progressbar Returns ------- click.progressbar Progressbar
def score(self, phone_number, account_lifecycle_event, **params):
    """Request a reputation score for a phone number.

    See https://developer.telesign.com/docs/score-api for detailed API
    documentation.
    """
    resource = SCORE_RESOURCE.format(phone_number=phone_number)
    return self.post(resource,
                     account_lifecycle_event=account_lifecycle_event,
                     **params)
Score is an API that delivers reputation scoring based on phone number intelligence, traffic patterns, machine learning, and a global data consortium. See https://developer.telesign.com/docs/score-api for detailed API documentation.
def backfill_previous_messages(self, reverse=False, limit=10): res = self.client.api.get_room_messages(self.room_id, self.prev_batch, direction="b", limit=limit) events = res["chunk"] if not reverse: events = reversed(events) fo...
Backfill handling of previous messages. Args: reverse (bool): When false messages will be backfilled in their original order (old to new), otherwise the order will be reversed (new to old). limit (int): Number of messages to go back.
def add_link(app, pagename, templatename, context, doctree):
    """Add the slides link to the HTML context."""
    show = (app.config.slide_link_html_to_slides
            and hasattr(app.builder, 'get_outfilename'))
    context['show_slidelink'] = show
    if show:
        context['slide_path'] = slide_path(app.builder, pagename)
Add the slides link to the HTML context.
def next(self): if self._current_index < len(self._collection): value = self._collection[self._current_index] self._current_index += 1 return value elif self._next_cursor: self.__fetch_next() return self.next() else: self._c...
Returns the next item in the cursor.
def delete_existing_cname(env, zone_id, dns_name): client = boto3.Session(profile_name=env).client('route53') startrecord = None newrecord_name = dns_name startrecord = find_existing_record(env, zone_id, newrecord_name, check_key='Type', check_value='CNAME') if startrecord: LOG.info("Deletin...
Delete an existing CNAME record. This is used when updating to multi-region for deleting old records. The record can not just be upserted since it changes types. Args: env (str): Deployment environment. zone_id (str): Route53 zone id. dns_name (str): FQDN of application's dns entry...
def all_pairs_normalized_distances_reference(X): n_samples, n_cols = X.shape D = np.ones((n_samples, n_samples), dtype="float32") * np.inf for i in range(n_samples): diffs = X - X[i, :].reshape((1, n_cols)) missing_diffs = np.isnan(diffs) missing_counts_per_row = missing_diffs.sum(ax...
Reference implementation of normalized all-pairs distance, used for testing the more efficient implementation above for equivalence.
def _weighted_formula(form, weight_func):
    """Yield ``(element, count, weight)`` for each non-hydrogen formula element."""
    for element, count in form.items():
        if element == Atom.H:
            continue
        yield element, count, weight_func(element)
Yield weight of each formula element.
def _get_table_names(statement):
    """Return table names found in the query.

    NOTE. This routine would use the sqlparse parse tree, but vnames don't
    parse very well.

    Args:
        statement (sqlparse.sql.Statement): parsed sql statement.
    Returns:
        list of str
    """
    tokens = statement.to_unicode().split()
    found = set()
    for idx, word in enumerate(tokens):
        lowered = word.lower()
        if lowered == 'from' or lowered.endswith('join'):
            found.add(tokens[idx + 1].rstrip(';'))
    return list(found)
Returns table names found in the query. NOTE. This routine would use the sqlparse parse tree, but vnames don't parse very well. Args: statement (sqlparse.sql.Statement): parsed by sqlparse sql statement. Returns: list of str
def has_metadata(self, name):
    """Check whether anything is known about function ``name``.

    :param name: the name of the function as a string
    :return: True if an implementation or any metadata exists for it
    """
    return (self.has_implementation(name)
            or name in self.non_returning
            or name in self.prototypes)
Check if a function has either an implementation or any metadata associated with it :param name: The name of the function as a string :return: A bool indicating if anything is known about the function
def resolve_remote(self, uri):
    """Resolve a uri or relative path to a schema, falling back to a local file."""
    try:
        return super(LocalRefResolver, self).resolve_remote(uri)
    except ValueError:
        base = uri.rsplit('.json', 1)[0]
        return super(LocalRefResolver, self).resolve_remote(
            'file://' + get_schema_path(base))
Resolve a uri or relative path to a schema.
def transformer_clean():
    """Transformer hparams with no dropout, no label smoothing, no max_length."""
    hparams = transformer_base_v2()
    for attr in ('label_smoothing', 'layer_prepostprocess_dropout',
                 'attention_dropout', 'relu_dropout'):
        setattr(hparams, attr, 0.0)
    hparams.max_length = 0
    return hparams
No dropout, label smoothing, max_length.
def set_db_attribute(self, table, record, column, value, key=None):
    """Set ``value`` into ``column`` (optionally ``column:key``) of ``record``.

    This method corresponds to the following ovs-vsctl command::

        $ ovs-vsctl set TBL REC COL[:KEY]=VALUE
    """
    target = column if key is None else '%s:%s' % (column, key)
    command = ovs_vsctl.VSCtlCommand(
        'set', (table, record, '%s=%s' % (target, value)))
    self.run_command([command])
Sets 'value' into 'column' in 'record' in 'table'. This method is corresponding to the following ovs-vsctl command:: $ ovs-vsctl set TBL REC COL[:KEY]=VALUE
def histogram_day_counts( df, variable ): if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]: log.error("index is not datetime") return False counts = df.groupby(df.index.weekday_name)[variable].count().reindex(calendar.day_name[0:]) counts.plot(kind = "bar", width...
Create a week-long histogram of counts of the variable for each day. It is assumed that the DataFrame index is datetime and that the variable `weekday_name` exists.
def filter_time_nearest(self, time, regex=None):
    """Return the dataset whose extracted datetime is closest to ``time``.

    Loops over the keys of the collection, using ``regex`` to extract and
    build datetimes, and returns the item with the smallest absolute time
    difference from the requested ``time``.
    """
    def _distance(item):
        return abs((item[0] - time).total_seconds())

    candidates = self._get_datasets_with_times(regex)
    return min(candidates, key=_distance)[-1]
Filter keys for an item closest to the desired time. Loops over all keys in the collection and uses `regex` to extract and build `datetime`s. The collection of `datetime`s is compared to `start` and the value that has a `datetime` closest to that requested is returned.If none of the keys in the...
def load_py(stream, filepath=None):
    """Load python-formatted data from a stream.

    Args:
        stream (file-like object).
    Returns:
        dict.
    """
    build_paths = config.package_definition_build_python_paths
    with add_sys_paths(build_paths):
        return _load_py(stream, filepath=filepath)
Load python-formatted data from a stream. Args: stream (file-like object). Returns: dict.
def _quote(str, LegalChars=_LegalChars):
    r"""Quote a string for use in a cookie header.

    Return the string unchanged when it needs no quoting; otherwise wrap it
    in double quotes with special characters escaped (with a \).
    """
    if any(c not in LegalChars for c in str):
        return '"' + _nulljoin(_Translator.get(s, s) for s in str) + '"'
    return str
r"""Quote a string for use in a cookie header. If the string does not need to be double-quoted, then just return the string. Otherwise, surround the string in doublequotes and quote (with a \) special characters.
async def is_pairwise_exists(wallet_handle: int, their_did: str) -> bool: logger = logging.getLogger(__name__) logger.debug("is_pairwise_exists: >>> wallet_handle: %r, their_did: %r", wallet_handle, their_did) if not hasattr(is_pairwise_exists, ...
Check whether a pairwise record exists. :param wallet_handle: wallet handle (created by open_wallet). :param their_did: encoded DID. :return: true - if the pairwise exists, false - otherwise
def executions(self):
    """Access the executions.

    :returns: twilio.rest.studio.v1.flow.execution.ExecutionList
    :rtype: twilio.rest.studio.v1.flow.execution.ExecutionList
    """
    if self._executions is None:
        self._executions = ExecutionList(
            self._version, flow_sid=self._solution['sid'])
    return self._executions
Access the executions :returns: twilio.rest.studio.v1.flow.execution.ExecutionList :rtype: twilio.rest.studio.v1.flow.execution.ExecutionList
def pending(): upgrader = InvenioUpgrader() logger = upgrader.get_logger() try: upgrades = upgrader.get_upgrades() if not upgrades: logger.info("All upgrades have been applied.") return logger.info("Following upgrade(s) are ready to be applied:") for u...
Command for showing upgrades ready to be applied.
def get_utt_regions(self): regions = [] current_offset = 0 for utt_idx in sorted(self.utt_ids): offset = current_offset num_frames = [] refs = [] for cnt in self.containers: num_frames.append(cnt.get(utt_idx).shape[0]) ...
Return the regions of all utterances, assuming all utterances are concatenated. It is assumed that the utterances are sorted in ascending order for concatenation. A region is defined by offset (in chunks), length (num-chunks) and a list of references to the utterance datasets in the containers....
def tempput(local_path=None, remote_path=None, use_sudo=False, mirror_local_mode=False, mode=None): import warnings warnings.simplefilter('ignore', RuntimeWarning) if remote_path is None: remote_path = os.tempnam() put(local_path, remote_path, use_sudo, mirror_local_mode, mode) y...
Put a file to remote and remove it afterwards