code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _cursor_up(self, value): value = int(value) if value == 0: value = 1 self._cursor.clearSelection() self._cursor.movePosition(self._cursor.Up, self._cursor.MoveAnchor, value) self._last_cursor_pos = self._cursor.position()
Moves the cursor up by ``value``.
def npz_generator(npz_path): npz_data = np.load(npz_path) X = npz_data['X'] y = npz_data['Y'] n = X.shape[0] while True: i = np.random.randint(0, n) yield {'X': X[i], 'Y': y[i]}
Generate data from an npz file.
def settrace_forked(): from _pydevd_bundle.pydevd_constants import GlobalDebuggerHolder GlobalDebuggerHolder.global_dbg = None threading.current_thread().additional_info = None from _pydevd_frame_eval.pydevd_frame_eval_main import clear_thread_local_info host, port = dispatch() import pydevd_tracing pydevd_tracing.restore_sys_set_trace_func() if port is not None: global connected connected = False global forked forked = True custom_frames_container_init() if clear_thread_local_info is not None: clear_thread_local_info() settrace( host, port=port, suspend=False, trace_only_current_thread=False, overwrite_prev_trace=True, patch_multiprocessing=True, )
When creating a fork from a process in the debugger, we need to reset the whole debugger environment!
def parse_pattern(format_string, env, wrapper=lambda x, y: y): formatter = Formatter() fields = [x[1] for x in formatter.parse(format_string) if x[1] is not None] prepared_env = {} for field in fields: for field_alt in (x.strip() for x in field.split('|')): if field_alt[0] in '\'"' and field_alt[-1] in '\'"': field_values = field_alt[1:-1] else: field_values = env.get(field_alt) if field_values is not None: break else: field_values = [] if not isinstance(field_values, list): field_values = [field_values] prepared_env[field] = wrapper(field_alt, field_values) return prepared_env
Parse the format_string and return prepared data according to the env. Pick each field found in the format_string from the env(ironment), apply the wrapper on each data and return a mapping between field-to-replace and values for each.
def requires_genesis(self): genesis_file = os.path.join(self._data_dir, 'genesis.batch') has_genesis_batches = Path(genesis_file).is_file() LOGGER.debug('genesis_batch_file: %s', genesis_file if has_genesis_batches else 'not found') chain_head = self._block_store.chain_head has_chain_head = chain_head is not None if has_chain_head: LOGGER.debug('chain_head: %s', chain_head) block_chain_id = self._chain_id_manager.get_block_chain_id() is_genesis_node = block_chain_id is None LOGGER.debug( 'block_chain_id: %s', block_chain_id if not is_genesis_node else 'not yet specified') if has_genesis_batches and has_chain_head: raise InvalidGenesisStateError( 'Cannot have a genesis_batch_file and an existing chain') if has_genesis_batches and not is_genesis_node: raise InvalidGenesisStateError( 'Cannot have a genesis_batch_file and join an existing network' ) if not has_genesis_batches and not has_chain_head: LOGGER.info('No chain head and not the genesis node: ' 'starting in peering mode') return has_genesis_batches and not has_chain_head and is_genesis_node
Determines if the system should be put in genesis mode Returns: bool: return whether or not a genesis block is required to be generated. Raises: InvalidGenesisStateError: raises this error if there is invalid combination of the following: genesis.batch, existing chain head, and block chain id.
def execute(self, conn, acquisition_era_name,end_date, transaction = False): if not conn: dbsExceptionHandler("dbsException-failed-connect2host", "dbs/dao/Oracle/AcquisitionEra/updateEndDate expects db connection from upper layer.", self.logger.exception) binds = { "acquisition_era_name" :acquisition_era_name , "end_date" : end_date } result = self.dbi.processData(self.sql, binds, conn, transaction)
for a given block_id
def _ReadPaddingDataTypeDefinition( self, definitions_registry, definition_values, definition_name, is_member=False): if not is_member: error_message = 'data type only supported as member' raise errors.DefinitionReaderError(definition_name, error_message) definition_object = self._ReadDataTypeDefinition( definitions_registry, definition_values, data_types.PaddingDefinition, definition_name, self._SUPPORTED_DEFINITION_VALUES_PADDING) alignment_size = definition_values.get('alignment_size', None) if not alignment_size: error_message = 'missing alignment_size' raise errors.DefinitionReaderError(definition_name, error_message) try: int(alignment_size) except ValueError: error_message = 'unuspported alignment size attribute: {0!s}'.format( alignment_size) raise errors.DefinitionReaderError(definition_name, error_message) if alignment_size not in (2, 4, 8, 16): error_message = 'unuspported alignment size value: {0!s}'.format( alignment_size) raise errors.DefinitionReaderError(definition_name, error_message) definition_object.alignment_size = alignment_size return definition_object
Reads a padding data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. definition_name (str): name of the definition. is_member (Optional[bool]): True if the data type definition is a member data type definition. Returns: PaddingtDefinition: padding definition. Raises: DefinitionReaderError: if the definitions values are missing or if the format is incorrect.
def to_tree(self): tree = TreeLibTree() for node in self: tree.create_node(node, node.node_id, parent=node.parent) return tree
returns a TreeLib tree
def _process_open(self): click.launch(self._format_issue_url()) if not click.confirm('Did it work?', default=True): click.echo() self._process_print() click.secho( '\nOpen the line manually and copy the text above\n', fg='yellow' ) click.secho( ' ' + self.REPO_URL + self.ISSUE_SUFFIX + '\n', bold=True )
Open link in a browser.
def project_role(self, project, id): if isinstance(id, Number): id = "%s" % id return self._find_for_resource(Role, (project, id))
Get a role Resource. :param project: ID or key of the project to get the role from :param id: ID of the role to get
def readSTYLECHANGERECORD(self, states, fill_bits, line_bits, level = 1): return SWFShapeRecordStyleChange(self, states, fill_bits, line_bits, level)
Read a SWFShapeRecordStyleChange
def get_plot_dims(signal, ann_samp): "Figure out the number of plot channels" if signal is not None: if signal.ndim == 1: sig_len = len(signal) n_sig = 1 else: sig_len = signal.shape[0] n_sig = signal.shape[1] else: sig_len = 0 n_sig = 0 if ann_samp is not None: n_annot = len(ann_samp) else: n_annot = 0 return sig_len, n_sig, n_annot, max(n_sig, n_annot)
Figure out the number of plot channels
def update(self): del self.ma.coefs del self.arma.ma_coefs del self.arma.ar_coefs if self.primary_parameters_complete: self.calc_secondary_parameters() else: for secpar in self._SECONDARY_PARAMETERS.values(): secpar.__delete__(self)
Delete the coefficients of the pure MA model and also all MA and AR coefficients of the ARMA model. Also calculate or delete the values of all secondary iuh parameters, depending on the completeness of the values of the primary parameters.
def add_transition(self, source: str, dest: str): self._transitions[source].append(dest)
Adds a transition from one state to another. Args: source (str): the name of the state from where the transition starts dest (str): the name of the state where the transition ends
def connect(self): "Connects to the Redis server if not already connected" if self._sock: return try: sock = self._connect() except socket.timeout: raise TimeoutError("Timeout connecting to server") except socket.error: e = sys.exc_info()[1] raise ConnectionError(self._error_message(e)) self._sock = sock self._selector = DefaultSelector(sock) try: self.on_connect() except RedisError: self.disconnect() raise for callback in self._connect_callbacks: callback(self)
Connects to the Redis server if not already connected
def train(input_dir, batch_size, max_steps, output_dir, checkpoint): from google.datalab.utils import LambdaJob if checkpoint is None: checkpoint = _util._DEFAULT_CHECKPOINT_GSURL labels = _util.get_labels(input_dir) model = _model.Model(labels, 0.5, checkpoint) task_data = {'type': 'master', 'index': 0} task = type('TaskSpec', (object,), task_data) job = LambdaJob(lambda: _trainer.Trainer(input_dir, batch_size, max_steps, output_dir, model, None, task).run_training(), 'training') return job
Train model locally.
def page_data(self, idx, offset): size = self.screen.page_size while offset < 0 and idx: offset += size idx -= 1 offset = max(0, offset) while offset >= size: offset -= size idx += 1 if idx == self.last_page: offset = 0 idx = min(max(0, idx), self.last_page) start = (idx * self.screen.page_size) + offset end = start + self.screen.page_size return (idx, offset), self._page_data[start:end]
Return character data for page of given index and offset. :param idx: page index. :type idx: int :param offset: scrolling region offset of current page. :type offset: int :returns: list of tuples in form of ``(ucs, name)`` :rtype: list[(unicode, unicode)]
def registrar_for_scope(cls, goal): type_name = '{}_{}'.format(cls.__name__, goal) if PY2: type_name = type_name.encode('utf-8') return type(type_name, (cls, ), {'options_scope': goal})
Returns a subclass of this registrar suitable for registering on the specified goal. Allows reuse of the same registrar for multiple goals, and also allows us to decouple task code from knowing which goal(s) the task is to be registered in.
def get_obj_in_upper_tree(element, cls): if not hasattr(element, '_parent'): raise ValueError('The top of the tree was reached without finding a {}' .format(cls)) parent = element._parent if not isinstance(parent, cls): return get_obj_in_upper_tree(parent, cls) return parent
Return the first object in the parent tree of class `cls`.
def get_file(cls, path): depot_name, file_id = path.split('/', 1) depot = cls.get(depot_name) return depot.get(file_id)
Retrieves a file by storage name and fileid in the form of a path Path is expected to be ``storage_name/fileid``.
def set_objective(self, measured_metabolites): self.clean_objective() for k, v in measured_metabolites.items(): m = self.model.metabolites.get_by_id(k) total_stoichiometry = m.total_stoichiometry( self.without_transports) for r in m.producers(self.without_transports): update_rate = v * r.metabolites[m] / total_stoichiometry r.objective_coefficient += update_rate
Updates objective function for given measured metabolites. :param dict measured_metabolites: dict in which keys are metabolite names and values are float numbers represent fold changes in metabolites.
def _resolve_input(variable, variable_name, config_key, config): if variable is None: try: variable = config.get(PROFILE, config_key) except NoOptionError: raise ValueError(( 'no {} found - either provide a command line argument or ' 'set up a default by running `apparate configure`' ).format(variable_name)) return variable
Resolve input entered as option values with config values If option values are provided (passed in as `variable`), then they are returned unchanged. If `variable` is None, then we first look for a config value to use. If no config value is found, then raise an error. Parameters ---------- variable: string or numeric value passed in as input by the user variable_name: string name of the variable, for clarity in the error message config_key: string key in the config whose value could be used to fill in the variable config: ConfigParser contains keys/values in .apparatecfg
def get_conf_attr(self, attr, default=None): if attr in self.conf: return self.conf[attr] else: return default
Get the value of a attribute in the configuration :param attr: The attribute :param default: If the attribute doesn't appear in the configuration return this value :return: The value of attribute in the configuration or the default value
def next(self): if self.iter == None: self.iter = iter(self.objs) try: return self.iter.next() except StopIteration: self.iter = None self.objs = [] if int(self.page) < int(self.total_pages): self.page += 1 self._connection.get_response(self.action, self.params, self.page, self) return self.next() else: raise
Special paging functionality
def labels(self): labelings = OrderedDict() for tree in self: for label, line in tree.to_labeled_lines(): labelings[line] = label return labelings
Construct a dictionary of string -> labels Returns: -------- OrderedDict<str, int> : string label pairs.
def sync(self): self.elk.send(cp_encode()) self.get_descriptions(TextDescriptions.SETTING.value)
Retrieve custom values from ElkM1
def load_layer_with_provider(layer_uri, provider, layer_name='tmp'): if provider in RASTER_DRIVERS: return QgsRasterLayer(layer_uri, layer_name, provider) elif provider in VECTOR_DRIVERS: return QgsVectorLayer(layer_uri, layer_name, provider) else: return None
Load a layer with a specific driver. :param layer_uri: Layer URI that will be used by QGIS to load the layer. :type layer_uri: basestring :param provider: Provider name to use. :type provider: basestring :param layer_name: Layer name to use. Default to 'tmp'. :type layer_name: basestring :return: The layer or None if it's failed. :rtype: QgsMapLayer
def threadsafe_event_trigger(self, event_type): readfd, writefd = os.pipe() self.readers.append(readfd) def callback(**kwargs): self.queued_interrupting_events.append(event_type(**kwargs)) logger.warning('added event to events list %r', self.queued_interrupting_events) os.write(writefd, b'interrupting event!') return callback
Returns a callback to creates events, interrupting current event requests. Returned callback function will create an event of type event_type which will interrupt an event request if one is concurrently occuring, otherwise adding the event to a queue that will be checked on the next event request.
def hash(self): if self._hash is None: tohash = [self.path.name] tohash.append(hashfile(self.path, blocksize=65536, count=20)) self._hash = hashobj(tohash) return self._hash
Hash value based on file name and content
def classification_tikhonov(G, y, M, tau=0): r y[M == False] = 0 Y = _to_logits(y.astype(np.int)) return regression_tikhonov(G, Y, M, tau)
r"""Solve a classification problem on graph via Tikhonov minimization. The function first transforms :math:`y` in logits :math:`Y`, then solves .. math:: \operatorname*{arg min}_X \| M X - Y \|_2^2 + \tau \ tr(X^T L X) if :math:`\tau > 0`, and .. math:: \operatorname*{arg min}_X tr(X^T L X) \ \text{ s. t. } \ Y = M X otherwise, where :math:`X` and :math:`Y` are logits. The function returns the maximum of the logits. Parameters ---------- G : :class:`pygsp.graphs.Graph` y : array, length G.n_vertices Measurements. M : array of boolean, length G.n_vertices Masking vector. tau : float Regularization parameter. Returns ------- logits : array, length G.n_vertices The logits :math:`X`. Examples -------- >>> from pygsp import graphs, learning >>> import matplotlib.pyplot as plt >>> >>> G = graphs.Logo() Create a ground truth signal: >>> signal = np.zeros(G.n_vertices) >>> signal[G.info['idx_s']] = 1 >>> signal[G.info['idx_p']] = 2 Construct a measurement signal from a binary mask: >>> rs = np.random.RandomState(42) >>> mask = rs.uniform(0, 1, G.n_vertices) > 0.5 >>> measures = signal.copy() >>> measures[~mask] = np.nan Solve the classification problem by reconstructing the signal: >>> recovery = learning.classification_tikhonov(G, measures, mask, tau=0) Plot the results. Note that we recover the class with ``np.argmax(recovery, axis=1)``. >>> prediction = np.argmax(recovery, axis=1) >>> fig, ax = plt.subplots(2, 3, sharey=True, figsize=(10, 6)) >>> _ = G.plot_signal(signal, ax=ax[0, 0], title='Ground truth') >>> _ = G.plot_signal(measures, ax=ax[0, 1], title='Measurements') >>> _ = G.plot_signal(prediction, ax=ax[0, 2], title='Recovered class') >>> _ = G.plot_signal(recovery[:, 0], ax=ax[1, 0], title='Logit 0') >>> _ = G.plot_signal(recovery[:, 1], ax=ax[1, 1], title='Logit 1') >>> _ = G.plot_signal(recovery[:, 2], ax=ax[1, 2], title='Logit 2') >>> _ = fig.tight_layout()
def _ps(self, search=''): if not self.available: return result = [] ps = self.adb_streaming_shell('ps') try: for bad_line in ps: for line in bad_line.splitlines(): if search in line: result.append(line.strip().rsplit(' ', 1)[-1]) return result except InvalidChecksumError as e: print(e) self.connect() raise IOError
Perform a ps command with optional filtering. :param search: Check for this substring. :returns: List of matching fields
def get_nowait(self): new_get = Future() with self._lock: if not self._get.done(): raise QueueEmpty get, self._get = self._get, new_get hole = get.result() if not hole.done(): new_get.set_result(hole) raise QueueEmpty node = hole.result() value = node.value new_hole, node.next = node.next, None new_get.set_result(new_hole) return value
Returns a value from the queue without waiting. Raises ``QueueEmpty`` if no values are available right now.
def put_website(Bucket, ErrorDocument=None, IndexDocument=None, RedirectAllRequestsTo=None, RoutingRules=None, region=None, key=None, keyid=None, profile=None): try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) WebsiteConfiguration = {} for key in ('ErrorDocument', 'IndexDocument', 'RedirectAllRequestsTo', 'RoutingRules'): val = locals()[key] if val is not None: if isinstance(val, six.string_types): WebsiteConfiguration[key] = salt.utils.json.loads(val) else: WebsiteConfiguration[key] = val conn.put_bucket_website(Bucket=Bucket, WebsiteConfiguration=WebsiteConfiguration) return {'updated': True, 'name': Bucket} except ClientError as e: return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
Given a valid config, update the website configuration for a bucket. Returns {updated: true} if website configuration was updated and returns {updated: False} if website configuration was not updated. CLI Example: .. code-block:: bash salt myminion boto_s3_bucket.put_website my_bucket IndexDocument='{"Suffix":"index.html"}'
def spawn_reader_writer(get_data_fn, put_data_fn): def _reader_thread(): while True: out = get_data_fn() put_data_fn(out) if not out: break t = threading.Thread(target=_reader_thread) t.daemon = True t.start() return t
Spawn a thread that reads from a data source and writes to a sink. The thread will terminate if it receives a Falsey value from the source. Args: get_data_fn: Data-reading function. Called repeatedly until it returns False-y to indicate that the thread should terminate. put_data_fn: Data-writing function. Returns: threading.Thread
def prepareToCalcEndOfPrdvP(self): aNrmNow = np.asarray(self.aXtraGrid) + self.BoroCnstNat ShkCount = self.TranShkValsNext.size aNrm_temp = np.tile(aNrmNow,(ShkCount,1)) aNrmCount = aNrmNow.shape[0] PermShkVals_temp = (np.tile(self.PermShkValsNext,(aNrmCount,1))).transpose() TranShkVals_temp = (np.tile(self.TranShkValsNext,(aNrmCount,1))).transpose() ShkPrbs_temp = (np.tile(self.ShkPrbsNext,(aNrmCount,1))).transpose() mNrmNext = self.Rfree/(self.PermGroFac*PermShkVals_temp)*aNrm_temp + TranShkVals_temp self.PermShkVals_temp = PermShkVals_temp self.ShkPrbs_temp = ShkPrbs_temp self.mNrmNext = mNrmNext self.aNrmNow = aNrmNow return aNrmNow
Prepare to calculate end-of-period marginal value by creating an array of market resources that the agent could have next period, considering the grid of end-of-period assets and the distribution of shocks he might experience next period. Parameters ---------- none Returns ------- aNrmNow : np.array A 1D array of end-of-period assets; also stored as attribute of self.
def build_interactions(self, data): interactions = _IncrementalCOOMatrix(self.interactions_shape(), np.int32) weights = _IncrementalCOOMatrix(self.interactions_shape(), np.float32) for datum in data: user_idx, item_idx, weight = self._unpack_datum(datum) interactions.append(user_idx, item_idx, 1) weights.append(user_idx, item_idx, weight) return (interactions.tocoo(), weights.tocoo())
Build an interaction matrix. Two matrices will be returned: a (num_users, num_items) COO matrix with interactions, and a (num_users, num_items) matrix with the corresponding interaction weights. Parameters ---------- data: iterable of (user_id, item_id) or (user_id, item_id, weight) An iterable of interactions. The user and item ids will be translated to internal model indices using the mappings constructed during the fit call. If weights are not provided they will be assumed to be 1.0. Returns ------- (interactions, weights): COO matrix, COO matrix Two COO matrices: the interactions matrix and the corresponding weights matrix.
def button_count(self): if self._buttons_count is None: if isinstance(self.reply_markup, ( types.ReplyInlineMarkup, types.ReplyKeyboardMarkup)): self._buttons_count = sum( len(row.buttons) for row in self.reply_markup.rows) else: self._buttons_count = 0 return self._buttons_count
Returns the total button count.
def get_beacon(): "Get a beacon socket" s = _socket.socket(_socket.AF_INET, _socket.SOCK_DGRAM) s.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, True) s.setsockopt(_socket.SOL_SOCKET, _socket.SO_BROADCAST, True) return s
Get a beacon socket
def field2write_only(self, field, **kwargs): attributes = {} if field.load_only and self.openapi_version.major >= 3: attributes["writeOnly"] = True return attributes
Return the dictionary of OpenAPI field attributes for a load_only field. :param Field field: A marshmallow field. :rtype: dict
def name(self, value): if not isinstance(value, six.string_types): raise ValueError("Pass a string") self._properties["id"] = value
Update name of the change set. :type value: str :param value: New name for the changeset.
def _check_device_number(self, devices): if len(devices) < 2 or len(devices) > 4: msg = 'The number of devices to cluster is not supported.' raise ClusterNotSupported(msg)
Check if number of devices is between 2 and 4 :param kwargs: dict -- keyword args in dict
def main(): config.parse_args() if cfg.CONF.kafka_metrics.enabled: prepare_processes(cfg.CONF.kafka_metrics, cfg.CONF.repositories.metrics_driver) if cfg.CONF.kafka_alarm_history.enabled: prepare_processes(cfg.CONF.kafka_alarm_history, cfg.CONF.repositories.alarm_state_history_driver) if cfg.CONF.kafka_events.enabled: prepare_processes(cfg.CONF.kafka_events, cfg.CONF.repositories.events_driver) try: LOG.info( ) for process in processors: process.start() signal.signal(signal.SIGCHLD, clean_exit) signal.signal(signal.SIGINT, clean_exit) signal.signal(signal.SIGTERM, clean_exit) while True: time.sleep(10) except Exception: LOG.exception('Error! Exiting.') clean_exit(signal.SIGKILL)
Start persister.
def delete(self, key): obj = self._get_content() obj.pop(key, None) self.write_data(self.path, obj)
Removes the specified key from the database.
def get_upload_token(mail, pwd): try: params = urllib.urlencode({"email": mail, "password": pwd}) response = urllib.urlopen(LOGIN_URL, params) except: return None resp = json.loads(response.read()) if not resp or 'token' not in resp: return None return resp['token']
Get upload token
def __check_config_key(self, key): try: section, option = key.split('.') except (AttributeError, ValueError): return False if not section or not option: return False return section in Config.CONFIG_OPTIONS and\ option in Config.CONFIG_OPTIONS[section]
Check whether the key is valid. A valid key has the schema <section>.<option>. Keys supported are listed in CONFIG_OPTIONS dict. :param key: <section>.<option> key
def _init_usrgos(self, goids): usrgos = set() goids_missing = set() _go2obj = self.gosubdag.go2obj for goid in goids: if goid in _go2obj: usrgos.add(goid) else: goids_missing.add(goid) if goids_missing: print("MISSING GO IDs: {GOs}".format(GOs=goids_missing)) print("{N} of {M} GO IDs ARE MISSING".format(N=len(goids_missing), M=len(goids))) return usrgos
Return user GO IDs which have GO Terms.
def upgrade_db(conn, pkg_name='openquake.server.db.schema.upgrades', skip_versions=()): upgrader = UpgradeManager.instance(conn, pkg_name) t0 = time.time() try: versions_applied = upgrader.upgrade(conn, skip_versions) except: conn.rollback() raise else: conn.commit() dt = time.time() - t0 logging.info('Upgrade completed in %s seconds', dt) return versions_applied
Upgrade a database by running several scripts in a single transaction. :param conn: a DB API 2 connection :param str pkg_name: the name of the package with the upgrade scripts :param list skip_versions: the versions to skip :returns: the version numbers of the new scripts applied the database
def _bind(self): credentials = pika.PlainCredentials(self.user, self.password) params = pika.ConnectionParameters(credentials=credentials, host=self.server, virtual_host=self.vhost, port=self.port) self.connection = pika.BlockingConnection(params) self.channel = self.connection.channel() self.channel.exchange_declare(exchange=self.topic_exchange, exchange_type="topic")
Create socket and bind
def mock_cmd(self, release, *cmd, **kwargs): fmt = '{mock_cmd}' if kwargs.get('new_chroot') is True: fmt +=' --new-chroot' fmt += ' --configdir={mock_dir}' return self.call(fmt.format(**release).split() + list(cmd))
Run a mock command in the chroot for a given release
def parse_human_datetime(s): if not s: return None try: dttm = parse(s) except Exception: try: cal = parsedatetime.Calendar() parsed_dttm, parsed_flags = cal.parseDT(s) if parsed_flags & 2 == 0: parsed_dttm = parsed_dttm.replace(hour=0, minute=0, second=0) dttm = dttm_from_timtuple(parsed_dttm.utctimetuple()) except Exception as e: logging.exception(e) raise ValueError("Couldn't parse date string [{}]".format(s)) return dttm
Returns ``datetime.datetime`` from human readable strings >>> from datetime import date, timedelta >>> from dateutil.relativedelta import relativedelta >>> parse_human_datetime('2015-04-03') datetime.datetime(2015, 4, 3, 0, 0) >>> parse_human_datetime('2/3/1969') datetime.datetime(1969, 2, 3, 0, 0) >>> parse_human_datetime('now') <= datetime.now() True >>> parse_human_datetime('yesterday') <= datetime.now() True >>> date.today() - timedelta(1) == parse_human_datetime('yesterday').date() True >>> year_ago_1 = parse_human_datetime('one year ago').date() >>> year_ago_2 = (datetime.now() - relativedelta(years=1) ).date() >>> year_ago_1 == year_ago_2 True
def compare_dict(da, db): sa = set(da.items()) sb = set(db.items()) diff = sa & sb return dict(sa - diff), dict(sb - diff)
Compare differencs from two dicts
def dateindex(self, col: str): df = self._dateindex(col) if df is None: self.err("Can not create date index") return self.df = df self.ok("Added a datetime index from column", col)
Set a datetime index from a column :param col: column name where to index the date from :type col: str :example: ``ds.dateindex("mycol")``
def add_reshape(self, name, input_name, output_name, target_shape, mode): spec = self.spec nn_spec = self.nn_spec spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) spec_layer_params = spec_layer.reshape spec_layer_params.targetShape.extend(target_shape) if mode == 0: spec_layer_params.mode = \ _NeuralNetwork_pb2.ReshapeLayerParams.ReshapeOrder.Value('CHANNEL_FIRST') else: spec_layer_params.mode = \ _NeuralNetwork_pb2.ReshapeLayerParams.ReshapeOrder.Value('CHANNEL_LAST') if len(target_shape) != 4 and len(target_shape) != 3: raise ValueError("Length of the 'target-shape' parameter must be equal to 3 or 4")
Add a reshape layer. Kindly refer to NeuralNetwork.proto for details. Parameters ---------- name: str The name of this layer. target_shape: tuple Shape of the output blob. The product of target_shape must be equal to the shape of the input blob. Can be either length 3 (C,H,W) or length 4 (Seq,C,H,W). mode: int - If mode == 0, the reshape layer is in CHANNEL_FIRST mode. - If mode == 1, the reshape layer is in CHANNEL_LAST mode. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. See Also -------- add_flatten, add_permute
def make_choices_tuple(choices, get_display_name): assert callable(get_display_name) return tuple((x, get_display_name(x)) for x in choices)
Make a tuple for the choices parameter for a data model field. :param choices: sequence of valid values for the model field :param get_display_name: callable that returns the human-readable name for a choice :return: A tuple of 2-tuples (choice, display_name) suitable for the choices parameter
def _execute_install_command(cmd, parse_output, errors, parsed_packages): out = __salt__['cmd.run_all']( cmd, output_loglevel='trace', python_shell=False ) if out['retcode'] != 0: if out['stderr']: errors.append(out['stderr']) else: errors.append(out['stdout']) elif parse_output: parsed_packages.update(_parse_reported_packages_from_install_output(out['stdout']))
Executes a command for the install operation. If the command fails, its error output will be appended to the errors list. If the command succeeds and parse_output is true, updated packages will be appended to the parsed_packages dictionary.
def cli(obj, show_userinfo): client = obj['client'] userinfo = client.userinfo() if show_userinfo: for k, v in userinfo.items(): if isinstance(v, list): v = ', '.join(v) click.echo('{:20}: {}'.format(k, v)) else: click.echo(userinfo['preferred_username'])
Display logged in user or full userinfo.
def add_system_classpath(): if 'CLASSPATH' in os.environ: parts = os.environ['CLASSPATH'].split(os.pathsep) for part in parts: javabridge.JARS.append(part)
Adds the system's classpath to the JVM's classpath.
def has_translation(self, language_code=None, related_name=None): if language_code is None: language_code = self._current_language if language_code is None: raise ValueError(get_null_language_error()) meta = self._parler_meta._get_extension_by_related_name(related_name) try: return not is_missing(self._translations_cache[meta.model][language_code]) except KeyError: if language_code in self._read_prefetched_translations(meta=meta): return True object = get_cached_translation(self, language_code, related_name=related_name, use_fallback=True) if object is not None: return object.language_code == language_code try: self._get_translated_model(language_code, use_fallback=False, auto_create=False, meta=meta) except meta.model.DoesNotExist: return False else: return True
Return whether a translation for the given language exists. Defaults to the current language code. .. versionadded 1.2 Added the ``related_name`` parameter.
def get_context_data(self, **kwargs): context = super(BaseAuthorDetail, self).get_context_data(**kwargs) context['author'] = self.author return context
Add the current author in context.
def route_method(method_name, extra_part=False): def wrapper(callable_obj): if method_name.lower() not in DEFAULT_ROUTES: raise HandlerHTTPMethodError( 'Invalid http method in method: {}'.format(method_name) ) callable_obj.http_method = method_name.upper() callable_obj.url_extra_part = callable_obj.__name__ if extra_part\ else None return classmethod(callable_obj) return wrapper
Custom handler routing decorator. Signs a web handler callable with the http method as attribute. Args: method_name (str): HTTP method name (i.e GET, POST) extra_part (bool): Indicates if wrapped callable name should be a part of the actual endpoint. Returns: A wrapped handler callable. examples: >>> @route_method('GET') ... def method(): ... return "Hello!" ... >>> method.http_method 'GET' >>> method.url_extra_part None
def find_ips_by_equip(self, id_equip): url = 'ip/getbyequip/' + str(id_equip) + "/" code, xml = self.submit(None, 'GET', url) return self.response(code, xml, ['ipv4', 'ipv6'])
Get Ips related to equipment by its identifier :param id_equip: Equipment identifier. Integer value and greater than zero. :return: Dictionary with the following structure: { ips: { ipv4:[ id: <id_ip4>, oct1: <oct1>, oct2: <oct2>, oct3: <oct3>, oct4: <oct4>, descricao: <descricao> ] ipv6:[ id: <id_ip6>, block1: <block1>, block2: <block2>, block3: <block3>, block4: <block4>, block5: <block5>, block6: <block6>, block7: <block7>, block8: <block8>, descricao: <descricao> ] } } :raise UserNotAuthorizedError: User dont have permission to list ips. :raise InvalidParameterError: Equipment identifier is none or invalid. :raise XMLError: Networkapi failed to generate the XML response. :raise DataBaseError: Networkapi failed to access the database.
def money(s, thousand_sep=".", decimal_sep=","): s = s.replace(thousand_sep, "") s = s.replace(decimal_sep, ".") return Decimal(s)
Converts money amount in string to a Decimal object. With the default arguments, the format is expected to be ``-38.500,00``, where dots separate thousands and comma the decimals. Args: thousand_sep: Separator for thousands. decimal_sep: Separator for decimals. Returns: A ``Decimal`` object of the string encoded money amount.
def _package_transform(package, fqdn, start=1, *args, **kwargs): node = _obj_getattr(package, fqdn, start) if node is not None and hasattr(node, "__call__"): return node(*args, **kwargs) else: return args
Applies the specified package transform with `fqdn` to the package. Args: package: imported package object. fqdn (str): fully-qualified domain name of function in the package. If it does not include the package name, then set `start=0`. start (int): in the '.'-split list of identifiers in `fqdn`, where to start looking in the package. E.g., `numpy.linalg.norm` has `start=1` since `package=numpy`; however, `linalg.norm` would have `start=0`.
def encode(precision, with_z): logger = logging.getLogger('geobuf') stdin = click.get_text_stream('stdin') sink = click.get_binary_stream('stdout') try: data = json.load(stdin) pbf = geobuf.encode( data, precision if precision >= 0 else 6, 3 if with_z else 2) sink.write(pbf) sys.exit(0) except Exception: logger.exception("Failed. Exception caught") sys.exit(1)
Given GeoJSON on stdin, writes a geobuf file to stdout.
def interpolate_psd(psd_f, psd_amp, deltaF): new_psd_f = [] new_psd_amp = [] fcurr = psd_f[0] for i in range(len(psd_f) - 1): f_low = psd_f[i] f_high = psd_f[i+1] amp_low = psd_amp[i] amp_high = psd_amp[i+1] while(1): if fcurr > f_high: break new_psd_f.append(fcurr) gradient = (amp_high - amp_low) / (f_high - f_low) fDiff = fcurr - f_low new_psd_amp.append(amp_low + fDiff * gradient) fcurr = fcurr + deltaF return numpy.asarray(new_psd_f), numpy.asarray(new_psd_amp)
Function to interpolate a PSD to a different value of deltaF. Uses linear interpolation. Parameters ---------- psd_f : numpy.array or list or similar List of the frequencies contained within the PSD. psd_amp : numpy.array or list or similar List of the PSD values at the frequencies in psd_f. deltaF : float Value of deltaF to interpolate the PSD to. Returns -------- new_psd_f : numpy.array Array of the frequencies contained within the interpolated PSD new_psd_amp : numpy.array Array of the interpolated PSD values at the frequencies in new_psd_f.
def depth(args): p = OptionParser(depth.__doc__) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) readsbed, featsbed = args fp = open(featsbed) nargs = len(fp.readline().split("\t")) keepcols = ",".join(str(x) for x in range(1, nargs + 1)) cmd = "coverageBed -a {0} -b {1} -d".format(readsbed, featsbed) cmd += " | groupBy -g {0} -c {1} -o mean".format(keepcols, nargs + 2) sh(cmd, outfile=opts.outfile)
%prog depth reads.bed features.bed Calculate depth depth per feature using coverageBed.
def get_feature_variable_string(self, feature_key, variable_key, user_id, attributes=None): variable_type = entities.Variable.Type.STRING return self._get_feature_variable_for_type(feature_key, variable_key, variable_type, user_id, attributes)
Returns value for a certain string variable attached to a feature. Args: feature_key: Key of the feature whose variable's value is being accessed. variable_key: Key of the variable whose value is to be accessed. user_id: ID for user. attributes: Dict representing user attributes. Returns: String value of the variable. None if: - Feature key is invalid. - Variable key is invalid. - Mismatch with type of variable.
def write_can_msg(self, channel, can_msg): c_can_msg = (CanMsg * len(can_msg))(*can_msg) c_count = DWORD(len(can_msg)) UcanWriteCanMsgEx(self._handle, channel, c_can_msg, c_count) return c_count
Transmits one ore more CAN messages through the specified CAN channel of the device. :param int channel: CAN channel, which is to be used (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`). :param list(CanMsg) can_msg: List of CAN message structure (see structure :class:`CanMsg`). :return: The number of successfully transmitted CAN messages. :rtype: int
def _set_state(self, new_state, diag=None): old_state = self._session_state LOG.info("[BFD][%s][STATE] State changed from %s to %s.", hex(self._local_discr), bfd.BFD_STATE_NAME[old_state], bfd.BFD_STATE_NAME[new_state]) self._session_state = new_state if new_state == bfd.BFD_STATE_DOWN: if diag is not None: self._local_diag = diag self._desired_min_tx_interval = 1000000 self._is_polling = True self._update_xmit_period() elif new_state == bfd.BFD_STATE_UP: self._desired_min_tx_interval = self._cfg_desired_min_tx_interval self._is_polling = True self._update_xmit_period() self.app.send_event_to_observers( EventBFDSessionStateChanged(self, old_state, new_state))
Set the state of the BFD session.
def mpub(self, topic, *messages): return self.send(constants.MPUB + ' ' + topic, messages)
Publish multiple messages to a topic
def multi_to_dict(multi): return dict( (key, value[0] if len(value) == 1 else value) for key, value in multi.to_dict(False).items() )
Transform a Werkzeug multidictionnary into a flat dictionnary
def post_versions_list(self, updater_name=None, updater_id=None, post_id=None, start_id=None): params = { 'search[updater_name]': updater_name, 'search[updater_id]': updater_id, 'search[post_id]': post_id, 'search[start_id]': start_id } return self._get('post_versions.json', params)
Get list of post versions. Parameters: updater_name (str): updater_id (int): post_id (int): start_id (int):
def get_all_events(self): self.all_events = {} events = self.tree.execute("$.events.frames") if events is None: return for e in events: event_type = e.get('type') frame_id = e.get('frame_id') try: self.all_events[event_type].append(frame_id) except KeyError: self.all_events[event_type] = [frame_id]
Gather all event IDs in the REACH output by type. These IDs are stored in the self.all_events dict.
def py_func_grad(func, inp, Tout, stateful=True, name=None, grad=None): rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8)) tf.RegisterGradient(rnd_name)(grad) g = tf.get_default_graph() with g.gradient_override_map({"PyFunc": rnd_name, "PyFuncStateless": rnd_name}): return tf.py_func(func, inp, Tout, stateful=stateful, name=name)
Custom py_func with gradient support
def _scale_back_response(bqm, response, scalar, ignored_interactions, ignored_variables, ignore_offset): if len(ignored_interactions) + len( ignored_variables) + ignore_offset == 0: response.record.energy = np.divide(response.record.energy, scalar) else: response.record.energy = bqm.energies((response.record.sample, response.variables)) return response
Helper function to scale back the response of sample method
def count(tex): soup = TexSoup(tex) labels = set(label.string for label in soup.find_all('label')) return dict((label, soup.find_all('\ref{%s}' % label)) for label in labels)
Extract all labels, then count the number of times each is referenced in the provided file. Does not follow \includes.
def nn_x(self, x, k=1, radius=np.inf, eps=0.0, p=2): assert len(x) == self.dim_x k_x = min(k, self.size) return self._nn(DATA_X, x, k=k_x, radius=radius, eps=eps, p=p)
Find the k nearest neighbors of x in the observed input data @see Databag.nn() for argument description @return distance and indexes of found nearest neighbors.
def exists_using_casper(self, filename): casper_results = casper.Casper(self.connection["jss"]) distribution_servers = casper_results.find("distributionservers") all_packages = [] for distribution_server in distribution_servers: packages = set() for package in distribution_server.findall("packages/package"): packages.add(os.path.basename(package.find("fileURL").text)) all_packages.append(packages) base_set = all_packages.pop() for packages in all_packages: base_set = base_set.intersection(packages) return filename in base_set
Check for the existence of a package file. Unlike other DistributionPoint types, JDS and CDP types have no documented interface for checking whether the server and its children have a complete copy of a file. The best we can do is check for an object using the API /packages URL--JSS.Package() or /scripts and look for matches on the filename. If this is not enough, this method uses the results of the casper.jxml page to determine if a package exists. This is an undocumented feature and as such should probably not be relied upon. Please note, scripts are not listed per-distributionserver like packages. For scripts, the best you can do is use the regular exists method. It will test for whether the file exists on ALL configured distribution servers. This may register False if the JDS is busy syncing them.
def progressbar(length, label): return click.progressbar(length=length, label=label, show_pos=True)
Creates a progressbar Parameters ---------- length int Length of the ProgressBar label str Label to give to the progressbar Returns ------- click.progressbar Progressbar
def score(self, phone_number, account_lifecycle_event, **params): return self.post(SCORE_RESOURCE.format(phone_number=phone_number), account_lifecycle_event=account_lifecycle_event, **params)
Score is an API that delivers reputation scoring based on phone number intelligence, traffic patterns, machine learning, and a global data consortium. See https://developer.telesign.com/docs/score-api for detailed API documentation.
def backfill_previous_messages(self, reverse=False, limit=10): res = self.client.api.get_room_messages(self.room_id, self.prev_batch, direction="b", limit=limit) events = res["chunk"] if not reverse: events = reversed(events) for event in events: self._put_event(event)
Backfill handling of previous messages. Args: reverse (bool): When false messages will be backfilled in their original order (old to new), otherwise the order will be reversed (new to old). limit (int): Number of messages to go back.
def add_link(app, pagename, templatename, context, doctree): context['show_slidelink'] = ( app.config.slide_link_html_to_slides and hasattr(app.builder, 'get_outfilename') ) if context['show_slidelink']: context['slide_path'] = slide_path(app.builder, pagename)
Add the slides link to the HTML context.
def next(self): if self._current_index < len(self._collection): value = self._collection[self._current_index] self._current_index += 1 return value elif self._next_cursor: self.__fetch_next() return self.next() else: self._current_index = 0 raise StopIteration
Returns the next item in the cursor.
def delete_existing_cname(env, zone_id, dns_name): client = boto3.Session(profile_name=env).client('route53') startrecord = None newrecord_name = dns_name startrecord = find_existing_record(env, zone_id, newrecord_name, check_key='Type', check_value='CNAME') if startrecord: LOG.info("Deleting old record: %s", newrecord_name) _response = client.change_resource_record_sets( HostedZoneId=zone_id, ChangeBatch={'Changes': [{ 'Action': 'DELETE', 'ResourceRecordSet': startrecord }]}) LOG.debug('Response from deleting %s: %s', dns_name, _response)
Delete an existing CNAME record. This is used when updating to multi-region for deleting old records. The record can not just be upserted since it changes types. Args: env (str): Deployment environment. zone_id (str): Route53 zone id. dns_name (str): FQDN of application's dns entry to add/update.
def all_pairs_normalized_distances_reference(X): n_samples, n_cols = X.shape D = np.ones((n_samples, n_samples), dtype="float32") * np.inf for i in range(n_samples): diffs = X - X[i, :].reshape((1, n_cols)) missing_diffs = np.isnan(diffs) missing_counts_per_row = missing_diffs.sum(axis=1) valid_rows = missing_counts_per_row < n_cols D[i, valid_rows] = np.nanmean( diffs[valid_rows, :] ** 2, axis=1) return D
Reference implementation of normalized all-pairs distance, used for testing the more efficient implementation above for equivalence.
def _weighted_formula(form, weight_func): for e, mf in form.items(): if e == Atom.H: continue yield e, mf, weight_func(e)
Yield weight of each formula element.
def _get_table_names(statement): parts = statement.to_unicode().split() tables = set() for i, token in enumerate(parts): if token.lower() == 'from' or token.lower().endswith('join'): tables.add(parts[i + 1].rstrip(';')) return list(tables)
Returns table names found in the query. NOTE. This routine would use the sqlparse parse tree, but vnames don't parse very well. Args: statement (sqlparse.sql.Statement): parsed by sqlparse sql statement. Returns: list of str
def has_metadata(self, name): return self.has_implementation(name) or \ name in self.non_returning or \ name in self.prototypes
Check if a function has either an implementation or any metadata associated with it :param name: The name of the function as a string :return: A bool indicating if anything is known about the function
def resolve_remote(self, uri): try: return super(LocalRefResolver, self).resolve_remote(uri) except ValueError: return super(LocalRefResolver, self).resolve_remote( 'file://' + get_schema_path(uri.rsplit('.json', 1)[0]) )
Resolve a uri or relative path to a schema.
def transformer_clean(): hparams = transformer_base_v2() hparams.label_smoothing = 0.0 hparams.layer_prepostprocess_dropout = 0.0 hparams.attention_dropout = 0.0 hparams.relu_dropout = 0.0 hparams.max_length = 0 return hparams
No dropout, label smoothing, max_length.
def set_db_attribute(self, table, record, column, value, key=None): if key is not None: column = '%s:%s' % (column, key) command = ovs_vsctl.VSCtlCommand( 'set', (table, record, '%s=%s' % (column, value))) self.run_command([command])
Sets 'value' into 'column' in 'record' in 'table'. This method is corresponding to the following ovs-vsctl command:: $ ovs-vsctl set TBL REC COL[:KEY]=VALUE
def histogram_day_counts( df, variable ): if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]: log.error("index is not datetime") return False counts = df.groupby(df.index.weekday_name)[variable].count().reindex(calendar.day_name[0:]) counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
Create a week-long histogram of counts of the variable for each day. It is assumed that the DataFrame index is datetime and that the variable `weekday_name` exists.
def filter_time_nearest(self, time, regex=None): return min(self._get_datasets_with_times(regex), key=lambda i: abs((i[0] - time).total_seconds()))[-1]
Filter keys for an item closest to the desired time. Loops over all keys in the collection and uses `regex` to extract and build `datetime`s. The collection of `datetime`s is compared to `start` and the value that has a `datetime` closest to that requested is returned.If none of the keys in the collection match the regex, indicating that the keys are not date/time-based, a ``ValueError`` is raised. Parameters ---------- time : ``datetime.datetime`` The desired time regex : str, optional The regular expression to use to extract date/time information from the key. If given, this should contain named groups: 'year', 'month', 'day', 'hour', 'minute', 'second', and 'microsecond', as appropriate. When a match is found, any of those groups missing from the pattern will be assigned a value of 0. The default pattern looks for patterns like: 20171118_2356. Returns ------- The value with a time closest to that desired
def load_py(stream, filepath=None): with add_sys_paths(config.package_definition_build_python_paths): return _load_py(stream, filepath=filepath)
Load python-formatted data from a stream. Args: stream (file-like object). Returns: dict.
def _quote(str, LegalChars=_LegalChars): r if all(c in LegalChars for c in str): return str else: return '"' + _nulljoin(_Translator.get(s, s) for s in str) + '"'
r"""Quote a string for use in a cookie header. If the string does not need to be double-quoted, then just return the string. Otherwise, surround the string in doublequotes and quote (with a \) special characters.
async def is_pairwise_exists(wallet_handle: int, their_did: str) -> bool: logger = logging.getLogger(__name__) logger.debug("is_pairwise_exists: >>> wallet_handle: %r, their_did: %r", wallet_handle, their_did) if not hasattr(is_pairwise_exists, "cb"): logger.debug("is_pairwise_exists: Creating callback") is_pairwise_exists.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_bool)) c_wallet_handle = c_int32(wallet_handle) c_their_did = c_char_p(their_did.encode('utf-8')) res = await do_call('indy_is_pairwise_exists', c_wallet_handle, c_their_did, is_pairwise_exists.cb) logger.debug("is_pairwise_exists: <<< res: %r", res) return res
Check if pairwise is exists. :param wallet_handle: wallet handler (created by open_wallet). :param their_did: encoded Did. :return: true - if pairwise is exists, false - otherwise
def executions(self): if self._executions is None: self._executions = ExecutionList(self._version, flow_sid=self._solution['sid'], ) return self._executions
Access the executions :returns: twilio.rest.studio.v1.flow.execution.ExecutionList :rtype: twilio.rest.studio.v1.flow.execution.ExecutionList
def pending(): upgrader = InvenioUpgrader() logger = upgrader.get_logger() try: upgrades = upgrader.get_upgrades() if not upgrades: logger.info("All upgrades have been applied.") return logger.info("Following upgrade(s) are ready to be applied:") for u in upgrades: logger.info( " * {0} {1}".format(u.name, u.info)) except RuntimeError as e: for msg in e.args: logger.error(unicode(msg)) raise
Command for showing upgrades ready to be applied.
def get_utt_regions(self): regions = [] current_offset = 0 for utt_idx in sorted(self.utt_ids): offset = current_offset num_frames = [] refs = [] for cnt in self.containers: num_frames.append(cnt.get(utt_idx).shape[0]) refs.append(cnt.get(utt_idx, mem_map=True)) if len(set(num_frames)) != 1: raise ValueError('Utterance {} has not the same number of frames in all containers!'.format(utt_idx)) num_chunks = math.ceil(num_frames[0] / float(self.frames_per_chunk)) region = (offset, num_chunks, refs) regions.append(region) current_offset += num_chunks return regions
Return the regions of all utterances, assuming all utterances are concatenated. It is assumed that the utterances are sorted in ascending order for concatenation. A region is defined by offset (in chunks), length (num-chunks) and a list of references to the utterance datasets in the containers. Returns: list: List of with a tuple for every utterances containing the region info.
def tempput(local_path=None, remote_path=None, use_sudo=False, mirror_local_mode=False, mode=None): import warnings warnings.simplefilter('ignore', RuntimeWarning) if remote_path is None: remote_path = os.tempnam() put(local_path, remote_path, use_sudo, mirror_local_mode, mode) yield remote_path run("rm '{}'".format(remote_path))
Put a file to remote and remove it afterwards