code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def action(act, config):
    """Dispatch a CLI action to its ``do_*`` handler.

    Args:
        act: Action name; for config-specific actions it is resolved to a
            module-level ``do_<act>`` function.
        config: Configuration name; a falsy value makes this a no-op.
    """
    if not config:
        # No configuration selected: nothing to do.
        pass
    # Bug fix: `act is "list"` compared identity of a string literal, which
    # only works by accident of interning; use equality instead.
    elif act == "list":
        do_list()
    else:
        config_dir = os.path.join(CONFIG_ROOT, config)
        # Resolve the handler dynamically, e.g. act="add" -> do_add(...).
        globals()["do_" + act](config, config_dir)
CLI action preprocessor
def iterativeFetch(query, batchSize=default_batch_size):
    """Lazily yield the rows of a SQL fetch query as dicts, batch by batch."""
    rows = query.fetchmany(batchSize)
    while rows:
        for rowDict in sqliteRowsToDicts(rows):
            yield rowDict
        rows = query.fetchmany(batchSize)
Returns rows of a sql fetch query on demand
def from_environ(cls, environ=os.environ):
    """Construct a _PipelineContext from the App Engine task-queue environment."""
    # PATH_INFO minus its final segment; pad so a bare path still splits cleanly.
    parts = environ['PATH_INFO'].rsplit('/', 1) + ['']
    base_path = parts[0]
    return cls(environ['HTTP_X_APPENGINE_TASKNAME'],
               environ['HTTP_X_APPENGINE_QUEUENAME'],
               base_path)
Constructs a _PipelineContext from the task queue environment.
def source_get(method_name):
    """Create a getter that drops the current value and calls the source's
    method named ``method_name`` with the context's key as first argument.

    @param method_name: the name of a method belonging to the source.
    @type method_name: str
    """
    def getter(_value, context, **_params):
        source_method = getattr(context["model"].source, method_name)
        return _get(source_method, context["key"], (), {})
    return getter
Creates a getter that will drop the current value, and call the source's method with specified name using the context's key as first argument. @param method_name: the name of a method belonging to the source. @type method_name: str
def load_configuration(yaml: yaml.ruamel.yaml.YAML, filename: str) -> DictLike:
    """Load an analysis configuration from a YAML file.

    Args:
        yaml: YAML object used to parse the configuration.
        filename: Filename of the YAML configuration file.

    Returns:
        dict-like object containing the loaded configuration.
    """
    with open(filename, "r") as config_file:
        return yaml.load(config_file)
Load an analysis configuration from a file. Args: yaml: YAML object to use in loading the configuration. filename: Filename of the YAML configuration file. Returns: dict-like object containing the loaded configuration
def size(self):
    """Size of the underlying file in bytes, computed lazily and cached."""
    if self._size:
        return self._size
    self._size = os.path.getsize(self._path)
    return self._size
size in bytes
def get_review_sh(self, revision, item):
    """Add sorting hat enrichment fields for the author of the revision."""
    identity = self.get_sh_identity(revision)
    updated_on = parser.parse(item[self.get_field_date()])
    return self.get_item_sh_fields(identity, updated_on)
Add sorting hat enrichment fields for the author of the revision
def attach_db(self, db):
    """Attach a gffutils.FeatureDB (or the filename of one) for feature access.

    Useful to attach a db after this instance has already been created.

    Parameters
    ----------
    db : gffutils.FeatureDB or str or None
    """
    if db is None:
        return
    # A string is treated as a database filename.
    if isinstance(db, basestring):
        db = gffutils.FeatureDB(db)
    if not isinstance(db, gffutils.FeatureDB):
        raise ValueError(
            "`db` must be a filename or a gffutils.FeatureDB")
    self._kwargs['db'] = db
    self.db = db
Attach a gffutils.FeatureDB for access to features. Useful if you want to attach a db after this instance has already been created. Parameters ---------- db : gffutils.FeatureDB
def quote_xml(text):
    """Format a value for display as an XML text node.

    Returns:
        Unicode string (str on Python 3, unicode on Python 2).
    """
    text = _coerce_unicode(text)
    # CDATA sections are passed through untouched.
    return text if text.startswith(CDATA_START) else saxutils.escape(text)
Format a value for display as an XML text node. Returns: Unicode string (str on Python 3, unicode on Python 2)
def get_url(self, cmd, **args):
    """Expand the request URL for a request.

    Args:
        cmd: Command path fragment to expand.
        **args: Keyword arguments forwarded to the URL builder.

    Returns:
        Absolute URL string.
    """
    # Bug fix: `args` is a dict of keyword arguments, so it must be
    # re-applied with ** — the old `*args` passed only the dict's keys,
    # positionally, silently dropping every value.
    return self.http.base_url + self._mkurl(cmd, **args)
Expand the request URL for a request.
def log_to(logger):
    """Wrap a connection-taking function so that everything that happens on
    the connection is logged using the given logger.

    :type logger: Logger
    :param logger: The logger that handles the logging.
    """
    def decorator(function):
        return add_label(function, 'log_to', logger_id=id(logger))
    return decorator
Wraps a function that has a connection passed such that everything that happens on the connection is logged using the given logger. :type logger: Logger :param logger: The logger that handles the logging.
def inserir(self, name):
    """Insert a new Logical Environment and return its identifier.

    :param name: Logical Environment name (2 to 80 characters).
    :return: {'logical_environment': {'id': <id_logical_environment>}}
    :raise InvalidParameterError: Name is null or invalid.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    payload = {'logical_environment': {'name': name}}
    code, xml = self.submit(payload, 'POST', 'logicalenvironment/')
    return self.response(code, xml)
Inserts a new Logical Environment and returns its identifier. :param name: Logical Environment name. String with a minimum 2 and maximum of 80 characters :return: Dictionary with the following structure: :: {'logical_environment': {'id': < id_logical_environment >}} :raise InvalidParameterError: Name is null and invalid. :raise NomeAmbienteLogicoDuplicadoError: There is already a registered Logical Environment with the value of name. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
def cap(self):
    """"Cap" pipeline construction: no more inputs/outputs are expected, so
    the input and output nodes can be created along with the provenance.

    Raises:
        ArcanaError: if only some of _inputnodes/_outputnodes/_prov are set.
    """
    to_cap = (self._inputnodes, self._outputnodes, self._prov)
    if to_cap == (None, None, None):
        # Nothing built yet: create one input/output node per frequency
        # and generate the provenance record.
        self._inputnodes = {
            f: self._make_inputnode(f) for f in self.input_frequencies}
        self._outputnodes = {
            f: self._make_outputnode(f) for f in self.output_frequencies}
        self._prov = self._gen_prov()
    elif None in to_cap:
        # A partially-initialised triple is an inconsistent state.
        raise ArcanaError(
            "If one of _inputnodes, _outputnodes or _prov is not None then"
            " they all should be in {}".format(self))
"Caps" the construction of the pipeline, signifying that no more inputs and outputs are expected to be added and therefore the input and output nodes can be created along with the provenance.
def local_bind_ports(self):
    """Return a list containing the ports of the local side of the TCP tunnels."""
    self._check_is_started()
    ports = []
    for srv in self._server_list:
        if srv.local_port is not None:
            ports.append(srv.local_port)
    return ports
Return a list containing the ports of local side of the TCP tunnels
def get_t_factor(t1, t2):
    """Time difference between two datetimes, expressed as a decimal year.

    Returns None unless both datetimes are given and differ.
    """
    if t1 is None or t2 is None or t1 == t2:
        return None
    one_year = timedelta(days=365.25)
    return abs((t2 - t1).total_seconds() / one_year.total_seconds())
Time difference between two datetimes, expressed as decimal year
def _get_next_buffered_row(self):
    """Get the next row for iteration.

    Raises:
        StopIteration: once all rows have been consumed.
    """
    if self._iter_row == self._iter_nrows:
        raise StopIteration
    if self._row_buffer_index >= self._iter_row_buffer:
        # Buffer exhausted: refill starting at the current row.
        self._buffer_iter_rows(self._iter_row)
    data = self._row_buffer[self._row_buffer_index]
    # Advance both the global row cursor and the in-buffer cursor.
    self._iter_row += 1
    self._row_buffer_index += 1
    return data
Get the next row for iteration.
def disconnect(remote_app):
    """Disconnect user from remote application, removing the application as
    well as associated information."""
    handlers = current_oauthclient.disconnect_handlers
    if remote_app not in handlers:
        return abort(404)
    ret = handlers[remote_app]()
    db.session.commit()
    return ret
Disconnect user from remote application. Removes application as well as associated information.
def get_drill_bits_d_metric():
    """Return an array of possible metric drill diameters (mm)."""
    ranges = (
        np.arange(1.0, 10.0, 0.1),   # fine 0.1 mm steps below 10 mm
        np.arange(10.0, 18.0, 0.5),  # 0.5 mm steps up to 18 mm
        np.arange(18.0, 36.0, 1.0),  # 1 mm steps up to 36 mm
        np.arange(40.0, 55.0, 5.0),  # coarse 5 mm steps above 40 mm
    )
    return np.concatenate(ranges) * u.mm
Return array of possible drill diameters in metric.
def apply_rows(applicators, rows):
    """Yield rows after applying the applicator functions to them.

    Each applicator is a ``(cols, function)`` pair; for every listed column
    the row's value is replaced with ``function(row, value)``. Useful to
    cast strings to numeric datatypes, convert cell formats, extract
    features, and so on.

    Args:
        applicators: tuple of (cols, applicator) pairs.
        rows: iterable of rows for the applicators to be called on.

    Yields:
        Rows with the specified column values replaced by applicator results.

    .. deprecated:: v0.7.0
    """
    for row in rows:
        for cols, function in applicators:
            if not cols:
                continue
            for col in cols:
                row[col] = function(row, row.get(col, ''))
        yield row
Yield rows after applying the applicator functions to them. Applicators are simple unary functions that return a value, and that value is stored in the yielded row. E.g. `row[col] = applicator(row[col])`. These are useful to, e.g., cast strings to numeric datatypes, to convert formats stored in a cell, extract features for machine learning, and so on. Args: applicators: a tuple of (cols, applicator) where the applicator will be applied to each col in cols rows: an iterable of rows for applicators to be called on Yields: Rows with specified column values replaced with the results of the applicators .. deprecated:: v0.7.0
def _combine_core_aux_specs(self):
    """Combine permutations over core and auxiliary Calc specs."""
    return [_merge_dicts(core_dict, aux_dict)
            for core_dict in self._permute_core_specs()
            for aux_dict in self._permute_aux_specs()]
Combine permutations over core and auxiliary Calc specs.
def execute(self):
    """Deploys and executes the physical dataflow.

    Returns the accumulated actor handles for the deployed dataflow.
    """
    self._collect_garbage()
    upstream_channels = {}
    # Deploy operators in topological order so each operator's upstream
    # channels are known before its actors are generated.
    for node in nx.topological_sort(self.logical_topo):
        operator = self.operators[node]
        downstream_channels = self._generate_channels(operator)
        handles = self.__generate_actors(operator, upstream_channels,
                                         downstream_channels)
        if handles:
            self.actor_handles.extend(handles)
        # This operator's outputs become the next operators' inputs.
        upstream_channels.update(downstream_channels)
    logger.debug("Running...")
    return self.actor_handles
Deploys and executes the physical dataflow.
def _func_router(self, msg, fname, **config): FNAME = 'handle_%s_autocloud_%s' if ('compose_id' in msg['msg'] or 'compose_job_id' in msg['msg'] or 'autocloud.compose' in msg['topic']): return getattr(self, FNAME % ('v2', fname))(msg, **config) else: return getattr(self, FNAME % ('v1', fname))(msg, **config)
This method routes the messages based on the params and calls the appropriate method to process the message. The purpose of the method is to cope with the major message format changes across different releases.
def get_job_amounts(agent, project_name, spider_name=None):
    """Get the amounts of pending, running, and finished jobs.

    When `spider_name` is given, only that spider's jobs are counted.
    """
    job_list = agent.get_job_list(project_name)
    job_amounts = {}
    for state in ('pending', 'running', 'finished'):
        jobs = job_list[state]
        if spider_name is not None:
            jobs = [job for job in jobs if job['spider'] == spider_name]
        job_amounts[state] = len(jobs)
    return job_amounts
Get the amounts of pending, running, and finished jobs.
def _process_table_cells(self, table):
    """Compile all the table cells.

    Returns a list of rows. The rows may have different lengths because of
    column spans.
    """
    rows = []
    for i, tr in enumerate(table.find_all('tr')):
        row = []
        for c in tr.contents:
            cell_type = getattr(c, 'name', None)
            # Skip anything that is not an actual cell (e.g. text nodes).
            if cell_type not in ('td', 'th'):
                continue
            rowspan = int(c.attrs.get('rowspan', 1))
            colspan = int(c.attrs.get('colspan', 1))
            contents = self._process_children(c).strip()
            # Header cells outside the first row are rendered as bold text.
            if cell_type == 'th' and i > 0:
                contents = self._inline('**', contents)
            row.append(Cell(cell_type, rowspan, colspan, contents))
        rows.append(row)
    return rows
Compile all the table cells. Returns a list of rows. The rows may have different lengths because of column spans.
def imshow_interact(self, canvas, plot_function, extent=None, label=None,
                    vmin=None, vmax=None, **kwargs):
    """Optional hook: create an imshow controller streaming the image
    returned by `plot_function`. The origin of the imshow is (0, 0), such
    that X[0,0] gets plotted at [0,0] of the image; kwargs are plotting
    library specific.

    Raises:
        NotImplementedError: always, in this abstract base class.
    """
    raise NotImplementedError(
        "Implement all plot functions in AbstractPlottingLibrary in order"
        " to use your own plotting library")
This function is optional! Create an imshow controller to stream the image returned by the plot_function. There is an imshow controller written for matplotlib, which updates the imshow on changes in axis. The origin of the imshow is (0,0), such that X[0,0] gets plotted at [0,0] of the image! The kwargs are plotting-library-specific kwargs!
def _partial_extraction_fixed(self, idx, extra_idx=0): myarray = np.array([]) with open(self.abspath) as fobj: contents = fobj.readlines()[idx+extra_idx:] for line in contents: try: vals = re.findall(r' *[\w\-\+\.]*', line) temp = np.array([float(val) for val in vals if val not in ('', ' ')]) myarray = np.hstack((myarray, temp)) except ValueError: break return myarray
Private method for a single extraction on a fixed-type tab file
def add(self, item):
    """Add an item to the set; return whether it was newly added."""
    with self.lock:
        is_new = item not in self.set
        if is_new:
            self.set.add(item)
        return is_new
Add an item to the set, and return whether it was newly added
def _render_border_line(self, t, settings):
    """Render box border line.

    The line is sized from the width/margin settings, drawn with the border
    style settings, and finally wrapped in the configured margins.
    """
    # Compute the widget's inner width from width and margin settings.
    s = self._es(settings, self.SETTING_WIDTH, self.SETTING_MARGIN,
                 self.SETTING_MARGIN_LEFT, self.SETTING_MARGIN_RIGHT)
    w = self.calculate_width_widget(**s)
    # Draw the border characters for line type `t`.
    s = self._es(settings, self.SETTING_BORDER_STYLE,
                 self.SETTING_BORDER_FORMATING)
    border_line = self.fmt_border(w, t, **s)
    # Apply left/right margins around the rendered border.
    s = self._es(settings, self.SETTING_MARGIN, self.SETTING_MARGIN_LEFT,
                 self.SETTING_MARGIN_RIGHT, self.SETTING_MARGIN_CHAR)
    border_line = self.fmt_margin(border_line, **s)
    return border_line
Render box border line.
def peek(self, offset=0):
    """Look forward in the input text without stepping the current position.

    Returns None if the inspected position is at (or past) the end of the
    input.
    """
    target = self.pos + offset
    return self.text[target] if target < self.end else None
Looking forward in the input text without actually stepping the current position. returns None if the current position is at the end of the input.
def _CheckStorageFile(self, storage_file_path):
    """Checks if the storage file path is valid.

    Args:
        storage_file_path (str): path of the storage file.

    Raises:
        BadConfigOption: if the storage file path is invalid.
    """
    if os.path.exists(storage_file_path):
        if not os.path.isfile(storage_file_path):
            raise errors.BadConfigOption(
                'Storage file: {0:s} already exists and is not a file.'.format(
                    storage_file_path))
        logger.warning('Appending to an already existing storage file.')
    # An empty dirname means the current working directory.
    dirname = os.path.dirname(storage_file_path) or '.'
    if not os.access(dirname, os.W_OK):
        raise errors.BadConfigOption(
            'Unable to write to storage file: {0:s}'.format(storage_file_path))
Checks if the storage file path is valid. Args: storage_file_path (str): path of the storage file. Raises: BadConfigOption: if the storage file path is invalid.
def createuser(self, email, name='', password=''):
    """Create a bugzilla account and return its User record.

    :arg email: The email address to use in bugzilla
    :kwarg name: Real name to associate with the account
    :kwarg password: Password to set for the bugzilla account
    :raises XMLRPC Fault: 501 if the username already exists; 500 if the
        email address isn't valid; 502/503 if the password is too
        short/long
    :return: User record for the username
    """
    self._proxy.User.create(email, name, password)
    return self.getuser(email)
Return a bugzilla User for the given username :arg email: The email address to use in bugzilla :kwarg name: Real name to associate with the account :kwarg password: Password to set for the bugzilla account :raises XMLRPC Fault: Code 501 if the username already exists Code 500 if the email address isn't valid Code 502 if the password is too short Code 503 if the password is too long :return: User record for the username
def arrays2wcxf(C):
    """Flatten a dict of Wilson coefficients for WCxf output.

    Scalar values are kept as-is; array values are expanded into one entry
    per element, keyed ``<name>_<i><j>...`` with 1-based indices.
    """
    flat = {}
    for name, value in C.items():
        shape = np.shape(value)
        if shape == () or shape == (1,):
            flat[name] = value
            continue
        for index in np.ndindex(*shape):
            suffix = ''.join(str(i + 1) for i in index)
            flat[name + '_' + suffix] = value[index]
    return flat
Convert a dictionary with Wilson coefficient names as keys and numbers or numpy arrays as values to a dictionary with a Wilson coefficient name followed by underscore and numeric indices as keys and numbers as values. This is needed for the output in WCxf format.
def extend(self, **kwargs):
    """Return a new instance with this instance's data overlaid by the
    key-value args."""
    merged = self.copy()
    merged.update(kwargs)
    return TemplateData(**merged)
Returns a new instance with this instance's data overlayed by the key-value args.
def wait_for_contract(self, contract_address_hex, timeout=None):
    """Wait until a contract is mined.

    Args:
        contract_address_hex (string): hex encoded address of the contract
        timeout (int): seconds to wait for the contract to get mined

    Returns:
        True if the contract got mined, False if the wait timed out.
    """
    contract_address = decode_hex(contract_address_hex)
    start_time = time.time()
    result = self._raiden.chain.client.web3.eth.getCode(
        to_checksum_address(contract_address),
    )
    while not result:
        # Bug fix: give up once the deadline has *passed*. The original
        # comparison (`start_time + timeout > current_time`) was inverted
        # and returned False immediately, while the deadline was still in
        # the future.
        if timeout and time.time() > start_time + timeout:
            return False
        gevent.sleep(0.5)
        result = self._raiden.chain.client.web3.eth.getCode(
            to_checksum_address(contract_address),
        )
    return len(result) > 0
Wait until a contract is mined Args: contract_address_hex (string): hex encoded address of the contract timeout (int): time to wait for the contract to get mined Returns: True if the contract got mined, false otherwise
def _locate_file(f, base_dir): if base_dir == None: return f file_name = os.path.join(base_dir, f) real = os.path.realpath(file_name) return real
Utility method for finding full path to a filename as string
def force_encoding(self, encoding):
    """Set a fixed encoding; the change is emitted right away.

    Afterwards this buffer no longer switches the code page on its own,
    although it still keeps track of the current one. A falsy `encoding`
    re-enables automatic switching.
    """
    if encoding:
        self.write_with_encoding(encoding, None)
        self.disabled = True
    else:
        self.disabled = False
Sets a fixed encoding. The change is emitted right away. From now on, this buffer will not switch the code page anymore. However, it will still keep track of the current code page.
def setup_dictionary(self, task):
    """Compile the spelling dictionary configured for `task`.

    Returns the output path of the compiled dictionary, or None when no
    language/wordlists are configured.
    """
    options = task.get('dictionary', {})
    output = os.path.abspath(options.get('output', self.dict_bin))
    lang = options.get('lang', 'en_US')
    wordlists = options.get('wordlists', [])
    if not (lang and wordlists):
        return None
    self.compile_dictionary(lang, options.get('wordlists', []), None, output)
    return output
Setup dictionary.
def T_i(v_vars: List[fl.Var], mass: np.ndarray, i: int):
    """Make a Fluxion with the kinetic energy (1/2 m v^2) of body i."""
    assert len(v_vars) == 3 * len(mass)
    return (0.5 * mass[i]) * flux_v2(v_vars, i)
Make Fluxion with the kinetic energy of body i
def _dataset_line(args):
    """Implements the BigQuery dataset magic subcommand used to operate on
    datasets.

    The supported syntax is: %bq datasets <command> <args>
    Commands: {list, create, delete}

    Args:
        args: the optional arguments following '%bq datasets command'.
    """
    if args['command'] == 'list':
        # Default to matching every dataset name.
        filter_ = args['filter'] if args['filter'] else '*'
        context = google.datalab.Context.default()
        if args['project']:
            # Target the requested project with the default credentials.
            context = google.datalab.Context(args['project'], context.credentials)
        return _render_list([str(dataset) for dataset in bigquery.Datasets(context)
                             if fnmatch.fnmatch(str(dataset), filter_)])
    elif args['command'] == 'create':
        try:
            bigquery.Dataset(args['name']).create(friendly_name=args['friendly'])
        except Exception as e:
            print('Failed to create dataset %s: %s' % (args['name'], e))
    elif args['command'] == 'delete':
        try:
            bigquery.Dataset(args['name']).delete()
        except Exception as e:
            print('Failed to delete dataset %s: %s' % (args['name'], e))
Implements the BigQuery dataset magic subcommand used to operate on datasets The supported syntax is: %bq datasets <command> <args> Commands: {list, create, delete} Args: args: the optional arguments following '%bq datasets command'.
def get_in_net_id(cls, tenant_id):
    """Retrieve the network ID of the tenant's IN network, or None when the
    fabric is not prepared for that tenant."""
    if 'in' not in cls.ip_db_obj:
        LOG.error("Fabric not prepared for tenant %s", tenant_id)
        return None
    in_subnet_dict = cls.get_in_ip_addr(tenant_id)
    subnet = cls.ip_db_obj.get('in').get_subnet(in_subnet_dict.get('subnet'))
    return subnet.network_id
Retrieve the network ID of IN network.
def save_object(filename, obj):
    """Compress and pickle the given object to the given filename.

    Failures are logged and re-raised.
    """
    logging.info('saving {}...'.format(filename))
    try:
        payload = pickle.dumps(obj, 1)
        with gzip.GzipFile(filename, 'wb') as f:
            f.write(payload)
    except Exception as e:
        logging.error('save failure: {}'.format(e))
        raise
Compresses and pickles given object to the given filename.
def _on_change(self):
    """Callback run when any of the values change: refresh the example
    label with the newly generated font."""
    self._example_label.configure(font=self.__generate_font_tuple())
Callback if any of the values are changed.
def log(self, sequence, infoarray) -> None:
    """Pass the given IoSequence (and optional InfoArray) to a suitable
    NetCDF variable object, creating that object on first use.

    When writing data, `infoarray` should be an InfoArray; when reading,
    pass None.
    """
    # Data counts as aggregated unless the info type says "unmodified".
    aggregated = ((infoarray is not None) and
                  (infoarray.info['type'] != 'unmodified'))
    descr = sequence.descr_sequence
    if aggregated:
        # Make aggregated series distinguishable, e.g. `flux_nkor_mean`.
        descr = '_'.join([descr, infoarray.info['type']])
    if descr in self.variables:
        var_ = self.variables[descr]
    else:
        # Choose the variable class: aggregated data, flat layout, or deep.
        if aggregated:
            cls = NetCDFVariableAgg
        elif self._flatten:
            cls = NetCDFVariableFlat
        else:
            cls = NetCDFVariableDeep
        var_ = cls(name=descr, isolate=self._isolate,
                   timeaxis=self._timeaxis)
        self.variables[descr] = var_
    var_.log(sequence, infoarray)
Pass the given |IoSequence| to a suitable instance of a |NetCDFVariableBase| subclass. When writing data, the second argument should be an |InfoArray|. When reading data, this argument is ignored. Simply pass |None|. (1) We prepare some devices handling some sequences by applying function |prepare_io_example_1|. We limit our attention to the returned elements, which handle the more diverse sequences: >>> from hydpy.core.examples import prepare_io_example_1 >>> nodes, (element1, element2, element3) = prepare_io_example_1() (2) We define some shortcuts for the sequences used in the following examples: >>> nied1 = element1.model.sequences.inputs.nied >>> nied2 = element2.model.sequences.inputs.nied >>> nkor2 = element2.model.sequences.fluxes.nkor >>> nkor3 = element3.model.sequences.fluxes.nkor (3) We define a function that logs these example sequences to a given |NetCDFFile| object and prints some information about the resulting object structure. Note that sequence `nkor2` is logged twice, the first time with its original time series data, the second time with averaged values: >>> from hydpy import classname >>> def test(ncfile): ... ncfile.log(nied1, nied1.series) ... ncfile.log(nied2, nied2.series) ... ncfile.log(nkor2, nkor2.series) ... ncfile.log(nkor2, nkor2.average_series()) ... ncfile.log(nkor3, nkor3.average_series()) ... for name, variable in ncfile.variables.items(): ... print(name, classname(variable), variable.subdevicenames) (4) We prepare a |NetCDFFile| object with both options `flatten` and `isolate` being disabled: >>> from hydpy.core.netcdftools import NetCDFFile >>> ncfile = NetCDFFile( ... 'model', flatten=False, isolate=False, timeaxis=1, dirpath='') (5) We log all test sequences results in two |NetCDFVariableDeep| and one |NetCDFVariableAgg| objects. 
To keep both NetCDF variables related to |lland_fluxes.NKor| distinguishable, the name `flux_nkor_mean` includes information about the kind of aggregation performed: >>> test(ncfile) input_nied NetCDFVariableDeep ('element1', 'element2') flux_nkor NetCDFVariableDeep ('element2',) flux_nkor_mean NetCDFVariableAgg ('element2', 'element3') (6) We confirm that the |NetCDFVariableBase| objects received the required information: >>> ncfile.flux_nkor.element2.sequence.descr_device 'element2' >>> ncfile.flux_nkor.element2.array InfoArray([[ 16., 17.], [ 18., 19.], [ 20., 21.], [ 22., 23.]]) >>> ncfile.flux_nkor_mean.element2.sequence.descr_device 'element2' >>> ncfile.flux_nkor_mean.element2.array InfoArray([ 16.5, 18.5, 20.5, 22.5]) (7) We again prepare a |NetCDFFile| object, but now with both options `flatten` and `isolate` being enabled. To log test sequences with their original time series data does now trigger the initialisation of class |NetCDFVariableFlat|. When passing aggregated data, nothing changes: >>> ncfile = NetCDFFile( ... 'model', flatten=True, isolate=True, timeaxis=1, dirpath='') >>> test(ncfile) input_nied NetCDFVariableFlat ('element1', 'element2') flux_nkor NetCDFVariableFlat ('element2_0', 'element2_1') flux_nkor_mean NetCDFVariableAgg ('element2', 'element3') >>> ncfile.flux_nkor.element2.sequence.descr_device 'element2' >>> ncfile.flux_nkor.element2.array InfoArray([[ 16., 17.], [ 18., 19.], [ 20., 21.], [ 22., 23.]]) >>> ncfile.flux_nkor_mean.element2.sequence.descr_device 'element2' >>> ncfile.flux_nkor_mean.element2.array InfoArray([ 16.5, 18.5, 20.5, 22.5]) (8) We technically confirm that the `isolate` argument is passed to the constructor of subclasses of |NetCDFVariableBase| correctly: >>> from unittest.mock import patch >>> with patch('hydpy.core.netcdftools.NetCDFVariableFlat') as mock: ... ncfile = NetCDFFile( ... 'model', flatten=True, isolate=False, timeaxis=0, ... dirpath='') ... ncfile.log(nied1, nied1.series) ... 
mock.assert_called_once_with( ... name='input_nied', timeaxis=0, isolate=False)
def get_included_resources(request, serializer=None):
    """Build a list of included resources from the request's `include` query
    parameter, falling back to the serializer's defaults."""
    param = request.query_params.get('include') if request else None
    if not param:
        return get_default_included_resources_from_serializer(serializer)
    return param.split(',')
Build a list of included resources.
def latinize_text(text, ascii=False):
    """Transliterate the given text to the latin script, using the closest
    match of characters vis a vis the original script.

    With ``ascii=True`` the result is additionally folded to plain ASCII.
    """
    # Pass through None, non-strings and empty strings unchanged.
    if text is None or not isinstance(text, six.string_types) or not len(text):
        return text
    if ascii:
        # Cache the (expensive-to-create) transliterator on the function.
        if not hasattr(latinize_text, '_ascii'):
            latinize_text._ascii = Transliterator.createInstance('Any-Latin; NFKD; [:Symbol:] Remove; [:Nonspacing Mark:] Remove; NFKC; Accents-Any; Latin-ASCII')
        return latinize_text._ascii.transliterate(text)
    if not hasattr(latinize_text, '_tr'):
        latinize_text._tr = Transliterator.createInstance('Any-Latin')
    return latinize_text._tr.transliterate(text)
Transliterate the given text to the latin script. This attempts to convert a given text to latin script using the closest match of characters vis a vis the original script.
def fetch_state_data(self, states):
    """Fetch census estimates from table.

    For each census table series and each of its variables, pulls state-,
    county-, and district-level estimates for every state in `states`.
    """
    print("Fetching census data")
    for table in CensusTable.objects.all():
        api = self.get_series(table.series)
        for variable in table.variables.all():
            # Estimate key, e.g. "<table code>_<variable code>".
            estimate = "{}_{}".format(table.code, variable.code)
            print(
                ">> Fetching {} {} {}".format(
                    table.year, table.series, estimate
                )
            )
            # tqdm gives a progress bar over the states.
            for state in tqdm(states):
                self.get_state_estimates_by_state(
                    api=api, table=table, variable=variable,
                    estimate=estimate, state=state,
                )
                self.get_county_estimates_by_state(
                    api=api, table=table, variable=variable,
                    estimate=estimate, state=state,
                )
                self.get_district_estimates_by_state(
                    api=api, table=table, variable=variable,
                    estimate=estimate, state=state,
                )
Fetch census estimates from table.
def cc(project, detect_project=False):
    """Return a wrapped C compiler that hides CFLAGS and LDFLAGS.

    Generates a wrapper script in the current directory and returns a
    complete plumbum command pointing at it.
    """
    from benchbuild.utils import cmd
    compiler_name = str(CFG["compiler"]["c"])
    wrap_cc(compiler_name, compiler(compiler_name), project,
            detect_project=detect_project)
    return cmd["./{}".format(compiler_name)]
Return a clang that hides CFLAGS and LDFLAGS. This will generate a wrapper script in the current directory and return a complete plumbum command to it. Args: cflags: The CFLAGS we want to hide. ldflags: The LDFLAGS we want to hide. func (optional): A function that will be pickled alongside the compiler. It will be called before the actual compilation took place. This way you can intercept the compilation process with arbitrary python code. Returns (benchbuild.utils.cmd): Path to the new clang command.
def to_bytes(self):
    """Return packed byte representation of the Ethernet header."""
    fields = (self._dst.packed, self._src.packed, self._ethertype.value)
    return struct.pack(Ethernet._PACKFMT, *fields)
Return packed byte representation of the Ethernet header.
def fill_predictive_missing_parameters(self):
    """Fill `address`/`alias` with `host_name` when not already set, and
    derive the DOWN/UNREACHABLE state from `initial_state`.

    :return: None
    """
    if hasattr(self, 'host_name'):
        if not hasattr(self, 'address'):
            self.address = self.host_name
        if not hasattr(self, 'alias'):
            self.alias = self.host_name
    if self.initial_state == 'd':
        self.state = 'DOWN'
    elif self.initial_state == 'x':
        self.state = 'UNREACHABLE'
Fill address with host_name if not already set and define state with initial_state :return: None
def get_storage_info(self, human=False):
    """Get storage info.

    :param bool human: whether to return human-readable sizes
    :return: total and used storage
    :rtype: dict
    """
    info = self._req_get_storage_info()
    if human:
        for key in ('total', 'used'):
            info[key] = humanize.naturalsize(info[key], binary=True)
    return info
Get storage info :param bool human: whether return human-readable size :return: total and used storage :rtype: dict
def raise_for_error(self):
    """Raise `ShCmdError` if the proc's return_code is not 0, otherwise
    return self.

    ..Usage::

        >>> proc = shcmd.run("ls").raise_for_error()
        >>> proc.return_code == 0
        True
    """
    if self.ok:
        return self
    tip = "running {0} @<{1}> error, return code {2}".format(
        " ".join(self.cmd), self.cwd, self.return_code
    )
    # Log full stdout/stderr before raising so the failure is diagnosable.
    logger.error("{0}\nstdout:{1}\nstderr:{2}\n".format(
        tip, self._stdout.decode("utf8"), self._stderr.decode("utf8")
    ))
    raise ShCmdError(self)
raise `ShCmdError` if the proc's return_code is not 0 otherwise return self ..Usage:: >>> proc = shcmd.run("ls").raise_for_error() >>> proc.return_code == 0 True
def add_view(self, *args, **kwargs):
    """Create a new View from the given arguments, register it, and return it.

    Raises:
        ValueError: if a view with the same uid is already registered.
    """
    new_view = View(*args, **kwargs)
    if any(existing.uid == new_view.uid for existing in self.views):
        raise ValueError("View with this uid already exists")
    self.views += [new_view]
    return new_view
Add a new view Parameters ---------- uid: string The uid of new view width: int The width of this of view on a 12 unit grid height: int The height of the this view. The height is proportional to the height of all the views present. x: int The position of this view on the grid y: int The position of this view on the grid initialXDoamin: [int, int] The initial x range of the view initialYDomain: [int, int] The initial y range of the view
def _get_class_handlers(cls, signal_name, instance): handlers = cls._signal_handlers_sorted[signal_name] return [getattr(instance, hname) for hname in handlers]
Returns the handlers registered at class level.
def run_manage_command(self, command, venv_path, verbose=True):
    """Run a Django manage command in the given virtual environment.

    :param str command:
    :param str venv_path:
    :param bool verbose: when False, the command output is piped.
    """
    self.logger.debug('Running manage command `%s` for `%s` ...' % (command, venv_path))
    shell_command = '. %s/bin/activate && python %s %s' % (
        venv_path, self._get_manage_py_path(), command)
    self._run_shell_command(shell_command, pipe_it=(not verbose))
Runs a given Django manage command in a given virtual environment. :param str command: :param str venv_path: :param bool verbose:
def get_current_clementine():
    """Get the current song from Clementine, preferring the MPRIS2 interface
    and falling back to the legacy MPRIS interface."""
    try:
        return get_info_mpris2('clementine')
    except DBusErrorResponse:
        return dbus_get_metadata('/Player', 'org.mpris.clementine',
                                 'org.freedesktop.MediaPlayer')
Get the current song from clementine.
def unpack_message(buffer):
    """Unpack the whole buffer, including the header, into a message.

    Args:
        buffer (bytes): Bytes representation of an openflow message.

    Returns:
        object: Instance of openflow message.
    """
    header = Header()
    hdr_size = header.get_size()
    header.unpack(buffer[:hdr_size])
    message = new_message_from_header(header)
    message.unpack(buffer[hdr_size:])
    return message
Unpack the whole buffer, including header pack. Args: buffer (bytes): Bytes representation of a openflow message. Returns: object: Instance of openflow message.
def is_authenticated(user):
    """Return whether or not a User is authenticated.

    Compatibility shim for the deprecation of the `is_authenticated()`
    method call in Django 2.0: handles both the property form (Django
    1.10+) and the earlier method form.
    """
    if not hasattr(user, 'is_authenticated'):
        return False
    attr = user.is_authenticated
    return attr() if callable(attr) else attr
Return whether or not a User is authenticated. Function provides compatibility following deprecation of method call to `is_authenticated()` in Django 2.0. This is *only* required to support Django < v1.10 (i.e. v1.9 and earlier), as `is_authenticated` was introduced as a property in v1.10.
async def inspect(self, *, node_id: str) -> Mapping[str, Any]:
    """Inspect a node.

    Args:
        node_id: The ID or name of the node.
    """
    return await self.docker._query_json(
        "nodes/{node_id}".format(node_id=node_id), method="GET"
    )
Inspect a node Args: node_id: The ID or name of the node
def validate_receiver(self, key, value):
    """Validate the receiver identifier, raising if it is unknown."""
    if value in current_webhooks.receivers:
        return value
    raise ReceiverDoesNotExist(self.receiver_id)
Validate receiver identifier.
def _ConvertInteger(value):
    """Convert an integer.

    Args:
        value: A scalar value to convert.

    Returns:
        The integer value.

    Raises:
        ParseError: If an integer couldn't be consumed.
    """
    # A float is only acceptable when it carries no fractional part.
    if isinstance(value, float) and not value.is_integer():
        raise ParseError('Couldn\'t parse integer: {0}.'.format(value))
    # Reject strings containing embedded spaces.
    if isinstance(value, six.text_type) and value.find(' ') != -1:
        raise ParseError('Couldn\'t parse integer: "{0}".'.format(value))
    return int(value)
Convert an integer. Args: value: A scalar value to convert. Returns: The integer value. Raises: ParseError: If an integer couldn't be consumed.
def raw(self, from_, to, body):
    """Send a raw MIME message.

    `to` must be an enumerable of recipients, not a bare string.
    """
    if isinstance(to, string_types):
        raise TypeError('"to" parameter must be enumerable')
    payload = {'from': from_, 'to': to, 'body': body}
    return self._session.post('{}/raw'.format(self._url), json=payload).json()
Send a raw MIME message.
def LL(n):
    """Construct the LL context recursively."""
    if n <= 0:
        return Context('0')
    prev = LL(n - 1)
    r1 = C1(3 ** (n - 1), 2 ** (n - 1)) - prev - prev
    r2 = prev - prev - prev
    return r1 + r2
constructs the LL context
def decision_function(self, X):
    """Generate an inlier score for each test data example.

    Parameters
    ----------
    X : array, shape (N, d)
        Test data (rows are examples, columns are data dimensions).

    Returns
    -------
    scores : array, shape (N, 1)
        Inlier scores in the range 0-1: close to one for inliers, close to
        zero for outliers.
    """
    proba = self.predict_proba(X)
    # One minus the probability of the last (outlier) class.
    return (1.0 - proba[:, -1]).reshape(-1, 1)
Generate an inlier score for each test data example. Parameters ---------- X : array Test data, of dimension N times d (rows are examples, columns are data dimensions) Returns: ------- scores : array A vector of length N, where each element contains an inlier score in the range 0-1 (outliers have values close to zero, inliers have values close to one).
def oidc_to_user_data(payload):
    """Map OIDC claims to Django user fields.

    Known claims are renamed (given_name -> first_name, etc.); all other
    claims are passed through unchanged.
    """
    remaining = payload.copy()
    mapped = {}
    for claim, field in (('given_name', 'first_name'),
                         ('family_name', 'last_name'),
                         ('email', 'email')):
        if claim in remaining:
            mapped[field] = remaining.pop(claim)
    mapped.update(remaining)
    return mapped
Map OIDC claims to Django user fields.
def split_flanks(self, _, result):
    """Return `result` without flanking whitespace, remembering the stripped
    left/right parts on self."""
    if not result.strip():
        # Pure whitespace (or empty): nothing to split.
        self.left, self.right = "", ""
        return result
    match = self.flank_re.match(result)
    assert match, "This regexp should always match"
    self.left = match.group(1)
    self.right = match.group(3)
    return match.group(2)
Return `result` without flanking whitespace.
def choose(items, title_text, question_text):
    """Interactively choose one of `items`; return it, or None on abort.

    Also returns None (after printing 'Aborting.') on invalid input or when
    `items` is empty.
    """
    print(title_text)
    # Bug fix: initialise the counter so the "Abort" entry (and the rest of
    # the function) doesn't raise NameError when `items` is empty.
    index_of_last = 0
    for index_of_last, item in enumerate(items, start=1):
        print('%d) %s' % (index_of_last, item))
    print('%d) Abort' % (index_of_last + 1))
    selected = input(question_text)
    try:
        index = int(selected)
    except ValueError:
        index = -1
    if (index - 1) not in range(len(items)):
        print('Aborting.')
        return None
    return items[index - 1]
Interactively choose one of the items.
def dumpfile(item, path):
    """Dump an object to a file by path.

    Args:
        item (object): The object to serialize.
        path (str): The file path to save.

    Returns:
        None
    """
    serialized = en(item)
    with io.open(path, 'wb') as fd:
        fd.write(serialized)
Dump an object to a file by path. Args: item (object): The object to serialize. path (str): The file path to save. Returns: None
def gauge(self, *args, **kwargs):
    """Compatibility wrapper for Agents that do not submit gauge metrics
    with custom timestamps."""
    orig_gauge = super(Nagios, self).gauge
    # Drop `timestamp` when the parent's gauge() doesn't accept it.
    # NOTE(review): `getargspec` was removed in Python 3.11 — presumably
    # this codebase targets an older Python; confirm before upgrading.
    if 'timestamp' in kwargs and 'timestamp' not in getargspec(orig_gauge).args:
        del kwargs['timestamp']
    orig_gauge(*args, **kwargs)
Compatability wrapper for Agents that do not submit gauge metrics with custom timestamps
def match_rank(self, ps):
    """Returns true if the generator can be run with the specified properties."""
    assert isinstance(ps, property_set.PropertySet)
    # Split requirements into valued properties and bare features.
    property_requirements = []
    feature_requirements = []
    for requirement in self.requirements():
        if get_value(requirement):
            property_requirements.append(requirement)
        else:
            feature_requirements.append(requirement)
    # Valued requirements must match exactly; bare features just need a value.
    for req in property_requirements:
        if ps.get(get_grist(req)) != [get_value(req)]:
            return False
    for req in feature_requirements:
        if not ps.get(get_grist(req)):
            return False
    return True
Returns true if the generator can be run with the specified properties.
def get_num_primal_variables(self):
    """Gets number of primal variables.

    Returns
    -------
    num : int
    """
    # Try each known quantity in priority order; the first one present
    # determines the variable count.
    for candidate, measure in ((self.x, lambda v: v.size),
                               (self.gphi, lambda v: v.size),
                               (self.Hphi, lambda v: v.shape[0]),
                               (self.A, lambda v: v.shape[1]),
                               (self.J, lambda v: v.shape[1]),
                               (self.u, lambda v: v.size),
                               (self.l, lambda v: v.size)):
        if candidate is not None:
            return measure(candidate)
    return 0
Gets number of primal variables. Returns ------- num : int
def pop_changeset(self, changeset_id: uuid.UUID) -> Dict[bytes, Union[bytes, DeletedEntry]]:
    """Pop and return all changes from ``changeset_id`` onward.

    Merges the given changeset with every subsequent changeset (later
    changesets take precedence) and removes them all from the journal.

    :param changeset_id: the changeset to pop; must exist in the journal
    :raises KeyError: if ``changeset_id`` is not a known changeset
    :return: the merged key/value changes of the popped changesets
    """
    if changeset_id not in self.journal_data:
        raise KeyError(changeset_id, "Unknown changeset in JournalDB")
    all_ids = tuple(self.journal_data.keys())
    changeset_idx = all_ids.index(changeset_id)
    # Everything at or after the requested changeset gets popped.
    changesets_to_pop = all_ids[changeset_idx:]
    # Changesets recorded in _clears_at wipe earlier data, so changesets
    # before the *last* popped clear are dropped instead of merged.
    popped_clears = tuple(idx for idx in changesets_to_pop if idx in self._clears_at)
    if popped_clears:
        last_clear_idx = changesets_to_pop.index(popped_clears[-1])
        changesets_to_drop = changesets_to_pop[:last_clear_idx]
        changesets_to_merge = changesets_to_pop[last_clear_idx:]
    else:
        changesets_to_drop = ()
        changesets_to_merge = changesets_to_pop
    # merge() gives precedence to later mappings, i.e. later changesets win.
    changeset_data = merge(*(
        self.journal_data.pop(c_id)
        for c_id in changesets_to_merge
    ))
    # Discard (without merging) everything that predates the last clear.
    for changeset_id in changesets_to_drop:
        self.journal_data.pop(changeset_id)
    self._clears_at.difference_update(popped_clears)
    return changeset_data
Returns all changes from the given changeset. This includes all of the changes from any subsequent changeset, giving precedence to later changesets.
def nsmallest(self, n, columns, keep='first'):
    """Return the first `n` rows ordered by `columns` in ascending order.

    Equivalent to ``df.sort_values(columns, ascending=True).head(n)`` but
    delegates to the shared selection helper, which is more performant
    than a full sort.
    """
    return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nsmallest()
Return the first `n` rows ordered by `columns` in ascending order. Return the first `n` rows with the smallest values in `columns`, in ascending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``, but more performant. Parameters ---------- n : int Number of items to retrieve. columns : list or str Column name or names to order by. keep : {'first', 'last', 'all'}, default 'first' Where there are duplicate values: - ``first`` : take the first occurrence. - ``last`` : take the last occurrence. - ``all`` : do not drop any duplicates, even it means selecting more than `n` items. .. versionadded:: 0.24.0 Returns ------- DataFrame See Also -------- DataFrame.nlargest : Return the first `n` rows ordered by `columns` in descending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Examples -------- >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000, ... 434000, 434000, 337000, 11300, ... 11300, 11300], ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128, ... 17036, 182, 38, 311], ... 'alpha-2': ["IT", "FR", "MT", "MV", "BN", ... "IS", "NR", "TV", "AI"]}, ... index=["Italy", "France", "Malta", ... "Maldives", "Brunei", "Iceland", ... "Nauru", "Tuvalu", "Anguilla"]) >>> df population GDP alpha-2 Italy 59000000 1937894 IT France 65000000 2583560 FR Malta 434000 12011 MT Maldives 434000 4520 MV Brunei 434000 12128 BN Iceland 337000 17036 IS Nauru 11300 182 NR Tuvalu 11300 38 TV Anguilla 11300 311 AI In the following example, we will use ``nsmallest`` to select the three rows having the smallest values in column "a". 
>>> df.nsmallest(3, 'population') population GDP alpha-2 Nauru 11300 182 NR Tuvalu 11300 38 TV Anguilla 11300 311 AI When using ``keep='last'``, ties are resolved in reverse order: >>> df.nsmallest(3, 'population', keep='last') population GDP alpha-2 Anguilla 11300 311 AI Tuvalu 11300 38 TV Nauru 11300 182 NR When using ``keep='all'``, all duplicate items are maintained: >>> df.nsmallest(3, 'population', keep='all') population GDP alpha-2 Nauru 11300 182 NR Tuvalu 11300 38 TV Anguilla 11300 311 AI To order by the smallest values in column "population" and then "GDP", we can specify multiple columns like in the next example. >>> df.nsmallest(3, ['population', 'GDP']) population GDP alpha-2 Tuvalu 11300 38 TV Nauru 11300 182 NR Anguilla 11300 311 AI
def _fusion_range_to_dsl(tokens) -> FusionRangeBase:
    """Convert a PyParsing data dictionary into a PyBEL fusion range.

    :type tokens: ParseResult
    """
    if FUSION_MISSING not in tokens:
        return fusion_range(
            reference=tokens[FUSION_REFERENCE],
            start=tokens[FUSION_START],
            stop=tokens[FUSION_STOP],
        )
    # No explicit range information present in the parse result.
    return missing_fusion_range()
Convert a PyParsing data dictionary into a PyBEL fusion range. :type tokens: ParseResult
def element_data_from_sym(sym):
    """Obtain elemental data given an elemental symbol.

    The given symbol is not case sensitive.

    :raises KeyError: if the symbol is not found
    """
    try:
        return _element_sym_map[sym.lower()]
    except KeyError:
        raise KeyError('No element data for symbol \'{}\''.format(sym))
Obtain elemental data given an elemental symbol The given symbol is not case sensitive An exception is thrown if the symbol is not found
def can_map_ipa_string(self, ipa_string):
    """Return ``True`` if the mapper can map all the IPA characters in the given IPA string.

    :param IPAString ipa_string: the IPAString to be parsed
    :rtype: bool
    """
    canonical = [(c.canonical_representation, ) for c in ipa_string]
    split = split_using_dictionary(canonical, self, self.max_key_length, single_char_parsing=False)
    # Mappable iff every split group has a known mapping.
    return all(sub in self.ipa_canonical_representation_to_mapped_str for sub in split)
Return ``True`` if the mapper can map all the IPA characters in the given IPA string. :param IPAString ipa_string: the IPAString to be parsed :rtype: bool
def gen_bisz(src, dst):
    """Return a BISZ instruction.

    :param src: source operand to test
    :param dst: destination operand receiving the result
    """
    # The second source slot is unused for BISZ, hence the empty operand.
    return ReilBuilder.build(ReilMnemonic.BISZ, src, ReilEmptyOperand(), dst)
Return a BISZ instruction.
def create_env(self, interpreter, is_current, options):
    """Create the virtualenv and return its info.

    :param interpreter: interpreter to base the env on (used only for the
        external-virtualenv code path)
    :param is_current: if True, create with pyvenv; otherwise shell out to
        the external virtualenv tool
    :param options: dict providing 'pyvenv_options' / 'virtualenv_options'
    :return: tuple of (env_path, env_bin_path, pip_installed)
    """
    if is_current:
        pyvenv_options = options['pyvenv_options']
        if "--system-site-packages" in pyvenv_options:
            self.system_site_packages = True
        logger.debug("Creating virtualenv with pyvenv. options=%s", pyvenv_options)
        self.create(self.env_path)
    else:
        virtualenv_options = options['virtualenv_options']
        logger.debug("Creating virtualenv with virtualenv")
        self.create_with_virtualenv(interpreter, virtualenv_options)
    logger.debug("env_bin_path: %s", self.env_bin_path)
    # pip may be absent from the created env; check both the POSIX and
    # Windows executable names before assuming it is installed.
    pip_bin = os.path.join(self.env_bin_path, "pip")
    pip_exe = os.path.join(self.env_bin_path, "pip.exe")
    if not (os.path.exists(pip_bin) or os.path.exists(pip_exe)):
        logger.debug("pip isn't installed in the venv, setting pip_installed=False")
        self.pip_installed = False
    return self.env_path, self.env_bin_path, self.pip_installed
Create the virtualenv and return its info.
def storepage(self, page):
    """Store a page object, updating the page if it already exists.

    :param page: mapping describing the page; its 'title' key is used only
        for error reporting
    :return: the stored page, or None if the page could not be stored
    """
    try:
        stored = self._api_entrypoint.storePage(self._session_token, page)
    except XMLRPCError as e:
        log.error('Failed to store page %s: %s' % (page.get('title', '[unknown title]'), e))
        return None
    return stored
Stores a page object, updating the page if it already exists. returns the stored page, or None if the page could not be stored.
def parse_response(self, response, header=None):
    """Parse a response message, validating and stripping its header.

    :param response: raw response bytes
    :param header: expected header name, or None for a headerless response
    :raises IEC60488.ParsingError: if the response does not start with the
        expected header
    :return: list of data fields split on the data separator
    """
    decoded = response.decode(self.encoding)
    if header:
        expected = "".join((self.resp_prefix, header, self.resp_header_sep))
        if not decoded.startswith(expected):
            raise IEC60488.ParsingError('Response header mismatch')
        decoded = decoded[len(expected):]
    return decoded.split(self.resp_data_sep)
Parses the response message. The following graph shows the structure of response messages. :: +----------+ +--+ data sep +<-+ | +----------+ | | | +--------+ +------------+ | +------+ | +-->| header +------->+ header sep +---+--->+ data +----+----+ | +--------+ +------------+ +------+ | | | --+ +----------+ +--> | +--+ data sep +<-+ | | | +----------+ | | | | | | | | +------+ | | +--------------------------------------+--->+ data +----+----+ +------+
def verify_fun(lazy_obj, fun):
    """Check that the function passed really exists.

    :param lazy_obj: lazy loader in which to look the function up
    :param fun: dotted function name, e.g. ``manage.up``
    :raises SaltInvocationError: if no function name was given
    :raises CommandExecutionError: if the function is not available
    """
    if not fun:
        raise salt.exceptions.SaltInvocationError(
            'Must specify a function to run!\n'
            'ex: manage.up'
        )
    if fun in lazy_obj:
        return
    raise salt.exceptions.CommandExecutionError(lazy_obj.missing_fun_string(fun))
Check that the function passed really exists
def _getMethodNamePrefix(self, node): targetName = node.name for sibling in node.parent.nodes_of_class(type(node)): if sibling is node: continue prefix = self._getCommonStart(targetName, sibling.name) if not prefix.rstrip('_'): continue return prefix return ''
Return the prefix of this method based on sibling methods. @param node: the current node
def getTmpFilename(self, tmp_dir=None, prefix='tmp', suffix='.txt', include_class_id=False, result_constructor=FilePath):
    """Return a random temp file path inside ``tmp_dir`` (default self.TmpDir).

    The name is prefix + optional class id + random characters + suffix;
    the directory is created if it does not already exist.
    """
    if not tmp_dir:
        tmp_dir = self.TmpDir
    elif not tmp_dir.endswith("/"):
        # Ensure a trailing separator so the pieces concatenate into a path.
        tmp_dir += "/"
    if include_class_id:
        # Embed the class name (the text between the last '.' and the first
        # ' ' of str(instance)) into the prefix to trace temp-file leaks.
        class_id = str(self.__class__())
        prefix = ''.join([prefix, class_id[class_id.rindex('.') + 1: class_id.index(' ')]])
    try:
        mkdir(tmp_dir)
    except OSError:
        # Directory already exists -- that is fine.
        pass
    # Build the result from constructor pieces; FilePath objects must be
    # joined with the '+' operator.
    return result_constructor(tmp_dir) + result_constructor(prefix) + \
        result_constructor(''.join([choice(_all_chars) for i in range(self.TmpNameLen)])) +\
        result_constructor(suffix)
Return a temp filename tmp_dir: directory where temporary files will be stored prefix: text to append to start of file name suffix: text to append to end of file name include_class_id: if True, will append a class identifier (built from the class name) to the filename following prefix. This is False by default b/c there is some string processing overhead in getting the class name. This will probably be most useful for testing: if temp files are being left behind by tests, you can turn this on in here (temporarily) to find out which tests are leaving the temp files. result_constructor: the constructor used to build the result (default: cogent.app.parameters.FilePath). Note that joining FilePath objects with one another or with strings, you must use the + operator. If this causes trouble, you can pass str as the the result_constructor.
def _find_address_range(addresses): first = last = addresses[0] last_index = 0 for ip in addresses[1:]: if ip._ip == last._ip + 1: last = ip last_index += 1 else: break return (first, last, last_index)
Find a sequence of addresses. Args: addresses: a list of IPv4 or IPv6 addresses. Returns: A tuple containing the first and last IP addresses in the sequence, and the index of the last IP address in the sequence.
def annotation(args):
    """
    %prog annotation blastfile > annotations

    Create simple two column files from the first two columns in blastfile.
    Use --queryids and --subjectids to switch IDs or descriptions.
    """
    from jcvi.formats.base import DictFile

    # The docstring doubles as the CLI usage/help text.
    p = OptionParser(annotation.__doc__)
    p.add_option("--queryids", help="Query IDS file to switch [default: %default]")
    p.add_option("--subjectids", help="Subject IDS file to switch [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    blastfile, = args

    # Optional tab-delimited mapping files: original ID -> replacement.
    d = "\t"
    qids = DictFile(opts.queryids, delimiter=d) if opts.queryids else None
    sids = DictFile(opts.subjectids, delimiter=d) if opts.subjectids else None
    blast = Blast(blastfile)
    for b in blast:
        query, subject = b.query, b.subject
        if qids:
            query = qids[query]
        if sids:
            subject = sids[subject]
        print("\t".join((query, subject)))
%prog annotation blastfile > annotations Create simple two column files from the first two columns in blastfile. Use --queryids and --subjectids to switch IDs or descriptions.
def maximum_address(self):
    """The maximum address of the data, or ``None`` if the file is empty."""
    top = self._segments.maximum_address
    if top is None:
        return None
    # Convert from a byte address to a word address.
    return top // self.word_size_bytes
The maximum address of the data, or ``None`` if the file is empty.
def set_setting(key, value, qsettings=None):
    """Set value to QSettings based on key in InaSAFE scope.

    :param key: Unique key for setting.
    :type key: basestring

    :param value: Value to be saved.
    :type value: QVariant

    :param qsettings: A custom QSettings to use. If it's not defined, it
        will use the default one.
    :type qsettings: qgis.PyQt.QtCore.QSettings
    """
    # Namespace the key under the application scope before delegating.
    scoped_key = '%s/%s' % (APPLICATION_NAME, key)
    set_general_setting(scoped_key, value, qsettings)
Set value to QSettings based on key in InaSAFE scope. :param key: Unique key for setting. :type key: basestring :param value: Value to be saved. :type value: QVariant :param qsettings: A custom QSettings to use. If it's not defined, it will use the default one. :type qsettings: qgis.PyQt.QtCore.QSettings
def count_dimensions(entry):
    """Count the dimensions in a nested list of dimension assignments.

    Only string entries are considered; each contributes the number of its
    comma-separated pieces (after stripping leading/trailing commas), with
    empty strings contributing zero.
    """
    total = 0
    for element in entry:
        if not isinstance(element, str):
            continue
        pieces = element.strip(",").split(",")
        # A lone empty piece means the string held no dimensions at all.
        if pieces != [""]:
            total += len(pieces)
    return total
Counts the number of dimensions from a nested list of dimension assignments that may include function calls.
def sponsor_tagged_image(sponsor, tag):
    """Return the URL of the sponsor's first file tagged with ``tag``.

    :param sponsor: sponsor whose related ``files`` are searched
    :param tag: value matched against the files' ``tag_name``
    :return: the tagged image's URL, or '' when no matching file exists
    """
    # Evaluate the queryset once instead of the previous exists() + first()
    # pair, which issued two database queries for the same filter.
    tagged = sponsor.files.filter(tag_name=tag).first()
    if tagged is not None:
        return tagged.tagged_file.item.url
    return ''
returns the corresponding url from the tagged image list.
def add_callback(self, name, callback, echo_old=False, priority=0):
    """Add a callback that gets triggered when a callback property of the class changes.

    Parameters
    ----------
    name : str
        The name of the callback property to attach the callback to.
    callback : func
        The callback function to add
    echo_old : bool, optional
        If `True`, the callback function will be invoked with both the old
        and new values of the property, as ``callback(old, new)``. If
        `False` (the default), will be invoked as ``callback(new)``
    priority : int, optional
        This can optionally be used to force a certain order of execution
        of callbacks (larger values indicate a higher priority).

    Raises
    ------
    TypeError
        If ``name`` is not a callback property on this class.
    """
    if not self.is_callback_property(name):
        raise TypeError("attribute '{0}' is not a callback property".format(name))
    # The property descriptor lives on the class, not the instance.
    prop = getattr(type(self), name)
    prop.add_callback(self, callback, echo_old=echo_old, priority=priority)
Add a callback that gets triggered when a callback property of the class changes. Parameters ---------- name : str The name of the callback property to attach the callback to. callback : func The callback function to add echo_old : bool, optional If `True`, the callback function will be invoked with both the old and new values of the property, as ``callback(old, new)``. If `False` (the default), will be invoked as ``callback(new)`` priority : int, optional This can optionally be used to force a certain order of execution of callbacks (larger values indicate a higher priority).
def get_tags(self, rev=None):
    """Return the tags pointing at the given revision (default HEAD) as a set."""
    output = self._invoke('tag', '--points-at', rev or 'HEAD')
    return set(output.splitlines())
Return the tags for the current revision as a set
def _get_script_args(cls, type_, name, header, script_text):
    """For Windows, add a .py extension and an .exe launcher.

    Yields tuples for the script writer: the -script.py[w] source (with
    blockers), the binary .exe launcher and, on 32-bit builds, a launcher
    manifest.
    """
    if type_ == 'gui':
        launcher_type = 'gui'
        ext = '-script.pyw'
        old = ['.pyw']
    else:
        launcher_type = 'cli'
        ext = '-script.py'
        old = ['.py', '.pyc', '.pyo']
    hdr = cls._adjust_header(type_, header)
    # Stale files with the plain extensions would shadow the new launcher.
    blockers = [name + x for x in old]
    yield (name + ext, hdr + script_text, 't', blockers)
    yield (
        name + '.exe', get_win_launcher(launcher_type),
        'b'  # binary write mode for the launcher executable
    )
    if not is_64bit():
        # NOTE(review): 32-bit launchers appear to require a manifest file
        # alongside the .exe -- confirm the exact rationale upstream.
        m_name = name + '.exe.manifest'
        yield (m_name, load_launcher_manifest(name), 't')
For Windows, add a .py extension and an .exe launcher
def get_payment_request(self, cart, request):
    """Charge the cart and return a JavaScript redirect snippet for the page.

    :raises ValidationError: wrapping any lookup or Stripe error so the
        caller can surface it to the client.
    """
    try:
        self.charge(cart, request)
        # On success, send the browser to the freshly created order's page.
        thank_you_url = OrderModel.objects.get_latest_url()
        js_expression = 'window.location.href="{}";'.format(thank_you_url)
        return js_expression
    except (KeyError, stripe.error.StripeError) as err:
        raise ValidationError(err)
From the given request, add a snippet to the page.
def hydra_parser(in_file, options=None):
    """Parse a hydra input file, yielding one BedPe namedtuple per line.

    :param in_file: path to the tab-delimited hydra file
    :param options: optional dict; 'min_support' (default 0) filters out
        low-support records
    """
    if options is None:
        options = {}
    BedPe = namedtuple('BedPe', ["chrom1", "start1", "end1", "chrom2", "start2", "end2", "name", "strand1", "strand2", "support"])
    min_support = options.get("min_support", 0)
    with open(in_file) as in_handle:
        for fields in csv.reader(in_handle, dialect="excel-tab"):
            record = BedPe(fields[0], int(fields[1]), int(fields[2]),
                           fields[3], int(fields[4]), int(fields[5]),
                           fields[6], fields[8], fields[9],
                           float(fields[18]))
            if record.support >= min_support:
                yield record
Parse hydra input file into namedtuple of values.
def objname(self, obj=None):
    """Format an object's name as a quoted, blue-colorized string."""
    target = obj or self.obj
    pretty = self.pretty_objname(target, color=None)
    return "'{}'".format(colorize(pretty, "blue"))
Formats object names in a pretty fashion
def _energy_evaluation(self, operator):
    """Evaluate the energy of the current input circuit with respect to the given operator.

    :param operator: Hamiltonian of the system
    :return: (mean, std) energy of the Hamiltonian, as real numbers
    """
    # Prefer an explicitly supplied quantum state over the optimized circuit.
    if self._quantum_state is not None:
        input_circuit = self._quantum_state
    else:
        input_circuit = [self.opt_circuit]
    # An operator with no Pauli terms contributes zero energy.
    if operator._paulis:
        mean_energy, std_energy = operator.evaluate_with_result(self._operator_mode, input_circuit, self._quantum_instance.backend, self.ret)
    else:
        mean_energy = 0.0
        std_energy = 0.0
    operator.disable_summarize_circuits()
    logger.debug('Energy evaluation {} returned {}'.format(self._eval_count, np.real(mean_energy)))
    return np.real(mean_energy), np.real(std_energy)
Evaluate the energy of the current input circuit with respect to the given operator. :param operator: Hamiltonian of the system :return: Energy of the Hamiltonian
def get_expr_summ_id(self, experiment_id, time_slide_id, veto_def_name, datatype, sim_proc_id = None):
    """Return the experiment_summ_id of the matching row, or None.

    A row matches when its experiment_id, time_slide_id, veto_def_name,
    datatype and sim_proc_id all equal the given values (sim_proc_id
    defaults to None for non-injection runs).
    """
    wanted = (experiment_id, time_slide_id, veto_def_name, datatype, sim_proc_id)
    for row in self:
        key = (row.experiment_id, row.time_slide_id, row.veto_def_name,
               row.datatype, row.sim_proc_id)
        if key == wanted:
            return row.experiment_summ_id
    return None
Return the expr_summ_id for the row in the table whose experiment_id, time_slide_id, veto_def_name, and datatype match the given. If sim_proc_id, will retrieve the injection run matching that sim_proc_id. If a matching row is not found, returns None.
def figs(self):
    """A mapping from figures to data objects with the plotter in this figure."""
    # Default value for a new key is an empty sub-container of the same
    # type as self (self[1:0] is an empty slice).
    ret = utils.DefaultOrderedDict(lambda: self[1:0])
    for arr in self:
        # Only arrays that currently have a plotter belong to a figure.
        if arr.psy.plotter is not None:
            ret[arr.psy.plotter.ax.get_figure()].append(arr)
    # Freeze into a plain OrderedDict so missing keys no longer auto-create.
    return OrderedDict(ret)
A mapping from figures to data objects with the plotter in this figure
def create_response(version, status, headers):
    """Create an HTTP response header block as bytes.

    :param version: HTTP version string, e.g. '1.1'
    :param status: status line text, e.g. '200 OK'
    :param headers: iterable of (name, value) header pairs
    """
    pieces = ['HTTP/{} {}\r\n'.format(version, status)]
    for name, value in headers:
        pieces.append(name)
        pieces.append(': ')
        pieces.append(value)
        pieces.append('\r\n')
    # Blank line terminates the header block.
    pieces.append('\r\n')
    return s2b(''.join(pieces))
Create an HTTP response header.
def create_channels(chan_name=None, n_chan=None):
    """Create instance of Channels with random xyz coordinates.

    Parameters
    ----------
    chan_name : list of str
        names of the channels
    n_chan : int
        if chan_name is not specified, this defines the number of channels

    Returns
    -------
    instance of Channels
        where the location of the channels is random

    Raises
    ------
    TypeError
        if neither chan_name nor n_chan is given
    """
    if chan_name is not None:
        n_chan = len(chan_name)
    elif n_chan is not None:
        chan_name = _make_chan_name(n_chan)
    else:
        raise TypeError('You need to specify either the channel names (chan_name) or the number of channels (n_chan)')
    # NOTE(review): `random` / `round` look like NumPy's here (randn,
    # decimals= keyword) -- confirm the module-level imports.
    xyz = round(random.randn(n_chan, 3) * 10, decimals=2)
    return Channels(chan_name, xyz)
Create instance of Channels with random xyz coordinates Parameters ---------- chan_name : list of str names of the channels n_chan : int if chan_name is not specified, this defines the number of channels Returns ------- instance of Channels where the location of the channels is random
def _array2cstr(arr): out = StringIO() np.save(out, arr) return b64encode(out.getvalue())
Serializes a numpy array to a base64 string (uncompressed .npy format)