Unnamed: 0
int64
0
389k
code
stringlengths
26
79.6k
docstring
stringlengths
1
46.9k
363,300
def getchild(self, name, parent): if name.startswith(): return parent.get_attribute(name[1:]) else: return parent.get_child(name)
get a child by name
363,301
def tgread_date(self): value = self.read_int() if value == 0: return None else: return datetime.fromtimestamp(value, tz=timezone.utc)
Reads and converts Unix time (used by Telegram) into a Python datetime object.
363,302
def gev_expval(xi, mu=0, sigma=1): return mu - (sigma / xi) + (sigma / xi) * flib.gamfun(1 - xi)
Expected value of generalized extreme value distribution.
363,303
def transformer_from_metric(metric, tol=None): if not np.allclose(metric, metric.T): raise ValueError("The input metric should be symmetric.") if np.array_equal(metric, np.diag(np.diag(metric))): _check_sdp_from_eigen(np.diag(metric), tol) return np.diag(np.sqrt(np.maximum(0, np.diag(metric)...
Returns the transformation matrix from the Mahalanobis matrix. Returns the transformation matrix from the Mahalanobis matrix, i.e. the matrix L such that metric=L.T.dot(L). Parameters ---------- metric : symmetric `np.ndarray`, shape=(d x d) The input metric, from which we want to extract a tran...
363,304
def add_arguments(self, parser): parser.add_argument( , , default=0, type=int, help=u"The Submission.id at which to begin updating rows. 0 by default." ) parser.add_argument( , , default=1000, type=int, ...
Add arguments to the command parser. Uses argparse syntax. See documentation at https://docs.python.org/3/library/argparse.html.
363,305
def make_router(*routings): routes = [] for routing in routings: methods, regex, app = routing[:3] if isinstance(methods, basestring): methods = (methods,) vars = routing[3] if len(routing) >= 4 else {} routes.append((methods, re.compile(unicode(regex)), app, var...
Return a WSGI application that dispatches requests to controllers
363,306
def get_grade_entries_for_gradebook_column_on_date(self, gradebook_column_id, from_, to): grade_entry_list = [] for grade_entry in self.get_grade_entries_for_gradebook_column(gradebook_column_id): if overlap(from_, to, grade_entry.start_date, grade_entry.end_date):...
Gets a ``GradeEntryList`` for the given gradebook column and effective during the entire given date range inclusive but not confined to the date range. arg: gradebook_column_id (osid.id.Id): a gradebook column ``Id`` arg: from (osid.calendaring.DateTime): start of date range ...
363,307
def substr(requestContext, seriesList, start=0, stop=0): for series in seriesList: left = series.name.rfind() + 1 right = series.name.find() if right < 0: right = len(series.name)+1 cleanName = series.name[left:right:].split() if int(stop) == 0: s...
Takes one metric or a wildcard seriesList followed by 1 or 2 integers. Assume that the metric name is a list or array, with each element separated by dots. Prints n - length elements of the array (if only one integer n is passed) or n - m elements of the array (if two integers n and m are passed). The l...
363,308
def raw_data(self, event): self.log(, lvl=events) if not parse: return nmea_time = event.data[0] try: parsed_data = parse(event.data[1]) except Exception as e: self.log(, event.data[1], e, type(e), exc=True, lvl=...
Handles incoming raw sensor data :param event: Raw sentences incoming data
363,309
def _prepare_request(self, url, method, headers, data): request = httpclient.HTTPRequest( url=url, method=method, headers=headers, body=data, connect_timeout=self._connect_timeout, request_timeout=self._request_timeout, auth_username=self._username, auth...
Prepare HTTP request. :param str url: request URL. :param str method: request method. :param dict headers: request headers. :param object data: JSON-encodable object. :rtype: httpclient.HTTPRequest
363,310
def create(cls, pid_type=None, pid_value=None, object_type=None, object_uuid=None, status=None, **kwargs): assert pid_value assert pid_type or cls.pid_type pid = PersistentIdentifier.create( pid_type or cls.pid_type, pid_value, pid_pro...
Create a new instance for the given type and pid. :param pid_type: Persistent identifier type. (Default: None). :param pid_value: Persistent identifier value. (Default: None). :param status: Current PID status. (Default: :attr:`invenio_pidstore.models.PIDStatus.NEW`) :param ...
363,311
def _read_country_names(self, countries_file=None): if not countries_file: countries_file = os.path.join(os.path.dirname(__file__), self.COUNTRY_FILE_DEFAULT) with open(countries_file) as f: countries_json = f.read() return json.loads(countries_json)
Read list of countries from specified country file or default file.
363,312
def _read_layers(self, layers, image_id): for layer in self.docker.history(image_id): layers.append(layer[])
Reads the JSON metadata for specified layer / image id
363,313
def get_behave_args(self, argv=sys.argv): parser = BehaveArgsHelper().create_parser(, ) args, unknown = parser.parse_known_args(argv[2:]) behave_args = [] for option in unknown: if option.startswith(): option = option.replace(, , 1) ...
Get a list of those command line arguments specified with the management command that are meant as arguments for running behave.
363,314
def _config_params(base_config, assoc_files, region, out_file, items): params = [] dbsnp = assoc_files.get("dbsnp") if dbsnp: params += ["--dbsnp", dbsnp] cosmic = assoc_files.get("cosmic") if cosmic: params += ["--cosmic", cosmic] variant_regions = bedutils.population_varia...
Add parameters based on configuration variables, associated files and genomic regions.
363,315
def get_foreign_key_base_declaration_sql(self, foreign_key): sql = "" if foreign_key.get_name(): sql += "CONSTRAINT %s " % foreign_key.get_quoted_name(self) sql += "FOREIGN KEY (" if not foreign_key.get_local_columns(): raise DBALException() if...
Obtains DBMS specific SQL code portion needed to set the FOREIGN KEY constraint of a field declaration to be used in statements like CREATE TABLE. :param foreign_key: The foreign key :type foreign_key: ForeignKeyConstraint :rtype: str
363,316
def blksize(self): self._blksize = self.lib.iperf_get_test_blksize(self._test) return self._blksize
The test blksize.
363,317
def enable_messaging(message_viewer, sender=dispatcher.Any): LOGGER.debug( % (message_viewer, sender)) dispatcher.connect( message_viewer.dynamic_message_event, signal=DYNAMIC_MESSAGE_SIGNAL, sender=sender) dispatcher.connect( message_v...
Set up the dispatcher for messaging. :param message_viewer: A message viewer to show the message. :type message_viewer: MessageViewer :param sender: Sender of the message signal. Default to Any object. :type sender: object
363,318
def disconnect(self, callback=None, sender=None, senders=None, key=None, keys=None, weak=True): weak_callback = yield from self._get_ref(callback, weak) if (sender is None) and (senders is None) and (key is None) and (keys is None): if weak_callback in sel...
*This method is a coroutine.* Disconnects the callback from the signal. If no arguments are supplied for ``sender``, ``senders``, ``key``, or ``keys`` -- the callback is completely disconnected. Otherwise, only the supplied ``senders`` and ``keys`` are disconnected for the callback. ...
363,319
def v_unique_name_defintions(ctx, stmt): defs = [(, , stmt.i_typedefs), (, , stmt.i_groupings)] def f(s): for (keyword, errcode, dict) in defs: if s.keyword == keyword and s.arg in dict: err_add(ctx.errors, dict[s.arg].pos, errcode, (s...
Make sure that all top-level definitions in a module are unique
363,320
def _contextualise_connection(self, connection): ctx = stack.top if ctx is not None: if not hasattr(ctx, ): ctx.ldap3_manager_connections = [connection] else: ctx.ldap3_manager_connections.append(connection)
Add a connection to the appcontext so it can be freed/unbound at a later time if an exception occured and it was not freed. Args: connection (ldap3.Connection): Connection to add to the appcontext
363,321
def list_row_sparse_data(self, row_id): if self._stype != : raise RuntimeError("Cannot return copies of Parameter on all contexts via " \ "list_row_sparse_data() because its storage type is %s. Please " \ "use data() instead." %...
Returns copies of the 'row_sparse' parameter on all contexts, in the same order as creation. The copy only retains rows whose ids occur in provided row ids. The parameter must have been initialized before. Parameters ---------- row_id: NDArray Row ids to retain for t...
363,322
def load_itasser_folder(self, ident, itasser_folder, organize=False, outdir=None, organize_name=None, set_as_representative=False, representative_chain=, force_rerun=False): if organize: if not outdir: outdir = self.structure_dir i...
Load the results folder from an I-TASSER run (local, not from the website) and copy relevant files over to the protein structures directory. Args: ident (str): I-TASSER ID itasser_folder (str): Path to results folder organize (bool): If select files from modeling sho...
363,323
def parse_doc_str(text=None, is_untabbed=True, is_stripped=True, tab=None, split_character="::"): text = text or function_doc(2) text = text.split(split_character, 1)[-1] text = text.split()[0].split()[0] tab = is_untabbed and \ (tab or text[:-1 * len(text.lstrip())].spl...
Returns a str of the parsed doc for example the following would return 'a:A\nb:B' :: a:A b:B :param text: str of the text to parse, by default uses calling function doc :param is_untabbed: bool if True will untab the text :param is_stripped: ...
363,324
def _autorestart_components(self, bundle): with self.__instances_lock: instances = self.__auto_restart.get(bundle) if not instances: return for factory, name, properties in instances: try: ...
Restart the components of the given bundle :param bundle: A Bundle object
363,325
def _parse_current_member(self, previous_rank, values): rank, name, vocation, level, joined, status = values rank = previous_rank[1] if rank == " " else rank title = None previous_rank[1] = rank m = title_regex.match(name) if m: name = m.group(1) ...
Parses the column texts of a member row into a member dictionary. Parameters ---------- previous_rank: :class:`dict`[int, str] The last rank present in the rows. values: tuple[:class:`str`] A list of row contents.
363,326
def snapshot(self): return dict((n, self._slots[n].get()) for n in self._slots.keys())
Return a dictionary of current slots by reference.
363,327
def get_historical_data(self, uuid, start, end, average_by=0): return self.parse_data((yield from self._get( HISTORICAL_DATA_URL.format(uuid= uuid, start = trunc(start.replace(tzinfo=timezone.utc).timestamp()), end = trunc(end.replace(tzinfo=timezone.utc).tim...
Get the data from one device for a specified time range. .. note:: Can fetch a maximum of 42 days of data. To speed up query processing, you can use a combination of average factor multiple of 1H in seconds (e.g. 3600), and o'clock start and end times :param uuid: I...
363,328
def calc_glmelt_in_v1(self): con = self.parameters.control.fastaccess der = self.parameters.derived.fastaccess flu = self.sequences.fluxes.fastaccess sta = self.sequences.states.fastaccess for k in range(con.nmbzones): if ((con.zonetype[k] == GLACIER) and (sta.sp[k] <= 0...
Calculate melting from glaciers which are actually not covered by a snow layer and add it to the water release of the snow module. Required control parameters: |NmbZones| |ZoneType| |GMelt| Required state sequence: |SP| Required flux sequence: |TC| Calculated fluxes...
363,329
def split_in_blocks(sequence, hint, weight=lambda item: 1, key=nokey): if isinstance(sequence, int): return split_in_slices(sequence, hint) elif hint in (0, 1) and key is nokey: return [sequence] elif hint in (0, 1): blocks = [] for k, group in groupby(sequence, key)...
Split the `sequence` in a number of WeightedSequences close to `hint`. :param sequence: a finite sequence of items :param hint: an integer suggesting the number of subsequences to generate :param weight: a function returning the weigth of a given item :param key: a function returning the key of a given...
363,330
def check_time(value): try: h, m = value.split() h = int(h) m = int(m) if h >= 24 or h < 0: raise ValueError if m >= 60 or m < 0: raise ValueError except ValueError: raise TimeDefinitionError("Invalid definition of time %r" % value)
check that it's a value like 03:45 or 1:1
363,331
def _post(self, url_suffix, data, content_type=ContentType.json): (url, data_str, headers) = self._url_parts(url_suffix, data, content_type=content_type) resp = self.http.post(url, data_str, headers=headers) return self._check_err(resp, url_suffix, data, allow_pagination=False)
Send POST request to API at url_suffix with post_data. Raises error if x-total-pages is contained in the response. :param url_suffix: str URL path we are sending a POST to :param data: object data we are sending :param content_type: str from ContentType that determines how we format the ...
363,332
def keystroke_toggled(self, settings, key, user_data): for i in self.guake.notebook_manager.iter_terminals(): i.set_scroll_on_keystroke(settings.get_boolean(key))
If the gconf var scroll_keystroke be changed, this method will be called and will set the scroll_on_keystroke in all terminals open.
363,333
def mol(self): if self._mol is None: apiurl = % (self.csid,TOKEN) response = urlopen(apiurl) tree = ET.parse(response) self._mol = tree.getroot().text return self._mol
Return record in MOL format
363,334
def resample(self, rate): rate1 = self.sample_rate.value if isinstance(rate, units.Quantity): rate2 = rate.value else: rate2 = float(rate) if (rate2 / rate1).is_integer(): raise NotImplementedError("StateVector upsampling has not " ...
Resample this `StateVector` to a new rate Because of the nature of a state-vector, downsampling is done by taking the logical 'and' of all original samples in each new sampling interval, while upsampling is achieved by repeating samples. Parameters ---------- ra...
363,335
def ExpandArg(self, **_): if len(self.stack) <= 1: raise lexer.ParseError( "Unbalanced parenthesis: Can not expand " % self.processed_buffer) parameter_name = self.stack.pop(-1) if "." not in parameter_name: parameter_name = "%s.%s" % (self.default_section, paramet...
Expand the args as a section.parameter from the config.
363,336
def popular(category=None, sortOption = "title"): s = Search() s.popular(category, sortOption) return s
Return a search result containing torrents appearing on the KAT home page. Can be categorized. Cannot be sorted or contain multiple pages
363,337
def subscribe(self, topic, callback, qos): if topic in self.topics: return def _message_callback(mqttc, userdata, msg): callback(msg.topic, msg.payload.decode(), msg.qos) self._mqttc.subscribe(topic, qos) self._mqttc.message_callback_add(to...
Subscribe to an MQTT topic.
363,338
def maskedConvolve(arr, kernel, mask, mode=): arr2 = extendArrayForConvolution(arr, kernel.shape, modex=mode, modey=mode) print(arr2.shape) out = np.zeros_like(arr) return _calc(arr2, kernel, mask, out)
same as scipy.ndimage.convolve but is only executed on mask==True ... which should speed up everything
363,339
def remove_none_value(data): return dict((k, v) for k, v in data.items() if v is not None)
remove item from dict if value is None. return new dict.
363,340
def get_id(page): start_pos = page.find("<id>") end_pos = page.find("</id>") assert start_pos != -1 assert end_pos != -1 start_pos += len("<id>") return int(page[start_pos:end_pos])
Extract the id from a page. Args: page: a string Returns: an integer
363,341
def scale_transform(t, scale): if isinstance(scale, float) or isinstance(scale, int): scale = [scale, scale, scale] if _is_indexable(t[0]): return [[t[0][0] * scale[0], t[0][1] * scale[1], t[0][2] * scale[2]], t[1]] else: return [t[0] * scale[0], t[1] * scale[1]...
Apply a scale to a transform :param t: The transform [[x, y, z], [x, y, z, w]] to be scaled OR the position [x, y, z] to be scaled :param scale: the scale of type float OR the scale [scale_x, scale_y, scale_z] :return: The scaled
363,342
def delete(self, table): self._table = table self._limit = None self._query = "DELETE FROM {0}".format(self._table) return self
Deletes record in table >>> yql.delete('yql.storage').where(['name','=','store://YEl70PraLLMSMuYAauqNc7'])
363,343
def document_iter(self, context): for _event, elem in context: if not self.debug: yield URMLDocumentGraph(elem, tokenize=self.tokenize, precedence=self.precedence) else: yield elem e...
Iterates over all the elements in an iterparse context (here: <document> elements) and yields an URMLDocumentGraph instance for each of them. For efficiency, the elements are removed from the DOM / main memory after processing them. If ``self.debug`` is set to ``True`` (in the ``__init_...
363,344
def create_from_yamlfile(cls, yamlfile): data = load_yaml(yamlfile) nebins = len(data) emin = np.array([data[i][] for i in range(nebins)]) emax = np.array([data[i][] for i in range(nebins)]) ref_flux = np.array([data[i][][1] for i in range(nebins)]) ref_eflux = n...
Create a Castro data object from a yaml file contains the likelihood data.
363,345
def daily(target_coll, source_coll, interp_days=32, interp_method=): if interp_method.lower() == : def _linear(image): target_image = ee.Image(image).select(0).double() target_date = ee.Date(image.get()) ...
Generate daily ETa collection from ETo and ETf collections Parameters ---------- target_coll : ee.ImageCollection Source images will be interpolated to each target image time_start. Target images should have a daily time step. This will typically be the reference ET (ETr) collectio...
363,346
def array2tabledef(data, table_type=, write_bitcols=False): is_ascii = (table_type == ) if data.dtype.fields is None: raise ValueError("data must have fields") names = [] names_nocase = {} formats = [] dims = [] descr = data.dtype.descr for d in descr: npy...
Similar to descr2tabledef but if there are object columns a type and max length will be extracted and used for the tabledef
363,347
def get_breadcrumbs(url, request=None): from wave.reverse import preserve_builtin_query_params from wave.settings import api_settings from wave.views import APIView view_name_func = api_settings.VIEW_NAME_FUNCTION def breadcrumbs_recursive(url, breadcrumbs_list, prefix, seen): ...
Given a url returns a list of breadcrumbs, which are each a tuple of (name, url).
363,348
def kill(self, sig): if sys.platform == : if sig in [signal.SIGINT, signal.CTRL_C_EVENT]: sig = signal.CTRL_C_EVENT elif sig in [signal.SIGBREAK, signal.CTRL_BREAK_EVENT]: sig = signal.CTRL_BREAK_EVENT else: sig = signa...
Sends a Unix signal to the subprocess. Use constants from the :mod:`signal` module to specify which signal.
363,349
def angle2vecs(vec1, vec2): dot = np.dot(vec1, vec2) vec1_modulus = np.sqrt(np.multiply(vec1, vec1).sum()) vec2_modulus = np.sqrt(np.multiply(vec2, vec2).sum()) if (vec1_modulus * vec2_modulus) == 0: cos_angle = 1 else: cos_angle = dot / (vec1_modulus * vec2_modulus) return mat...
angle between two vectors
363,350
def getUniqueFilename(dir=None, base=None): while True: fn = str(random.randint(0, 100000)) + ".tmp" if not os.path.exists(fn): break return fn
DESCRP: Generate a filename in the directory <dir> which is unique (i.e. not in use at the moment) PARAMS: dir -- the directory to look in. If None, use CWD base -- use this as the base name for the filename RETURN: string -- the filename generated
363,351
def to_buffer(f): @functools.wraps(f) def wrap(*args, **kwargs): iterator = kwargs.get(, args[0]) if not isinstance(iterator, Buffer): iterator = Buffer(iterator) return f(iterator, *args[1:], **kwargs) return wrap
Decorator converting all strings and iterators/iterables into Buffers.
363,352
def makedirs(directory): parent = os.path.dirname(os.path.abspath(directory)) if not os.path.exists(parent): makedirs(parent) os.mkdir(directory)
Resursively create a named directory.
363,353
def debug(msg: str, resource: Optional[] = None, stream_id: Optional[int] = None) -> None: engine = get_engine() if engine is not None: _log(engine, engine_pb2.DEBUG, msg, resource, stream_id) else: print("debug: " + msg, file=sys.stderr)
Logs a message to the Pulumi CLI's debug channel, associating it with a resource and stream_id if provided. :param str msg: The message to send to the Pulumi CLI. :param Optional[Resource] resource: If provided, associate this message with the given resource in the Pulumi CLI. :param Optional[int] stre...
363,354
def plot_pseudosection_type2(self, mid, **kwargs): c = self.configs AB_ids = self._get_unique_identifiers(c[:, 0:2]) MN_ids = self._get_unique_identifiers(c[:, 2:4]) ab_sorted = np.sort(c[:, 0:2], axis=1) mn_sorted = np.sort(c[:, 2:4], axis=1) AB_coords = [ ...
Create a pseudosection plot of type 2. For a given measurement data set, create plots that graphically show the data in a 2D color plot. Hereby, x and y coordinates in the plot are determined by the current dipole (x-axis) and voltage dipole (y-axis) of the corresponding measurement con...
363,355
def StartFlowAndWait(client_id, token=None, timeout=DEFAULT_TIMEOUT, **flow_args): flow_urn = flow.StartAFF4Flow( client_id=client_id, token=token, sync=True, **flow_args) WaitForFlow(flow_urn, token=token, timeout=timeout) return flow_urn
Runs a flow and waits for it to finish. Args: client_id: The client id of the client to run on. token: The datastore access token. timeout: How long to wait for a flow to complete, maximum. **flow_args: Pass through to flow. Returns: The urn of the flow that was run.
363,356
def configure_health(graph): ns = Namespace( subject=Health, ) include_build_info = strtobool(graph.config.health_convention.include_build_info) convention = HealthConvention(graph, include_build_info) convention.configure(ns, retrieve=tuple()) return convention.health
Configure the health endpoint. :returns: a handle to the `Health` object, allowing other components to manipulate health state.
363,357
def ovsdb_server_port(self, **kwargs): config = ET.Element("config") ovsdb_server = ET.SubElement(config, "ovsdb-server", xmlns="urn:brocade.com:mgmt:brocade-tunnels") name_key = ET.SubElement(ovsdb_server, "name") name_key.text = kwargs.pop() port = ET.SubElement(ovsdb_...
Auto Generated Code
363,358
def verify_fft_options(opt,parser): if opt.fftw_measure_level not in [0,1,2,3]: parser.error("{0} is not a valid FFTW measure level.".format(opt.fftw_measure_level)) if opt.fftw_import_system_wisdom and ((opt.fftw_input_float_wisdom_file is not None) or (o...
Parses the FFT options and verifies that they are reasonable. Parameters ---------- opt : object Result of parsing the CLI with OptionParser, or any object with the required attributes. parser : object OptionParser instance.
363,359
def get_huc8(prefix): if not prefix.isdigit(): name = prefix prefix = None for row in hucs: if row.basin.lower() == name.lower(): if prefix is None or len(row.huc) < len(prefix): prefix = row.huc if prefix is...
Return all HUC8s matching the given prefix (e.g. 1801) or basin name (e.g. Klamath)
363,360
def dcdict2rdfpy(dc_dict): ark_prefix = uri = URIRef() rdf_py = ConjunctiveGraph() DC = Namespace() for element_value in dc_dict[]: if element_value[].startswith(ark_prefix): uri = URIRef( element_value[].replace( ark_prefi...
Convert a DC dictionary into an RDF Python object.
363,361
def aggregate_data(self, start, end, aggregation, keys=[], tags=[], attrs={}, rollup=None, period=None, interpolationf=None, interpolation_period=None, tz=None, limit=1000): url = vstart = check_time_param(start) vend = check_time_param(e...
Read data from multiple series according to a filter and apply a function across all the returned series to put the datapoints together into one aggregrate series. See the :meth:`list_series` method for a description of how the filter criteria are applied, and the :meth:`read_data` meth...
363,362
def create_dbinstance_read_replica(self, id, source_id, instance_class=None, port=3306, availability_zone=None, auto_minor_version_upgrade=None): p...
Create a new DBInstance Read Replica. :type id: str :param id: Unique identifier for the new instance. Must contain 1-63 alphanumeric characters. First character must be a letter. May not end with a hyphen or contain two consecutive hyphens ...
363,363
def del_role(self, role): target = AuthGroup.objects(role=role, creator=self.client).first() if target: target.delete() return True else: return False
deletes a group
363,364
def propagate_averages(self, n, tv, bv, var, outgroup=False): if n.is_terminal() and outgroup==False: if tv is None or np.isinf(tv) or np.isnan(tv): res = np.array([0, 0, 0, 0, 0, 0]) elif var==0: res = np.array([np.inf, np.inf, np.inf, np.inf, np...
This function implements the propagation of the means, variance, and covariances along a branch. It operates both towards the root and tips. Parameters ---------- n : (node) the branch connecting this node to its parent is used for propagation t...
363,365
def bits_to_bytes(bits): if len(bits) % 8 != 0: raise Exception("num bits must be multiple of 8") res = "" for x in six.moves.range(0, len(bits), 8): byte_bits = bits[x:x+8] byte_val = int(.join(map(str, byte_bits)), 2) res += chr(byte_val) return utils.binary(res...
Convert the bit list into bytes. (Assumes bits is a list whose length is a multiple of 8)
363,366
def hook_inform(self, inform_name, callback): if callback not in self._inform_hooks[inform_name]: self._inform_hooks[inform_name].append(callback)
Hookup a function to be called when an inform is received. Useful for interface-changed and sensor-status informs. Parameters ---------- inform_name : str The name of the inform. callback : function The function to be called.
363,367
def _call(self, method, **kwargs): kwargs.update(dict( partnerid=self.partner_id, partnerkey=self.partner_key, method=method, )) url = options.ENDPOINT_URL + + parse.urlencode(kwargs) try: raw_json = request.urlopen(url, timeout=...
Вызов метода API
363,368
def get_root_directory(self, timestamp=None): if timestamp is None: timestamp = self.timestamp if self.timestamp_format is not None: root_name = (time.strftime(self.timestamp_format, timestamp) + + self.batch_name) else: root_name = se...
A helper method that supplies the root directory name given a timestamp.
363,369
def default(self): import json import ambry.bundle.default_files as df import os path = os.path.join(os.path.dirname(df.__file__), ) if six.PY2: with open(path, ) as f: content_str = f.read() else: with open...
Return default contents
363,370
def getUpdatedFields(self, cascadeObjects=False): updatedFields = {} for thisField in self.FIELDS: thisVal = object.__getattribute__(self, thisField) if self._origData.get(thisField, ) != thisVal: updatedFields[thisField] = (self._origData[thisField], thisVal) if cascadeObjects is True and issubcla...
getUpdatedFields - See changed fields. @param cascadeObjects <bool> default False, if True will check if any foreign linked objects themselves have unsaved changes (recursively). Otherwise, will just check if the pk has changed. @return - a dictionary of fieldName : tuple(old, new). fieldName may be ...
363,371
def _add_stats_box(h1: Histogram1D, ax: Axes, stats: Union[str, bool] = "all"): if stats in ["all", True]: text = "Total: {0}\nMean: {1:.2f}\nStd.dev: {2:.2f}".format( h1.total, h1.mean(), h1.std()) elif stats == "total": text = "Total: {0}".format(h1.total) else: ...
Insert a small legend-like box with statistical information. Parameters ---------- stats : "all" | "total" | True What info to display Note ---- Very basic implementation.
363,372
def extract(self, node, condition, skip=0): for child in self.filter(node, condition): if not skip: return child skip -= 1 raise TypeError()
Extract a single node that matches the provided condition, otherwise a TypeError is raised. An optional skip parameter can be provided to specify how many matching nodes are to be skipped over.
363,373
def _user_mdata(mdata_list=None, mdata_get=None): grains = {} if not mdata_list: mdata_list = salt.utils.path.which() if not mdata_get: mdata_get = salt.utils.path.which() if not mdata_list or not mdata_get: return grains for mdata_grain in __salt__[](mdata_list, ign...
User Metadata
363,374
def get(self, tag, nth=1): tag = fix_tag(tag) nth = int(nth) for t, v in self.pairs: if t == tag: nth -= 1 if nth == 0: return v return None
Return n-th value for tag. :param tag: FIX field tag number. :param nth: Index of tag if repeating, first is 1. :return: None if nothing found, otherwise value matching tag. Defaults to returning the first matching value of 'tag', but if the 'nth' parameter is overridden, can g...
363,375
def retry_with_delay(f, delay=60): @wraps(f) def inner(*args, **kwargs): kwargs[] = 5 remaining = get_retries() + 1 while remaining: remaining -= 1 try: return f(*args, **kwargs) except (requests.ConnectionError, requests.Timeout):...
Retry the wrapped requests.request function in case of ConnectionError. Optionally limit the number of retries or set the delay between retries.
363,376
def institutes(): institute_objs = user_institutes(store, current_user) institutes = [] for ins_obj in institute_objs: sanger_recipients = [] for user_mail in ins_obj.get(,[]): user_obj = store.user(user_mail) if not user_obj: continue ...
Display a list of all user institutes.
363,377
def legacy(**kwargs): params = [] params += [BoolParameter(qualifier=, copy_for={: , : [, , ], : }, dataset=, value=kwargs.get(, True), description=)] params += [ChoiceParameter(copy_for = {: [], : }, component=, qualifier=, value=kwargs.get(, ), choices=[, ], description=)] params...
Compute options for using the PHOEBE 1.0 legacy backend (must be installed). Generally, this will be used as an input to the kind argument in :meth:`phoebe.frontend.bundle.Bundle.add_compute` Please see :func:`phoebe.backend.backends.legacy` for a list of sources to cite when using this backend. ...
363,378
def read(fname, merge_duplicate_shots=False, encoding=): return PocketTopoTxtParser(fname, merge_duplicate_shots, encoding).parse()
Read a PocketTopo .TXT file and produce a `TxtFile` object which represents it
363,379
def __extend_uri(prefixes, short): for prefix in prefixes: if short.startswith(prefix): return short.replace(prefix + , prefixes[prefix]) return short
Extend a prefixed uri with the help of a specific dictionary of prefixes :param prefixes: Dictionary of prefixes :param short: Prefixed uri to be extended :return:
363,380
def as_tuple(self): Row = namedtuple("Row", self.headers) for value_dp_list in self.value_dp_matrix: if typepy.is_empty_sequence(value_dp_list): continue row = Row(*[value_dp.data for value_dp in value_dp_list]) yield row
:return: Rows of the table. :rtype: list of |namedtuple| :Sample Code: .. code:: python from tabledata import TableData records = TableData( "sample", ["a", "b"], [[1, 2], [3.3, 4.4]] ...
363,381
def get_weakref(func): if func is None: raise ValueError if not hasattr(func, ): return weakref.ref(func) return WeakMethod(func)
Get a weak reference to bound or unbound `func`. If `func` is unbound (i.e. has no __self__ attr) get a weakref.ref, otherwise get a wrapper that simulates weakref.ref.
363,382
def export(self, metadata, **kwargs): kwargs.setdefault(, SafeDumper) kwargs.setdefault(, False) kwargs.setdefault(, True) metadata = yaml.dump(metadata, **kwargs).strip() return u(metadata)
Export metadata as YAML. This uses yaml.SafeDumper by default.
363,383
def start(self): global CODE url = self._get_auth_url() webbrowser.open(url) tornado.ioloop.IOLoop.current().start() self.code = CODE
Starts the `PrawOAuth2Server` server. It will open the default web browser and it will take you to Reddit's authorization page, asking you to authorize your Reddit account(or account of the bot's) with your app(or bot script). Once authorized successfully, it will show `successful` messa...
363,384
def check_message(keywords, message):
    """Inspect the exception currently being handled; if its text contains
    every word in *keywords*, set its ``message`` to the friendlier text and
    re-raise it. This allows selective control over API error messages.

    NOTE(review): reconstructed from collapsed source — the placement of the
    bare ``raise`` inside the ``if`` follows the documented "if the keywords
    are found" behavior; confirm against callers.
    """
    _, exc_value, _ = sys.exc_info()
    exc_words = set(str(exc_value).split(" "))
    if exc_words.issuperset(set(keywords)):
        exc_value.message = message
        raise
Checks an exception for given keywords and raises a new ``ActionError`` with the desired message if the keywords are found. This allows selective control over API error messages.
363,385
def init(scope):
    """Copy all function values of *scope* onto a fresh SinonGlobals instance.

    Args:
        scope: a mapping such as ``locals()`` or ``globals()``
    Return:
        the populated SinonGlobals instance (also stored in the
        module-global ``CPSCOPE``)
    """
    class SinonGlobals(object):
        pass

    global CPSCOPE
    CPSCOPE = SinonGlobals()
    # Only plain functions are copied; every other value in scope is ignored.
    for candidate in scope.values():
        if isinstance(candidate, FunctionType):
            setattr(CPSCOPE, candidate.__name__, candidate)
    return CPSCOPE
Copy all values of scope into the class SinonGlobals Args: scope (eg. locals() or globals()) Return: SinonGlobals instance
363,386
def post(cond): source = inspect.getsource(cond).strip() def inner(f): if enabled: f = getattr(f, , f) def check_condition(result): if not cond(result): raise AssertionError( % source) if not hasatt...
Add a postcondition check to the annotated method. The condition is passed the return value of the annotated method.
363,387
def initialize_page(title, style, script, header=None):
    """Return a markup.py page object initialised with the required HTML header.

    :param title: page title
    :param style: CSS stylesheet(s) for the page
    :param script: script(s) to include in the header
    :param header: optional extra header content
    """
    doc = markup.page(mode="strict_html")
    # Disable automatic escaping; callers are responsible for their own markup.
    doc._escape = False
    doc.init(title=title, css=style, script=script, header=header)
    return doc
A function that returns a markup.py page object with the required html header.
363,388
def end_position(variant_obj):
    """Calculate the (inclusive) end coordinate for a variant.

    The end spans the longer of the reference and alternative alleles, so a
    SNV ends where it starts while deletions/insertions extend accordingly.

    NOTE(review): the dict-key literals were lost in extraction;
    ``'alternative'``, ``'reference'`` and ``'position'`` follow the common
    variant-dict schema — confirm against callers.

    :param variant_obj: dict with 'alternative', 'reference' and 'position'
    :return: integer end position
    """
    alt_bases = len(variant_obj['alternative'])
    num_bases = max(len(variant_obj['reference']), alt_bases)
    return variant_obj['position'] + (num_bases - 1)
Calculate end position for a variant.
363,389
def quantile(arg, quantile, interpolation='linear'):
    """Return value at the given quantile, a la numpy.percentile.

    Parameters
    ----------
    quantile : float/int or array-like
        0 <= quantile <= 1, the quantile(s) to compute
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        Interpolation method when the quantile lies between two points.
        NOTE(review): the default literal was lost in extraction; 'linear'
        matches numpy.percentile — confirm.

    Returns
    -------
    quantile expression; a sequence of quantiles builds a MultiQuantile op,
    a scalar builds a Quantile op.
    """
    if isinstance(quantile, collections.abc.Sequence):
        op = ops.MultiQuantile(arg, quantile, interpolation)
    else:
        op = ops.Quantile(arg, quantile, interpolation)
    return op.to_expr()
Return value at the given quantile, a la numpy.percentile. Parameters ---------- quantile : float/int or array-like 0 <= quantile <= 1, the quantile(s) to compute interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation me...
363,390
def hash(pii_csv, keys, schema, clk_json, quiet, no_header, check_header, validate): schema_object = clkhash.schema.from_json_file(schema_file=schema) header = True if not check_header: header = if no_header: header = False try: clk_data = clk.generate_clk_from_csv( ...
Process data to create CLKs Given a file containing CSV data as PII_CSV, and a JSON document defining the expected schema, verify the schema, then hash the data to create CLKs writing them as JSON to CLK_JSON. Note the CSV file should contain a header row - however this row is not used by this tool...
363,391
def get_rich_menu(self, rich_menu_id, timeout=None):
    """Call get rich menu API.

    https://developers.line.me/en/docs/messaging-api/reference/#get-rich-menu

    :param str rich_menu_id: ID of the rich menu
    :param timeout: (optional) How long to wait for the server to send data
        before giving up, as a float, or a (connect timeout, read timeout)
        tuple
    :return: RichMenuResponse built from the API's JSON response

    NOTE(review): the endpoint literal was lost in extraction; the path
    below is the one documented at the URL above — confirm.
    """
    response = self._get(
        '/v2/bot/richmenu/{rich_menu_id}'.format(rich_menu_id=rich_menu_id),
        timeout=timeout,
    )
    return RichMenuResponse.new_from_json_dict(response.json)
Call get rich menu API. https://developers.line.me/en/docs/messaging-api/reference/#get-rich-menu :param str rich_menu_id: ID of the rich menu :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a (connect timeout, re...
363,392
def set_level(self, level):
    """Set :attr:`loglevel` to *level* and rebuild :attr:`levelmap`.

    @level: str of one or more single-character level codes (keys of
        :attr:`levels`); a falsy value leaves everything untouched and
        returns None.
    """
    if not level:
        return None
    # Rebuild the map as the union of the per-character level sets.
    self.levelmap = set()
    for code in level:
        self.levelmap = self.levelmap.union(self.levels[code])
    self.loglevel = level
    return self.loglevel
Set :attr:`loglevel` to *level* and rebuild :attr:`levelmap` accordingly. @level: #str containing one or more single-character level codes from :attr:`levels`
363,393
def get_range_response(self, range_start, range_end, sort_order=None, sort_target=, **kwargs): range_request = self._build_get_range_request( key=range_start, range_end=range_end, sort_order=sort_order, sort_target=sort_target, ...
Get a range of keys.
363,394
def get_tensors_by_names(names):
    """Get a list of tensors in the default graph by a list of names.

    Args:
        names (list): tensor or op names; each is normalised to its tensor
            name via ``get_op_tensor_name`` before lookup.
    Returns:
        list of tensors, in the same order as *names*.
    """
    graph = tfv1.get_default_graph()
    tensors = []
    for name in names:
        _, tensor_name = get_op_tensor_name(name)
        tensors.append(graph.get_tensor_by_name(tensor_name))
    return tensors
Get a list of tensors in the default graph by a list of names. Args: names (list): tensor or op names to look up. Returns: the corresponding tensors, in the same order.
363,395
def get_catalog(mid):
    """Return the catalog entry for the specified ID.

    *mid* should be either a UUID or a 32-digit hex number; UUID instances
    are normalised to their hex form before the lookup.
    """
    key = mid.hex if isinstance(mid, _uuid.UUID) else mid
    return _get_catalog(key)
Return catalog entry for the specified ID. `mid` should be either a UUID or a 32 digit hex number.
363,396
def get_file(self, name, save_to, add_to_cache=True, force_refresh=False, _lock_exclusive=False): uname, version = split_name(name) lock = None if self.local_store: lock = self.lock_manager.lock_for(uname) if _lock_exclusive: lo...
Retrieves file identified by ``name``. The file is saved as ``save_to``. If ``add_to_cache`` is ``True``, the file is added to the local store. If ``force_refresh`` is ``True``, local cache is not examined if a remote store is configured. If a remote store is con...
363,397
def download_artist_by_search(self, artist_name):
    """Download an artist's top-50 songs, resolving the artist by name.

    :params artist_name: artist name.
    """
    try:
        match = self.crawler.search_artist(artist_name, self.quiet)
    except RequestException as err:
        # Network/search failure: report it instead of crashing.
        click.echo(err)
    else:
        self.download_artist_by_id(match.artist_id, match.artist_name)
Download an artist's top-50 songs by name. :params artist_name: artist name.
363,398
def _is_cache_dir_appropriate(cache_dir, cache_file):
    """Determine if a directory is acceptable for building.

    A directory is suitable if any of the following are true:
      - it doesn't exist
      - it is empty
      - it contains an existing build cache (*cache_file*)
    """
    if not os.path.exists(cache_dir):
        return True
    entries = os.listdir(cache_dir)
    if cache_file in entries:
        return True
    return not bool(entries)
Determine if a directory is acceptable for building. A directory is suitable if any of the following are true: - it doesn't exist - it is empty - it contains an existing build cache
363,399
def build_from_entities(self, tax_benefit_system, input_dict): input_dict = deepcopy(input_dict) simulation = Simulation(tax_benefit_system, tax_benefit_system.instantiate_entities()) for (variable_name, _variable) in tax_benefit_system.variables.items(): self.reg...
Build a simulation from a Python dict ``input_dict`` fully specifying entities. Examples: >>> simulation_builder.build_from_entities({ 'persons': {'Javier': { 'salary': {'2018-11': 2000}}}, 'households': {'household': {'parents': ['Javier']}} })