code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def future(self, rev=None):
    """Return a Mapping view of items after the given revision.

    If ``rev`` is given, seek to it first; otherwise the last revision
    looked up is used.
    """
    if rev is not None:
        self.seek(rev)
    return WindowDictFutureView(self._future)
Return a Mapping of items after the given revision. Default revision is the last one looked up.
def persistent_write(self, address, byte, refresh_config=False):
    """Write one byte to an address in persistent memory (e.g. EEPROM).

    Parameters
    ----------
    address : int
        Target address in persistent memory.
    byte : int
        Value to store at ``address``.
    refresh_config : bool, optional
        If True, reload the configuration afterwards via :meth:`load_config`.
    """
    self._persistent_write(address, byte)
    if refresh_config:
        self.load_config(False)
Write a single byte to an address in persistent memory. Parameters ---------- address : int Address in persistent memory (e.g., EEPROM). byte : int Value to write to address. refresh_config : bool, optional If ``True``, :meth:`load_config()` is called afterward to refresh the configuration settings.
def sum(event_collection, target_property, timeframe=None, timezone=None, interval=None, filters=None, group_by=None, order_by=None, max_age=None, limit=None):
    """Perform a sum query: add up ``target_property`` over matching events.

    :param event_collection: string, name of the collection to query
    :param target_property: string, name of the event property to sum
    :param timeframe: string or dict, timeframe of the events (e.g. "previous_7_days")
    :param timezone: int, timezone offset in seconds for timeframe/interval
    :param interval: string, time interval for measuring over time (e.g. "daily")
    :param filters: array of dict, filters to apply to the data
    :param group_by: string or array of strings, properties to group results by
    :param order_by: dict or list of dicts with property_name(s) and direction(s);
        requires group_by
    :param max_age: int (> 30s), maximum allowed staleness in seconds
    :param limit: positive int limiting displayed results when using order_by
    """
    _initialize_client_from_environment()
    return _client.sum(
        event_collection=event_collection,
        target_property=target_property,
        timeframe=timeframe,
        timezone=timezone,
        interval=interval,
        filters=filters,
        group_by=group_by,
        order_by=order_by,
        max_age=max_age,
        limit=limit,
    )
Performs a sum query Adds the values of a target property for events that meet the given criteria. :param event_collection: string, the name of the collection to query :param target_property: string, the name of the event property you would like use :param timeframe: string or dict, the timeframe in which the events happened example: "previous_7_days" :param timezone: int, the timezone you'd like to use for the timeframe and interval in seconds :param interval: string, the time interval used for measuring data over time example: "daily" :param filters: array of dict, contains the filters you'd like to apply to the data example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}] :param group_by: string or array of strings, the name(s) of the properties you would like to group you results by. example: "customer.id" or ["browser","operating_system"] :param order_by: dictionary or list of dictionary objects containing the property_name(s) to order by and the desired direction(s) of sorting. Example: {"property_name":"result", "direction":keen.direction.DESCENDING} May not be used without a group_by specified. :param limit: positive integer limiting the displayed results of a query using order_by :param max_age: an integer, greater than 30 seconds, the maximum 'staleness' you're willing to trade for increased query performance, in seconds
def update_config(
    self, filename="MAGTUNE_PYMAGICC.CFG", top_level_key="nml_allcfgs", **kwargs
):
    """Update a MAGICC fortran-namelist configuration file in the run dir.

    Creates the namelist if none exists yet.

    Parameters
    ----------
    filename : str
        Name of configuration file to write.
    top_level_key : str
        Name of the namelist written to the configuration file.
    kwargs
        Settings merged into the namelist (no validation is performed).

    Returns
    -------
    dict
        The contents of the namelist that was written to file.
    """
    settings = self._format_config(kwargs)
    fname = join(self.run_dir, filename)
    conf = f90nml.read(fname) if exists(fname) else {top_level_key: {}}
    conf[top_level_key].update(settings)
    f90nml.write(conf, fname, force=True)
    return conf
Updates a configuration file for MAGICC Updates the contents of a fortran namelist in the run directory, creating a new namelist if none exists. Parameters ---------- filename : str Name of configuration file to write top_level_key : str Name of namelist to be written in the configuration file kwargs Other parameters to pass to the configuration file. No validation on the parameters is performed. Returns ------- dict The contents of the namelist which was written to file
def completedefault(self, text, line, begidx, endidx):
    """Delegate tab completion to the argparse completer when available."""
    if not self.argparser_completer:
        return []
    if not any(line.startswith(prefix) for prefix in self.argparse_names()):
        return []
    self.argparser_completer.rl_complete(line, 0)
    return [match[begidx:] for match in self.argparser_completer._rl_matches]
Accessing the argcompleter if available.
def _get_raw_objects(self): if not hasattr(self, '_raw_objects'): result = self._client.get(type(self).api_endpoint, model=self) self._raw_objects = result return self._raw_objects
Helper function to populate the first page of raw objects for this tag. This has the side effect of creating the ``_raw_objects`` attribute of this object.
async def info(self):
    """Return information about the CryoTank instance.

    Returns:
        dict: items index, metrics index, and item stats.
    """
    return {
        'indx': self._items.index(),
        'metrics': self._metrics.index(),
        'stat': self._items.stat(),
    }
Returns information about the CryoTank instance. Returns: dict: A dict containing items and metrics indexes.
def _format_value_element(lines, element, spacer=""):
    """Format a member or parameter for full documentation output."""
    # Definition line first, then the summary and all remaining sections.
    lines.append(spacer + element.definition())
    _format_summary(lines, element)
    _format_generic(lines, element, ["summary"])
Formats a member or parameter for full documentation output.
def cluster_replicate(self, node_id):
    """Reconfigure this node as a slave of the specified master node."""
    return wait_ok(self.execute(b'CLUSTER', b'REPLICATE', node_id))
Reconfigure a node as a slave of the specified master node.
def should_stream(proxy_response):
    """Decide whether ``proxy_response`` should be treated as a stream.

    HTML responses are never streamed; otherwise stream when the
    Content-Length is missing, unparsable, zero, or larger than
    MIN_STREAMING_LENGTH.

    :param proxy_response: an instance of urllib3.response.HTTPResponse
    :returns: bool
    """
    if is_html_content_type(proxy_response.headers.get('Content-Type')):
        return False
    try:
        length = int(proxy_response.headers.get('Content-Length', 0))
    except ValueError:
        length = 0
    return not length or length > MIN_STREAMING_LENGTH
Function to verify whether the proxy_response must be converted into a stream. This is done by checking the proxy_response Content-Length and verifying whether it is bigger than the length stipulated by MIN_STREAMING_LENGTH. :param proxy_response: An instance of urllib3.response.HTTPResponse :returns: A boolean stating if the proxy_response should be treated as a stream
def instance_signals_and_handlers(cls, instance):
    """Calculate per-instance signals and handlers."""
    signals = cls._signals.copy()
    handlers = cls._build_instance_handler_mapping(instance, cls._signal_handlers)
    return signals, handlers
Calculate per-instance signals and handlers.
def count_rows_distinct(self, table, cols='*'):
    """Return the number of distinct rows in a particular table."""
    query = 'SELECT COUNT(DISTINCT {0}) FROM {1}'.format(join_cols(cols), wrap(table))
    return self.fetch(query)
Get the number of distinct rows in a particular table.
def deps_used(self, pkg, used):
    """Record, in ``self.deps_dict``, the ``used`` dependencies of ``pkg``
    if the package is installed.
    """
    if find_package(pkg + self.meta.sp, self.meta.pkg_path):
        # NOTE(review): membership is tested against .values(), not keys.
        # If ``pkg`` appeared as a *value* but not as a key, the else-branch
        # would raise KeyError. Presumably ``pkg not in self.deps_dict``
        # (key membership) was intended — confirm before changing.
        if pkg not in self.deps_dict.values():
            self.deps_dict[pkg] = used
        else:
            self.deps_dict[pkg] += used
Create dependencies dictionary
def read_gcvs(filename):
    """Yield variable-star records parsed from a `GCVS format`_ file.

    :param filename: path to GCVS data file (usually ``iii.dat``)

    .. _`GCVS format`: http://www.sai.msu.su/gcvs/gcvs/iii/html/
    """
    with open(filename, 'r') as handle:
        for star in GcvsParser(handle):
            yield star
Reads variable star data in `GCVS format`_. :param filename: path to GCVS data file (usually ``iii.dat``) .. _`GCVS format`: http://www.sai.msu.su/gcvs/gcvs/iii/html/
def bech32_decode(bech):
    """Validate a Bech32 string and determine its HRP and data.

    Returns ``(hrp, data)`` with the 6 checksum values stripped, or
    ``(None, None)`` on any validation failure.
    """
    # Reject characters outside the printable-ASCII range and mixed case.
    if ((any(ord(x) < 33 or ord(x) > 126 for x in bech)) or
            (bech.lower() != bech and bech.upper() != bech)):
        return None, None
    bech = bech.lower()
    # The last '1' separates the human-readable part from the data part.
    pos = bech.rfind('1')
    # HRP must be non-empty, the data part must at least hold the 6-char
    # checksum, and the total length is capped at 90.
    if pos < 1 or pos + 7 > len(bech) or len(bech) > 90:
        return None, None
    if not all(x in CHARSET for x in bech[pos+1:]):
        return None, None
    hrp = bech[:pos]
    data = [CHARSET.find(x) for x in bech[pos+1:]]
    if not bech32_verify_checksum(hrp, data):
        return None, None
    # Drop the trailing 6 checksum values from the returned data.
    return hrp, data[:-6]
Validate a Bech32 string, and determine HRP and data.
def get_reservations(self, email, date, timeout=None):
    """Get reservations for a given email.

    :param email: the email of the user whose reservations are to be fetched
    :type email: str
    :param date: the date to fetch reservations for
    :param timeout: optional request timeout in seconds
    :raises APIError: on an HTTP error or connection timeout
    """
    try:
        resp = self._request(
            "GET",
            "/1.1/space/bookings?email={}&date={}&limit=100".format(email, date),
            timeout=timeout,
        )
    # BUG FIX: previously caught ``resp.exceptions.HTTPError`` — ``resp``
    # is unbound when ``self._request`` raises, so the handler itself
    # crashed. The exception class lives on the ``requests`` module.
    except requests.exceptions.HTTPError as error:
        raise APIError("Server Error: {}".format(error))
    except requests.exceptions.ConnectTimeout:
        raise APIError("Timeout Error")
    return resp.json()
Gets reservations for a given email. :param email: the email of the user who's reservations are to be fetched :type email: str
def score(text, *score_functions):
    """Score ``text`` as the arithmetic mean of the given scoring functions.

    Examples:
        >>> score("abc", function_a)
        >>> score("abc", function_a, function_b)

    Args:
        text (str): the text to score
        *score_functions: one or more callables mapping text to a number

    Returns:
        Arithmetic mean of the individual scores.

    Raises:
        ValueError: if no scoring functions are supplied.
    """
    if not score_functions:
        raise ValueError("score_functions must not be empty")
    individual_scores = (fn(text) for fn in score_functions)
    return statistics.mean(individual_scores)
Score ``text`` using ``score_functions``. Examples: >>> score("abc", function_a) >>> score("abc", function_a, function_b) Args: text (str): The text to score *score_functions (variable length argument list): functions to score with Returns: Arithmetic mean of scores Raises: ValueError: If score_functions is empty
def exon_overlap(self, tx, multi_minover=10, multi_endfrac=0, multi_midfrac=0.8, single_minover=50, single_frac=0.5, multi_consec=True):
    """Report how much the exons of this transcript overlap ``tx``.

    :param tx: the other transcript
    :param multi_minover: minimum overlap for multi-exons (default 10)
    :param multi_endfrac: minimum end-fraction coverage for multi-exons (default 0)
    :param multi_midfrac: mutual coverage required for internal exons (default 0.8)
    :param single_minover: minimum shared overlap for single exons (default 50)
    :param single_frac: minimum overlapping fraction for single exons (default 0.5)
    :param multi_consec: require consecutive multi-exon mapping (default True)
    :return: ExonOverlap report
    :rtype: Transcript.ExonOverlap
    """
    return ExonOverlap(
        self, tx,
        multi_minover, multi_endfrac, multi_midfrac,
        single_minover, single_frac,
        multi_consec=multi_consec,
    )
Get a report on how much the exons overlap :param tx: :param multi_minover: multi-exons need to overlap by at least this much to be considered overlapped (default 10) :param multi_endfrac: multi-exons need an end fraction coverage of at least this by default (default 0) :param multi_midfrac: multi-exons need (default 0.8) mutual coverage for internal exons :param single_minover: single-exons need at least this much shared overlap (default 50) :param single_frac: at least this fraction of single exons must overlap (default 0.5) :param multi_consec: exons need to have multiexon consecutive mapping to consider it a match (default True) :type tx: :type multi_minover: int :type multi_endfrac: float :type multi_midfrac: float :type single_minover: int :type single_frac: float :type multi_consec: bool :return: ExonOverlap report :rtype: Transcript.ExonOverlap
def _create_non_null_wrapper(name, t):
    """Create a type wrapper enforcing non-null semantics for type ``t``."""
    def __new__(cls, json_data, selection_list=None):
        # Reject explicit nulls; otherwise defer construction to the wrapped type.
        if json_data is None:
            raise ValueError(name + ' received null value')
        return t(json_data, selection_list)

    def __to_graphql_input__(value, indent=0, indent_string=' '):
        return t.__to_graphql_input__(value, indent, indent_string)

    namespace = {
        '__new__': __new__,
        '_%s__auto_register' % name: False,
        '__to_graphql_input__': __to_graphql_input__,
    }
    return type(name, (t,), namespace)
creates type wrapper for non-null of given type
def scores(self, text: str) -> Dict[str, float]:
    """Per-language probability that ``text`` is written in that language.

    Each score is a float between 0.0 and 1.0.

    :param text: source code.
    :return: language-to-score dictionary.
    """
    features = extract(text)
    input_fn = _to_func(([features], []))
    probabilities = next(self._classifier.predict_proba(input_fn=input_fn)).tolist()
    return dict(zip(sorted(self.languages), probabilities))
A score for each language corresponding to the probability that the text is written in the given language. The score is a `float` value between 0.0 and 1.0 :param text: source code. :return: language to score dictionary
def on_connected(self, connection):
    """AMQP connection callback: mark as connected and open the input channel.

    Args:
        connection: the newly established AMQP connection.
    """
    log.info('PikaClient: connected to RabbitMQ')
    self.connected = True
    # NOTE(review): uses ``self.connection`` rather than the ``connection``
    # argument — presumably the same object, but confirm they cannot differ.
    self.in_channel = self.connection.channel(self.on_channel_open)
AMQP connection callback. Creates input channel. Args: connection: AMQP connection
async def reseed_apply(self) -> DIDInfo:
    """Replace the verification key with the new one from a reseed operation.

    Raise WalletState if the wallet is closed.

    :return: DIDInfo with new verification key and metadata for DID
    """
    LOGGER.debug('Wallet.reseed_apply >>>')

    if not self.handle:
        # BUG FIX: this log line previously said 'Wallet.reseed_init',
        # a copy-paste error; it now names this method.
        LOGGER.debug('Wallet.reseed_apply <!< Wallet %s is closed', self.name)
        raise WalletState('Wallet {} is closed'.format(self.name))

    await did.replace_keys_apply(self.handle, self.did)
    self.verkey = await did.key_for_local_did(self.handle, self.did)

    now = int(time())
    rv = DIDInfo(self.did, self.verkey, {'anchor': True, 'since': now, 'modified': now})
    await did.set_did_metadata(self.handle, self.did, json.dumps(rv.metadata))
    LOGGER.info('Wallet %s set seed hash metadata for DID %s', self.name, self.did)

    LOGGER.debug('Wallet.reseed_apply <<< %s', rv)
    return rv
Replace verification key with new verification key from reseed operation. Raise WalletState if wallet is closed. :return: DIDInfo with new verification key and metadata for DID
def remove(self, key):
    """Remove the value stored at ``key`` from the queue."""
    entry = self.item_finder.pop(key)
    entry[-1] = None  # tombstone: mark the queued entry as removed
    self.removed_count += 1
remove the value found at key from the queue
def GetPossibleGroup(self):
    """Return a possible group from the end of the call queue, or None
    if no other methods are on the stack.
    """
    this_method = self._call_queue.pop()
    assert this_method == self
    if self._call_queue:
        return self._call_queue[-1]
    return None
Returns a possible group from the end of the call queue or None if no other methods are on the stack.
def _slopes_directions(self, data, dX, dY, method='tarboton'): if method == 'tarboton': return self._tarboton_slopes_directions(data, dX, dY) elif method == 'central': return self._central_slopes_directions(data, dX, dY)
Wrapper to pick between various algorithms
def peek(self):
    """Get the current packet without consuming it.

    Raises StopIteration when no packet is available.
    """
    try:
        self._fetch()
        return self.pkt_queue[0]
    except IndexError:
        raise StopIteration()
Get the current packet without consuming it.
def close(self, file_des):
    """Close a file descriptor.

    Args:
        file_des: integer file descriptor of the file object.

    Raises:
        OSError: bad file descriptor.
        TypeError: if the file descriptor is not an integer.
    """
    self.filesystem.get_open_file(file_des).close()
Close a file descriptor. Args: file_des: An integer file descriptor for the file object requested. Raises: OSError: bad file descriptor. TypeError: if file descriptor is not an integer.
def rename_acquisition(self, plate_name, name, new_name):
    """Rename an acquisition.

    Parameters
    ----------
    plate_name: str
        name of the parent plate
    name: str
        name of the acquisition that should be renamed
    new_name: str
        name that should be given to the acquisition
    """
    # BUG FIX: the log message previously misspelled "acquisistion".
    logger.info(
        'rename acquisition "%s" of experiment "%s", plate "%s"',
        name, self.experiment_name, plate_name
    )
    content = {'name': new_name}
    acquisition_id = self._get_acquisition_id(plate_name, name)
    url = self._build_api_url(
        '/experiments/{experiment_id}/acquisitions/{acquisition_id}'.format(
            experiment_id=self._experiment_id, acquisition_id=acquisition_id
        )
    )
    res = self._session.put(url, json=content)
    res.raise_for_status()
Renames an acquisition. Parameters ---------- plate_name: str name of the parent plate name: str name of the acquisition that should be renamed new_name: str name that should be given to the acquisition See also -------- :func:`tmserver.api.acquisition.update_acquisition` :class:`tmlib.models.acquisition.Acquisition`
def to_api_repr(self):
    """Build an API representation of the query job config.

    Returns:
        dict: a dictionary in the format used by the BigQuery API.
    """
    resource = copy.deepcopy(self._properties)
    parameters = resource["query"].get("queryParameters")
    if parameters:
        # Nameless first parameter implies positional binding.
        mode = "POSITIONAL" if parameters[0].get("name") is None else "NAMED"
        resource["query"]["parameterMode"] = mode
    return resource
Build an API representation of the query job config. Returns: dict: A dictionary in the format used by the BigQuery API.
def gerrymanderNodeFilenames(self):
    """Strip directory components from every node's file paths.

    Node filenames were created relative to ``conf.py`` and include
    ``self.root_directory``; the generated API files live next to the node
    files, so the directory prefix must be removed before the names can be
    used in an ``include`` or ``toctree``.
    """
    basename = os.path.basename
    for node in self.all_nodes:
        node.file_name = basename(node.file_name)
        if node.kind == "file":
            node.program_file = basename(node.program_file)
When creating nodes, the filename needs to be relative to ``conf.py``, so it will include ``self.root_directory``. However, when generating the API, the file we are writing to is in the same directory as the generated node files so we need to remove the directory path from a given ExhaleNode's ``file_name`` before we can ``include`` it or use it in a ``toctree``.
def mark_flags_as_required(flag_names, flag_values=_flagvalues.FLAGS):
    """Ensure the named flags are not None during program execution.

    Args:
        flag_names: Sequence[str], names of the flags.
        flag_values: flags.FlagValues, optional FlagValues instance where the
            flags are defined.

    Raises:
        AttributeError: if any flag name has not already been defined as a flag.
    """
    for name in flag_names:
        mark_flag_as_required(name, flag_values)
Ensures that flags are not None during program execution. Recommended usage: if __name__ == '__main__': flags.mark_flags_as_required(['flag1', 'flag2', 'flag3']) app.run() Args: flag_names: Sequence[str], names of the flags. flag_values: flags.FlagValues, optional FlagValues instance where the flags are defined. Raises: AttributeError: If any of flag name has not already been defined as a flag.
def first_name(anon, obj, field, val):
    """Return a random first name."""
    faker = anon.faker
    return faker.first_name(field=field)
Returns a random first name
def dependencies(self, images):
    """Yield just the dependency images."""
    # Non-string dependent images carry their name as an attribute.
    for dep in self.commands.dependent_images:
        if not isinstance(dep, six.string_types):
            yield dep.name
    for image, _ in self.dependency_images():
        yield image
Yield just the dependency images
def geocode_address(self, address, **kwargs):
    """Geocode an address and return a Location with its components.

    Returns an error response object when the API call does not succeed.
    """
    field_list = kwargs.pop("fields", [])
    resp = self._req(verb="geocode",
                     params={"q": address, "fields": ",".join(field_list)})
    if resp.status_code != 200:
        return error_response(resp)
    return Location(resp.json())
Returns a Location dictionary with the components of the queried address and the geocoded location. >>> client = GeocodioClient('some_api_key') >>> client.geocode("1600 Pennsylvania Ave, Washington DC") { "input": { "address_components": { "number": "1600", "street": "Pennsylvania", "suffix": "Ave", "city": "Washington", "state": "DC" }, "formatted_address": "1600 Pennsylvania Ave, Washington DC" }, "results": [ { "address_components": { "number": "1600", "street": "Pennsylvania", "suffix": "Ave", "city": "Washington", "state": "DC", "zip": "20500" }, "formatted_address": "1600 Pennsylvania Ave, Washington DC, 20500", "location": { "lat": 38.897700000000, "lng": -77.03650000000, }, "accuracy": 1 }, { "address_components": { "number": "1600", "street": "Pennsylvania", "suffix": "Ave", "city": "Washington", "state": "DC", "zip": "20500" }, "formatted_address": "1600 Pennsylvania Ave, Washington DC, 20500", "location": { "lat": 38.897700000000, "lng": -77.03650000000, }, "accuracy": 0.8 } ] }
def compute_edge_reduction(self) -> float:
    """Compute the edge reduction ratio. Costly computation."""
    initial = self.init_edge_number()
    power = self.edge_number()
    return (initial - power) / initial
Compute the edge reduction. Costly computation
def set_authorization_password(self, password):
    """Change the account's authorization password and signal the change.

    :type password: string
    :param password: the new authorization password.
    """
    self.authorization_password = password
    self.changed_event.emit(self)
Changes the authorization password of the account. :type password: string :param password: The new authorization password.
def get(self, **url_params):
    """Make the HTTP GET to the url, merging any extra query params."""
    if url_params:
        self.http_method_args["params"].update(url_params)
    return self.http_method("GET")
Makes the HTTP GET to the url.
def add_string_pairs_from_text_view_element(xib_file, results, text_view, special_ui_components_prefix):
    """Add (key, comment) string pairs extracted from a textview element.

    Args:
        xib_file (str): Path to the xib file.
        results (list): List to append the results to.
        text_view (element): The textview element to extract string pairs from.
        special_ui_components_prefix (str): Custom prefix for internationalized
            components to allow.
    """
    comment = extract_element_internationalized_comment(text_view)
    if comment is None:
        return
    uses_attributed = (
        text_view.hasAttribute('usesAttributedText')
        and text_view.attributes['usesAttributedText'].value == 'YES'
    )
    if uses_attributed:
        add_string_pairs_from_attributed_ui_element(results, text_view, comment)
    else:
        try:
            key = text_view.attributes['text'].value
            results.append((key, comment + ' default text value'))
        except KeyError:
            pass  # no plain 'text' attribute present
    warn_if_element_not_of_class(text_view, 'TextView', special_ui_components_prefix)
Adds string pairs from a textview element. Args: xib_file (str): Path to the xib file. results (list): The list to add the results to. text_view(element): The textview element from the xib, to extract the string pairs from. special_ui_components_prefix(str): A custom prefix for internationalize component to allow (default is only JT)
def parse_line(line):
    """Parse a raw line into a `TabLine` object."""
    stripped = line.rstrip()
    kind = _get_line_type(stripped)
    return TabLine(
        type=kind,
        data=_DATA_PARSERS[kind](stripped),
        original=stripped,
    )
Parse a line into a `TabLine` object.
def count(self):
    """Count the number of distinct results of the wrapped query.

    @return: an L{int} representing the number of distinct results.
    """
    # Flush pending in-memory changes so the SQL sees current data.
    if not self.query.store.autocommit:
        self.query.store.checkpoint()
    # Build the DISTINCT column list from every table in the query.
    target = ', '.join([
        tableClass.storeID.getColumnName(self.query.store)
        for tableClass in self.query.tableClass
    ])
    sql, args = self.query._sqlAndArgs(
        'SELECT DISTINCT', target)
    # Wrap the DISTINCT select so COUNT(*) counts unique rows only.
    sql = 'SELECT COUNT(*) FROM (' + sql + ')'
    result = self.query.store.querySQL(sql, args)
    assert len(result) == 1, 'more than one result: %r' % (result,)
    # COUNT can come back as None on some paths; normalize to 0.
    return result[0][0] or 0
Count the number of distinct results of the wrapped query. @return: an L{int} representing the number of distinct results.
def get(self):
    """Return a dictionary that represents the Tcl array."""
    return {name: var.get() for name, var in self._elementvars.items()}
Return a dictionary that represents the Tcl array
def assert_is_substring(substring, subject, message=None, extra=None):
    """Raise AssertionError if ``substring`` is not a substring of ``subject``."""
    ok = (
        subject is not None
        and substring is not None
        and subject.find(substring) != -1
    )
    assert ok, _assert_fail_message(message, substring, subject, "is not in", extra)
Raises an AssertionError if substring is not a substring of subject.
def badge_color(self):
    """Find the badge color for the current value based on the thresholds."""
    if not self.thresholds:
        return self.default_color

    # String values: direct threshold lookup with the default as fallback.
    if self.value_type == str:
        return self.thresholds.get(self.value, self.default_color)

    # Numeric values: find the first threshold the value falls below.
    ordered = sorted(
        ([self.value_type(limit), colour] for limit, colour in self.thresholds.items()),
        key=lambda pair: pair[0],
    )
    colour = None
    for limit, colour in ordered:
        if float(self.value) < float(limit):
            return colour
    # Value exceeded every threshold; optionally keep the top color.
    if colour and self.use_max_when_value_exceeds:
        return colour
    return self.default_color
Find the badge color based on the thresholds.
def delete_user_login(self, id, user_id):
    """Delete an existing login for a user."""
    path = {"user_id": user_id, "id": id}
    data = {}
    params = {}
    self.logger.debug(
        "DELETE /api/v1/users/{user_id}/logins/{id} with query params: {params} and form data: {data}".format(
            params=params, data=data, **path
        )
    )
    return self.generic_request(
        "DELETE",
        "/api/v1/users/{user_id}/logins/{id}".format(**path),
        data=data,
        params=params,
        no_data=True,
    )
Delete a user login. Delete an existing login.
def detect_intent_texts(project_id, session_id, texts, language_code):
    """Print detect-intent results for each text within one Dialogflow session.

    Using the same ``session_id`` between requests allows continuation of
    the conversation.
    """
    import dialogflow_v2 as dialogflow

    session_client = dialogflow.SessionsClient()
    session = session_client.session_path(project_id, session_id)
    print('Session path: {}\n'.format(session))

    for text in texts:
        query_input = dialogflow.types.QueryInput(
            text=dialogflow.types.TextInput(text=text, language_code=language_code))
        response = session_client.detect_intent(
            session=session, query_input=query_input)

        result = response.query_result
        print('=' * 20)
        print('Query text: {}'.format(result.query_text))
        print('Detected intent: {} (confidence: {})\n'.format(
            result.intent.display_name,
            result.intent_detection_confidence))
        print('Fulfillment text: {}\n'.format(result.fulfillment_text))
Returns the result of detect intent with texts as inputs. Using the same `session_id` between requests allows continuation of the conversation.
def omega(self, structure, n, u):
    """Directional frequency contribution to the heat capacity.

    Args:
        structure (Structure): structure used for the determination
        n (3x1 array-like): direction for Cv determination
        u (3x1 array-like): polarization direction; eigenvector status is
            not verified
    """
    # Projected lattice length along n, converted to meters.
    l0 = 1e-10 * np.dot(np.sum(structure.lattice.matrix, axis=0), n)
    mass = float(structure.composition.weight) * 1.66054e-27  # amu -> kg
    volume = structure.volume * 1e-30  # A^3 -> m^3
    density = mass / volume
    vel = (1e9 * self[0].einsum_sequence([n, u, n, u]) / density) ** 0.5
    return vel / l0
Finds directional frequency contribution to the heat capacity from direction and polarization Args: structure (Structure): Structure to be used in directional heat capacity determination n (3x1 array-like): direction for Cv determination u (3x1 array-like): polarization direction, note that no attempt for verification of eigenvectors is made
def and_yields(self, *values):
    """Expect the expectation's return value to be a generator of ``values``."""
    def generator():
        for item in values:
            yield item
    self.__expect(Expectation, Invoke(generator))
Expects the return value of the expectation to be a generator of the given values
def delif(self, iname):
    """Delete an interface from the bridge."""
    failure_msg = "Could not delete interface %s from %s." % (iname, self.name)
    _runshell([brctlexe, 'delif', self.name, iname], failure_msg)
Delete an interface from the bridge.
def parse_data(data, type, **kwargs):
    """Return an OSM networkx graph parsed from in-memory OSM data.

    Parameters
    ----------
    data : string
    type : string ('xml' or 'pbf')

    >>> graph = parse_data(data, 'xml')
    """
    suffix_by_type = {'xml': '.osm', 'pbf': '.pbf'}
    if type not in suffix_by_type:
        raise ValueError('Unknown data type "%s"' % type)
    # Spill the data to a temp file, parse it, and always clean up.
    fd, filename = tempfile.mkstemp(suffix=suffix_by_type[type])
    try:
        os.write(fd, data)
        os.close(fd)
        return parse_file(filename, **kwargs)
    finally:
        os.remove(filename)
Return an OSM networkx graph from the input OSM data Parameters ---------- data : string type : string ('xml' or 'pbf') >>> graph = parse_data(data, 'xml')
def contrast(colour1, colour2):
    r"""Determine the contrast ratio between two colours.

    Args:
        colour1 (colourettu.Colour): a colour, or any value the ``Colour``
            constructor accepts.
        colour2 (colourettu.Colour): a second colour (same flexibility).

    Returns:
        float: ratio between 1.0 (a colour on itself) and 21.0 (white on
        black), computed as (max_lum + 0.05) / (min_lum + 0.05).

    Raises:
        TypeError: if an argument cannot be converted to a Colour.
    """
    def _as_colour(value, label):
        # Accept Colour instances (and subclasses) directly; otherwise coerce.
        # The original compared exact types and used a bare ``except:``,
        # which also swallowed KeyboardInterrupt/SystemExit.
        if isinstance(value, Colour):
            return value
        try:
            return Colour(value)
        except Exception:
            raise TypeError("%s must be a colourettu.colour" % label)

    lum1 = _as_colour(colour1, "colour1").luminance()
    lum2 = _as_colour(colour2, "colour2").luminance()
    return (max(lum1, lum2) + 0.05) / (min(lum1, lum2) + 0.05)
r"""Determines the contrast between two colours. Args: colour1 (colourettu.Colour): a colour colour2 (colourettu.Colour): a second colour Contrast the difference in (perceived) brightness between colours. Values vary between 1:1 (a given colour on itself) and 21:1 (white on black). To compute contrast, two colours are required. .. code:: pycon >>> colourettu.contrast("#FFF", "#FFF") # white on white 1.0 >>> colourettu.contrast(c1, "#000") # black on white 20.999999999999996 >>> colourettu.contrast(c4, c5) 4.363552233203198 ``contrast`` can also be called on an already existing colour, but a second colour needs to be provided: .. code:: pycon >>> c4.contrast(c5) 4.363552233203198 .. note:: Uses the formula: \\[ contrast = \\frac{lum_1 + 0.05}{lum_2 + 0.05} \\] **Use of Contrast** For Basic readability, the ANSI standard is a contrast of 3:1 between the text and it's background. The W3C proposes this as a minimum accessibility standard for regular text under 18pt and bold text under 14pt. This is referred to as the *A* standard. The W3C defines a higher *AA* standard with a minimum contrast of 4.5:1. This is approximately equivalent to 20/40 vision, and is common for those over 80. The W3C define an even higher *AAA* standard with a 7:1 minimum contrast. This would be equivalent to 20/80 vision. Generally, it is assumed that those with vision beyond this would access the web with the use of assistive technologies. If needed, these constants are stored in the library. .. code:: pycon >>> colourettu.A_contrast 3.0 >>> colourettu.AA_contrast 4.5 >>> colourettu.AAA_contrast 7.0 I've also found mention that if the contrast is *too* great, this can also cause readability problems when reading longer passages. This is confirmed by personal experience, but I have been (yet) unable to find any quantitative research to this effect.
def _StructPackDecoder(wire_type, format):
    """Return a constructor for a decoder for a fixed-width field.

    Args:
        wire_type: The field's wire type.
        format: The format string to pass to struct.unpack().
    """
    size = struct.calcsize(format)
    unpack = struct.unpack  # bind locally for the hot decode path

    def InnerDecode(buffer, pos):
        end = pos + size
        (value,) = unpack(format, buffer[pos:end])
        return (value, end)

    return _SimpleDecoder(wire_type, InnerDecode)
Return a constructor for a decoder for a fixed-width field. Args: wire_type: The field's wire type. format: The format string to pass to struct.unpack().
def natural_breaks(values, k=5):
    """Natural breaks helper function.

    Jenks natural breaks is kmeans in one dimension.
    """
    values = np.array(values)
    unique_count = len(np.unique(values))
    if unique_count < k:
        # Fewer distinct values than requested classes: shrink k.
        Warn('Warning: Not enough unique values in array to form k classes', UserWarning)
        Warn('Warning: setting k to %d' % unique_count, UserWarning)
        k = unique_count
    kres = _kmeans(values, k)
    sids = kres[-1]
    fit = kres[-2]
    class_ids = kres[0]
    cuts = kres[1]
    return (sids, class_ids, fit, cuts)
natural breaks helper function Jenks natural breaks is kmeans in one dimension
def value(self):
    """Fetch a random weighted choice, flattened to a string."""
    picked = weighted_choice(self._responses)
    if isinstance(picked, tuple):
        return ''.join(str(part) for part in picked).strip()
    return str(picked)
Fetch a random weighted choice
def create_impression_event(self, experiment, variation_id, user_id, attributes):
    """Create an impression Event to be sent to the logging endpoint.

    Args:
        experiment: Experiment for which the impression is recorded.
        variation_id: ID of the variation presented to the user.
        user_id: ID of the user.
        attributes: dict of user attribute values to record.

    Returns:
        Event object encapsulating the impression event.
    """
    params = self._get_common_params(user_id, attributes)
    impression = self._get_required_params_for_impression(experiment, variation_id)
    params[self.EventParams.USERS][0][self.EventParams.SNAPSHOTS].append(impression)
    return Event(self.EVENTS_URL, params, http_verb=self.HTTP_VERB, headers=self.HTTP_HEADERS)
Create impression Event to be sent to the logging endpoint. Args: experiment: Experiment for which impression needs to be recorded. variation_id: ID for variation which would be presented to user. user_id: ID for user. attributes: Dict representing user attributes and values which need to be recorded. Returns: Event object encapsulating the impression event.
def EMAIL_REQUIRED(self):
    """Whether the user must hand over an e-mail address when signing up."""
    # Imported lazily to avoid an import cycle with allauth settings.
    from allauth.account import app_settings as account_settings
    return self._setting("EMAIL_REQUIRED", account_settings.EMAIL_REQUIRED)
The user is required to hand over an e-mail address when signing up
def _load_sentence_list(self, path):
    """Load and filter the tab-separated sentence list.

    Args:
        path (str): Path to the sentence list.

    Returns:
        dict: sentence-id -> [language, transcription]
    """
    sentences = {}
    entries = textfile.read_separated_lines_generator(path, separator='\t', max_columns=3)
    for entry in entries:
        if self.include_languages is None or entry[1] in self.include_languages:
            sentences[entry[0]] = entry[1:]
    return sentences
Load and filter the sentence list. Args: path (str): Path to the sentence list. Returns: dict: Dictionary of sentences (id : language, transcription)
def starmap_async(function, iterables, *args, **kwargs):
    """``multiprocessing.Pool.starmap_async`` variant supporting extra args.

    Equivalent to ``[function(x1, x2, ..., args[0], args[1], ...) for
    (x1, x2, ...) in iterables]`` executed asynchronously. Recognised
    keyword arguments include ``pm_parallel``, ``pm_chunksize``,
    ``pm_callback``, ``pm_error_callback``, ``pm_pool`` and ``pm_processes``
    (see :py:class:`multiprocessing.pool.Pool`).
    """
    return _map_or_starmap_async(function, iterables, args, kwargs, "starmap")
This function is the multiprocessing.Pool.starmap_async version that supports multiple arguments. >>> return ([function(x1,x2,x3,..., args[0], args[1],...) for >>> (x1,x2,x3...) in iterable]) :param pm_parallel: Force parallelization on/off. If False, the function won't be asynchronous. :type pm_parallel: bool :param pm_chunksize: see :py:class:`multiprocessing.pool.Pool` :type pm_chunksize: int :param pm_callback: see :py:class:`multiprocessing.pool.Pool` :type pm_callback: function :param pm_error_callback: see :py:class:`multiprocessing.pool.Pool` :type pm_error_callback: function :param pm_pool: Pass an existing pool. :type pm_pool: multiprocessing.pool.Pool :param pm_processes: Number of processes to use in the pool. See :py:class:`multiprocessing.pool.Pool` :type pm_processes: int
def get_boolean(self, input_string):
    """Return True if ``input_string`` is a recognised boolean flag that was
    supplied on the command line, else False.

    BUG FIX: the original implicitly returned ``None`` for unrecognised
    flags (falling off the end of the function) and computed an unused
    ``index`` variable; it now always returns a real bool.
    """
    if input_string in ('--write_roc', '--plot', '--compare'):
        return input_string in self.args
    return False
Return a boolean indicating whether the given boolean flag was supplied by the user.
def get_param_values(self_, onlychanged=False):
    """Return a list of (name, value) pairs for all Parameters of this object.

    When called on an instance with ``onlychanged=True``, only values that
    differ from the default are returned (``onlychanged`` has no effect
    when called on a class).
    """
    self_or_cls = self_.self_or_cls
    pairs = []
    for name, param_obj in self_or_cls.param.objects('existing').items():
        value = self_or_cls.param.get_value_generator(name)
        if not onlychanged or not all_equal(value, param_obj.default):
            pairs.append((name, value))
    return sorted(pairs, key=itemgetter(0))
Return a list of name,value pairs for all Parameters of this object. When called on an instance with onlychanged set to True, will only return values that are not equal to the default value (onlychanged has no effect when called on a class).
def closed(self):
    """True if the ticket was closed in the given time frame."""
    return any(
        field == "status" and new == "closed"
        for _, field, _, new in self.history()
    )
True if ticket was closed in given time frame
def _fetch_features(self):
    """Retrieve the next page of features from Geopedia, if any remain."""
    if self.next_page_url is None:
        return  # pagination exhausted
    response = get_json(self.next_page_url, post_values=self.query,
                        headers=self.gpd_session.session_headers)
    self.features.extend(response['features'])
    pagination = response['pagination']
    self.next_page_url = pagination['next']
    self.layer_size = pagination['total']
Retrieves a new page of features from Geopedia
def ref_string_matches_ref_sequence(self, ref_sequence):
    """Return True iff this record's REF string agrees with ``ref_sequence``."""
    if self.POS < 0:
        return False
    end = self.ref_end_pos()
    if end >= len(ref_sequence):
        return False  # record extends past the reference
    return self.REF == ref_sequence[self.POS:end + 1]
Returns true iff the REF string in the record agrees with the given ref_sequence
def convert_to_nested_dict(dotted_dict):
    """Convert a dict with dotted-path keys to the corresponding nested dict."""
    nested = {}
    for path, value in iterate_flattened(dotted_dict):
        set_by_dotted_path(nested, path, value)
    return nested
Convert a dict with dotted path keys to corresponding nested dict.
def write_string(self, s, codec):
    """Encode ``s`` with ``codec`` in bufsize chunks and write to the stream."""
    step = self.bufsize
    for start in range(0, len(s), step):
        piece = s[start:start + step]
        encoded, consumed = codec.encode(piece)
        assert consumed == len(piece)
        self.write(encoded)
Write string encoding it with codec into stream
def _create_eval_metric_composite(metric_names: List[str]) -> mx.metric.CompositeEvalMetric:
    """Create a composite EvalMetric from a list of metric names."""
    children = [EarlyStoppingTrainer._create_eval_metric(name) for name in metric_names]
    return mx.metric.create(children)
Creates a composite EvalMetric given a list of metric names.
def get_message(self, *parameters):
    """Get the encoded message.

    The ``Send Message`` keywords call this to build the message object
    before sending it. Optional parameters are ``field_name:value`` pairs.

    Examples:
    | ${msg} = | Get message |
    | ${msg} = | Get message | field_name:value |
    """
    _, message_fields, header_fields = self._get_parameters_with_defaults(parameters)
    return self._encode_message(message_fields, header_fields)
Get encoded message. * Send Message -keywords are convenience methods, that will call this to get the message object and then send it. Optional parameters are message field values separated with colon. Examples: | ${msg} = | Get message | | ${msg} = | Get message | field_name:value |
def libvlc_vlm_get_event_manager(p_instance):
    """Get libvlc_event_manager from a vlm media.

    The event manager is immutable, so you don't have to hold the lock.

    @param p_instance: a libvlc instance.
    @return: libvlc_event_manager.
    """
    # Resolve the C function once and reuse the cached binding thereafter.
    f = _Cfunctions.get('libvlc_vlm_get_event_manager', None) or \
        _Cfunction('libvlc_vlm_get_event_manager', ((1,),), class_result(EventManager),
                    ctypes.c_void_p, Instance)
    return f(p_instance)
Get libvlc_event_manager from a vlm media. The p_event_manager is immutable, so you don't have to hold the lock. @param p_instance: a libvlc instance. @return: libvlc_event_manager.
def exists(self, table_id):
    """Check if a table exists in Google BigQuery.

    Parameters
    ----------
    table_id : str
        Name of the table to be verified.

    Returns
    -------
    boolean
        True if the table exists, otherwise False.
    """
    from google.api_core.exceptions import NotFound

    table_ref = self.client.dataset(self.dataset_id).table(table_id)
    try:
        self.client.get_table(table_ref)
    except NotFound:
        return False
    except self.http_error as ex:
        self.process_http_error(ex)
    else:
        return True
Check if a table exists in Google BigQuery Parameters ---------- table : str Name of table to be verified Returns ------- boolean true if table exists, otherwise false
def register_previewer(self, name, previewer):
    """Register a previewer in the system.

    :param name: unique name for the previewer.
    :param previewer: previewer object; if it exposes
        ``previewable_extensions``, those extensions are added to the set of
        previewable extensions.
    :raises AssertionError: if a previewer with the same name is already
        registered (note: asserts are stripped under ``python -O``).
    """
    # The original guarded this assert with `if name in self.previewers:`,
    # making the assert either unreachable or guaranteed to fail; an
    # unconditional assert expresses the invariant directly.
    assert name not in self.previewers, \
        "Previewer with same name already registered"
    self.previewers[name] = previewer
    if hasattr(previewer, 'previewable_extensions'):
        self._previewable_extensions |= set(
            previewer.previewable_extensions)
def search_project_root():
    """Search your Django project root.

    Walks upward from the current working directory until a ``Miragefile``
    (or ``Miragefile.py``) is found.  NOTE: the working directory is changed
    as a side effect and is left at the project root on success.

    returns:
        - path:string Django project root path

    raises:
        - FileNotFoundError when no Miragefile exists up to the filesystem
          root.
    """
    while True:
        current = os.getcwd()
        if pathlib.Path("Miragefile.py").is_file() or \
                pathlib.Path("Miragefile").is_file():
            return current
        # The original compared os.getcwd() == "/", which never matches on
        # Windows and loops forever; a path equal to its own dirname is the
        # filesystem root on every platform.
        if os.path.dirname(current) == current:
            raise FileNotFoundError(
                "Miragefile not found in any parent directory")
        os.chdir("../")
def manufacturer(self):
    """Returns the name of the manufacturer of the device.

    Args:
      self (JLinkDeviceInfo): the ``JLinkDeviceInfo`` instance

    Returns:
      Manufacturer name, or ``None`` when the field is not set.
    """
    raw = ctypes.cast(self.sManu, ctypes.c_char_p).value
    if raw is None:
        return None
    return raw.decode()
def get_cfgdict_list_subset(cfgdict_list, keys):
    r"""
    returns list of unique dictionaries only with keys specified in keys

    Args:
        cfgdict_list (list):
        keys (list):

    Returns:
        list: cfglbl_list

    CommandLine:
        python -m utool.util_gridsearch --test-get_cfgdict_list_subset

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_gridsearch import *  # NOQA
        >>> import utool as ut
        >>> # build test data
        >>> cfgdict_list = [
        ...    {'K': 3, 'dcvs_clip_max': 0.1, 'p': 0.1},
        ...    {'K': 5, 'dcvs_clip_max': 0.1, 'p': 0.1},
        ...    {'K': 5, 'dcvs_clip_max': 0.1, 'p': 0.2},
        ...    {'K': 3, 'dcvs_clip_max': 0.2, 'p': 0.1},
        ...    {'K': 5, 'dcvs_clip_max': 0.2, 'p': 0.1},
        ...    {'K': 3, 'dcvs_clip_max': 0.2, 'p': 0.1}]
        >>> keys = ['K', 'dcvs_clip_max']
        >>> # execute function
        >>> cfgdict_sublist = get_cfgdict_list_subset(cfgdict_list, keys)
        >>> # verify results
        >>> result = ut.repr4(cfgdict_sublist)
        >>> print(result)
        [
            {'K': 3, 'dcvs_clip_max': 0.1},
            {'K': 5, 'dcvs_clip_max': 0.1},
            {'K': 3, 'dcvs_clip_max': 0.2},
            {'K': 5, 'dcvs_clip_max': 0.2},
        ]
    """
    import utool as ut
    # Restrict every config dict to the requested keys.
    cfgdict_sublist_ = [ut.dict_subset(cfgdict, keys) for cfgdict in cfgdict_list]
    # Dicts are unhashable, so convert each to a tuple of key/value pairs
    # before deduplicating while preserving first-seen order.
    cfgtups_sublist_ = [tuple(ut.dict_to_keyvals(cfgdict)) for cfgdict in cfgdict_sublist_]
    cfgtups_sublist = ut.unique_ordered(cfgtups_sublist_)
    # Rebuild plain dicts from the unique tuples.
    cfgdict_sublist = list(map(dict, cfgtups_sublist))
    return cfgdict_sublist
def write_byte_data(self, address, register, value):
    """Write a byte value to a device's register.

    :param address: I2C device address (logged in hex).
    :param register: register number on the device (logged in hex).
    :param value: byte value to write (logged in binary).
    :return: whatever the underlying driver's ``write_byte_data`` returns.
    """
    LOGGER.debug("Writing byte data %s to register %s on device %s",
                 bin(value), hex(register), hex(address))
    return self.driver.write_byte_data(address, register, value)
def _generate_statistics(self, out_path, results_path):
    """Writes a statistics report for the results at `results_path` to
    `out_path`.

    Reuses an existing statistics report if one exists at `out_path`.

    :param out_path: path to output statistics report to
    :type out_path: `str`
    :param results_path: path of results to generate statistics for
    :type results_path: `str`
    """
    if os.path.exists(out_path):
        # A report already exists; reuse it rather than regenerating.
        return
    report = StatisticsReport(self._corpus, self._tokenizer, results_path)
    report.generate_statistics()
    with open(out_path, mode='w', encoding='utf-8', newline='') as fh:
        report.csv(fh)
def step1c(self):
    """turn terminal y into i if there's a vowel in stem"""
    # self.b appears to be the mutable word buffer and self.k the index of
    # its last character -- TODO confirm against the class definition.
    if self.ends(['y']) and self.vowel_in_stem():
        self.b[self.k] = 'i'
def replaceelement(oldelem, newelem):
    """Given a parent element, replace oldelem with newelem.

    No-op when ``oldelem`` has no parent.

    :param oldelem: element to be replaced (an lxml element; ``getparent()``
        is lxml-specific).
    :param newelem: element to insert at ``oldelem``'s position.
    """
    parent = oldelem.getparent()
    if parent is not None:
        # The original rescanned parent.getchildren() on every loop
        # iteration (quadratic); index() locates the child once.
        position = parent.index(oldelem)
        parent.remove(oldelem)
        parent.insert(position, newelem)
def initial_dist_from_config(cp, variable_params):
    r"""Loads a distribution for the sampler start from the given config file.

    A distribution will only be loaded if the config file has a [initial-\*]
    section(s).

    Parameters
    ----------
    cp : Config parser
        The config parser to try to load from.
    variable_params : list of str
        The variable parameters for the distribution.

    Returns
    -------
    JointDistribution or None :
        The initial distribution. If no [initial-\*] section found in the
        config file, will just return None.
    """
    # Only build an initial distribution when the config actually declares
    # [initial-*] sections; otherwise the caller falls back to the prior.
    if len(cp.get_subsections("initial")):
        logging.info("Using a different distribution for the starting points "
                     "than the prior.")
        initial_dists = distributions.read_distributions_from_config(
            cp, section="initial")
        # Constraints come from a separate [initial_constraint] section.
        constraints = distributions.read_constraints_from_config(
            cp, constraint_section="initial_constraint")
        init_dist = distributions.JointDistribution(
            variable_params, *initial_dists,
            **{"constraints": constraints})
    else:
        init_dist = None
    return init_dist
async def start(self):
    """Start the websocket server.

    When this method returns, the websocket server will be running and the
    port property of this class will have its assigned port number.

    This method should be called only once in the lifetime of the server and
    must be paired with a call to stop() to cleanly release the server's
    resources.
    """
    if self._server_task is not None:
        # Idempotent guard: a second start() is logged and ignored rather
        # than raising.
        self._logger.debug("AsyncValidatingWSServer.start() called twice, ignoring")
        return
    started_signal = self._loop.create_future()
    self._server_task = self._loop.add_task(self._run_server_task(started_signal))
    # Block until the server task signals that it is listening.
    await started_signal
    if self.port is None:
        # The server task resolves the future with the OS-assigned port.
        self.port = started_signal.result()
def del_ns(self, namespace):
    """will remove a namespace ref from the manager. either Arg is optional.

    args:
        namespace: prefix, string or Namespace() to remove
    """
    # str() normalizes Namespace() objects and other values to the
    # attribute name under which the ref was stored.
    namespace = str(namespace)
    # Removed dead local `attr_name = None` from the original -- it was
    # never read.
    if hasattr(self, namespace):
        delattr(self, namespace)
def _export_work_errors(self, work, output_file):
    """Saves errors for given work pieces into file.

    Args:
      work: instance of either AttackWorkPieces or DefenseWorkPieces
      output_file: name of the output file
    """
    # Collect the distinct error messages from completed work pieces.
    errors = {
        piece['error']
        for piece in itervalues(work.work)
        if piece['is_completed'] and piece['error'] is not None
    }
    with open(output_file, 'w') as f:
        for err in sorted(errors):
            f.write(err)
            f.write('\n')
def clear_profiling_cookies(request, response):
    """Expire any cookie that initiated profiling request."""
    if 'profile_page' not in request.COOKIES:
        return
    # max_age=0 tells the browser to drop the cookie immediately.
    response.set_cookie('profile_page', max_age=0, path=request.path)
def pattern_error(self, original, loc, value_var, check_var):
    """Construct a pattern-matching error message.

    Returns generated source code that raises _coconut_MatchError when
    ``check_var`` is falsy, embedding the original pattern line and the
    matched value for debugging.
    """
    # Recover the source line the match came from; prepare both a plain and
    # a repr-escaped wrapped version for embedding in the generated code.
    base_line = clean(self.reformat(getline(loc, original)))
    line_wrap = self.wrap_str_of(base_line)
    repr_wrap = self.wrap_str_of(ascii(base_line))
    # Emit: construct the MatchError, attach .pattern and .value, raise it.
    return ("if not " + check_var + ":\n" + openindent
            + match_err_var + ' = _coconut_MatchError("pattern-matching failed for " '
            + repr_wrap + ' " in " + _coconut.repr(_coconut.repr(' + value_var + ")))\n"
            + match_err_var + ".pattern = " + line_wrap + "\n"
            + match_err_var + ".value = " + value_var
            + "\nraise " + match_err_var + "\n" + closeindent)
def shutdown_instances(self):
    """Convenience method which shuts down all instances associated with
    this group.
    """
    # Zero out every capacity bound, then push the change to the group.
    self.min_size = self.max_size = self.desired_capacity = 0
    self.update()
def get_attribute(self, reference):
    """Return the attribute that matches the reference.

    Raise an error if the attribute cannot be found, or if there is more
    then one match.
    """
    prefix, _, name = reference.rpartition('.')
    found = None
    for candidate in self._contents:
        if name != candidate.name:
            continue
        if prefix and prefix != candidate.prefix:
            continue
        if found is not None:
            # A second hit means the reference is ambiguous.
            raise AttributeReferenceError(
                'Ambiguous attribute reference: {}.'.format(candidate.name))
        found = candidate
    if found is None:
        raise AttributeReferenceError(
            'Attribute does not exist: {}'.format(reference))
    return found
def _bare_name_matches(self, nodes):
    """Special optimized matcher for bare_name.

    Greedily consumes leading nodes while any alternative in self.content
    matches; returns the consumed count and the results dict with the
    matched prefix bound under self.name.
    """
    count = 0
    r = {}
    done = False
    max = len(nodes)  # NOTE(review): shadows the builtin `max`
    while not done and count < max:
        done = True
        for leaf in self.content:
            # leaf[0] is the pattern for this alternative; first match wins
            # and restarts the scan on the next node.
            if leaf[0].match(nodes[count], r):
                count += 1
                done = False
                break
    r[self.name] = nodes[:count]
    return count, r
def createDashboardOverlay(self, pchOverlayKey, pchOverlayFriendlyName):
    """Creates a dashboard overlay and returns its handle"""
    fn = self.function_table.createDashboardOverlay
    main_handle = VROverlayHandle_t()
    thumbnail_handle = VROverlayHandle_t()
    # The native call fills both handles through out-parameters.
    error = fn(pchOverlayKey, pchOverlayFriendlyName,
               byref(main_handle), byref(thumbnail_handle))
    return error, main_handle, thumbnail_handle
def warsaw_to_warsawmass(C, parameters=None, sectors=None):
    r"""Translate from the Warsaw basis to the 'Warsaw mass' basis.

    Parameters used:
    - `Vus`, `Vub`, `Vcb`, `gamma`: elements of the unitary CKM matrix
    (defined as the mismatch between left-handed quark mass matrix
    diagonalization matrices).
    """
    p = default_parameters.copy()
    if parameters is not None:
        # User-supplied values override the defaults.
        p.update(parameters)
    C_out = C.copy()
    # Coefficients carrying an up-type quark index that get rotated by the
    # left-handed up-quark rotation.
    C_rotate_u = ['uphi', 'uG', 'uW', 'uB']
    for name in C_rotate_u:
        _array = smeft_toarray(name, C)
        # NOTE(review): docstring lists `gamma` but the code reads
        # p["delta"] for the CKM phase -- confirm which name is intended.
        V = ckmutil.ckm.ckm_tree(p["Vus"], p["Vub"], p["Vcb"], p["delta"])
        UuL = V.conj().T
        _array = UuL.conj().T @ _array
        _dict = smeft_fromarray(name, _array)
        C_out.update(_dict)
    # Replace 'llphiphi' by the diagonal of its singular values (msvd).
    _array = smeft_toarray('llphiphi', C)
    _array = np.diag(ckmutil.diag.msvd(_array)[1])
    _dict = smeft_fromarray('llphiphi', _array)
    C_out.update(_dict)
    return C_out
def _CheckCollation(cursor):
    """Checks MySQL collation and warns if misconfigured.

    Only logs warnings; never raises or aborts.
    """
    cur_collation_connection = _ReadVariable("collation_connection", cursor)
    if cur_collation_connection != COLLATION:
        logging.warning("Require MySQL collation_connection of %s, got %s.",
                        COLLATION, cur_collation_connection)
    cur_collation_database = _ReadVariable("collation_database", cursor)
    if cur_collation_database != COLLATION:
        # Include the canonical CREATE DATABASE statement so operators can
        # fix the database collation.
        logging.warning(
            "Require MySQL collation_database of %s, got %s."
            " To create your database, use: %s", COLLATION,
            cur_collation_database, CREATE_DATABASE_QUERY)
def list(self):
    """List the users you have blocked.

    :return: a list of :class:`~groupy.api.blocks.Block`'s
    :rtype: :class:`list`
    """
    response = self.session.get(self.url, params={'user': self.user_id})
    return [Block(self, **block) for block in response.data['blocks']]
def ordered_list(text_array):
    """Return an ordered list from an array.

    >>> ordered_list(["first", "second", "third", "fourth"])
    '1. first\\n2. second\\n3. third\\n4. fourth'
    """
    lines = [
        (esc_format(index) + ".").ljust(3) + " " + esc_format(item)
        for index, item in enumerate(text_array, start=1)
    ]
    return "\n".join(lines)
def words(ctx, input, output):
    """Read input document, and output words."""
    log.info('chemdataextractor.read.elements')
    log.info('Reading %s' % input.name)
    doc = Document.from_file(input)
    # Only Text elements carry sentences; skip tables, figures, etc.
    text_elements = (el for el in doc.elements if isinstance(el, Text))
    for element in text_elements:
        for sentence in element.sentences:
            output.write(u' '.join(sentence.raw_tokens))
            output.write(u'\n')
def from_(cls, gsim):
    """Generate a trivial GsimLogicTree from a single GSIM instance."""
    # A single branch holding the given GSIM with full weight (1.0).
    ltbranch = N('logicTreeBranch', {'branchID': 'b1'},
                 nodes=[N('uncertaintyModel', text=str(gsim)),
                        N('uncertaintyWeight', text='1.0')])
    # Minimal tree: one branching level -> one gmpeModel branch set applying
    # to every tectonic region type ('*') -> the single branch above.
    lt = N('logicTree', {'logicTreeID': 'lt1'},
           nodes=[N('logicTreeBranchingLevel', {'branchingLevelID': 'bl1'},
                    nodes=[N('logicTreeBranchSet',
                             {'applyToTectonicRegionType': '*',
                              'branchSetID': 'bs1',
                              'uncertaintyType': 'gmpeModel'},
                             nodes=[ltbranch])])])
    return cls(repr(gsim), ['*'], ltnode=lt)
def node_mkdir(self, path=''):
    """Create a directory node.

    Does not raise any errors if dir already exists.
    """
    payload = {'kind': 'directory'}
    return self(path, data=payload, encode='json', method='put')
def _write(self, session, openFile, replaceParamFile):
    """Raster Map File Write to File Method.

    Writes the database raster as a GRASS ASCII grid when present;
    otherwise falls back to the stored raster text.  Writes nothing when
    both are None.
    """
    if self.raster is not None:
        # Convert the database raster column to GRASS ASCII format.
        converter = RasterConverter(session)
        grassAsciiGrid = converter.getAsGrassAsciiRaster(rasterFieldName='raster',
                                                         tableName=self.__tablename__,
                                                         rasterIdFieldName='id',
                                                         rasterId=self.id)
        openFile.write(grassAsciiGrid)
    elif self.rasterText is not None:
        # No database raster; write the cached raster text verbatim.
        openFile.write(self.rasterText)
def is_installed(self, pkgname):
    """Given a package name, returns whether it is installed in the environment

    :param str pkgname: The name of a package
    :return: Whether the supplied package is installed in the environment
    :rtype: bool
    """
    return any(dist.project_name == pkgname
               for dist in self.get_distributions())
def extract_user_id(self, request):
    """Extract a user id from a request object.

    Returns ``None`` when the payload lacks the configured user-id field.
    """
    payload = self.extract_payload(request)
    # The attribute name holding the user id is configurable.
    return payload.get(self.config.user_id(), None)
def _get_type_name(type_): name = repr(type_) if name.startswith("<"): name = getattr(type_, "__qualname__", getattr(type_, "__name__", "")) return name.rsplit(".", 1)[-1] or repr(type_)
Return a displayable name for the type. Args: type_: A class object. Returns: A string value describing the class name that can be used in a natural language sentence.
def sign_message(privkey_path, message, passphrase=None):
    """
    Use Crypto.Signature.PKCS1_v1_5 to sign a message. Returns the signature.
    """
    key = get_rsa_key(privkey_path, passphrase)
    log.debug('salt.crypt.sign_message: Signing message.')
    if HAS_M2:
        # M2Crypto path: compute the SHA1 digest explicitly, then sign it.
        md = EVP.MessageDigest('sha1')
        md.update(salt.utils.stringutils.to_bytes(message))
        digest = md.final()
        return key.sign(digest)
    else:
        # PyCrypto/pycryptodome path: the PKCS#1 v1.5 signer takes the SHA
        # hash object directly.
        signer = PKCS1_v1_5.new(key)
        return signer.sign(SHA.new(salt.utils.stringutils.to_bytes(message)))
def _find_spelling_errors_in_chunks(chunks,
                                    contents,
                                    valid_words_dictionary=None,
                                    technical_words_dictionary=None,
                                    user_dictionary_words=None):
    """For each chunk and a set of valid and technical words, find errors.

    Generator: yields one populated spelling-error record per error found.
    """
    for chunk in chunks:
        for error in spellcheck_region(chunk.data,
                                       valid_words_dictionary,
                                       technical_words_dictionary,
                                       user_dictionary_words):
            # Error offsets are chunk-relative; translate the column back to
            # a document-wide offset using the chunk's position.
            col_offset = _determine_character_offset(error.line_offset,
                                                     error.column_offset,
                                                     chunk.column)
            msg = _SPELLCHECK_MESSAGES[error.error_type].format(error.word)
            yield _populate_spelling_error(error.word,
                                           error.suggestions,
                                           contents,
                                           error.line_offset + chunk.line,
                                           col_offset,
                                           msg)
def metrics(self):
    """Set of metrics for this model"""
    # NOTE(review): imports are function-local -- presumably to avoid import
    # cycles or deferred dependency loading; confirm before moving them.
    from vel.metrics.loss_metric import Loss
    from vel.metrics.accuracy import Accuracy
    return [Loss(), Accuracy()]