code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def parseEvent(self, result, i):
    """Parse the i-th stop event of *result* and extract departure data.

    Returns ``[number, due, delay, planned, estimated, real_time,
    destination, mode]`` for a future departure, or ``None`` when the
    (estimated) departure time has already passed.
    """
    fmt = '%Y-%m-%dT%H:%M:%SZ'
    event = result['stopEvents'][i]
    transportation = event['transportation']
    number = transportation['number']
    destination = transportation['destination']['name']
    mode = self.get_mode(transportation['product']['class'])
    planned = datetime.strptime(event['departureTimePlanned'], fmt)
    if 'isRealtimeControlled' in event:
        real_time = 'y'
        estimated = datetime.strptime(event['departureTimeEstimated'], fmt)
    else:
        real_time = 'n'
        estimated = planned
    if estimated <= datetime.utcnow():
        return None
    due = self.get_due(estimated)
    delay = self.get_delay(planned, estimated)
    return [number, due, delay, planned, estimated, real_time, destination, mode]
Parse the current event and extract data.
def ensure_connectable(self, nailgun):
    """Ensure a nailgun client is connectable.

    Retries up to ``self._connect_attempts`` times, sleeping
    ``self.WAIT_INTERVAL_SEC`` between attempts, then re-raises the
    connection error.
    """
    attempt = 0
    while True:
        attempt += 1
        try:
            with closing(nailgun.try_connect()) as sock:
                logger.debug('Verified new ng server is connectable at {}'.format(sock.getpeername()))
                return
        except nailgun.NailgunConnectionError:
            if attempt >= self._connect_attempts:
                logger.debug('Failed to connect to ng after {} attempts'.format(self._connect_attempts))
                raise
            time.sleep(self.WAIT_INTERVAL_SEC)
Ensures that a nailgun client is connectable or raises NailgunError.
def _get_userprofile_from_registry(user, sid):
    """Look up a user's profile directory in the Windows registry.

    Fallback for when ``net user`` does not report the userprofile.

    Args:
        user (str): user name, only used in the debug message.
        sid (str): the SID to look up under ProfileList.

    Returns:
        str: profile directory.
    """
    key = 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\ProfileList\\{0}'.format(sid)
    profile_dir = __utils__['reg.read_value'](
        'HKEY_LOCAL_MACHINE', key, 'ProfileImagePath')['vdata']
    log.debug('user %s with sid=%s profile is located at "%s"', user, sid, profile_dir)
    return profile_dir
In case net user doesn't return the userprofile we can get it from the registry Args: user (str): The user name, used in debug message sid (str): The sid to lookup in the registry Returns: str: Profile directory
def number(self, p_todo):
    """Return the text ID or line number of a todo, depending on configuration."""
    use_text_ids = config().identifiers() == "text"
    return self.uid(p_todo) if use_text_ids else self.linenumber(p_todo)
Returns the line number or text ID of a todo (depending on the configuration).
def chunkreverse(integers, dtype='L'):
    """Yield integers of dtype bit-length with their bit order reversed.

    Bytes ('B'/8) use the precomputed RBYTES lookup table; wider dtypes
    reverse the zero-padded binary representation.
    """
    if dtype in ('B', 8):
        return map(RBYTES.__getitem__, integers)
    fmt = '{0:0%db}' % NBITS[dtype]
    return (int(fmt.format(value)[::-1], 2) for value in integers)
Yield integers of dtype bit-length reverting their bit-order. >>> list(chunkreverse([0b10000000, 0b11000000, 0b00000001], 'B')) [1, 3, 128] >>> list(chunkreverse([0x8000, 0xC000, 0x0001], 'H')) [1, 3, 32768]
def keygrip_curve25519(vk):
    """Compute the keygrip for a Curve25519 public key *vk*.

    The grip is derived from the curve domain parameters plus the public
    point, in libgcrypt's parameter order (p, a, b, g, n, q).
    """
    return _compute_keygrip([
        # prime p = 2^255 - 19
        ['p', util.num2bytes(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFED, size=32)],
        ['a', b'\x01\xDB\x41'],
        ['b', b'\x01'],
        # base point in uncompressed (0x04-prefixed) encoding
        ['g', util.num2bytes(0x04000000000000000000000000000000000000000000000000000000000000000920ae19a1b8a086b4e01edd2c7748d14c923d4d7e6d7c61b229e9c5a27eced3d9, size=65)],
        # group order n
        ['n', util.num2bytes(0x1000000000000000000000000000000014DEF9DEA2F79CD65812631A5CF5D3ED, size=32)],
        # the public point itself
        ['q', vk.to_bytes()],
    ])
Compute keygrip for Curve25519 public keys.
def search_uris(
    self,
    uri,
    threat_types,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """Check whether *uri* appears on any of the given threat lists.

    Args:
        uri (str): The URI to be checked for matches.
        threat_types (list): Required. The ThreatLists to search in.
        retry: retry policy; DEFAULT uses the client config, ``None``
            disables retries.
        timeout: per-attempt timeout in seconds.
        metadata: additional metadata sent with the call.

    Returns:
        A ``SearchUrisResponse`` instance.
    """
    # Wrap the transport method once, applying the configured
    # default retry/timeout; cached for subsequent calls.
    if "search_uris" not in self._inner_api_calls:
        self._inner_api_calls[
            "search_uris"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.search_uris,
            default_retry=self._method_configs["SearchUris"].retry,
            default_timeout=self._method_configs["SearchUris"].timeout,
            client_info=self._client_info,
        )
    request = webrisk_pb2.SearchUrisRequest(uri=uri, threat_types=threat_types)
    return self._inner_api_calls["search_uris"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
This method is used to check whether a URI is on a given threatList. Example: >>> from google.cloud import webrisk_v1beta1 >>> from google.cloud.webrisk_v1beta1 import enums >>> >>> client = webrisk_v1beta1.WebRiskServiceV1Beta1Client() >>> >>> # TODO: Initialize `uri`: >>> uri = '' >>> >>> # TODO: Initialize `threat_types`: >>> threat_types = [] >>> >>> response = client.search_uris(uri, threat_types) Args: uri (str): The URI to be checked for matches. threat_types (list[~google.cloud.webrisk_v1beta1.types.ThreatType]): Required. The ThreatLists to search in. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.webrisk_v1beta1.types.SearchUrisResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
def _get_triplet_value(self, graph, identity, rdf_type): value = graph.value(subject=identity, predicate=rdf_type) return value.toPython() if value is not None else value
Get a value from an RDF triple
def block_pop_back(self, timeout=10):
    """Remove and return the last element of the list.

    If no elements are available, blocks for at least ``timeout``
    seconds.  Implemented as a yield-based coroutine: first yields the
    backend pop, then (if a value arrived) the unpickled result.
    """
    value = yield self.backend_structure().block_pop_back(timeout)
    if value is not None:
        yield self.value_pickler.loads(value)
Remove the last element from the list. If no elements are available, blocks for at least ``timeout`` seconds.
def clear_queues(self, manager):
    """Release the resources associated with this instance's queues.

    :param manager: Manager() object; when falsy the queues are
        explicitly closed and joined before being dropped.
    :return: None
    """
    for queue in (q for q in (self.to_q, self.from_q) if q is not None):
        if manager:
            continue
        try:
            queue.close()
            queue.join_thread()
        except AttributeError:
            # queue type without close/join_thread support
            pass
    self.to_q = self.from_q = None
Release the resources associated to the queues of this instance :param manager: Manager() object :type manager: None | object :return: None
def functions_factory(cls, coef, domain, kind, **kwargs):
    """Build an orthogonal polynomial of the given *kind* over *domain*.

    Extra keyword arguments are accepted for interface compatibility
    but are not used here.
    """
    make_polynomial = cls._basis_polynomial_factory(kind)
    return make_polynomial(coef, domain)
Given some coefficients, return a certain kind of orthogonal polynomial defined over a specific domain.
def should_series_dispatch(left, right, op):
    """Decide whether a DataFrame op should dispatch to its Series counterpart.

    Parameters
    ----------
    left : DataFrame
    right : DataFrame
    op : binary operator

    Returns
    -------
    bool
    """
    if left._is_mixed_type or right._is_mixed_type:
        return True
    if not (len(left.columns) and len(right.columns)):
        # one side has no columns: nothing to dispatch on
        return False
    ldtype = left.dtypes.iloc[0]
    rdtype = right.dtypes.iloc[0]
    timedelta_int = (
        (is_timedelta64_dtype(ldtype) and is_integer_dtype(rdtype))
        or (is_timedelta64_dtype(rdtype) and is_integer_dtype(ldtype)))
    if timedelta_int:
        return True
    return bool(is_datetime64_dtype(ldtype) and is_object_dtype(rdtype))
Identify cases where a DataFrame operation should dispatch to its Series counterpart. Parameters ---------- left : DataFrame right : DataFrame op : binary operator Returns ------- override : bool
def get_subtree(self, tree, xpath_str):
    """Return the subtree of *tree* selected by the given XPath expression."""
    matches = tree.xpath(xpath_str, namespaces=self.namespaces)
    return matches
Return a subtree given an lxml XPath.
def fileupdate(self, data):
    """Update extra metadata fields from a dict obtained through `fileinfo`."""
    self.name = data["name"]
    meta = self.__additional
    # first matching category wins; fall back to "other"
    meta["filetype"] = next(
        (ft for ft in ("book", "image", "video", "audio", "archive") if ft in data),
        "other")
    if meta["filetype"] in ("image", "video", "audio"):
        meta["thumb"] = data.get("thumb", dict())
    meta["checksum"] = data["checksum"]
    meta["expire_time"] = data["expires"] / 1000
    meta["size"] = data["size"]
    meta["info"] = data.get(meta["filetype"], dict())
    meta["uploader"] = data["user"]
    if self.room.admin:
        # admins also see room and uploader ip
        meta["info"].update({"room": data.get("room")})
        meta["info"].update({"uploader_ip": data.get("uploader_ip")})
    self.updated = True
Method to update extra metadata fields with dict obtained through `fileinfo`
def reshape_by_blocks(x, x_shape, memory_block_size):
    """Split the length dimension of *x* into blocks of memory_block_size.

    Args:
      x: a Tensor with shape [batch, heads, length, depth].
      x_shape: static shape of x.
      memory_block_size: integer dividing length.

    Returns:
      Tensor with shape
      [batch, heads, length // memory_block_size, memory_block_size, depth].
    """
    batch, heads, length, depth = (x_shape[0], x_shape[1], x_shape[2], x_shape[3])
    blocked_shape = [batch, heads,
                     length // memory_block_size, memory_block_size, depth]
    return tf.reshape(x, blocked_shape)
Reshapes input by splitting its length over blocks of memory_block_size. Args: x: a Tensor with shape [batch, heads, length, depth] x_shape: tf.TensorShape of x. memory_block_size: Integer which divides length. Returns: Tensor with shape [batch, heads, length // memory_block_size, memory_block_size, depth].
def set_cpu_property(self, property_p, value):
    """Set the boolean value of the specified virtual CPU property.

    :param property_p: a :class:`CPUPropertyType` member to set.
    :param value: bool property value.
    :raises TypeError: when either argument has the wrong type.
    """
    if not isinstance(property_p, CPUPropertyType):
        raise TypeError("property_p can only be an instance of type CPUPropertyType")
    if not isinstance(value, bool):
        raise TypeError("value can only be an instance of type bool")
    # delegate to the underlying COM/XPCOM call
    self._call("setCPUProperty", in_p=[property_p, value])
Sets the virtual CPU boolean value of the specified property. in property_p of type :class:`CPUPropertyType` Property type to query. in value of type bool Property value. raises :class:`OleErrorInvalidarg` Invalid property.
def analysis(self, analysis_id: int) -> models.Analysis:
    """Fetch a single Analysis record by primary key (None when absent)."""
    return self.Analysis.query.get(analysis_id)
Get a single analysis.
def copy_with_new_atts(self, **attributes):
    """Return a new FmtStr with identical content but *attributes* merged into each chunk's formatting."""
    new_chunks = [Chunk(chunk.s, chunk.atts.extend(attributes))
                  for chunk in self.chunks]
    return FmtStr(*new_chunks)
Returns a new FmtStr with the same content but new formatting
def _get_hit_nearest_ref_start(self, hits): nearest_to_start = hits[0] for hit in hits[1:]: if hit.ref_coords().start < nearest_to_start.ref_coords().start: nearest_to_start = hit return nearest_to_start
Returns the hit nearest to the start of the ref sequence from the input list of hits
def load(self, model):
    """Load a pickled tagger model and sync the perceptron's class set."""
    weights, tagdict, classes, clusters = load_model(model)
    self.perceptron.weights = weights
    self.tagdict = tagdict
    self.classes = classes
    self.clusters = clusters
    self.perceptron.classes = self.classes
Load pickled model.
def preview(self):
    """Assemble the current components in the QStimulusModel and show a
    spectrogram preview in a separate window.

    Aborts (after warning the user) when the stimulus model fails
    verification.
    """
    msg = self.ui.trackview.model().verify()
    if msg:
        # surface the validation problem and abort the preview
        # (the dialog's return value was previously bound to an unused local)
        QtGui.QMessageBox.warning(self, "Bummer", 'Problem: {}.'.format(msg))
        return
    stim_signal, atten, ovld = self.ui.trackview.model().signal()
    fig = SpecWidget()
    fig.setWindowModality(2)
    fig.updateData(stim_signal, self.ui.trackview.model().samplerate())
    fig.setTitle('Stimulus Preview')
    fig.show()
    # keep a reference so the window isn't garbage-collected
    self.previewFig = fig
Assemble the current components in the QStimulusModel and generate a spectrogram plot in a separate window
def validate_request():
    """Validate the incoming Flask invoke request.

    Rejects: a body that is not JSON, query parameters, an
    'X-Amz-Log-Type' header other than 'None', and an
    'X-Amz-Invocation-Type' header other than 'RequestResponse'.

    Returns
    -------
    flask.Response
        An error response when the request is invalid, or None when all
        validations pass.
    """
    flask_request = request
    request_data = flask_request.get_data()
    if not request_data:
        # an empty body is treated as an empty JSON object
        request_data = b'{}'
    request_data = request_data.decode('utf-8')
    try:
        json.loads(request_data)
    except ValueError as json_error:
        LOG.debug("Request body was not json. Exception: %s", str(json_error))
        return LambdaErrorResponses.invalid_request_content(
            "Could not parse request body into json: No JSON object could be decoded")
    if flask_request.args:
        LOG.debug("Query parameters are in the request but not supported")
        return LambdaErrorResponses.invalid_request_content("Query Parameters are not supported")
    # headers are matched case-insensitively, per HTTP semantics
    request_headers = CaseInsensitiveDict(flask_request.headers)
    log_type = request_headers.get('X-Amz-Log-Type', 'None')
    if log_type != 'None':
        LOG.debug("log-type: %s is not supported. None is only supported.", log_type)
        return LambdaErrorResponses.not_implemented_locally(
            "log-type: {} is not supported. None is only supported.".format(log_type))
    invocation_type = request_headers.get('X-Amz-Invocation-Type', 'RequestResponse')
    if invocation_type != 'RequestResponse':
        LOG.warning("invocation-type: %s is not supported. RequestResponse is only supported.", invocation_type)
        return LambdaErrorResponses.not_implemented_locally(
            "invocation-type: {} is not supported. RequestResponse is only supported.".format(invocation_type))
Validates the incoming request The following are invalid 1. The Request data is not json serializable 2. Query Parameters are sent to the endpoint 3. The Request Content-Type is not application/json 4. 'X-Amz-Log-Type' header is not 'None' 5. 'X-Amz-Invocation-Type' header is not 'RequestResponse' Returns ------- flask.Response If the request is not valid a flask Response is returned None: If the request passes all validation
def levels(self):
    """Return nodes grouped by level.

    The outer list is indexed by level relative to this node; each inner
    list holds that level's nodes in DFS order.
    """
    grouped = [[] for _ in range(self.height)]
    for node in self.subtree:
        grouped[node.level - self.level].append(node)
    return grouped
Return a list of lists of nodes. The outer list is indexed by the level. Each inner list contains the nodes at that level, in DFS order. :rtype: list of lists of :class:`~aeneas.tree.Tree`
def writeBib(self, fname = None, maxStringLength = 1000, wosMode = False, reducedOutput = False, niceIDs = True):
    """Write a bibTeX entry to *fname* for each Record in the collection.

    The output is meant for automatic systems: special characters are
    not escaped for LaTeX and many fields are superfluous.

    fname: output file name; defaults to the collection name + '.bib'.
    maxStringLength: max continuous string length in an entry.
    wosMode: emit unprocessed, double-curly-brace WOS-style entries.
    reducedOutput: restrict the emitted tags to a core subset.
    niceIDs: derive entry IDs from author/date/title instead of UT.
    """
    if fname:
        f = open(fname, mode = 'w', encoding = 'utf-8')
    else:
        # derive a length-capped file name from the collection's name
        f = open(self.name[:200] + '.bib', mode = 'w', encoding = 'utf-8')
    f.write("%This file was generated by the metaknowledge Python package.\n%The contents have been automatically generated and are likely to not work with\n%LaTeX without some human intervention. This file is meant for other automatic\n%systems and not to be used directly for making citations\n")
    for R in self:
        try:
            f.write('\n\n')
            f.write(R.bibString(maxLength = maxStringLength, WOSMode = wosMode, restrictedOutput = reducedOutput, niceID = niceIDs))
        except BadWOSRecord:
            # silently skip records that cannot be rendered
            pass
        except AttributeError:
            raise RecordsNotCompatible("The Record '{}', with ID '{}' does not support writing to bibtext files.".format(R, R.id))
    f.close()
Writes a bibTex entry to _fname_ for each `Record` in the collection. If the Record is of a journal article (PT J) the bibtext type is set to `'article'`, otherwise it is set to `'misc'`. The ID of the entry is the WOS number and all the Record's fields are given as entries with their long names. **Note** This is not meant to be used directly with LaTeX none of the special characters have been escaped and there are a large number of unnecessary fields provided. _niceID_ and _maxLength_ have been provided to make conversions easier only. **Note** Record entries that are lists have their values separated with the string `' and '`, as this is the way bibTex understands # Parameters _fname_ : `optional [str]` > Default `None`, The name of the file to be written. If not given one will be derived from the collection and the file will be written to . _maxStringLength_ : `optional [int]` > Default 1000, The max length for a continuous string. Most bibTex implementation only allow string to be up to 1000 characters ([source](https://www.cs.arizona.edu/~collberg/Teaching/07.231/BibTeX/bibtex.html)), this splits them up into substrings then uses the native string concatenation (the `'#'` character) to allow for longer strings _WOSMode_ : `optional [bool]` > Default `False`, if `True` the data produced will be unprocessed and use double curly braces. This is the style WOS produces bib files in and mostly macthes that. _restrictedOutput_ : `optional [bool]` > Default `False`, if `True` the tags output will be limited to: `'AF'`, `'BF'`, `'ED'`, `'TI'`, `'SO'`, `'LA'`, `'NR'`, `'TC'`, `'Z9'`, `'PU'`, `'J9'`, `'PY'`, `'PD'`, `'VL'`, `'IS'`, `'SU'`, `'PG'`, `'DI'`, `'D2'`, and `'UT'` _niceID_ : `optional [bool]` > Default `True`, if `True` the IDs used will be derived from the authors, publishing date and title, if `False` it will be the UT tag
def set_row_min_height(self, y: int, min_height: int):
    """Set a minimum height for blocks in the row with coordinate *y*.

    :raises IndexError: when *y* is negative.
    """
    if y < 0:
        raise IndexError('y < 0')
    self._min_heights[y] = min_height
Sets a minimum height for blocks in the row with coordinate y.
def _get_context(center_idx, sentence_boundaries, window_size,
                 random_window_size, seed):
    """Compute the context indices around a center word within its sentence.

    Seeding with ``seed + center_idx`` makes the (optionally random)
    window reproducible per position.  Returns None when the window is
    empty on both sides.
    """
    random.seed(seed + center_idx)
    sentence_index = np.searchsorted(sentence_boundaries, center_idx)
    sentence_start, sentence_end = _get_sentence_start_end(
        sentence_boundaries, sentence_index)
    if random_window_size:
        window_size = random.randint(1, window_size)
    start_idx = max(sentence_start, center_idx - window_size)
    end_idx = min(sentence_end, center_idx + window_size + 1)
    has_left = start_idx != center_idx
    has_right = center_idx + 1 != end_idx
    if has_left and has_right:
        return np.concatenate((np.arange(start_idx, center_idx),
                               np.arange(center_idx + 1, end_idx)))
    if has_left:
        return np.arange(start_idx, center_idx)
    if has_right:
        return np.arange(center_idx + 1, end_idx)
    return None
Compute the context with respect to a center word in a sentence. Takes an numpy array of sentences boundaries.
def _init_update_po_files(self, domains):
    """Update existing `.po` files, or initialize missing ones, for every
    configured language/domain pair (skipping each domain's default language)."""
    for language in settings.TRANSLATIONS:
        for domain, options in domains.items():
            if language == options['default']:
                continue
            pot = options['pot']
            if os.path.isfile(_po_path(language, domain)):
                self._update_po_file(language, domain, pot)
            else:
                self._init_po_file(language, domain, pot)
Update or initialize the `.po` translation files
def get_data(self, size=None, addr=None):
    """Read data from the incoming stream memory.

    :param size: number of bytes to read (defaults to the whole memory).
    :param addr: offset to read from (defaults to ``self._mem_bytes``).
    :raises ValueError: when *size* exceeds the memory size.
    """
    if addr is None:
        addr = self._mem_bytes
    if size and self._mem_bytes < size:
        raise ValueError('Size is too big')
    base = self._conf['base_addr'] + self._spi_mem_offset + addr
    length = self._mem_bytes if size is None else size
    return self._intf.read(base, length)
Gets data for incoming stream
def reload(self):
    """Re-read the notmuch and alot configuration files from disk."""
    notmuch_path = self._notmuchconfig.filename
    alot_path = self._config.filename
    self.read_notmuch_config(notmuch_path)
    self.read_config(alot_path)
Reload notmuch and alot config files
def inve(env, command, *args, **kwargs):
    """Run *command* inside the virtual environment *env*.

    Extra keyword arguments are passed to ``subprocess.check_call()``.
    Returns 2 when the command cannot be found.
    """
    with temp_environ():
        os.environ['VIRTUAL_ENV'] = str(workon_home / env)
        os.environ['PATH'] = compute_path(env)
        unsetenv('PYTHONHOME')
        unsetenv('__PYVENV_LAUNCHER__')
        try:
            return check_call([command] + list(args), shell=windows, **kwargs)
        except OSError as exc:
            if exc.errno != 2:
                raise
            err('Unable to find', command)
            return 2
Run a command in the given virtual environment. Pass additional keyword arguments to ``subprocess.check_call()``.
def exclude_items(items, any_all=any, ignore_case=False, normalize_values=False, **kwargs):
    """Exclude items whose metadata matches the given filters.

    Without filter kwargs every item is yielded unchanged; otherwise
    items matched by ``_match_item`` are dropped.
    """
    if not kwargs:
        return iter(items)
    match = functools.partial(
        _match_item,
        any_all=any_all,
        ignore_case=ignore_case,
        normalize_values=normalize_values,
        **kwargs
    )
    return filterfalse(match, items)
Exclude items by matching metadata. Note: Metadata values are lowercased when ``normalized_values`` is ``True``, so ``ignore_case`` is automatically set to ``True``. Parameters: items (list): A list of item dicts or filepaths. any_all (callable): A callable to determine if any or all filters must match to exclude items. Expected values :obj:`any` (default) or :obj:`all`. ignore_case (bool): Perform case-insensitive matching. Default: ``False`` normalize_values (bool): Normalize metadata values to remove common differences between sources. Default: ``False`` kwargs (list): Lists of values to match the given metadata field. Yields: dict: The next item to be included. Example: >>> from google_music_utils import exclude_items >>> list(exclude_items(song_list, any_all=all, ignore_case=True, normalize_values=True, artist=['Beck'], album=['Golden Feelings']))
def empirical_rate(data, sigma=3.0):
    """Smooth count data with a Gaussian kernel to get an empirical rate.

    A small constant (0.001) is added so the rate is strictly positive.

    :param data: count array, smoothed along axis 0.
    :param sigma: Gaussian kernel width.
    """
    # scipy.ndimage.filters is a deprecated alias; import from scipy.ndimage.
    from scipy.ndimage import gaussian_filter1d
    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    return 0.001 + gaussian_filter1d(data.astype(float), sigma, axis=0)
Smooth count data to get an empirical rate
def try_url(url_name):
    """Reverse *url_name*, returning '' when it cannot be resolved.

    Mimics Django's ``url`` template tag but fails silently — useful in
    admin templates whose url names don't resolve while admin tests run.
    """
    from warnings import warn
    warn("try_url is deprecated, use the url tag with the 'as' arg instead.")
    try:
        return reverse(url_name)
    except NoReverseMatch:
        return ""
Mimics Django's ``url`` template tag but fails silently. Used for url names in admin templates as these won't resolve when admin tests are running.
def _depth_image_callback(self, image_msg):
    """Depth-image topic callback: convert the message and cache it.

    Keeps ``self._cur_depth_im`` up to date with the latest frame.
    """
    encoding = image_msg.encoding
    try:
        depth_arr = self._bridge.imgmsg_to_cv2(image_msg, encoding)
        # removed a leftover `import pdb; pdb.set_trace()` debugger breakpoint
    except CvBridgeError as e:
        rospy.logerr(e)
        # conversion failed: keep the previous depth image instead of
        # crashing on an unbound depth_arr below
        return
    depth = np.array(depth_arr * MM_TO_METERS, np.float32)
    self._cur_depth_im = DepthImage(depth, self._frame)
subscribe to depth image topic and keep it up to date
def get_annotations(cls, __fn):
    """Return the ``__notes__`` annotations attached to a callable.

    Bound methods are unwrapped to their underlying function.

    :raises AttributeError: when the callable carries no annotations.
    """
    fn = getattr(__fn, '__func__', __fn)
    if hasattr(fn, '__notes__'):
        return fn.__notes__
    raise AttributeError('{!r} does not have annotations'.format(fn))
Get the annotations of a given callable.
def finish(self, status):
    """Mark this pane finished and move remaining tasks to the completed view.

    :param status: exit status; falsy means success (green title),
        truthy means failure (red title).
    """
    retox_log.info("Completing %s with status %s" % (self.name, status))
    colour = Screen.COLOUR_RED if status else Screen.COLOUR_GREEN
    self.palette['title'] = (Screen.COLOUR_WHITE, Screen.A_BOLD, colour)
    # iterate over a snapshot since we mutate the options list
    for item in list(self._task_view.options):
        self._task_view.options.remove(item)
        self._completed_view.options.append(item)
    self.refresh()
Move laggard tasks over to the completed view. :param status: The virtualenv run status (falsy on success) :type status: ``str``
def view_plugins(category=None):
    """Return a view of the loaded plugin names and descriptions.

    Parameters
    ----------
    category : None or str
        When given, restrict the view to a single plugin category;
        parsers additionally report their file regex.
    """
    if category is None:
        return {cat: {name: klass.plugin_descript
                      for name, klass in plugins.items()}
                for cat, plugins in _all_plugins.items()}
    plugins = _all_plugins[category]
    if category == 'parsers':
        return {name: {"descript": klass.plugin_descript,
                       "regex": klass.file_regex}
                for name, klass in plugins.items()}
    return {name: klass.plugin_descript for name, klass in plugins.items()}
return a view of the loaded plugin names and descriptions Parameters ---------- category : None or str if str, apply for single plugin category Examples -------- >>> from pprint import pprint >>> pprint(view_plugins()) {'decoders': {}, 'encoders': {}, 'parsers': {}} >>> class DecoderPlugin(object): ... plugin_name = 'example' ... plugin_descript = 'a decoder for dicts containing _example_ key' ... dict_signature = ('_example_',) ... >>> errors = load_plugin_classes([DecoderPlugin]) >>> pprint(view_plugins()) {'decoders': {'example': 'a decoder for dicts containing _example_ key'}, 'encoders': {}, 'parsers': {}} >>> view_plugins('decoders') {'example': 'a decoder for dicts containing _example_ key'} >>> unload_all_plugins()
async def pop_log(self):
    """Pop one entry from the log queue, raising any queued error."""
    self._check_receive_loop()
    entry = self.log_queue.get()
    self._check_error(entry)
    return entry
Get one log from the log queue.
def calc_drm(skydir, ltc, event_class, event_types, egy_bins, cth_bins,
             nbin=64):
    """Calculate the detector response (energy dispersion) matrix.

    Energy bins are refined so there are ~128 edges per decade before
    averaging the dispersion, then summed back to the requested binning.
    """
    npts = int(np.ceil(128. / bins_per_dec(egy_bins)))
    egy_bins = np.exp(utils.split_bin_edges(np.log(egy_bins), npts))
    # np.linspace requires an integer sample count; the original float
    # expression (nbin * 5.5 + 1) raises TypeError on modern NumPy.
    etrue_bins = 10 ** np.linspace(1.0, 6.5, int(nbin * 5.5) + 1)
    egy = 10 ** utils.edge_to_center(np.log10(egy_bins))
    egy_width = utils.edge_to_width(egy_bins)
    etrue = 10 ** utils.edge_to_center(np.log10(etrue_bins))
    edisp = create_avg_edisp(skydir, ltc, event_class, event_types,
                             egy, etrue, cth_bins)
    # weight by measured-energy bin width, then rebin to requested edges
    edisp = edisp * egy_width[:, None, None]
    edisp = sum_bins(edisp, 0, npts)
    return edisp
Calculate the detector response matrix.
async def get_config(self):
    """Return the configuration settings for this model.

    :returns: A ``dict`` mapping keys to `ConfigValue` instances, which
        have `source` and `value` attributes.
    """
    config_facade = client.ModelConfigFacade.from_connection(
        self.connection()
    )
    result = await config_facade.ModelGet()
    config = result.config
    for key, value in config.items():
        # wrap the raw json values so callers get .source/.value access
        config[key] = ConfigValue.from_json(value)
    return config
Return the configuration settings for this model. :returns: A ``dict`` mapping keys to `ConfigValue` instances, which have `source` and `value` attributes.
def create_snapshot(self, volume_id_or_uri, snapshot, timeout=-1):
    """Create a snapshot for the specified volume.

    Args:
        volume_id_or_uri: either the volume ID or the volume URI.
        snapshot (dict): object to create.
        timeout: seconds to wait for task completion; -1 waits
            indefinitely (OneView is not aborted on timeout).

    Returns:
        dict: storage volume snapshot.
    """
    snapshot_uri = self.__build_volume_snapshot_uri(volume_id_or_uri)
    return self._client.create(snapshot,
                               uri=snapshot_uri,
                               timeout=timeout,
                               default_values=self.DEFAULT_VALUES_SNAPSHOT)
Creates a snapshot for the specified volume. Args: volume_id_or_uri: Can be either the volume ID or the volume URI. snapshot (dict): Object to create. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, just stops waiting for its completion. Returns: dict: Storage volume.
def undo(scm, verbose, fake, hard):
    """Remove the last commit from history (optionally faked and/or verbose)."""
    scm.fake = fake
    # faking implies verbose output
    scm.verbose = fake or verbose
    scm.repo_check()
    status_log(scm.undo, 'Last commit removed from history.', hard)
Removes the last commit from history.
def current(self):
    """Return the current user.

    Resolution order: the no-request-context stack, the request
    context's user stack, then the configured user loader.
    """
    if not has_request_context():
        return self.no_req_ctx_user_stack.top
    user_stack = getattr(_request_ctx_stack.top, 'user_stack', None)
    if user_stack and user_stack.top:
        return user_stack.top
    return _get_user()
Returns the current user
def verify_pubkey_sig(self, message, sig):
    """Verify the master's signature over its public key.

    Wraps the verify_signature method with additional checks.

    :rtype: bool
    :return: success or failure of public key verification
    """
    if self.opts['master_sign_key_name']:
        path = os.path.join(self.opts['pki_dir'],
                            self.opts['master_sign_key_name'] + '.pub')
        if os.path.isfile(path):
            # the signature arrives base64-encoded over the wire
            res = verify_signature(path, message, binascii.a2b_base64(sig))
        else:
            log.error(
                'Verification public key %s does not exist. You need to '
                'copy it from the master to the minions pki directory',
                os.path.basename(path)
            )
            return False
        if res:
            log.debug(
                'Successfully verified signature of master public key '
                'with verification public key %s',
                self.opts['master_sign_key_name'] + '.pub'
            )
            return True
        else:
            log.debug('Failed to verify signature of public key')
            return False
    else:
        log.error(
            'Failed to verify the signature of the message because the '
            'verification key-pairs name is not defined. Please make '
            'sure that master_sign_key_name is defined.'
        )
        return False
Wraps the verify_signature method so we have additional checks. :rtype: bool :return: Success or failure of public key verification
def get(self, *args, **kwargs):
    """Tornado GET endpoint for reporting status.

    Positional/keyword args from the route are accepted but unused.
    """
    code = self._status_response_code()
    body = self._status_response()
    self.set_status(code)
    self.write(body)
Tornado RequestHandler GET request endpoint for reporting status :param list args: positional args :param dict kwargs: keyword args
def destinations(stop):
    """Print final-destination information for *stop* as pretty JSON."""
    from pyruter.api import Departures

    async def get_destinations():
        async with aiohttp.ClientSession() as session:
            data = Departures(LOOP, stop, session=session)
            result = await data.get_final_destination()
            # ensure_ascii=False keeps Norwegian stop names readable
            print(json.dumps(result, indent=4, sort_keys=True, ensure_ascii=False))

    LOOP.run_until_complete(get_destinations())
Get destination information.
def _parse_alt_url(html_chunk):
    """Parse a book URL from an alternative location.

    Args:
        html_chunk (obj): HTMLElement containing a slice of the page
            with details.

    Returns:
        str: the book's URL, or None when no suitable link is found.
    """
    url_list = html_chunk.find("a", fn=has_param("href"))
    # Materialize into lists: on Python 3 map/filter return lazy
    # iterators, which are always truthy (breaking the emptiness check)
    # and do not support indexing.
    url_list = [link.params["href"] for link in url_list]
    url_list = [url for url in url_list if not url.startswith("autori/")]
    if not url_list:
        return None
    return normalize_url(BASE_URL, url_list[0])
Parse URL from alternative location if not found where it should be. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: str: Book's URL.
def wo_resp(self, resp):
    """Attach serialized payload data (when present) to *resp* and emit it
    as JSON; can be overridden for other response styles."""
    if self._data is not None:
        resp['res'] = self.to_str(self._data)
    return self.wo_json(resp)
Build the JSON response, attaching serialized data when present; can be overridden to produce responses in another style.
def on_open(self):
    """Show an open-file dialog and open the selected file if accepted."""
    # getOpenFileName returns (path, selected_filter); the second value
    # was previously bound to a name shadowing the builtin ``filter``.
    filename, selected_filter = QtWidgets.QFileDialog.getOpenFileName(
        self, _('Open'))
    if filename:
        self.open_file(filename)
Shows an open file dialog and open the file if the dialog was accepted.
def is_vector(inp):
    """Return True if *inp* can be interpreted as a 'true' vector.

    Only dimensions are checked (ndim == 1, or ndim == 2 with a
    singleton axis); the dtype is not required to be numeric.
    """
    arr = np.asarray(inp)
    if arr.ndim == 1:
        return True
    return arr.ndim == 2 and 1 in arr.shape
Returns true if the input can be interpreted as a 'true' vector Note ---- Does only check dimensions, not if type is numeric Parameters ---------- inp : numpy.ndarray or something that can be converted into ndarray Returns ------- Boolean True for vectors: ndim = 1 or ndim = 2 and shape of one axis = 1 False for all other arrays
def local():
    """Load the local requirements file and register each package version."""
    logger.info("Loading requirements from local file.")
    with open(REQUIREMENTS_FILE, 'r') as req_file:
        for requirement in parse(req_file):
            logger.debug("Creating new package: %r", requirement)
            create_package_version(requirement)
Load local requirements file.
def from_df(cls, df):
    """Convert a pandas DataFrame into a Table, one column at a time.

    (An unused ``labels`` local binding was removed.)
    """
    table = cls()
    for label in df.columns:
        table.append_column(label, df[label])
    return table
Convert a Pandas DataFrame into a Table.
def default(self, line):
    """Dispatch *line* to an argparse-defined command when it matches one,
    otherwise fall back to ``cmd.Cmd.default``.

    Overrides ``default`` to get access to any argparse commands we have
    specified.
    """
    if any((line.startswith(x) for x in self.argparse_names())):
        try:
            args = self.argparser.parse_args(shlex.split(line))
        except Exception:
            # NOTE(review): argparse errors normally raise SystemExit,
            # which is NOT an Exception subclass — confirm the parser is
            # configured to raise instead of exiting, or this swallow
            # never triggers.
            pass
        else:
            args.func(args)
    else:
        cmd.Cmd.default(self, line)
Overriding default to get access to any argparse commands we have specified.
def read_function(data, window, ij, g_args):
    """Threshold band 0: values above the band mean become the band max, the rest 0."""
    band = data[0]
    above_mean = band > numpy.mean(band)
    return above_mean.astype(band.dtype) * band.max()
Takes an array, and sets any value above the mean to the max, the rest to 0
async def generate_license(self, title, avatar, badges=None, widgets=None):
    """Generate a license image via the weeb.sh auto-image API (coroutine).

    title: str — title of the license.
    avatar: str — direct http/s URL to an image.
    badges: optional list of 1-3 direct image URLs.
    widgets: optional list of 1-3 strings to fill the three boxes.
    Returns the raw image data.
    :raises TypeError: on wrongly-typed arguments.
    :raises Exception: with the API message on non-200 responses.
    """
    if not isinstance(title, str):
        raise TypeError("type of 'title' must be str.")
    if not isinstance(avatar, str):
        raise TypeError("type of 'avatar' must be str.")
    if badges and not isinstance(badges, list):
        raise TypeError("type of 'badges' must be list.")
    if widgets and not isinstance(widgets, list):
        raise TypeError("type of 'widgets' must be list.")
    data = {"title": title, "avatar": avatar}
    # lists longer than three entries are silently dropped, not truncated
    if badges and len(badges) <= 3:
        data['badges'] = badges
    if widgets and len(widgets) <= 3:
        data['widgets'] = widgets
    async with aiohttp.ClientSession() as session:
        async with session.post("https://api.weeb.sh/auto-image/license", headers=self.__headers, data=data) as resp:
            if resp.status == 200:
                return await resp.read()
            else:
                raise Exception((await resp.json())['message'])
Generate a license. This function is a coroutine. Parameters: title: str - title of the license avatar: str - http/s url pointing to an image, has to have proper headers and be a direct link to an image badges: list - list of 1-3 direct image urls. Same requirements as avatar (optional) widgets: list - list of 1-3 strings to fill the three boxes with (optional) Return Type: image data
def write_calculations_to_csv(funcs, states, columns, path, headers, out_name, metaids=[], extension=".xls"):
    """Apply each function to its state/column data and write one output
    column per header to a tab-separated file.

    funcs/states/columns may be single values (broadcast to every
    header) or lists with the same length as *headers*.  The first
    output column is the experiment ID.

    :param path: path to the ProCoDA metafile (tab-delimited).
    :param out_name: output file name (written tab-separated).
    :param metaids: experiment IDs to analyze from the metafile.
    :param extension: tab-delimited data file extension.
    :return: the resulting pandas DataFrame (also written to out_name).

    NOTE(review): ``metaids=[]`` is a mutable default — apparently safe
    here since it is only read, but confirm read_state_with_metafile
    does not mutate it.  ``ids`` from the last loop iteration is reused
    when building the DataFrame — presumably identical across
    iterations; verify.
    """
    # broadcast scalar arguments so each header has its own func/state/column
    if not isinstance(funcs, list):
        funcs = [funcs] * len(headers)
    if not isinstance(states, list):
        states = [states] * len(headers)
    if not isinstance(columns, list):
        columns = [columns] * len(headers)
    data_agg = []
    for i in range(len(headers)):
        ids, data = read_state_with_metafile(funcs[i], states[i], columns[i], path, metaids, extension)
        data_agg = np.append(data_agg, [data])
    output = pd.DataFrame(data=np.vstack((ids, data_agg)).T, columns=["ID"]+headers)
    output.to_csv(out_name, sep='\t')
    return output
Writes each output of the given functions on the given states and data columns to a new column in the specified output file. Note: Column 0 is time. The first data column is column 1. :param funcs: A function or list of functions which will be applied in order to the data. If only one function is given it is applied to all the states/columns :type funcs: function or function list :param states: The state ID numbers for which data should be extracted. List should be in order of calculation or if only one state is given then it will be used for all the calculations :type states: string or string list :param columns: The index of a column, the header of a column, a list of indexes, OR a list of headers of the column(s) that you want to apply calculations to :type columns: int, string, int list, or string list :param path: Path to your ProCoDA metafile (must be tab-delimited) :type path: string :param headers: List of the desired header for each calculation, in order :type headers: string list :param out_name: Desired name for the output file. Can include a relative path :type out_name: string :param metaids: A list of the experiment IDs you'd like to analyze from the metafile :type metaids: string list, optional :param extension: The file extension of the tab delimited file. Defaults to ".xls" if no argument is passed in :type extension: string, optional :requires: funcs, states, columns, and headers are all of the same length if they are lists. Some being lists and some single values are okay. :return: out_name.csv (CVS file) - A CSV file with the each column being a new calcuation and each row being a new experiment on which the calcuations were performed :return: output (Pandas.DataFrame)- Pandas DataFrame holding the same data that was written to the output file
def process_request(self, request):
    """Middleware entry point, called on all incoming requests.

    Returns None (middleware ignored) when the resource isn't
    protected or access is allowed; otherwise returns the
    deny_access failure response.
    """
    if not self.is_resource_protected(request):
        return None
    if self.deny_access_condition(request):
        return self.deny_access(request)
    return None
The actual middleware method, called on all incoming requests. This default implementation will ignore the middleware (return None) if the conditions specified in is_resource_protected aren't met. If they are, it then tests to see if the user should be denied access via the denied_access_condition method, and calls deny_access (which implements failure behaviour) if so.
def user_pk_to_url_str(user):
    """Serialize the user's primary key as a URL-safe string.

    UUID keys use their hex form; integer keys are base36-encoded;
    anything else is stringified.
    """
    User = get_user_model()
    if issubclass(type(User._meta.pk), models.UUIDField):
        pk = user.pk
        return pk if isinstance(pk, six.string_types) else pk.hex
    pk = user.pk
    if isinstance(pk, six.integer_types):
        pk = int_to_base36(user.pk)
    return str(pk)
This should return a string.
def compare_directory(self, directory):
    """Return True when *directory* is NOT excluded by the folder-exclude pattern.

    When full-path matching is enabled the separator is appended before
    matching.
    """
    candidate = directory + self.sep if self.dir_pathname else directory
    return not self.folder_exclude_check.match(candidate)
Compare folder.
def place_notes(self, notes, duration):
    """Place notes on the current beat.

    Notes can be a string, a Note, a list of strings/Notes, or a
    NoteContainer.  Returns True if successful, False when the Bar has
    no room left for a note of the given duration.

    NOTE(review): the published docstring promises a MeterFormatError for
    invalid durations, but no validation is performed here -- confirm.
    """
    # Normalize the input to a NoteContainer.  Anything already exposing
    # a .notes container is used as-is; a Note (has .name), a string, or
    # a list is wrapped.
    if hasattr(notes, 'notes'):
        pass
    elif hasattr(notes, 'name'):
        notes = NoteContainer(notes)
    elif isinstance(notes, (str, list)):
        # Fix: isinstance instead of `type(x) == str/list`, so str/list
        # subclasses are also accepted.
        notes = NoteContainer(notes)
    # A note of length `duration` occupies 1/duration of the bar;
    # length == 0.0 means the bar is unbounded.
    if self.current_beat + 1.0 / duration <= self.length or self.length == 0.0:
        self.bar.append([self.current_beat, duration, notes])
        self.current_beat += 1.0 / duration
        return True
    return False
Place the notes on the current_beat. Notes can be strings, Notes, list of strings, list of Notes or a NoteContainer. Raise a MeterFormatError if the duration is not valid. Return True if succesful, False otherwise (ie. the Bar hasn't got enough room for a note of that duration).
def update_status(self):
    """Update status information in the tkinter window.

    Polls the router once, refreshes displayed values and meters, then
    reschedules itself to run again in one second.
    """
    try:
        self.update_connection_status()
        self.max_stream_rate.set(self.get_stream_rate_str())
        self.ip.set(self.status.external_ip)
        self.uptime.set(self.status.str_uptime)
        upstream, downstream = self.status.transmission_rate
    except IOError:
        # Router unreachable or responded badly: skip this refresh cycle.
        pass
    else:
        # Meters show the current rate as a fraction of the link maximum;
        # guard against division by zero when the maximum is unknown.
        if self.max_downstream > 0:
            self.in_meter.set_fraction(
                1.0 * downstream / self.max_downstream)
        if self.max_upstream > 0:
            self.out_meter.set_fraction(1.0 * upstream / self.max_upstream)
        self.update_traffic_info()
    # Re-poll in 1000 ms regardless of success.
    self.after(1000, self.update_status)
Update status informations in tkinter window.
def local_2d_halo_exchange(k, v, num_h_blocks, h_dim, num_w_blocks, w_dim, mask_right):
    """Halo exchange for keys and values for Local 2D attention.

    For each blocked dimension (height then width), edge slices from
    neighboring blocks are pulled in via mtf halo exchange; when the
    dimension is not split across blocks, the tensors are padded instead.
    With mask_right=True only the left/top halo is fetched (the padding
    on the right side is None), consistent with causal masking.

    NOTE(review): halo_size is set to the full block dimension size
    (h_dim.size / w_dim.size) rather than a smaller halo width -- confirm
    this is intended.
    """
    for blocks_dim, block_size_dim, halo_size in [
        (num_h_blocks, h_dim, h_dim.size),
        (num_w_blocks, w_dim, w_dim.size)]:
        if halo_size > 0:
            if blocks_dim is not None:
                # Dimension is split across blocks: exchange real neighbor data.
                if mask_right:
                    k = mtf.left_halo_exchange(k, blocks_dim, block_size_dim, halo_size)
                    v = mtf.left_halo_exchange(v, blocks_dim, block_size_dim, halo_size)
                else:
                    k = mtf.halo_exchange(k, blocks_dim, block_size_dim, halo_size)
                    v = mtf.halo_exchange(v, blocks_dim, block_size_dim, halo_size)
            else:
                # Unblocked dimension: emulate the halo with padding only.
                if mask_right:
                    k = mtf.pad(k, [halo_size, None], block_size_dim.name)
                    v = mtf.pad(v, [halo_size, None], block_size_dim.name)
                else:
                    k = mtf.pad(k, [halo_size, halo_size], block_size_dim.name)
                    v = mtf.pad(v, [halo_size, halo_size], block_size_dim.name)
    return k, v
Halo exchange for keys and values for Local 2D attention.
def geometry(obj):
    """Run *obj* through ``vtkGeometryFilter`` and return the filtered output."""
    geo_filter = vtk.vtkGeometryFilter()
    geo_filter.SetInputData(obj)
    geo_filter.Update()
    return geo_filter.GetOutput()
Apply ``vtkGeometryFilter``.
def get_value(self, model, default=None):
    """Read this field's value from *model*, falling back to *default*.

    The default (when given) is passed through the field's converter
    before use; a stored value of None triggers the fallback.
    """
    if default is not None:
        default = self._converter(default)
    current = getattr(model, self.storage_name)
    if current is None:
        return default
    return current
Return field's value. :param DomainModel model: :param object default: :rtype object:
def _runhook(ui, repo, hooktype, filename, kwargs):
    """Run the hook in `filename` and return its result.

    .py files are loaded as Mercurial extensions and the function named
    after the hook type (with '-' mapped to '_') is invoked as an
    in-process Python hook; .sh files are executed as external hooks.
    Any other extension is ignored and False is returned.
    """
    # Synthesized hook name, e.g. "commit.autohooks.myhook.py".
    hname = hooktype + ".autohooks." + os.path.basename(filename)
    if filename.lower().endswith(".py"):
        try:
            mod = mercurial.extensions.loadpath(filename, "hghook.%s" % hname)
        except Exception:
            # Report which hook failed to load, then propagate.
            ui.write(_("loading %s hook failed:\n") % hname)
            raise
        return mercurial.hook._pythonhook(ui, repo, hooktype, hname, getattr(mod, hooktype.replace("-", "_")), kwargs, True)
    elif filename.lower().endswith(".sh"):
        return mercurial.hook._exthook(ui, repo, hname, filename, kwargs, True)
    return False
Run the hook in `filename` and return its result.
def mixed_use_of_local_and_run(self):
    """Run command truly locally, and over SSH via localhost.

    Integration test: Connection.local() must execute without opening an
    SSH connection, while a subsequent Connection.run() connects and
    yields the same output.
    """
    cxn = Connection("localhost")
    result = cxn.local("echo foo", hide=True)
    assert result.stdout == "foo\n"
    # local() must not have triggered an SSH connection.
    assert not cxn.is_connected
    result = cxn.run("echo foo", hide=True)
    assert cxn.is_connected
    assert result.stdout == "foo\n"
Run command truly locally, and over SSH via localhost
def PythonTypeFromMetricValueType(value_type):
    """Convert a MetricMetadata.ValueType enum to the matching Python type."""
    value_types = rdf_stats.MetricMetadata.ValueType
    mapping = {value_types.INT: int, value_types.FLOAT: float}
    if value_type not in mapping:
        raise ValueError("Unknown value type: %s" % value_type)
    return mapping[value_type]
Converts MetricMetadata.ValueType enums to corresponding Python types.
def _file_prompt_quiet(f):
    """Decorator ensuring 'file prompt quiet' is active before file operations.

    If auto_file_prompt is set, the setting is pushed to the device and
    flagged (prompt_quiet_changed) so it can be restored later; otherwise
    the running config is inspected and a CommandErrorException raised
    when the prompt is not already disabled.  The outcome is cached per
    instance via prompt_quiet_configured, so the check runs at most once.
    """
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        if not self.prompt_quiet_configured:
            if self.auto_file_prompt:
                # Push the setting ourselves and remember we changed it.
                self.device.send_config_set(["file prompt quiet"])
                self.prompt_quiet_changed = True
                self.prompt_quiet_configured = True
            else:
                # Verify the operator has already disabled prompts.
                cmd = "file prompt quiet"
                show_cmd = "show running-config | inc {}".format(cmd)
                output = self.device.send_command_expect(show_cmd)
                if cmd in output:
                    self.prompt_quiet_configured = True
                else:
                    msg = (
                        "on-device file operations require prompts to be disabled. "
                        "Configure 'file prompt quiet' or set 'auto_file_prompt=True'"
                    )
                    raise CommandErrorException(msg)
        return f(self, *args, **kwargs)
    return wrapper
Decorator to toggle 'file prompt quiet' for methods that perform file operations.
def _request(self, url, request_type='GET', **params):
    """Send a request to the Minut Point API.

    Returns the decoded JSON body on success, or None when the request
    fails (network error, HTTP error status, or an 'error' key in the
    response body).
    """
    try:
        _LOGGER.debug('Request %s %s', url, params)
        response = self.request(
            request_type, url, timeout=TIMEOUT.seconds, **params)
        response.raise_for_status()
        _LOGGER.debug('Response %s %s %.200s', response.status_code,
                      response.headers['content-type'], response.json())
        response = response.json()
        # The API signals application-level failures in the body;
        # raising OSError routes them into the handler below.
        if 'error' in response:
            raise OSError(response['error'])
        return response
    except OSError as error:
        # NOTE(review): presumably the HTTP client's exceptions derive
        # from OSError, so network and status errors land here too --
        # confirm against the client library.
        _LOGGER.warning('Failed request: %s', error)
Send a request to the Minut Point API.
def _filter_matrix_rows(cls, matrix):
    """Drop matrix rows whose every element is 'NA' or 'no'.

    :param matrix: list of rows (as produced by _to_matrix)
    :return: a new list containing only the rows with at least one
        informative value; row order is preserved
    """
    # A row is informative iff any element is neither 'NA' nor 'no'.
    # The flag-and-break loop is replaced by the equivalent any()
    # predicate inside a comprehension.
    return [row for row in matrix
            if any(element not in ('NA', 'no') for element in row)]
Filter a matrix (as produced by _to_matrix), keeping only the rows that
contain at least one value other than 'NA' or 'no'.
def create(cls, counter_user_alias, share_detail, status, monetary_account_id=None, draft_share_invite_bank_id=None, share_type=None, start_date=None, end_date=None, custom_headers=None):
    """Create a new share inquiry for a monetary account.

    Serializes the field map to JSON, POSTs it to the share-invite
    endpoint for the current user/account, and returns the created id
    wrapped in a BunqResponseInt.  See the module-level documentation for
    the semantics of each field.
    """
    if custom_headers is None:
        custom_headers = {}

    request_map = {
        cls.FIELD_COUNTER_USER_ALIAS: counter_user_alias,
        cls.FIELD_DRAFT_SHARE_INVITE_BANK_ID: draft_share_invite_bank_id,
        cls.FIELD_SHARE_DETAIL: share_detail,
        cls.FIELD_STATUS: status,
        cls.FIELD_SHARE_TYPE: share_type,
        cls.FIELD_START_DATE: start_date,
        cls.FIELD_END_DATE: end_date
    }
    request_map_string = converter.class_to_json(request_map)
    # NOTE(review): presumably strips fields left at None so they are not
    # sent on the wire -- confirm against _remove_field_for_request.
    request_map_string = cls._remove_field_for_request(request_map_string)

    api_client = client.ApiClient(cls._get_api_context())
    request_bytes = request_map_string.encode()
    endpoint_url = cls._ENDPOINT_URL_CREATE.format(cls._determine_user_id(),
                                                   cls._determine_monetary_account_id(
                                                       monetary_account_id))
    response_raw = api_client.post(endpoint_url, request_bytes,
                                   custom_headers)

    return BunqResponseInt.cast_from_bunq_response(
        cls._process_for_id(response_raw)
    )
Create a new share inquiry for a monetary account, specifying the permission the other bunq user will have on it. :type user_id: int :type monetary_account_id: int :param counter_user_alias: The pointer of the user to share with. :type counter_user_alias: object_.Pointer :param share_detail: The share details. Only one of these objects may be passed. :type share_detail: object_.ShareDetail :param status: The status of the share. Can be PENDING, REVOKED (the user deletes the share inquiry before it's accepted), ACCEPTED, CANCELLED (the user deletes an active share) or CANCELLATION_PENDING, CANCELLATION_ACCEPTED, CANCELLATION_REJECTED (for canceling mutual connects). :type status: str :param draft_share_invite_bank_id: The id of the draft share invite bank. :type draft_share_invite_bank_id: int :param share_type: The share type, either STANDARD or MUTUAL. :type share_type: str :param start_date: The start date of this share. :type start_date: str :param end_date: The expiration date of this share. :type end_date: str :type custom_headers: dict[str, str]|None :rtype: BunqResponseInt
def OpenSourcePath(self, source_path):
    """Opens the source path.

    Wraps source_path in an OS path specification and adds it as a scan
    node with no parent (a root node).

    Args:
        source_path (str): source path.
    """
    source_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=source_path)
    # None parent: this is a root scan node.
    self.AddScanNode(source_path_spec, None)
Opens the source path. Args: source_path (str): source path.
def _validate_conn(self, conn):
    """Called right before a request is made, after the socket is created.

    Forces the connection open early and warns when certificate
    verification is not in effect.
    """
    super(HTTPSConnectionPool, self)._validate_conn(conn)

    # Connect now (rather than lazily) so verification state is known
    # before the request is issued.
    if not getattr(conn, 'sock', None):
        conn.connect()

    if not conn.is_verified:
        warnings.warn((
            'Unverified HTTPS request is being made. '
            'Adding certificate verification is strongly advised. See: '
            'https://urllib3.readthedocs.org/en/latest/security.html'),
            InsecureRequestWarning)
Called right before a request is made, after the socket is created.
def recordHostname(self, basedir):
    """Write this host's name into <basedir>/twistd.hostname, best effort."""
    log.msg("recording hostname in twistd.hostname")
    target = os.path.join(basedir, "twistd.hostname")

    try:
        hostname = os.uname()[1]
    except AttributeError:
        # os.uname() is unavailable on some platforms (e.g. Windows).
        hostname = socket.getfqdn()

    try:
        with open(target, "w") as out:
            out.write("{0}\n".format(hostname))
    except Exception:
        # Purely a convenience file; never let a write failure propagate.
        log.msg("failed - ignoring")
Record my hostname in twistd.hostname, for user convenience
def get(self, request, **kwargs):
    """Return the customer's subscription with status code 200.

    Creates the Customer record on first access if it does not yet exist.
    """
    customer, _created = Customer.get_or_create(
        subscriber=subscriber_request_callback(self.request)
    )
    serializer = SubscriptionSerializer(customer.subscription)
    return Response(serializer.data)
Return the customer's valid subscriptions. Returns with status code 200.
def flatten(self, shallow=None):
    """Return a flattened version of the wrapped array.

    With *shallow* set, only one level is flattened (delegated to
    self._flatten); the result is re-wrapped before being returned.
    """
    flattened = self._flatten(self.obj, shallow)
    return self._wrap(flattened)
Return a completely flattened version of an array.
def toList(self):
    """Return this time as a signed list, with the hour wrapped into 0-23."""
    parts = angle.toList(self.value)
    parts[1] %= 24
    return parts
Returns time as signed list.
def is_bool_dtype(arr_or_dtype):
    """Check whether the provided array or dtype is of a boolean dtype.

    An extension array/dtype is considered boolean when its dtype's
    ``_is_boolean`` attribute is True.  Returns False for None or for
    inputs that cannot be resolved to a dtype.
    """
    if arr_or_dtype is None:
        return False
    try:
        dtype = _get_dtype(arr_or_dtype)
    except TypeError:
        # Not a recognizable array or dtype.
        return False

    if isinstance(arr_or_dtype, CategoricalDtype):
        # Classify a categorical dtype by its categories.
        arr_or_dtype = arr_or_dtype.categories

    if isinstance(arr_or_dtype, ABCIndexClass):
        # An object-dtype Index counts as boolean when its values are
        # inferred to be booleans.
        return (arr_or_dtype.is_object and
                arr_or_dtype.inferred_type == 'boolean')
    elif is_extension_array_dtype(arr_or_dtype):
        dtype = getattr(arr_or_dtype, 'dtype', arr_or_dtype)
        return dtype._is_boolean

    return issubclass(dtype.type, np.bool_)
Check whether the provided array or dtype is of a boolean dtype. Parameters ---------- arr_or_dtype : array-like The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a boolean dtype. Notes ----- An ExtensionArray is considered boolean when the ``_is_boolean`` attribute is set to True. Examples -------- >>> is_bool_dtype(str) False >>> is_bool_dtype(int) False >>> is_bool_dtype(bool) True >>> is_bool_dtype(np.bool) True >>> is_bool_dtype(np.array(['a', 'b'])) False >>> is_bool_dtype(pd.Series([1, 2])) False >>> is_bool_dtype(np.array([True, False])) True >>> is_bool_dtype(pd.Categorical([True, False])) True >>> is_bool_dtype(pd.SparseArray([True, False])) True
def wrap_uda(
    hdfs_file,
    inputs,
    output,
    update_fn,
    init_fn=None,
    merge_fn=None,
    finalize_fn=None,
    serialize_fn=None,
    close_fn=None,
    name=None,
):
    """Create a callable aggregation (UDA) object from a .so on HDFS.

    Must be created in Impala before it can be used.  See the ImpalaUDA
    class for the meaning of the symbol-name parameters.

    NOTE(review): close_fn is accepted but not forwarded to ImpalaUDA
    here -- confirm whether that is intentional.
    """
    func = ImpalaUDA(
        inputs,
        output,
        update_fn,
        init_fn,
        merge_fn,
        finalize_fn,
        serialize_fn=serialize_fn,
        name=name,
        lib_path=hdfs_file,
    )
    return func
Creates a callable aggregation function object. Must be created in Impala to be used Parameters ---------- hdfs_file: .so file that contains relevant UDA inputs: list of strings denoting ibis datatypes output: string denoting ibis datatype update_fn: string Library symbol name for update function init_fn: string, optional Library symbol name for initialization function merge_fn: string, optional Library symbol name for merge function finalize_fn: string, optional Library symbol name for finalize function serialize_fn : string, optional Library symbol name for serialize UDA API function. Not required for all UDAs; see documentation for more. close_fn : string, optional name: string, optional Used internally to track function Returns ------- container : UDA object
def determine(chord, shorthand=False, no_inversions=False, no_polychords=False):
    """Name a chord, from a simple triad up to a fourteen-note polychord."""
    if chord == []:
        return []
    if len(chord) == 1:
        return chord
    if len(chord) == 2:
        return [intervals.determine(chord[0], chord[1])]
    # Dispatch on chord size; anything larger than seven notes is
    # treated as a polychord.
    dispatch = {
        3: determine_triad,
        4: determine_seventh,
        5: determine_extended_chord5,
        6: determine_extended_chord6,
        7: determine_extended_chord7,
    }
    handler = dispatch.get(len(chord))
    if handler is not None:
        return handler(chord, shorthand, no_inversions, no_polychords)
    return determine_polychords(chord, shorthand)
Name a chord. This function can determine almost every chord, from a simple triad to a fourteen note polychord.
def notify(self, n=1):
    """Raise the condition and wake up fibers waiting on it.

    The optional *n* parameter specifies how many fibers are notified
    (a negative n notifies all).  The condition's lock must be held.
    """
    if not is_locked(self._lock):
        raise RuntimeError('lock is not locked')
    # Mutable cell so the nested closure can update the count.
    notified = [0]
    def walker(switcher, predicate):
        # NOTE(review): assumed walk_callbacks keeps a callback when the
        # walker returns True and drops it on False -- confirm against
        # walk_callbacks.
        if not switcher.active:
            return False
        if predicate and not predicate():
            # Waiter's predicate not satisfied: leave it waiting.
            return True
        if n >= 0 and notified[0] >= n:
            # Quota reached: stop notifying, keep remaining waiters.
            return True
        switcher.switch()
        notified[0] += 1
        return False
    walk_callbacks(self, walker)
Raise the condition and wake up fibers waiting on it. The optional *n* parameter specifies how many fibers will be notified. By default, one fiber is notified.
def is_period_current(self):
    """Return True if this subscription's current period (or trial) has
    not yet ended, False otherwise.

    Fixes over the original expression:
    - it could return None instead of False (``x or (None and ...)``)
      when the period had ended and trial_end was None, despite the
      documented bool contract;
    - timezone.now() was evaluated twice, so the two comparisons could
      straddle a boundary.
    """
    now = timezone.now()
    return bool(
        self.current_period_end > now
        or (self.trial_end and self.trial_end > now)
    )
Returns True if this subscription's period is current, false otherwise.
def ref(self):
    """Get the reference number of the dataset.

    Returns the dataset reference number.
    C library equivalent: SDidtoref.
    """
    sds_ref = _C.SDidtoref(self._id)
    # A bad return value means the SDS identifier was invalid.
    _checkErr('idtoref', sds_ref, 'illegal SDS identifier')
    return sds_ref
Get the reference number of the dataset. Args:: no argument Returns:: dataset reference number C library equivalent : SDidtoref
def get_subtask_fields(config_class):
    """Collect the configurable subtask fields of a Config class.

    Returns the alphabetically-ordered mapping of attribute name to
    ConfigurableField/RegistryField produced by _get_alphabetical_members.
    """
    from lsst.pex.config import ConfigurableField, RegistryField

    def _is_subtask_field(member):
        return isinstance(member, (ConfigurableField, RegistryField))

    return _get_alphabetical_members(config_class, _is_subtask_field)
Get all configurable subtask fields from a Config class. Parameters ---------- config_class : ``lsst.pipe.base.Config``-type The configuration class (not an instance) corresponding to a Task. Returns ------- subtask_fields : `dict` Mapping where keys are the config attribute names and values are subclasses of ``lsst.pex.config.ConfigurableField`` or ``RegistryField``). The mapping is alphabetically ordered by attribute name.
def _on_ready_read(self):
    """Drain all available bytes, parsing the header first, then payload."""
    while self.bytesAvailable():
        if self._header_complete:
            self._read_payload()
        else:
            self._read_header()
Read bytes when ready read
def match(sel, obj, arr=None, bailout_fn=None):
    """Match a JSONSelect selector against *obj*, yielding matched values.

    If *arr* is given, its values are safely interpolated into the ``?``
    placeholders of *sel* first.  *bailout_fn(obj, matches)* may return
    True to prune the search below a node.
    """
    if arr:
        sel = interpolate(sel, arr)
    parsed = parse(sel)[1]
    return _forEach(parsed, obj, bailout_fn=bailout_fn)
Match a selector to an object, yielding the matched values. Args: sel: The JSONSelect selector to apply (a string) obj: The object against which to apply the selector arr: If sel contains ? characters, then the values in this array will be safely interpolated into the selector. bailout_fn: A callback which takes two parameters, |obj| and |matches|. This will be called on every node in obj. If it returns True, the search for matches will be aborted below that node. The |matches| parameter indicates whether the node matched the selector. This is intended to be used as a performance optimization.
def update(self, forecasts, observations):
    """Fold a batch of forecasts and observations into the running sums.

    Args:
        forecasts: forecast CDF values, shape (n_forecasts, n_thresholds).
        observations: raw observed values (1-D) or pre-computed
            observation CDFs (2-D).
    """
    if observations.ndim == 1:
        # Build step-function CDFs: 1 at every threshold >= the observation.
        obs_cdfs = np.zeros((observations.size, self.thresholds.size))
        for row, observed in enumerate(observations):
            obs_cdfs[row, self.thresholds >= observed] = 1
    else:
        obs_cdfs = observations
    self.errors["F_2"] += (forecasts ** 2).sum(axis=0)
    self.errors["F_O"] += (forecasts * obs_cdfs).sum(axis=0)
    self.errors["O_2"] += (obs_cdfs ** 2).sum(axis=0)
    self.errors["O"] += obs_cdfs.sum(axis=0)
    self.num_forecasts += len(forecasts)
Update the statistics with forecasts and observations.

Args:
    forecasts: The discrete Cumulative Distribution Functions of the
        forecasts, with shape (number of forecasts, number of thresholds).
    observations: The raw observed values (1-D) or their discrete
        Cumulative Distribution Functions (2-D).
def typed_assign_stmt_handle(self, tokens):
    """Process Python 3.6 variable type annotations.

    tokens is [name, type] or [name, type, value].  On targets >= 3.6
    the annotation is emitted natively; on older targets it is
    downgraded to a ``# type:`` comment.
    """
    if len(tokens) == 2:
        if self.target_info >= (3, 6):
            return tokens[0] + ": " + self.wrap_typedef(tokens[1])
        else:
            # Pre-3.6 has no bare annotations: assign None plus a
            # type comment instead.
            return tokens[0] + " = None" + self.wrap_comment(" type: " + tokens[1])
    elif len(tokens) == 3:
        if self.target_info >= (3, 6):
            return tokens[0] + ": " + self.wrap_typedef(tokens[1]) + " = " + tokens[2]
        else:
            return tokens[0] + " = " + tokens[2] + self.wrap_comment(" type: " + tokens[1])
    else:
        raise CoconutInternalException("invalid variable type annotation tokens", tokens)
Process Python 3.6 variable type annotations.
def get_total_irradiance(surface_tilt, surface_azimuth,
                         solar_zenith, solar_azimuth,
                         dni, ghi, dhi, dni_extra=None, airmass=None,
                         albedo=.25, surface_type=None,
                         model='isotropic',
                         model_perez='allsitescomposite1990', **kwargs):
    """Determine total in-plane irradiance and its beam, sky-diffuse and
    ground-reflected components, using the specified sky diffuse
    irradiance model.

    Returns an OrderedDict or DataFrame with keys ``poa_global``,
    ``poa_direct``, ``poa_diffuse``, ``poa_sky_diffuse`` and
    ``poa_ground_diffuse``.
    """
    # Fix: removed a stray bare `r` expression (left over from a stripped
    # raw docstring prefix) that referenced an undefined name and raised
    # NameError on every call.
    poa_sky_diffuse = get_sky_diffuse(
        surface_tilt, surface_azimuth, solar_zenith, solar_azimuth,
        dni, ghi, dhi, dni_extra=dni_extra, airmass=airmass, model=model,
        model_perez=model_perez)

    poa_ground_diffuse = get_ground_diffuse(surface_tilt, ghi, albedo,
                                            surface_type)
    aoi_ = aoi(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth)
    irrads = poa_components(aoi_, dni, poa_sky_diffuse, poa_ground_diffuse)
    return irrads
r""" Determine total in-plane irradiance and its beam, sky diffuse and ground reflected components, using the specified sky diffuse irradiance model. .. math:: I_{tot} = I_{beam} + I_{sky diffuse} + I_{ground} Sky diffuse models include: * isotropic (default) * klucher * haydavies * reindl * king * perez Parameters ---------- surface_tilt : numeric Panel tilt from horizontal. surface_azimuth : numeric Panel azimuth from north. solar_zenith : numeric Solar zenith angle. solar_azimuth : numeric Solar azimuth angle. dni : numeric Direct Normal Irradiance ghi : numeric Global horizontal irradiance dhi : numeric Diffuse horizontal irradiance dni_extra : None or numeric, default None Extraterrestrial direct normal irradiance airmass : None or numeric, default None Airmass albedo : numeric, default 0.25 Surface albedo surface_type : None or String, default None Surface type. See grounddiffuse. model : String, default 'isotropic' Irradiance model. model_perez : String, default 'allsitescomposite1990' Used only if model='perez'. See :py:func:`perez`. Returns ------- total_irrad : OrderedDict or DataFrame Contains keys/columns ``'poa_global', 'poa_direct', 'poa_diffuse', 'poa_sky_diffuse', 'poa_ground_diffuse'``.
def alpha(requestContext, seriesList, alpha):
    """Assign the given alpha (transparency) option to every series.

    *alpha* is a float between 0 and 1.  The series list is mutated in
    place and returned.
    """
    for entry in seriesList:
        entry.options['alpha'] = alpha
    return seriesList
Assigns the given alpha transparency setting to the series. Takes a float value between 0 and 1.
async def set_topic(self, topic):
    """Set the topic for the topic channel (best effort)."""
    self.topic = topic
    try:
        if self.topicchannel:
            # NOTE(review): `client` is a module-level name here, not
            # `self.client` -- confirm this is intentional.
            await client.edit_channel(self.topicchannel, topic=topic)
    except Exception as e:
        # Best effort: failures are logged, never propagated.
        logger.exception(e)
Sets the topic for the topic channel
def to_doc(self, xmllist, decorates):
    """Convert the specified XML element list to docstring elements.

    <group> elements become DocGroup instances; every other element
    yields one DocElement per whitespace/comma-separated entry in its
    ``name`` attribute (or a single DocElement when it has no name).
    """
    result = []
    for xitem in xmllist:
        if xitem.tag != "group":
            if "name" in list(xitem.keys()):
                # Fix: raw string for the regex -- "[\s,]+" in a plain
                # literal is an invalid escape sequence (warning today,
                # error in future Python versions).
                names = re.split(r"[\s,]+", xitem.get("name"))
                for name in names:
                    docel = DocElement(xitem, self, decorates)
                    docel.attributes["name"] = name
                    result.append(docel)
            else:
                result.append(DocElement(xitem, self, decorates))
        else:
            docel = DocGroup(xitem, decorates)
            result.append(docel)
    return result
Converts the specified xml list to a list of docstring elements.
def delete(self, url, callback, json=None):
    """Issue a DELETE request via the underlying adapter.

    Args:
        url (str): URL for the request.
        callback (func): The response callback function.
        json (dict): Optional JSON body for the request.

    Returns:
        The result of the callback handling the response.
    """
    return self.adapter.delete(url, callback, json=json)
Delete a URL.

Args:
    url(string): URL for the request
    callback(func): The response callback function

Keyword Args:
    json(dict): JSON body for the request

Returns:
    The result of the callback handling the response from the executed request
def disconnect(self):
    """Drop the broker connection, if one exists.

    Used to implement disconnect-on-timeout; pending requests are not
    cancelled and will be retried.
    """
    if not self.proto:
        return
    log.debug('%r Disconnecting from %r', self, self.proto.transport.getPeer())
    self.proto.transport.loseConnection()
Disconnect from the Kafka broker. This is used to implement disconnection on timeout as a workaround for Kafka connections occasionally getting stuck on the server side under load. Requests are not cancelled, so they will be retried.
def load_all_from_directory(self, directory_path):
    """Recursively load every file under *directory_path*.

    Returns a list with one loaded item (dict) per file, in directory
    walk order.
    """
    return [
        self.load_from_file(os.path.join(dirpath, filename))
        for dirpath, _dirnames, filenames in os.walk(directory_path)
        for filename in filenames
    ]
Return a list of dict from a directory containing files
def fileToMD5(filename, block_size=256*128, binary=False):
    """Compute the MD5 hash of a file, reading it in chunks.

    Args:
        filename: path to the file.
        block_size: read chunk size; pick a multiple of the filesystem
            block size to avoid performance issues.
        binary: when True, return the raw digest bytes instead of the
            hex string.

    Returns:
        The MD5 hash of the file, as hex string or bytes.
    """
    digest = hashlib.md5()
    with open(filename, 'rb') as handle:
        while True:
            chunk = handle.read(block_size)
            if not chunk:
                break
            digest.update(chunk)
    return digest.digest() if binary else digest.hexdigest()
A function that calculates the MD5 hash of a file. Args: ----- filename: Path to the file. block_size: Chunks of suitable size. Block size directly depends on the block size of your filesystem to avoid performances issues. Blocks of 4096 octets (Default NTFS). binary: A boolean representing whether the returned info is in binary format or not. Returns: -------- string: The MD5 hash of the file.
def decodedFileID(self, cachedFilePath):
    """Decode a cached file name back into its job store file ID.

    :param str cachedFilePath: path to the cached file (must live
        directly inside the local cache directory)
    :return: the job store file ID associated with the file
    :rtype: str
    """
    directory, encoded = os.path.split(cachedFilePath)
    assert directory == self.localCacheDir, "Can't decode uncached file names"
    return base64.urlsafe_b64decode(encoded.encode('utf-8')).decode('utf-8')
Decode a cached fileName back to a job store file ID. :param str cachedFilePath: Path to the cached file :return: The jobstore file ID associated with the file :rtype: str
def get_screen_size_range(self):
    """Retrieve the range of possible screen sizes.

    The screen may be set to any size within the returned range.
    """
    return GetScreenSizeRange(
        display=self.display,
        # opcode for this X extension (extname is a module-level name).
        opcode=self.display.get_extension_major(extname),
        window=self,
    )
Retrieve the range of possible screen sizes. The screen may be set to any size within this range.
def fixed_legend_position(self, fixed_legend_position):
    """Set where the fixed legend is displayed relative to the chart.

    :param fixed_legend_position: one of RIGHT, TOP, LEFT, BOTTOM
    :raises ValueError: if the value is not an allowed position
    """
    allowed_values = ["RIGHT", "TOP", "LEFT", "BOTTOM"]
    if fixed_legend_position in allowed_values:
        self._fixed_legend_position = fixed_legend_position
        return
    raise ValueError(
        "Invalid value for `fixed_legend_position` ({0}), must be one of {1}"
        .format(fixed_legend_position, allowed_values)
    )
Sets the fixed_legend_position of this ChartSettings. Where the fixed legend should be displayed with respect to the chart # noqa: E501 :param fixed_legend_position: The fixed_legend_position of this ChartSettings. # noqa: E501 :type: str
def get_final_window_monitor(cls, settings, window):
    """Gets the final monitor index for the main window of guake.

    Honors the 'mouse-display' setting (follow the pointer), then the
    configured 'display-n', falling back to the primary monitor when the
    stored index is out of range or set to ALWAYS_ON_PRIMARY.
    """
    screen = window.get_screen()
    use_mouse = settings.general.get_boolean('mouse-display')
    dest_screen = settings.general.get_int('display-n')
    if use_mouse:
        # Place the window on the monitor currently under the pointer.
        win, x, y, _ = screen.get_root_window().get_pointer()
        dest_screen = screen.get_monitor_at_point(x, y)
    n_screens = screen.get_n_monitors()
    if dest_screen > n_screens - 1:
        # Configured monitor no longer exists: disable pointer-follow and
        # use the primary monitor.
        settings.general.set_boolean('mouse-display', False)
        # NOTE(review): this writes the out-of-range index back into
        # 'display-n' -- looks unintended; confirm.
        settings.general.set_int('display-n', dest_screen)
        dest_screen = screen.get_primary_monitor()
    if dest_screen == ALWAYS_ON_PRIMARY:
        dest_screen = screen.get_primary_monitor()
    return dest_screen
Gets the final screen number for the main window of guake.