code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _get_more(self): if not self.alive: raise pymongo.errors.InvalidOperation( "Can't call get_more() on a MotorCursor that has been" " exhausted or killed.") self.started = True return self._refresh()
Initial query or getMore. Returns a Future.
def _parse_ntthal(ntthal_output):
    """Parse ntthal output into a THERMORESULT via regex.

    Returns NULLTHERMORESULT when the output does not match.
    """
    match = re.search(_ntthal_re, ntthal_output)
    if not match:
        return NULLTHERMORESULT
    values = [float(match.group(i)) for i in range(1, 5)]
    return THERMORESULT(True, *values)
Helper method that uses regex to parse ntthal output.
def index(self, value, start=None, stop=None):
    """Return first index of value.

    :param value: item to locate in the aliased sequence.
    :param start: optional search start position.
    :param stop: optional search stop position.
    :raises ValueError: when the value is not present.
    """
    # list.index() rejects None for start/stop (unlike str.index), so only
    # forward the positions the caller actually supplied.
    if start is None:
        return self.__alias__.index(value)
    if stop is None:
        return self.__alias__.index(value, start)
    return self.__alias__.index(value, start, stop)
Return first index of value.
def _process_maybe_work(self, yes_work, maybe_work, work_dir, yn_results_path, stats):
    """Compare `yes_work` against `maybe_work` and fold results into *stats*.

    :param yes_work: name of the work for which stats are collected
    :param maybe_work: name of the work being compared with `yes_work`
    :param work_dir: directory where generated files are saved
    :param yn_results_path: path to results intersecting `yes_work` with "no" works
    :param stats: data structure holding statistical data of the comparison
    :rtype: dict
    """
    # Comparing a work against itself is a no-op.
    if maybe_work == yes_work:
        return stats
    self._logger.info(
        'Processing "maybe" work {} against "yes" work {}.'.format(
            maybe_work, yes_work))
    for siglum in self._corpus.get_sigla(maybe_work):
        witness = (maybe_work, siglum)
        # Start each witness at 0% common/shared and 100% unique.
        stats[COMMON][witness] = 0
        stats[SHARED][witness] = 0
        stats[UNIQUE][witness] = 100
    # Sort the pair so the intersect filename is order-independent.
    works = [yes_work, maybe_work]
    works.sort()
    ym_results_path = os.path.join(
        self._ym_intersects_dir, '{}_intersect_{}.csv'.format(*works))
    stats = self._process_intersection(yes_work, maybe_work, work_dir,
                                       ym_results_path, stats)
    stats = self._process_diff(yes_work, maybe_work, work_dir,
                               ym_results_path, yn_results_path, stats)
    return stats
Returns statistics of how `yes_work` compares with `maybe_work`. :param yes_work: name of work for which stats are collected :type yes_work: `str` :param maybe_work: name of work being compared with `yes_work` :type maybe_work: `str` :param work_dir: directory where generated files are saved :type work_dir: `str` :param yn_results_path: path to results intersecting `yes_work` with "no" works :type yn_results_path: `str` :param stats: data structure to hold statistical data of the comparison :type stats: `dict` :rtype: `dict`
def iterfields(klass):
    """Iterate over *klass* members and yield its TypedFields.

    Yields:
        (class attribute name, TypedField instance) tuples.
    """
    def _is_typed_field(member):
        return isinstance(member, TypedField)

    for name, field in inspect.getmembers(klass, predicate=_is_typed_field):
        yield name, field
Iterate over the input class members and yield its TypedFields. Args: klass: A class (usually an Entity subclass). Yields: (class attribute name, TypedField instance) tuples.
async def status(cls):
    """Fetch and return the current status of the configured API server."""
    request = Request(cls.session, 'GET', '/manager/status')
    request.set_json({'status': 'running'})
    async with request.fetch() as resp:
        return await resp.json()
Returns the current status of the configured API server.
def insertAdjacentHTML(self, position: str, html: str) -> None:
    """Parse *html* to DOM and insert the fragment at *position*.

    *position* is case-insensitive and must be one of "beforeBegin",
    "afterBegin", "beforeEnd", or "afterEnd".
    """
    fragment = self._parse_html(html)
    dispatch = {
        'beforebegin': self.before,
        'afterbegin': self.prepend,
        'beforeend': self.append,
        'afterend': self.after,
    }
    inserter = dispatch.get(position.lower())
    if inserter is None:
        raise ValueError(
            'The value provided ({}) is not one of "beforeBegin", '
            '"afterBegin", "beforeEnd", or "afterEnd".'.format(position)
        )
    inserter(fragment)
Parse ``html`` to DOM and insert to ``position``. ``position`` is a case-insensitive string, and must be one of "beforeBegin", "afterBegin", "beforeEnd", or "afterEnd".
def powerset(iterable, *, reverse=False):
    """Return a generator over the powerset of *iterable*.

    With ``reverse=True`` subsets are produced in descending order of size.
    """
    items = list(iterable)
    sizes = range(len(items), -1, -1) if reverse else range(len(items) + 1)
    return chain.from_iterable(combinations(items, size) for size in sizes)
Return the powerset. Arguments --------- iterable : iterable reverse : boolean Indicates whether the powerset should be returned descending by size Returns ------- A generator producing each element of the powerset.
def post(self, text=None, attachments=None, source_guid=None):
    """Post a direct message to the user.

    :param text: the message content
    :param attachments: message attachments
    :param source_guid: a client-side unique ID for the message
    :return: the message sent
    """
    return self.messages.create(
        text=text, attachments=attachments, source_guid=source_guid)
Post a direct message to the user. :param str text: the message content :param attachments: message attachments :param str source_guid: a client-side unique ID for the message :return: the message sent :rtype: :class:`~groupy.api.messages.DirectMessage`
def filename_for(self, subpath):
    """Return the filename for *subpath* relative to the root directory.

    Returns None when no readme exists for the subpath.
    """
    try:
        absolute = self.readme_for(subpath)
    except ReadmeNotFoundError:
        return None
    return os.path.relpath(absolute, self.root_directory)
Returns the relative filename for the specified subpath, or the root filename if subpath is None. Raises werkzeug.exceptions.NotFound if the resulting path would fall out of the root directory.
def photos(self, query, page=1, per_page=10):
    """Get a single page of photo results for a query.

    :param query: search terms.
    :param page: page number to retrieve (default 1).
    :param per_page: number of items per page (default 10).
    :return: dict with 'total', 'total_pages' and parsed 'results'.
    """
    data = self._search("/search/photos", query, page=page, per_page=per_page)
    data["results"] = PhotoModel.parse_list(data.get("results"))
    return data
Get a single page of photo results for a query. :param query [string]: Search terms. :param page [integer]: Page number to retrieve. (Optional; default: 1) :param per_page [integer]: Number of items per page. (Optional; default: 10) :return: [dict]: {u'total': 0, u'total_pages': 0, u'results': [Photo]}
def needs_gcloud(self):
    """Return True when gcloud is unavailable but needed for authentication."""
    suffix = ['google-cloud-sdk', 'bin']
    if platform.system() != "Windows":
        default_path = os.path.join(os.path.expanduser('~'), *suffix)
    else:
        default_path = os.path.join(
            os.environ['LOCALAPPDATA'], 'Google', 'Cloud SDK', *suffix)
    on_appengine = os.getenv('SERVER_SOFTWARE', '').startswith(
        'Google App Engine/')
    in_path = default_path in os.environ["PATH"].split(os.pathsep)
    # `which` stays last so it is only consulted when the cheaper
    # environment checks have not already answered the question.
    return not on_appengine and not in_path and which('gcloud') is None
Returns true if gcloud is unavailable and needed for authentication.
def _one_iteration(self, F, Ybus, V, Vm, Va, pv, pq, pvpq):
    """Perform one Newton power-flow iteration.

    Solves J * dx = -F for the state update, applies the angle updates to
    PV and PQ buses and the magnitude updates to PQ buses, then rebuilds
    the complex bus voltages.
    """
    J = self._build_jacobian(Ybus, V, pv, pq, pvpq)
    dx = -1 * spsolve(J, F)
    npv = len(pv)
    npq = len(pq)
    # dx is laid out as [dVa(pv), dVa(pq), dVm(pq)].
    if npv > 0:
        Va[pv] = Va[pv] + dx[range(npv)]
    if npq > 0:
        Va[pq] = Va[pq] + dx[range(npv, npv + npq)]
        Vm[pq] = Vm[pq] + dx[range(npv + npq, npv + npq + npq)]
    # Rebuild V from polar form, then re-derive Vm/Va so they stay
    # consistent with the updated complex voltages.
    V = Vm * exp(1j * Va)
    Vm = abs(V)
    Va = angle(V)
    return V, Vm, Va
Performs one Newton iteration.
def load_rc(self, path=None, system=False):
    """Load the conda configuration file.

    If both user and system configuration exist, the user one is used.

    :param path: explicit config path; ignored when a user/system rc exists.
    :param system: when True, skip the user rc file.
    :return: parsed configuration dict (empty when no file is found).
    """
    if os.path.isfile(self.user_rc_path) and not system:
        path = self.user_rc_path
    elif os.path.isfile(self.sys_rc_path):
        path = self.sys_rc_path
    if not path or not os.path.isfile(path):
        return {}
    with open(path) as f:
        # safe_load: yaml.load without an explicit Loader can construct
        # arbitrary Python objects from the file and is deprecated.
        return yaml.safe_load(f) or {}
Load the conda configuration file. If both user and system configuration exists, user will be used.
def get_current_value(self, use_cached=False):
    """Return the most recent DataPoint written to this stream, or None.

    :param bool use_cached: when True, reuse already-cached metadata
        instead of requesting the latest from Device Cloud.
    """
    metadata = self._get_stream_metadata(use_cached)
    current_value = metadata.get("currentValue")
    if not current_value:
        return None
    return DataPoint.from_json(self, current_value)
Return the most recent DataPoint value written to a stream The current value is the last recorded data point for this stream. :param bool use_cached: If False, the function will always request the latest from Device Cloud. If True, the device will not make a request if it already has cached data. :raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error :raises devicecloud.streams.NoSuchStreamException: if this stream has not yet been created :return: The most recent value written to this stream (or None if nothing has been written) :rtype: :class:`~DataPoint` or None
def eigenvalues_samples(self):
    r"""Samples of the eigenvalues.

    Returns an (nsamples, nstates) array whose row ``i`` holds the
    eigenvalues of the i-th sampled HMM.
    """
    # The original body began with a stray bare `r` (the detached prefix of
    # this raw docstring), which raised NameError at call time; removed.
    res = np.empty((self.nsamples, self.nstates), dtype=config.dtype)
    for i in range(self.nsamples):
        res[i, :] = self._sampled_hmms[i].eigenvalues
    return res
r""" Samples of the eigenvalues
def handle(self, dict):
    """Queue a valid crawl request in Redis.

    @param dict: a valid request dictionary (spiderid, url, priority, ...)
    """
    ex_res = self.extract(dict['url'])
    # Per-spider, per-domain sorted-set queue key.
    key = "{sid}:{dom}.{suf}:queue".format(
        sid=dict['spiderid'],
        dom=ex_res.domain,
        suf=ex_res.suffix)
    val = ujson.dumps(dict)
    # NOTE(review): this is the legacy redis-py zadd(key, value, score)
    # call order (score negated so higher priority pops first) — confirm
    # against the pinned redis-py version before upgrading it.
    self.redis_conn.zadd(key, val, -dict['priority'])
    if 'expires' in dict and dict['expires'] != 0:
        key = "timeout:{sid}:{appid}:{crawlid}".format(
            sid=dict['spiderid'],
            appid=dict['appid'],
            crawlid=dict['crawlid'])
        self.redis_conn.set(key, dict['expires'])
    dict['parsed'] = True
    dict['valid'] = True
    self.logger.info('Added crawl to Redis', extra=dict)
Processes a valid crawl request @param dict: a valid dictionary object
def parse(self, body):
    """Invoke the JSON API spec compliant parser.

    Works from the request body root key downward so exception handling
    stays easy to follow.
    """
    self._parse_top_level(body)
    resource = body['data']
    self._parse_resource(resource)
    if 'attributes' in resource:
        self._parse_attributes(resource['attributes'])
    if 'relationships' in resource:
        self._parse_relationships(resource['relationships'])
Invoke the JSON API spec compliant parser Order is important. Start from the request body root key & work your way down so exception handling is easier to follow. :return: the parsed & vetted request body
def remove_all_labels(stdout=None):
    """Drop all constraints and indexes.

    :param stdout: output stream (defaults to sys.stdout)
    :return: None
    """
    if not stdout:
        stdout = sys.stdout
    # Fixed the "Droping" typo in both progress messages.
    stdout.write("Dropping constraints...\n")
    drop_constraints(quiet=False, stdout=stdout)
    stdout.write('Dropping indexes...\n')
    drop_indexes(quiet=False, stdout=stdout)
Calls functions for dropping constraints and indexes. :param stdout: output stream :return: None
def _tab(content):
    """Convert a text-based GET response into tab-separated values."""
    frame = _data_frame(content)
    return frame.to_csv(index=False, sep='\t')
Helper function that converts a text-based GET response to tab-separated values for additional manipulation.
def detect_format(filename):
    """Detect the channel file format from the filename extension.

    Parameters
    ----------
    filename : Path or str
        path of the file

    Returns
    -------
    str
        'csv', 'sfp', or 'unknown'
    """
    suffix = Path(filename).suffix
    if suffix == '.csv':
        return 'csv'
    if suffix == '.sfp':
        return 'sfp'
    return 'unknown'
Detect file format of the channels based on extension. Parameters ---------- filename : Path path of the file Returns ------- str file format
def predict(self, y_prob):
    """Predict classes from probabilities using the tuned threshold.

    Parameters
    ----------
    y_prob : array-like of shape = [n_samples, 2]
        Predicted probabilities.

    Returns
    -------
    y_pred : array-like of shape = [n_samples]
        Predicted class.
    """
    shifted = y_prob[:, 1] + (1 - self.threshold_)
    return np.floor(shifted)
Calculate the prediction using the ThresholdingOptimization. Parameters ---------- y_prob : array-like of shape = [n_samples, 2] Predicted probabilities. Returns ------- y_pred : array-like of shape = [n_samples] Predicted class
def _getitem_with_mask(self, key, fill_value=dtypes.NA):
    """Index this Variable with -1 in the indexer remapped to *fill_value*."""
    if fill_value is dtypes.NA:
        fill_value = dtypes.get_fill_value(self.dtype)
    dims, indexer, new_order = self._broadcast_indexes(key)
    if self.size:
        if isinstance(self._data, dask_array_type):
            # Rewrite the -1 markers to a valid position for dask; the
            # mask applied below restores the fill values afterwards.
            actual_indexer = indexing.posify_mask_indexer(indexer)
        else:
            actual_indexer = indexer
        data = as_indexable(self._data)[actual_indexer]
        chunks_hint = getattr(data, 'chunks', None)
        mask = indexing.create_mask(indexer, self.shape, chunks_hint)
        data = duck_array_ops.where(mask, fill_value, data)
    else:
        # Empty array: every position is masked, so just broadcast the
        # fill value to the masked shape.
        mask = indexing.create_mask(indexer, self.shape)
        data = np.broadcast_to(fill_value, getattr(mask, 'shape', ()))
    if new_order:
        data = np.moveaxis(data, range(len(new_order)), new_order)
    return self._finalize_indexing_result(dims, data)
Index this Variable with -1 remapped to fill_value.
def exposed_method(name=None, private=False, is_coroutine=True, requires_handler_reference=False):
    """Mark a method as exposed via JSON RPC.

    :param name: exposed name (letters, digits, dots, underscores only);
        defaults to the function's own name when None.
    :param private: flag the exposed method as private.
    :param is_coroutine: wrap the method with tornado.gen.coroutine.
    :param requires_handler_reference: pass the Tornado request handler as
        the first argument of the method.
    :raises ValueError: when the chosen name is invalid.
    """
    def wrapper(func):
        if name:
            method_name = name
        else:
            method_name = func.__name__
        if not METHOD_NAME_REGEX.match(method_name):
            raise ValueError("Invalid method name: '{}'".format(method_name))

        @functools.wraps(func)
        def real_wrapper(*args, **kwargs):
            return func(*args, **kwargs)

        # Attribute flags read later by the JSON RPC dispatcher.
        if private:
            setattr(real_wrapper, "_exposed_private", True)
        else:
            setattr(real_wrapper, "_exposed_public", True)
        if is_coroutine:
            real_wrapper.__gemstone_is_coroutine = True
            real_wrapper = tornado.gen.coroutine(real_wrapper)
            setattr(real_wrapper, "_is_coroutine", True)
        if requires_handler_reference:
            setattr(real_wrapper, "_req_h_ref", True)
        setattr(real_wrapper, "_exposed_name", method_name)
        return real_wrapper
    return wrapper
Marks a method as exposed via JSON RPC. :param name: the name of the exposed method. Must contains only letters, digits, dots and underscores. If not present or is set explicitly to ``None``, this parameter will default to the name of the exposed method. If two methods with the same name are exposed, a ``ValueError`` is raised. :type name: str :param private: Flag that specifies if the exposed method is private. :type private: bool :param is_coroutine: Flag that specifies if the method is a Tornado coroutine. If True, it will be wrapped with the :py:func:`tornado.gen.coroutine` decorator. :type is_coroutine: bool :param requires_handler_reference: If ``True``, the handler method will receive as the first parameter a ``handler`` argument with the Tornado request handler for the current request. This request handler can be further used to extract various information from the request, such as headers, cookies, etc. :type requires_handler_reference: bool .. versionadded:: 0.9.0
def _run_cmplx(fn, image):
    """Run a complex function on a non-complex image.

    The image must already be complex or have an even number of bands.
    Int input is cast to float; the output is always float or double.
    """
    original_format = image.format
    if image.format != 'complex' and image.format != 'dpcomplex':
        if image.bands % 2 != 0:
            raise Error('not an even number of bands')
        if image.format != 'float' and image.format != 'double':
            image = image.cast('float')
        new_format = 'dpcomplex' if image.format == 'double' else 'complex'
        # Integer division: the band count must remain an int on Python 3
        # (the previous `/ 2` produced a float there).
        image = image.copy(format=new_format, bands=image.bands // 2)
    image = fn(image)
    if original_format != 'complex' and original_format != 'dpcomplex':
        new_format = 'double' if image.format == 'dpcomplex' else 'float'
        image = image.copy(format=new_format, bands=image.bands * 2)
    return image
Run a complex function on a non-complex image. The image needs to be complex, or have an even number of bands. The input can be int, the output is always float or double.
def find_by_id(self, organization_export, params=None, **options):
    """Return details of a previously-requested Organization export.

    Parameters
    ----------
    organization_export : globally unique identifier for the export.
    params : optional dict of request parameters.
    """
    path = "/organization_exports/%s" % (organization_export)
    # Avoid a shared mutable default argument; treat None as "no params".
    return self.client.get(path, params if params is not None else {}, **options)
Returns details of a previously-requested Organization export. Parameters ---------- organization_export : {Id} Globally unique identifier for the Organization export. [params] : {Object} Parameters for the request
def read(cls, source, format=None, coalesce=False, **kwargs):
    """Read segments from file into a `SegmentList`.

    :param source: path of file to read.
    :param format: source format identifier (auto-detected when None).
    :param coalesce: when True, coalesce the segment list before returning.
    :param kwargs: format-dependent keyword arguments.
    :return: `SegmentList` of segments read from file.
    """
    def combiner(seglists):
        merged = cls(seg for seglist in seglists for seg in seglist)
        return merged.coalesce() if coalesce else merged

    return io_read_multi(combiner, cls, source, format=format, **kwargs)
Read segments from file into a `SegmentList` Parameters ---------- filename : `str` path of file to read format : `str`, optional source format identifier. If not given, the format will be detected if possible. See below for list of acceptable formats. coalesce : `bool`, optional if `True` coalesce the segment list before returning, otherwise return exactly as contained in file(s). **kwargs other keyword arguments depend on the format, see the online documentation for details (:ref:`gwpy-segments-io`) Returns ------- segmentlist : `SegmentList` `SegmentList` active and known segments read from file. Notes -----
def complete(self, stream):
    """Complete the pending stream by connecting it to *stream*.

    Any connections made to the pending stream are connected to *stream*
    once this method returns.

    Args:
        stream(Stream): Stream that completes the connection.
    """
    assert not self.is_complete()
    self._marker.addInputPort(outputPort=stream.oport)
    # Propagate the concrete schema onto the placeholder port.
    self.stream.oport.schema = stream.oport.schema
    self._pending_schema._set(self.stream.oport.schema)
    stream.oport.operator._start_op = True
Complete the pending stream. Any connections made to :py:attr:`stream` are connected to `stream` once this method returns. Args: stream(Stream): Stream that completes the connection.
def describe_volumes(self, *volume_ids):
    """Describe available volumes.

    Returns a Deferred that fires with the parsed volume descriptions.
    """
    params = {
        "VolumeId.%d" % (index + 1): volume_id
        for index, volume_id in enumerate(volume_ids)
    }
    query = self.query_factory(
        action="DescribeVolumes", creds=self.creds,
        endpoint=self.endpoint, other_params=params)
    return query.submit().addCallback(self.parser.describe_volumes)
Describe available volumes.
def _ip_string_from_prefix(self, prefixlen=None): if not prefixlen: prefixlen = self._prefixlen return self._string_from_ip_int(self._ip_int_from_prefix(prefixlen))
Turn a prefix length into a dotted decimal string. Args: prefixlen: An integer, the netmask prefix length. Returns: A string, the dotted decimal netmask string.
def del_object_from_parent(self):
    """Remove this object from its parent's object mapping, if any."""
    parent = self.parent
    if parent:
        parent.objects.pop(self.ref)
Delete object from parent object.
def clean_fail(func):
    """Decorator: exit cleanly (status 1) on a failed call to AWS.

    Catches `botocore.exceptions.ClientError` raised by the wrapped
    callable, prints it to stderr and exits instead of tracebacking.
    This sort of error is raised when targeting a region that isn't set
    up (see `credstash setup`).
    """
    import functools  # local import keeps this snippet self-contained

    @functools.wraps(func)  # preserve the wrapped callable's metadata
    def func_wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except botocore.exceptions.ClientError as e:
            print(str(e), file=sys.stderr)
            sys.exit(1)
    return func_wrapper
A decorator to cleanly exit on a failed call to AWS. catch a `botocore.exceptions.ClientError` raised from an action. This sort of error is raised if you are targeting a region that isn't set up (see, `credstash setup`.
def _format_summary_node(self, task_class): modulename = task_class.__module__ classname = task_class.__name__ nodes = [] nodes.append( self._format_class_nodes(task_class)) nodes.append( self._format_config_nodes(modulename, classname) ) methods = ('run', 'runDataRef') for method in methods: if hasattr(task_class, method): method_obj = getattr(task_class, method) nodes.append( self._format_method_nodes(method_obj, modulename, classname)) return nodes
Format a section node containing a summary of a Task class's key APIs.
def data_structure_builder(func):
    """Decorator handling automatic data structure creation for pipe-utils."""
    @wraps(func)
    def ds_builder_wrapper(function, *args, **kwargs):
        try:
            built = DSBuilder(function)
        except NoBuilder:
            # No builder available for this callable: pass it through.
            built = function
        return func(built, *args, **kwargs)
    return ds_builder_wrapper
Decorator to handle automatic data structure creation for pipe-utils.
def _create_sot_file(self): try: self._delete_file(filename="sot_file") except Exception: pass commands = [ "terminal dont-ask", "checkpoint file sot_file", "no terminal dont-ask", ] self._send_command_list(commands)
Create Source of Truth file to compare.
def get_doctype(self, index, name):
    """Return the doctype for *name* in *index*, refreshing the cache if needed."""
    if index not in self.indices:
        self.get_all_indices()
    mappings = self.indices.get(index, {})
    return mappings.get(name)
Returns a doctype given an index and a name
def WriteUInt160(self, value):
    """Write a UInt160 value to the stream.

    Args:
        value (UInt160): value to serialize.

    Raises:
        Exception: when `value` is not a UInt160 instance.
    """
    # isinstance (not `type(...) is`) also accepts UInt160 subclasses.
    if isinstance(value, UInt160):
        value.Serialize(self)
    else:
        raise Exception("value must be UInt160 instance")
Write a UInt160 type to the stream. Args: value (UInt160): Raises: Exception: when `value` is not of neocore.UInt160 type.
def _get_model_metadata(model_class, metadata, version=None):
    """Return user metadata merged with fields every model should expose."""
    from turicreate import __version__
    info = {
        'turicreate_version': __version__,
        'type': model_class,
    }
    if version is not None:
        info['version'] = str(version)
    info.update(metadata)
    return info
Returns user-defined metadata, making sure information all models should have is also available, as a dictionary
def get_bbox_list(self, crs=None, buffer=None, reduce_bbox_sizes=None):
    """Return the split's bounding boxes.

    :param crs: CRS to transform the boxes into (default CRS when None).
    :param buffer: percentage size increase applied to each box.
    :param reduce_bbox_sizes: tightly fit boxes to the geometry; when None,
        the splitter's own setting is used.
    :return: list of bounding boxes.
    """
    boxes = self.bbox_list
    if buffer:
        boxes = [box.buffer(buffer) for box in boxes]
    if reduce_bbox_sizes is None:
        reduce_bbox_sizes = self.reduce_bbox_sizes
    if reduce_bbox_sizes:
        boxes = self._reduce_sizes(boxes)
    if crs:
        return [box.transform(crs) for box in boxes]
    return boxes
Returns a list of bounding boxes that are the result of the split :param crs: Coordinate reference system in which the bounding boxes should be returned. If None the CRS will be the default CRS of the splitter. :type crs: CRS or None :param buffer: A percentage of each BBox size increase. This will cause neighbouring bounding boxes to overlap. :type buffer: float or None :param reduce_bbox_sizes: If `True` it will reduce the sizes of bounding boxes so that they will tightly fit the given geometry in `shape_list`. This overrides the same parameter from constructor :type reduce_bbox_sizes: bool :return: List of bounding boxes :rtype: list(BBox)
async def popHiveKey(self, path):
    """Remove a key from the cell default hive and return its value."""
    self.user.allowed(('hive:pop',) + path)
    return await self.cell.hive.pop(path)
Remove and return the value of a key in the cell default hive
async def start_server_in_loop(runner, hostname, port, agent):
    """Listen for http requests and hand them to the webapp.

    Args:
        runner: AppRunner that processes the http requests.
        hostname: host name to listen on.
        port: port to listen on.
        agent: agent that owns the web app.
    """
    await runner.setup()
    site = aioweb.TCPSite(runner, hostname, port)
    agent.web.server = site
    await site.start()
    logger.info(f"Serving on http://{hostname}:{port}/")
Listens to http requests and sends them to the webapp. Args: runner: AppRunner to process the http requests hostname: host name to listen from. port: port to listen from. agent: agent that owns the web app.
def scatter(self, x, y, xerr=[], yerr=[], mark='o', markstyle=None):
    """Plot a series of unconnected points (marks).

    Shortcut for plot() with linestyle=None.

    :param x: array of x-values.
    :param y: array of y-values.
    :param xerr: errors on the x-values.
    :param yerr: errors on the y-values.
    :param mark: TikZ plot mark symbol.
    :param markstyle: style of the plot marks.
    """
    # NOTE(review): the [] defaults are shared mutable objects; kept as-is
    # for interface compatibility — confirm plot() never mutates them.
    self.plot(x, y, xerr=xerr, yerr=yerr, mark=mark,
              linestyle=None, markstyle=markstyle)
Plot a series of points. Plot a series of points (marks) that are not connected by a line. Shortcut for plot with linestyle=None. :param x: array containing x-values. :param y: array containing y-values. :param xerr: array containing errors on the x-values. :param yerr: array containing errors on the y-values. :param mark: the symbol used to mark the data points. May be any plot mark accepted by TikZ (e.g. ``*, x, +, o, square, triangle``). :param markstyle: the style of the plot marks (e.g. 'mark size=.75pt') Example:: >>> plot = artist.Plot() >>> x = np.random.normal(size=20) >>> y = np.random.normal(size=20) >>> plot.scatter(x, y, mark='*')
def get_language(self):
    """Return a locale code the site supports.

    Checks the ?lang= query parameter first, then the Accept-Language
    header, and finally falls back to settings.LANGUAGE_CODE.
    """
    if 'lang' in self.request.GET:
        requested = self.request.GET['lang'].lower()
        if requested in settings.LANGUAGE_URL_MAP:
            return settings.LANGUAGE_URL_MAP[requested]
    accept = self.request.META.get('HTTP_ACCEPT_LANGUAGE')
    if accept:
        best = self.get_best_language(accept)
        if best:
            return best
    return settings.LANGUAGE_CODE
Return a locale code we support on the site using the user's Accept-Language header to determine which is best. This mostly follows the RFCs but read bug 439568 for details.
def by_zipcode(self, zipcode, zipcode_type=None, zero_padding=True):
    """Search by exact 5-digit zipcode.

    :param zipcode: int or str; zero-padded to 5 digits by default.
    :param zipcode_type: optional zipcode type filter.
    :param zero_padding: toggle automatic zero padding.
    """
    zipcode = str(zipcode)
    if zero_padding:
        zipcode = zipcode.zfill(5)
    res = self.query(
        zipcode=zipcode,
        sort_by=None,
        returns=1,
        zipcode_type=zipcode_type,
    )
    return res[0] if len(res) else self.zip_klass()
Search zipcode by exact 5 digits zipcode. No zero padding is needed. :param zipcode: int or str, the zipcode will be automatically zero padding to 5 digits. :param zipcode_type: str or :class`~uszipcode.model.ZipcodeType` attribute. by default, it returns any zipcode type. :param zero_padding: bool, toggle on and off automatic zero padding.
def keys_values(self):
    """Return key values as a flat list (values may be lists or scalars).

    List values are extended in; string values are kept unless they start
    with '-'; non-negative ints are stringified.
    """
    keys_values = []
    for value in self.keys.values():
        if isinstance(value, list):
            keys_values += value
        # NOTE(review): `basestring` is Python 2 only — on Python 3 this
        # raises NameError; confirm the project's target version.
        elif isinstance(value, basestring) and not value.startswith('-'):
            keys_values.append(value)
        elif isinstance(value, int) and value >= 0:
            keys_values.append(str(value))
    return keys_values
Key values might be a list or not, always return a list.
def update_pricing(kwargs=None, call=None):
    """Download the latest GCE pricing data and cache it locally.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f update_pricing my-gce-config

    .. versionadded:: 2015.8.0
    """
    url = 'https://cloudpricingcalculator.appspot.com/static/data/pricelist.json'
    price_json = salt.utils.http.query(url, decode=True, decode_type='json')
    outfile = os.path.join(__opts__['cachedir'], 'gce-pricing.p')
    with salt.utils.files.fopen(outfile, 'w') as fho:
        salt.utils.msgpack.dump(price_json['dict'], fho)
    return True
Download most recent pricing information from GCE and save locally CLI Examples: .. code-block:: bash salt-cloud -f update_pricing my-gce-config .. versionadded:: 2015.8.0
def _check_structure(self): unused_variables = set() unused_operators = set() for variable in self.unordered_variable_iterator(): unused_variables.add(variable.full_name) for operator in self.unordered_operator_iterator(): unused_operators.add(operator.full_name) for operator in self.unordered_operator_iterator(): for variable in operator.inputs: unused_variables.discard(variable.full_name) unused_operators.discard(operator.full_name) for variable in operator.outputs: unused_variables.discard(variable.full_name) unused_operators.discard(operator.full_name) if len(unused_variables) > 0: raise RuntimeError('Isolated variables exist: %s' % unused_variables) if len(unused_operators) > 0: raise RuntimeError('Isolated operators exist: %s' % unused_operators)
This function applies some rules to check if the parsed model is proper. Currently, it only checks if isolated variable and isolated operator exists.
def form_invalid(self, form):
    """Handle an invalid form: flash the error and redirect to the topic."""
    messages.error(self.request, form.errors[NON_FIELD_ERRORS])
    topic = self.object.topic
    url = reverse(
        'forum_conversation:topic',
        kwargs={
            'forum_slug': topic.forum.slug,
            'forum_pk': topic.forum.pk,
            'slug': topic.slug,
            'pk': topic.pk,
        },
    )
    return redirect(url)
Handles an invalid form.
def process_md5(md5_output, pattern=r"=\s+(\S+)"):
    """Extract the MD5 hash from device `verify /md5` output.

    Example input (Cisco IOS; ASA is similar)::

        verify /md5 (flash:file_name) = 410db2a7015eaa42b1fe71f1bf3d59a2

    Raises ValueError when no hash can be found.
    """
    match = re.search(pattern, md5_output)
    if not match:
        raise ValueError("Invalid output from MD5 command: {}".format(md5_output))
    return match.group(1)
Process the string to retrieve the MD5 hash Output from Cisco IOS (ASA is similar) .MD5 of flash:file_name Done! verify /md5 (flash:file_name) = 410db2a7015eaa42b1fe71f1bf3d59a2
def filter_pyfqn(cls, value, relative_to=0):
    """Return the Python form of *value*'s fully qualified name.

    Args:
        relative_to: if greater than 0, the returned path is relative to
            the first n packages.
    """
    packages = []
    element = value
    while True:
        packages.append(element.name)
        element = element.eContainer()
        if not element:
            break
    packages.reverse()
    if relative_to < 0 or relative_to > len(packages):
        raise ValueError('relative_to not in range of number of packages')
    fqn = '.'.join(packages[relative_to:])
    if relative_to:
        fqn = '.' + fqn
    return cls.module_path_map.get(fqn, fqn)
Returns Python form of fully qualified name. Args: relative_to: If greater 0, the returned path is relative to the first n directories.
def get_value(self, spec, row):
    """Return the converted value for *spec* from *row*, honoring defaults.

    Falls back to the spec's 'default' when the column is missing or the
    row value is empty; returns None when neither is available.
    """
    column = spec.get('column')
    default = spec.get('default')
    if column is None:
        return self.convert_type(default, spec) if default is not None else None
    value = row.get(column)
    if is_empty(value):
        return self.convert_type(default, spec) if default is not None else None
    return self.convert_type(value, spec)
Returns the value or a dict with a 'value' entry plus extra fields.
def install_new_pipeline():
    """Install the DAAP transformer into the existing pipeline creator."""
    original_factory = Pipeline.create_pipeline

    def new_create_pipeline(context, *args, **kwargs):
        stages = original_factory(context, *args, **kwargs)
        # Inject the transformer right after the first stage.
        stages.insert(1, DAAPObjectTransformer(context))
        return stages

    Pipeline.create_pipeline = new_create_pipeline
Install above transformer into the existing pipeline creator.
def update_momentum_by_name(self, name, **kwargs):
    """Update the named momentum and re-add it.

    :param name: the momentum name.
    :param velocity: (keyword-only) new value for `velocity`.
    :param since: (keyword-only) new value for `since`.
    :param until: (keyword-only) new value for `until`.
    :returns: the updated momentum.
    :raises TypeError: `name` is None.
    :raises KeyError: no momentum named `name`.
    """
    velocity, since, until = self.pop_momentum_by_name(name)[:3]
    new_velocity = kwargs.get('velocity', velocity)
    new_since = kwargs.get('since', since)
    new_until = kwargs.get('until', until)
    return self.add_momentum(new_velocity, new_since, new_until, name)
Updates a momentum by the given name. :param name: the momentum name. :param velocity: (keyword-only) a new value for `velocity`. :param since: (keyword-only) a new value for `since`. :param until: (keyword-only) a new value for `until`. :returns: a momentum updated. :raises TypeError: `name` is ``None``. :raises KeyError: failed to find a momentum named `name`.
def var_expand(self, cmd, depth=0, formatter=DollarFormatter()):
    """Expand python variables in a string.

    *depth* is how many frames above the caller to walk for the local
    namespace used in expansion; the user's interactive namespace is
    always the global namespace.
    """
    # NOTE(review): the DollarFormatter() default is a single shared
    # instance created at def time — confirm the formatter is stateless.
    ns = self.user_ns.copy()
    try:
        frame = sys._getframe(depth + 1)
    except ValueError:
        # Not enough frames on the stack; fall back to user_ns only.
        pass
    else:
        ns.update(frame.f_locals)
    try:
        cmd = formatter.vformat(cmd, args=[], kwargs=ns)
    except Exception:
        # Expansion is best-effort: leave the command unmodified on any
        # formatting error.
        pass
    return cmd
Expand python variables in a string. The depth argument indicates how many frames above the caller should be walked to look for the local namespace where to expand variables. The global namespace for expansion is always the user's interactive namespace.
def revoke_membership(self, username):
    """Revoke *username*'s team membership.

    :param username: (required) name of the team member.
    :returns: bool — True on success (HTTP 204).
    """
    url = self._build_url('memberships', username, base_url=self._api)
    response = self._delete(url)
    return self._boolean(response, 204, 404)
Revoke this user's team membership. :param str username: (required), name of the team member :returns: bool
def change_group(self, name, group):
    """Change the group of the host *name* to *group* via OMAPI.

    @type name: str
    @type group: str
    """
    open_msg = OmapiMessage.open(b"host")
    open_msg.update_object(dict(name=name))
    open_resp = self.query_server(open_msg)
    if open_resp.opcode != OMAPI_OP_UPDATE:
        raise OmapiError("opening host %s failed" % name)
    update_msg = OmapiMessage.update(open_resp.handle)
    update_msg.update_object(dict(group=group))
    update_resp = self.query_server(update_msg)
    if update_resp.opcode != OMAPI_OP_UPDATE:
        raise OmapiError("changing group of host %s to %s failed" % (name, group))
Change the group of a host given the name of the host. @type name: str @type group: str
def get_xml(self, fp, format=FORMAT_NATIVE):
    """Write this source's XML metadata, converted to *format*, to *fp*.

    :param fp: a path, or an open file-like object the content is written
        to (an open file object is not closed for you).
    :param format: one of the formats from get_formats(), or FORMAT_NATIVE.
    """
    response = self._client.request('GET', getattr(self, format), stream=True)
    return stream.stream_response_to_file(response, path=fp)
Returns the XML metadata for this source, converted to the requested format. Converted metadata may not contain all the same information as the native format. :param file fp: A path, or an open file-like object which the content should be written to. :param str format: desired format for the output. This should be one of the available formats from :py:meth:`.get_formats`, or :py:attr:`.FORMAT_NATIVE` for the native format. If you pass this function an open file-like object as the fp parameter, the function will not close that file for you.
def append_use_flags(atom, uses=None, overwrite=False):
    """Append a list of USE flags for a given package or DEPEND atom.

    CLI Example:

    .. code-block:: bash

        salt '*' portage_config.append_use_flags "app-admin/salt[ldap, -libvirt]"
        salt '*' portage_config.append_use_flags ">=app-admin/salt-0.14.1" "['ldap', '-libvirt']"
    """
    if not uses:
        uses = portage.dep.dep_getusedeps(atom)
    if not uses:
        return
    # Only strip the "[...]" USE suffix when one is present; the previous
    # unconditional slice chopped the atom's last character when flags
    # were passed explicitly for a bracket-free atom (rfind returned -1).
    if '[' in atom:
        atom = atom[:atom.rfind('[')]
    append_to_package_conf('use', atom=atom, flags=uses, overwrite=overwrite)
Append a list of use flags for a given package or DEPEND atom CLI Example: .. code-block:: bash salt '*' portage_config.append_use_flags "app-admin/salt[ldap, -libvirt]" salt '*' portage_config.append_use_flags ">=app-admin/salt-0.14.1" "['ldap', '-libvirt']"
def _run_hooks(config, hooks, args, environ):
    """Actually run the hooks; returns a non-zero value on failure."""
    skips = _get_skips(environ)
    cols = _compute_cols(hooks, args.verbose)
    filenames = _all_filenames(args)
    filenames = filter_by_include_exclude(filenames, '', config['exclude'])
    classifier = Classifier(filenames)
    retval = 0
    for hook in hooks:
        retval |= _run_single_hook(classifier, hook, args, skips, cols)
        if retval and config['fail_fast']:
            break
    # When hooks modified files, surface the diff so users (and CI logs)
    # can see exactly what changed.
    if retval and args.show_diff_on_failure and git.has_diff():
        if args.all_files:
            output.write_line(
                'pre-commit hook(s) made changes.\n'
                'If you are seeing this message in CI, '
                'reproduce locally with: `pre-commit run --all-files`.\n'
                'To run `pre-commit` as part of git workflow, use '
                '`pre-commit install`.',
            )
        output.write_line('All changes made by hooks:')
        subprocess.call(('git', '--no-pager', 'diff', '--no-ext-diff'))
    return retval
Actually run the hooks.
def start(self):
    """Start the connection: clear the stop flag, then start queue and ZK."""
    self.__stop = False
    for component in (self._queue, self._zk):
        component.start()
Starts the connection
def _itemsLoadedDone(self, data): if data is None: return self.continuation = data.get('continuation', None) self.lastUpdated = data.get('updated', None) self.lastLoadLength = len(data.get('items', [])) self.googleReader.itemsToObjects(self, data.get('items', [])) self.lastLoadOk = True
Called when all items are loaded
def distance(cls, q0, q1):
    """Quaternion intrinsic distance.

    Returns the length of the geodesic arc connecting q0 to q1, computed
    as the norm of the log map between them.
    """
    return Quaternion.log_map(q0, q1).norm
Quaternion intrinsic distance. Find the intrinsic geodesic distance between q0 and q1. Params: q0: the first quaternion q1: the second quaternion Returns: A positive amount corresponding to the length of the geodesic arc connecting q0 to q1. Note: Although the q0^(-1)*q1 != q1^(-1)*q0, the length of the path joining them is given by the logarithm of those product quaternions, the norm of which is the same.
def get_rect(self):
    """Get the rectangle of the app window, or the desktop when no handle.

    Returns:
        RECT(left, top, right, bottom)
    """
    handle = self.handle or win32gui.GetDesktopWindow()
    left, top, right, bottom = win32gui.GetWindowRect(handle)
    return RECT(left, top, right, bottom)
Get rectangle of app or desktop resolution Returns: RECT(left, top, right, bottom)
def compose_post(apikey, resize, rotation, noexif):
    """Compose the basic multipart POST payload for an upload request.

    :param apikey: API key for the service.
    :param resize: "WxH" or "NN%" resize specification (may be empty).
    :param rotation: rotation value, validated by check_rotation.
    :param noexif: when truthy, request EXIF stripping.
    :return: dict of multipart form fields.
    """
    check_rotation(rotation)
    check_resize(resize)
    post_data = {
        'formatliste': ('', 'og'),
        'userdrehung': ('', rotation),
        'apikey': ('', apikey),
    }
    if resize and 'x' in resize:
        width, height = [part.strip() for part in resize.split('x')]
        post_data['udefb'] = ('', width)
        post_data['udefh'] = ('', height)
    elif resize and '%' in resize:
        percentage = resize.strip().strip('%')
        # Bug fix: wrap in the same ('', value) tuple shape as every other
        # field so the multipart encoder treats it consistently.
        post_data['udefp'] = ('', percentage)
    if noexif:
        post_data['noexif'] = ('', '')
    return post_data
composes basic post requests
def purgeRDR(rh):
    """Purge the reader belonging to the virtual machine.

    Input: request handle with function 'CHANGEVM', subfunction 'PURGERDR'
    and the target userid.
    Output: request handle updated with the results; returns the overall
    return code (0 on success).
    """
    rh.printSysLog("Enter changeVM.purgeRDR")
    rh.updateResults(purgeReader(rh))
    overall_rc = rh.results['overallRC']
    rh.printSysLog("Exit changeVM.purgeRDR, rc: " + str(overall_rc))
    return overall_rc
Purge the reader belonging to the virtual machine. Input: Request Handle with the following properties: function - 'CHANGEVM' subfunction - 'PURGERDR' userid - userid of the virtual machine Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error
def gen_template_files(path):
    """Generate template paths relative to *path*, skipping meta files."""
    path = path.rstrip(op.sep)
    for root, _, files in walk(path):
        for name in files:
            # `x not in` idiom replaces the old `not x in` filter+lambda;
            # TPLNAME/CFGNAME are the template's own meta files.
            if name in (TPLNAME, CFGNAME):
                continue
            yield op.relpath(op.join(root, name), path)
Generate relative template pathes.
def reset_term_stats(set_id, term_id, client_id, user_id, access_token):
    """Reset a term's statistics by deleting and re-creating it.

    Raises ValueError when the set or term cannot be uniquely resolved,
    and NotImplementedError for terms carrying an image.
    """
    matching_sets = [s for s in get_user_sets(client_id, user_id)
                     if s.set_id == set_id]
    if len(matching_sets) != 1:
        raise ValueError('{} set(s) found with id {}'.format(
            len(matching_sets), set_id))
    matching_terms = [t for t in matching_sets[0].terms
                      if t.term_id == term_id]
    if len(matching_terms) != 1:
        raise ValueError('{} term(s) found with id {}'.format(
            len(matching_terms), term_id))
    term = matching_terms[0]
    # Image terms cannot be round-tripped through delete/add.
    if term.image.url:
        raise NotImplementedError(
            '"{}" has an image and is thus not supported'.format(term))
    print('Deleting "{}"...'.format(term))
    delete_term(set_id, term_id, access_token)
    print('Re-creating "{}"...'.format(term))
    add_term(set_id, term, access_token)
    print('Done')
Reset the stats of a term by deleting and re-creating it.
def _root(self):
    """Return the root node of the tree containing this node.

    :returns: the ancestor with no (truthy) parent.
    :rtype: Node
    """
    node, parent = self, self.parent
    while parent:
        node, parent = parent, parent.parent
    return node
Attribute referencing the root node of the tree. :returns: the root node of the tree containing this instance. :rtype: Node
def replace(old, new):
    """Swap the element node *old* for *new* within *old*'s parent."""
    old.getparent().replace(old, new)
A simple way to replace one element node with another.
def economic_qs(K, epsilon=sqrt(finfo(float).eps)):
    r"""Economic eigen decomposition for a symmetric matrix.

    Splits the eigenvectors of ``K`` into those whose eigenvalues reach
    ``epsilon`` (``Q0`` with eigenvalues ``S0``) and the remainder
    (``Q1``, spanning the numerical null space).

    Args:
        K (array_like): symmetric matrix.
        epsilon (float): eigenvalue threshold. Default is
            ``sqrt(finfo(float).eps)``.

    Returns:
        tuple: ``((Q0, Q1), S0)``.
    """
    S, Q = eigh(K)
    # If the first eigenvector row is numerically zero while K itself is
    # non-trivial, retry with scipy's eigh — presumably guarding against a
    # degenerate numpy result (TODO confirm against upstream history).
    first_row_tiny = abs(max(Q[0].min(), Q[0].max(), key=abs)) < epsilon
    k_nontrivial = abs(max(K.min(), K.max(), key=abs)) >= epsilon
    if first_row_tiny and k_nontrivial:
        from scipy.linalg import eigh as sp_eigh
        S, Q = sp_eigh(K)
    keep = S >= epsilon
    drop = logical_not(keep)
    return ((Q[:, keep], Q[:, drop]), S[keep])
r"""Economic eigen decomposition for symmetric matrices. A symmetric matrix ``K`` can be decomposed in :math:`\mathrm Q_0 \mathrm S_0 \mathrm Q_0^\intercal + \mathrm Q_1\ \mathrm S_1 \mathrm Q_1^ \intercal`, where :math:`\mathrm S_1` is a zero matrix with size determined by ``K``'s rank deficiency. Args: K (array_like): Symmetric matrix. epsilon (float): Eigen value threshold. Default is ``sqrt(finfo(float).eps)``. Returns: tuple: ``((Q0, Q1), S0)``.
def put_attributes(self, item_name, attributes, replace=True,
                   expected_value=None):
    """Store name/value attribute pairs for *item_name*.

    Delegates to the underlying connection, passing this domain object
    along as the first argument.

    :param item_name: name of the item whose attributes are stored
    :param attributes: dict-like name/value pairs to store
    :param replace: whether stored values replace existing ones
    :param expected_value: optional [name, value-or-bool] precondition
    :return: True if successful
    """
    conn = self.connection
    return conn.put_attributes(self, item_name, attributes, replace,
                               expected_value)
Store attributes for a given item. :type item_name: string :param item_name: The name of the item whose attributes are being stored. :type attribute_names: dict or dict-like object :param attribute_names: The name/value pairs to store as attributes :type expected_value: list :param expected_value: If supplied, this is a list or tuple consisting of a single attribute name and expected value. The list can be of the form: * ['name', 'value'] In which case the call will first verify that the attribute "name" of this item has a value of "value". If it does, the delete will proceed, otherwise a ConditionalCheckFailed error will be returned. The list can also be of the form: * ['name', True|False] which will simply check for the existence (True) or non-existence (False) of the attribute. :type replace: bool :param replace: Whether the attribute values passed in will replace existing values or will be added as addition values. Defaults to True. :rtype: bool :return: True if successful
def map_indices(fn, iterable, indices):
    r"""Lazily apply *fn* only at the given *indices* of *iterable*.

    Elements at all other positions are yielded unchanged.
    """
    targets = set(indices)
    for pos, item in enumerate(iterable):
        yield fn(item) if pos in targets else item
r""" Map a function across indices of an iterable. Notes ----- Roughly equivalent to, though more efficient than:: lambda fn, iterable, *indices: (fn(arg) if i in indices else arg for i, arg in enumerate(iterable)) Examples -------- >>> a = [4, 6, 7, 1, 6, 8, 2] >>> from operator import mul >>> list(map_indices(partial(mul, 3), a, [0, 3, 5])) [12, 6, 7, 3, 6, 24, 2] >>> b = [9., np.array([5., 6., 2.]), ... np.array([[5., 6., 2.], [2., 3., 9.]])] >>> list(map_indices(np.log, b, [0, 2])) # doctest: +NORMALIZE_WHITESPACE [2.1972245773362196, array([ 5., 6., 2.]), array([[ 1.60943791, 1.79175947, 0.69314718], [ 0.69314718, 1.09861229, 2.19722458]])] .. todo:: Floating point precision >>> list(map_indices(np.exp, list(map_indices(np.log, b, [0, 2])), [0, 2])) ... # doctest: +NORMALIZE_WHITESPACE +SKIP [9., array([5., 6., 2.]), array([[ 5., 6., 2.], [ 2., 3., 9.]])]
def request_update_of_all_params(self):
    """Request an update of every parameter listed in the TOC."""
    for group, params in self.toc.toc.items():
        for name in params:
            self.request_param_update('%s.%s' % (group, name))
Request an update of all the parameters in the TOC
def get_rollup_ttl(self, use_cached=True):
    """Retrieve the rollupTtl for this stream.

    The rollupTtl is the time to live (TTL) in seconds for the aggregate
    roll-ups of data points stored in the stream.

    :param bool use_cached: If False, always request the latest metadata
        from Device Cloud; if True, reuse cached metadata when available.
    :return: the rollupTtl in seconds, or None when the metadata does not
        carry a rollupTtl value.
    :rtype: int or None
    """
    rollup_ttl_text = self._get_stream_metadata(use_cached).get("rollupTtl")
    # BUG FIX: int(None) raised TypeError when the key was absent; the
    # documented contract is "int or None".
    if rollup_ttl_text is None:
        return None
    return int(rollup_ttl_text)
Retrieve the rollupTtl for this stream The rollupTtl is the time to live (TTL) in seconds for the aggregate roll-ups of data points stored in the stream. A roll-up expires after the configured amount of time and is automatically deleted. :param bool use_cached: If False, the function will always request the latest from Device Cloud. If True, the device will not make a request if it already has cached data. :raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error :raises devicecloud.streams.NoSuchStreamException: if this stream has not yet been created :return: The rollupTtl associated with this stream in seconds :rtype: int or None
def surface_based_cape_cin(pressure, temperature, dewpoint):
    r"""Calculate CAPE and CIN for a surface-based parcel.

    Builds the parcel profile (including the LCL) from the surface
    observation and integrates CAPE/CIN against the environment.

    Parameters
    ----------
    pressure : `pint.Quantity`
        Atmospheric pressure profile; first entry is the surface.
    temperature : `pint.Quantity`
        Temperature profile.
    dewpoint : `pint.Quantity`
        Dewpoint profile.

    Returns
    -------
    `pint.Quantity`, `pint.Quantity`
        Surface-based CAPE and CIN.
    """
    profile_data = parcel_profile_with_lcl(pressure, temperature, dewpoint)
    return cape_cin(*profile_data)
r"""Calculate surface-based CAPE and CIN. Calculate the convective available potential energy (CAPE) and convective inhibition (CIN) of a given upper air profile for a surface-based parcel. CIN is integrated between the surface and LFC, CAPE is integrated between the LFC and EL (or top of sounding). Intersection points of the measured temperature profile and parcel profile are linearly interpolated. Parameters ---------- pressure : `pint.Quantity` Atmospheric pressure profile. The first entry should be the starting (surface) observation. temperature : `pint.Quantity` Temperature profile dewpoint : `pint.Quantity` Dewpoint profile Returns ------- `pint.Quantity` Surface based Convective Available Potential Energy (CAPE). `pint.Quantity` Surface based Convective INhibition (CIN). See Also -------- cape_cin, parcel_profile
def write_file(path, data):
    """Write *data* to *path*, logging the contents at debug level."""
    with open(path, 'w') as handle:
        log.debug('setting %s contents:\n%s', path, data)
        handle.write(data)
    # NOTE: the returned handle is already closed by the time it reaches
    # the caller (the with-block exits before/at return either way).
    return handle
Writes data to specified path.
def _persist(source, path, encoder=None):
    """Save the source's records to text files under *path*.

    encoder : None or one of str|json|pickle
        Serializer applied to each record; None is equivalent to str.
    Returns a TextFilesSource reading back the written parts.
    """
    import posixpath
    from dask.bytes import open_files
    import dask
    import pickle
    import json
    from intake.source.textfiles import TextFilesSource
    # Map the encoder name to a callable (KeyError on unknown names).
    encoder = {None: str, 'str': str, 'json': json.dumps,
               'pickle': pickle.dumps}[encoder]
    try:
        b = source.to_dask()
    except NotImplementedError:
        import dask.bag as db
        # Source cannot stream lazily: read eagerly and wrap in a
        # single-partition bag.
        b = db.from_sequence(source.read(), npartitions=1)
    # One output file per bag partition.
    files = open_files(posixpath.join(path, 'part.*'), mode='wt',
                       num=b.npartitions)
    dwrite = dask.delayed(write_file)
    # NOTE(review): write_file is called with three arguments here; confirm
    # the module-level write_file accepts (partition, file, encoder).
    out = [dwrite(part, f, encoder)
           for part, f in zip(b.to_delayed(), files)]
    dask.compute(out)
    s = TextFilesSource(posixpath.join(path, 'part.*'))
    return s
Save list to files using encoding encoder : None or one of str|json|pickle None is equivalent to str
def plugins(self):
    """Return the set of plugins this widget should display.

    With no explicit plugin list configured, every registered plugin is
    shown; otherwise only the named plugins are returned.
    """
    from fluent_contents import extensions
    pool = extensions.plugin_pool
    if self._plugins is None:
        return pool.get_plugins()
    return pool.get_plugins_by_name(*self._plugins)
Get the set of plugins that this widget should display.
def leaveEvent(self, event):
    """Qt override: hide the tooltip as soon as the cursor leaves it."""
    super(ToolTipWidget, self).leaveEvent(event)
    self.hide()
Override Qt method to hide the tooltip on leave.
def flatten_pages(self, pages, level=1):
    """Recursively flatten the nested pages structure into a flat list.

    Each entry in *pages* is either a ``[file, title]`` pair or a
    single-item dict mapping a title to a file name (leaf) or to a list
    of sub-pages (section, flattened at ``level + 1``).

    :param pages: nested pages data structure
    :param level: nesting depth recorded on each flattened entry
    :return: list of ``{'file', 'title', 'level'}`` dicts
    """
    flattened = []
    for page in pages:
        # isinstance instead of `type(...) is ...`; also accepts subclasses.
        if isinstance(page, list):
            flattened.append({
                'file': page[0],
                'title': page[1],
                'level': level,
            })
        elif isinstance(page, dict):
            # Single-key dict: extract key/value once instead of the
            # repeated list(page.values())[0] of the original.
            title, value = next(iter(page.items()))
            if isinstance(value, str):
                flattened.append({
                    'file': value,
                    'title': title,
                    'level': level,
                })
            elif isinstance(value, list):
                flattened.extend(
                    self.flatten_pages(value, level + 1))
    return flattened
Recursively flattens pages data structure into a one-dimensional data structure
def from_dict(document):
    """Create an attribute type definition from a JSON-like representation.

    Parameters
    ----------
    document : dict
        JSON-like object representation carrying a 'name' key (and
        'values' for enum types).

    Returns
    -------
    AttributeType

    Raises
    ------
    ValueError
        If the type name is not one of the known attribute types.
    """
    type_name = document['name']
    if type_name == ATTR_TYPE_INT:
        return IntType()
    if type_name == ATTR_TYPE_FLOAT:
        return FloatType()
    if type_name == ATTR_TYPE_ENUM:
        return EnumType(document['values'])
    if type_name == ATTR_TYPE_DICT:
        return DictType()
    if type_name == ATTR_TYPE_LIST:
        return ListType()
    raise ValueError('invalid attribute type: ' + str(type_name))
Create data type definition form Json-like object represenation. Parameters ---------- document : dict Json-like object represenation Returns ------- AttributeType
def handle_error(self, type_, value, tb):
    """Handle an exception raised while processing a request.

    Mirrors pywsgi.WSGIHandler.handle_error, except that the error
    response body is a reflection of the traceback and the WSGI environ
    instead of the canned internal-error page.
    """
    # GreenletExit signals a normal greenlet shutdown, not an app error.
    if not issubclass(type_, pywsgi.GreenletExit):
        self.server.loop.handle_error(self.environ, type_, value, tb)
    if self.response_length:
        # Part of the response was already sent; nothing sensible can be
        # written anymore, so just drop the connection.
        self.close_connection = True
    else:
        tb_stream = traceback.format_exception(type_, value, tb)
        # Drop the local traceback reference to break the ref cycle.
        del tb
        tb_stream.append('\n')
        tb_stream.append(pprint.pformat(self.environ))
        body = ''.join(tb_stream)
        # Copy the canned headers and overwrite the length entry.
        # NOTE(review): assumes slot 2 of _INTERNAL_ERROR_HEADERS is the
        # Content-Length header — confirm against the pywsgi version used.
        headers = pywsgi._INTERNAL_ERROR_HEADERS[:]
        headers[2] = ('Content-Length', str(len(body)))
        self.start_response(pywsgi._INTERNAL_ERROR_STATUS, headers)
        self.write(body)
This method copies the code from pywsgi.WSGIHandler.handle_error, change the write part to be a reflection of traceback and environ
def get_soap_client(db_alias, client_class=None):
    """Create the SOAP client for the user currently logged into *db_alias*.

    The default client class is beatbox.PythonClient, but another subtype
    of beatbox.XMLClient may be supplied via *client_class*.

    :raises InterfaceError: if the beatbox package is not installed
    """
    if not beatbox:
        raise InterfaceError("To use SOAP API, you'll need to install the Beatbox package.")
    if client_class is None:
        client_class = beatbox.PythonClient
    soap_client = client_class()
    connection = connections[db_alias]
    cursor = connection.cursor()
    # NOTE(review): presumably this primes/refreshes the REST session so
    # the token below is valid — confirm.
    cursor.urls_request()
    auth_info = connections[db_alias].sf_session.auth
    access_token = auth_info.get_auth()['access_token']
    # Assumes the token starts with the 15-char org id followed by '!'
    # (standard Salesforce session-id format — TODO confirm).
    assert access_token[15] == '!'
    org_id = access_token[:15]
    url = '/services/Soap/u/{version}/{org_id}'.format(version=salesforce.API_VERSION,
                                                       org_id=org_id)
    soap_client.useSession(access_token, auth_info.instance_url + url)
    return soap_client
Create the SOAP client for the current user logged in the db_alias The default created client is "beatbox.PythonClient", but an alternative client is possible. (i.e. other subtype of beatbox.XMLClient)
def handleOACK(self, pkt):
    """Handle an OACK from the server, syncing any accepted options.

    :param pkt: the received OACK packet
    :raises TftpException: if the OACK carries no options, or the options
        do not match what was requested
    """
    # Guard clauses; also replaces the non-idiomatic
    # `len(pkt.options.keys()) > 0` emptiness test.
    if not pkt.options:
        raise TftpException("No options found in OACK")
    if not pkt.match_options(self.context.options):
        log.error("Failed to negotiate options")
        raise TftpException("Failed to negotiate options")
    log.info("Successful negotiation of options")
    self.context.options = pkt.options
    for key in self.context.options:
        log.info(" %s = %s" % (key, self.context.options[key]))
This method handles an OACK from the server, syncing any accepted options.
def decode_step(self, step_input, states):
    """Run one decoding step of the translation model.

    Parameters
    ----------
    step_input : NDArray
        Shape (batch_size,)
    states : list of NDArrays

    Returns
    -------
    step_output : NDArray
        Shape (batch_size, C_out)
    states : list
    step_additional_outputs : list
        Additional step outputs, e.g. the attention weights.
    """
    embedded = self.tgt_embed(step_input)
    decoded, states, step_additional_outputs = self.decoder(embedded, states)
    step_output = self.tgt_proj(decoded)
    return step_output, states, step_additional_outputs
One step decoding of the translation model. Parameters ---------- step_input : NDArray Shape (batch_size,) states : list of NDArrays Returns ------- step_output : NDArray Shape (batch_size, C_out) states : list step_additional_outputs : list Additional outputs of the step, e.g, the attention weights
def validate(self, uri):
    """Check that the query part of *uri* is compatible with this descriptor.

    :param uri: an URI to check
    :return: True if the URI is compatible, otherwise False
    """
    # Delegate the basic component check to the base verifier first.
    if WURIComponentVerifier.validate(self, uri) is False:
        return False
    try:
        # Constructing the strict query raises ValueError on any violation
        # of the specs/extra-parameter policy.
        WStrictURIQuery(
            WURIQuery.parse(uri.component(self.component())),
            *self.__specs,
            extra_parameters=self.__extra_parameters
        )
    except ValueError:
        return False
    return True
Check that an query part of an URI is compatible with this descriptor. Return True if the URI is compatible. :param uri: an URI to check :return: bool
def set_qos(self, port_name, type='linux-htb', max_rate=None, queues=None):
    """Set a QoS rule and create queues on the given port.

    :param port_name: name of the port to configure
    :param type: QoS type (default 'linux-htb')
    :param max_rate: maximum rate for the QoS rule
    :param queues: queue configurations to create (default: none)
    :return: the combined results of both commands, or None on failure
    """
    queue_list = queues or []
    qos_cmd = ovs_vsctl.VSCtlCommand(
        'set-qos', [port_name, type, max_rate])
    queue_cmd = ovs_vsctl.VSCtlCommand(
        'set-queue', [port_name, queue_list])
    self.run_command([qos_cmd, queue_cmd])
    if qos_cmd.result and queue_cmd.result:
        return qos_cmd.result + queue_cmd.result
    return None
Sets a Qos rule and creates Queues on the given port.
def file_size(filename):
    """Return the size in bytes of the given file.

    @filename - Path to the file.

    Raises a generic Exception (chained to the original error) when the
    size cannot be determined.
    """
    fd = os.open(filename, os.O_RDONLY)
    try:
        # Seeking to the end yields the file size as the resulting offset.
        return os.lseek(fd, 0, os.SEEK_END)
    except Exception as e:
        # The original `except KeyboardInterrupt: raise e` clause was dead
        # code: KeyboardInterrupt is not an Exception subclass, so the
        # generic handler never swallowed it anyway.  Keep the generic
        # Exception type for caller compatibility, but chain the cause.
        raise Exception(
            "file_size failed to obtain the size of '%s': %s" % (filename, str(e))) from e
    finally:
        os.close(fd)
Obtains the size of a given file. @filename - Path to the file. Returns the size of the file.
def popleft(self, block=True, timeout=None):
    """Remove and return an item from the left side of the GeventDeque.

    If *block* is True and *timeout* is None (the default), block until
    an item is available.  With a positive *timeout*, wait at most that
    many seconds, raising IndexError if no item arrived in time.  If
    *block* is False, return an item only if one is immediately
    available, else raise IndexError (*timeout* is ignored).
    """
    return self._pop(block, timeout, left=True)
Remove and return an item from the left side of the GeventDeque. If no elements are present, raises an IndexError. If optional args *block* is True and *timeout* is ``None`` (the default), block if necessary until an item is available. If *timeout* is a positive number, it blocks at most *timeout* seconds and raises the :class:`IndexError` exception if no item was available within that time. Otherwise (*block* is False), return an item if one is immediately available, else raise the :class:`IndexError` exception (*timeout* is ignored in that case).
def filter(self, record):
    """Attach username, remote_addr and http_user_agent to *record*.

    Values default to '-' when no request (or no authenticated user) is
    available.  Always returns True so the record is never dropped.
    """
    # Defaults used when there is no current request context.
    username = '-'
    remote_addr = '-'
    http_user_agent = '-'
    request = get_request()
    if request:
        user = getattr(request, 'user', None)
        if user and not user.is_anonymous():
            username = user.username
        meta = getattr(request, 'META', {})
        remote_addr = meta.get('REMOTE_ADDR', '-')
        http_user_agent = meta.get('HTTP_USER_AGENT', '-')
        # Attach the request itself, unless one is already present.
        if not hasattr(record, 'request'):
            record.request = request
    record.username = username
    record.remote_addr = remote_addr
    record.http_user_agent = http_user_agent
    return True
Adds user and remote_addr to the record.
def random_alphanum(length=10, lower_only=False):
    """Return a random alphanumeric string.

    :param length: size of the generated string.
    :param lower_only: when True, use only lower-case letters.
    :return: alphanumeric string of the requested length.
    """
    charset = ALPHANUM_LOWER if lower_only else ALPHANUM
    batch = 5
    # Draw batches of 5 distinct characters until enough are collected,
    # then shuffle and trim (same sampling sequence as before).
    picked = random.sample(charset, batch)
    while len(picked) < length:
        picked += random.sample(charset, batch)
    random.shuffle(picked)
    return ''.join(picked[:length])
Gets a random alphanumeric value using both letters and numbers. :param length: size of the random alphanumeric string. :param lower_only: boolean indicating if only lower case letters should be used. :return: alphanumeric string size of length This function uses all number except for: * 0 * 1 and uses all letters except for: * lower case "l" (el) * lower and upper case "o" and "O" (oh) For upper and lower cased letters... ------------------------------------ Upper and lower cased letters and numbers can be used more than once which leaves the possible combinations as follows: 8 numbers used + 49 letters used (upper and lower) = 57 total characters Which leads us to the following equation: 57 total characters ^ length = total possible combinations The following total possible combinations are below for a given length: 57 ^ 1 = 57 57 ^ 2 = 3,249 57 ^ 3 = 185,193 57 ^ 4 = 10,556,001 57 ^ 5 = 601,692,057 57 ^ 6 = 34,296,447,249 57 ^ 7 = 1,954,897,493,193 57 ^ 8 = 111,429,157,112,001 57 ^ 9 = 6,351,461,955,384,057 57 ^ 10 = 362,033,331,456,891,249 ... For lower cased letters... -------------------------- Lower cased letters and numbers can be used more than once which leaves the possible combinations as follows: 8 numbers used + 24 letters used (lower only) = 32 total characters Which leads us to the following equation: 32 total characters ^ length = total possible combinations The following total possible combinations are below for a given length: 32 ^ 1 = 32 32 ^ 2 = 1,024 32 ^ 3 = 32,768 32 ^ 4 = 1,048,576 32 ^ 5 = 33,554,432 32 ^ 6 = 1,073,741,824 32 ^ 7 = 34,359,738,368 32 ^ 8 = 1,099,511,627,776 32 ^ 9 = 35,184,372,088,832 32 ^ 10 = 1,125,899,906,842,624 ...
async def set_reply_markup(msg: Dict, request: 'Request', stack: 'Stack') \
        -> None:
    """Add the Telegram "reply markup" to a message from the stack's layers.

    Looks for an InlineKeyboard, a ReplyKeyboard and a ReplyKeyboardRemove
    layer, in that order; when several are present the last one found wins.

    :param msg: Message dictionary
    :param request: Current request being replied
    :param stack: Stack to analyze
    """
    from bernard.platforms.telegram.layers import InlineKeyboard, \
        ReplyKeyboard, \
        ReplyKeyboardRemove

    # The two keyboard layer types share the async serialize(request) API,
    # so the previously copy-pasted try/except blocks collapse to a loop.
    for layer_class in (InlineKeyboard, ReplyKeyboard):
        try:
            keyboard = stack.get_layer(layer_class)
        except KeyError:
            pass
        else:
            msg['reply_markup'] = await keyboard.serialize(request)

    try:
        remove = stack.get_layer(ReplyKeyboardRemove)
    except KeyError:
        pass
    else:
        # ReplyKeyboardRemove.serialize() is synchronous.
        msg['reply_markup'] = remove.serialize()
Add the "reply markup" to a message from the layers :param msg: Message dictionary :param request: Current request being replied :param stack: Stack to analyze
def _update_pods_metrics(self, instance, pods):
    """Report the number of running pods on this node, tagged by service
    and creator.

    All pods are walked, tags are extracted for each one, and pods are
    then counted per unique tag combination.

    :param instance: check instance config; its 'tags' list is appended
        to every emitted metric
    :param pods: pod-list payload (dict with an 'items' list)
    """
    tags_map = defaultdict(int)
    for pod in pods['items']:
        pod_meta = pod.get('metadata', {})
        pod_tags = self.kubeutil.get_pod_creator_tags(pod_meta, legacy_rep_controller_tag=True)
        services = self.kubeutil.match_services_for_pod(pod_meta)
        if isinstance(services, list):
            for service in services:
                pod_tags.append('kube_service:%s' % service)
        if 'namespace' in pod_meta:
            pod_tags.append('kube_namespace:%s' % pod_meta['namespace'])
        # frozenset makes the tag combination hashable and order-independent.
        tags_map[frozenset(pod_tags)] += 1
    commmon_tags = instance.get('tags', [])
    # NOTE: iteritems() implies this module targets Python 2.
    for pod_tags, pod_count in tags_map.iteritems():
        tags = list(pod_tags)
        tags.extend(commmon_tags)
        self.publish_gauge(self, NAMESPACE + '.pods.running', pod_count, tags)
Reports the number of running pods on this node, tagged by service and creator We go through all the pods, extract tags then count them by tag list, sorted and serialized in a pipe-separated string (it is an illegal character for tags)
def delete(self, password, message=""):
    """Delete the currently authenticated redditor.

    WARNING! This action is IRREVERSIBLE.

    :param password: password for the currently authenticated account
    :param message: optional 'reason for deletion' message
    :returns: json response from the server
    """
    payload = {
        'user': self.user.name,
        'passwd': password,
        'delete_message': message,
        'confirm': True,
    }
    return self.request_json(self.config['delete_redditor'], data=payload)
Delete the currently authenticated redditor. WARNING! This action is IRREVERSIBLE. Use only if you're okay with NEVER accessing this reddit account again. :param password: password for currently authenticated account :param message: optional 'reason for deletion' message. :returns: json response from the server.
def save_evaluations(self, evaluations_file=None):
    """Save the evaluations gathered at each optimization iteration.

    :param evaluations_file: name of the file in which the results are saved.
    """
    n_evals = self.Y.shape[0]
    # Column layout: iteration number, objective value(s), input variables.
    iterations = np.arange(1, n_evals + 1)[:, None]
    results = np.hstack((iterations, self.Y, self.X))
    header = ['Iteration', 'Y'] + ['var_' + str(k)
                                   for k in range(1, self.X.shape[1] + 1)]
    self._write_csv(evaluations_file, [header] + results.tolist())
Saves evaluations at each iteration of the optimization :param evaluations_file: name of the file in which the results are saved.
async def iter_all(
    self,
    direction: msg.StreamDirection = msg.StreamDirection.Forward,
    from_position: Optional[Union[msg.Position, msg._PositionSentinel]] = None,
    batch_size: int = 100,
    resolve_links: bool = True,
    require_master: bool = False,
    correlation_id: Optional[uuid.UUID] = None,
):
    """Read through all the events in the database.

    Args:
        direction (optional): Whether to read forward or backward through
            the events. Defaults to StreamDirection.Forward.
        from_position (optional): The position to start reading from;
            defaults to the beginning (Forward) or the end (Backward).
        batch_size (optional): The maximum number of events to read at a time.
        resolve_links (optional): True if eventstore should automatically
            resolve Link Events, otherwise False.
        require_master (optional): True if this command must be sent direct
            to the master node, otherwise False.
        correlation_id (optional): A unique identifier for this command.

    Yields:
        Each event read from the database.
    """
    # (A no-op `correlation_id = correlation_id` self-assignment was removed.)
    cmd = convo.IterAllEvents(
        msg.Position.for_direction(direction, from_position),
        batch_size,
        resolve_links,
        require_master,
        direction,
        self.credential,
        correlation_id,
    )
    result = await self.dispatcher.start_conversation(cmd)
    iterator = await result
    async for event in iterator:
        yield event
Read through all the events in the database. Args: direction (optional): Controls whether to read forward or backward through the events. Defaults to StreamDirection.Forward from_position (optional): The position to start reading from. Defaults to photonpump.Beginning when direction is Forward, photonpump.End when direction is Backward. batch_size (optional): The maximum number of events to read at a time. resolve_links (optional): True if eventstore should automatically resolve Link Events, otherwise False. required_master (optional): True if this command must be sent direct to the master node, otherwise False. correlation_id (optional): A unique identifer for this command. Examples: Print every event from the database. >>> with async.connect() as conn: >>> async for event in conn.iter_all() >>> print(event) Print every event from the database in reverse order >>> with async.connect() as conn: >>> async for event in conn.iter_all(direction=StreamDirection.Backward): >>> print(event) Start reading from a known commit position >>> with async.connect() as conn: >>> async for event in conn.iter_all(from_position=Position(12345)) >>> print(event)
def main(global_config, **settings):
    """Build and return the Pyramid WSGI application."""
    engine = engine_from_config(settings, "sqlalchemy.")
    DBSession.configure(bind=engine)
    Base.metadata.bind = engine
    config = Configurator(settings=settings)
    config.include("pyramid_jinja2")
    config.include("pyramid_debugtoolbar")
    config.add_route("home", "/")
    # Every remaining route maps "name" -> "/name".
    for route_name in (
        "data",
        "data_advanced",
        "data_yadcf",
        "dt_110x",
        "dt_110x_custom_column",
        "dt_110x_basic_column_search",
        "dt_110x_advanced_column_search",
        "dt_110x_yadcf",
    ):
        config.add_route(route_name, "/" + route_name)
    config.scan()
    json_renderer = JSON()
    json_renderer.add_adapter(date, date_adapter)
    config.add_renderer("json_with_dates", json_renderer)
    config.add_jinja2_renderer('.html')
    return config.make_wsgi_app()
Return a Pyramid WSGI application.
async def join_voice_channel(self, guild_id, channel_id):
    """Join a voice channel via the guild's voice websocket.

    Alternative join path usable when the node is already known.
    """
    await self.get_voice_ws(guild_id).voice_state(guild_id, channel_id)
Alternative way to join a voice channel if node is known.
def bin_spikes(spike_times, binsz):
    """Sort spike times into time bins of width *binsz*.

    :param spike_times: times of spike instances
    :type spike_times: list
    :param binsz: length of the time bin to use
    :type binsz: float
    :returns: integer array of bin indices, one per spike time
    """
    # Round to 5 decimals before flooring so values sitting just below a
    # bin boundary (float error) are promoted into that bin.
    indices = [int(np.floor(np.around(t / binsz, 5))) for t in spike_times]
    return np.array(indices, dtype=int)
Sort spike times into bins :param spike_times: times of spike instances :type spike_times: list :param binsz: length of time bin to use :type binsz: float :returns: list of bin indices, one for each element in spike_times
def eager_load_relations(self, models):
    """Eager-load the configured top-level relationships onto the models.

    Nested relation names (containing '.') are skipped here.

    :param models: the models to load relations for
    :type models: list
    :return: The models
    :rtype: list
    """
    for relation_name, constraints in self._eager_load.items():
        if '.' not in relation_name:
            models = self._load_relation(models, relation_name, constraints)
    return models
Eager load the relationship of the models. :param models: :type models: list :return: The models :rtype: list