code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def signalize_extensions():
    """Report used DB-API 2.0 extensions (PEP 249) via run-time warnings.

    Each supported extension beyond the DB-API 2.0 core emits a
    SalesforceWarning so users know they rely on non-portable features.
    """
    warnings.warn("DB-API extension cursor.rownumber used", SalesforceWarning)
    warnings.warn("DB-API extension connection.<exception> used", SalesforceWarning)
    warnings.warn("DB-API extension cursor.connection used", SalesforceWarning)
    warnings.warn("DB-API extension cursor.messages used", SalesforceWarning)
    warnings.warn("DB-API extension connection.messages used", SalesforceWarning)
    # Fixed: in the next two calls the warning category had been fused
    # into the message string, dropping the category argument and
    # emitting a malformed message.
    warnings.warn("DB-API extension cursor.next() used", SalesforceWarning)
    warnings.warn("DB-API extension cursor.__iter__() used", SalesforceWarning)
    warnings.warn("DB-API extension cursor.lastrowid used", SalesforceWarning)
    warnings.warn("DB-API extension .errorhandler used", SalesforceWarning)
DB API 2.0 extension are reported by warnings at run-time.
def extend(self, delta):
    """Extend this subscription by *delta*.

    The delta is added to the trial end while a trial is still running,
    otherwise to the current period end.

    :param delta: positive timedelta by which to extend
    :raises ValueError: if delta is negative
    """
    if delta.total_seconds() < 0:
        raise ValueError("delta must be a positive timedelta.")
    trial_still_running = (self.trial_end is not None
                           and self.trial_end > timezone.now())
    base = self.trial_end if trial_still_running else self.current_period_end
    return self.update(prorate=False, trial_end=base + delta)
Extends this subscription by the provided delta. :param delta: The timedelta by which to extend this subscription. :type delta: timedelta
def _set_channel_gain(self, num):
    """Finish an HX711 read by clocking in the next gain/channel setting.

    Only called from the _read function.

    :param num: how many extra clock pulses to send (1...3)
    :type num: int
    :return: True on success
    :rtype: bool
    :raises AttributeError: if num is outside 1..3
    :raises GenericHX711Exception: if the channel could not be verified
    """
    if not 1 <= num <= 3:
        # Fixed: the exception previously carried no message at all.
        raise AttributeError(
            "num must be in the range 1..3, got {}".format(num))
    for _ in range(num):
        logging.debug("_set_channel_gain called")
        start_counter = time.perf_counter()
        GPIO.output(self._pd_sck, True)
        GPIO.output(self._pd_sck, False)
        end_counter = time.perf_counter()
        time_elapsed = float(end_counter - start_counter)
        # A pulse longer than 60 µs is flagged because the read may then
        # be invalid (presumably the chip's timing limit -- see HX711
        # datasheet to confirm).
        if time_elapsed >= 0.00006:
            logging.warning(
                'setting gain and channel took more than 60µs. '
                'Time elapsed: {:0.8f}'.format(time_elapsed)
            )
    # Perform a dummy read to verify the new setting took effect.
    result = self.get_raw_data(times=6)
    if result is False:
        raise GenericHX711Exception("channel was not set properly")
    return True
Finish data transmission from HX711 by setting next required gain and channel Only called from the _read function. :param num: how often to do the set (1...3) :type num: int :return: True on success :rtype: bool
def _add_item(self, cls, *args, **kwargs):
    """Add a plot item of visual class *cls* and return its data dict.

    ``box_index`` may be passed as a keyword; it defaults to
    ``self._default_box_index`` and is broadcast to one row per vertex
    when it is not already an ndarray.
    """
    box_index = kwargs.pop('box_index', self._default_box_index)
    data = cls.validate(*args, **kwargs)
    n = cls.vertex_count(**data)
    if not isinstance(box_index, np.ndarray):
        k = len(self._default_box_index)
        box_index = _get_array(box_index, (n, k))
    data['box_index'] = box_index
    # Items are grouped per visual class (presumably for batched
    # rendering -- confirm against the draw path).
    if cls not in self._items:
        self._items[cls] = []
    self._items[cls].append(data)
    return data
Add a plot item.
def _write_plan(self, stream):
    """Write the plan line ("1..N") to *stream* once, if a plan is set."""
    if self.plan is None or self._plan_written:
        return
    print("1..{0}".format(self.plan), file=stream)
    self._plan_written = True
Write the plan line to the stream. If we have a plan and have not yet written it out, write it to the given stream.
def transform(self, X, y=None, copy=None):
    """Standardize X by centering and scaling with the fitted parameters.

    :param X: data matrix, shape [n_samples, n_features]
    :param y: ignored; kept for scikit-learn Pipeline compatibility
    :param copy: whether to copy X; defaults to ``self.copy``
    :return: scaled version of the X data matrix
    """
    check_is_fitted(self, 'scale_')
    copy = copy if copy is not None else self.copy
    X = check_array(X, accept_sparse='csr', copy=copy, warn_on_dtype=True,
                    estimator=self, dtype=FLOAT_DTYPES)
    if sparse.issparse(X):
        if self.with_mean:
            # Centering would densify a sparse matrix.
            raise ValueError(
                "Cannot center sparse matrices: pass `with_mean=False` "
                "instead. See docstring for motivation and alternatives.")
        if self.scale_ is not None:
            inplace_column_scale(X, 1 / self.scale_)
    else:
        if self.with_mean:
            X -= self.mean_
        if self.with_std:
            X /= self.scale_
    return X
Perform standardization by centering and scaling using the parameters. :param X: Data matrix to scale. :type X: numpy.ndarray, shape [n_samples, n_features] :param y: Passthrough for scikit-learn ``Pipeline`` compatibility. :type y: None :param bool copy: Copy the X matrix. :return: Scaled version of the X data matrix. :rtype: numpy.ndarray, shape [n_samples, n_features]
def get_offset(cls, info):
    """Return the byte offset of the Xing header from the MPEG frame start.

    The offset (including sync) depends on the MPEG version and channel
    mode of the layer-3 frame described by *info* (mode 3 = mono).
    """
    assert info.layer == 3
    mono = info.mode == 3
    if info.version == 1:
        return 21 if mono else 36
    return 13 if mono else 21
Calculate the offset to the Xing header from the start of the MPEG header including sync based on the MPEG header's content.
def get_password(self):
    """Ensure ``self.password`` is set, falling back to the environment.

    If no password was provided, look it up under the environment
    variable named ``<username>password``.

    :raises PasswordError: if no password can be found
    """
    if self.password is None:
        # Fixed: look the variable up once instead of twice.
        password = os.environ.get(self.username + 'password')
        if not password:
            raise PasswordError(self.username)
        self.password = password
If password is not provided will look in environment variables for username+'password'.
def faces_unique_edges(self):
    """For each face, indexes into ``self.edges_unique`` forming that face.

    Returns
    -------
    faces_unique_edges : (len(self.faces), 3) int
        Indexes of self.edges_unique that construct self.faces
    """
    # The assignment looks unused but is required for its side effect:
    # accessing edges_unique fills the 'edges_unique_inverse' cache
    # entry consumed on the next line.
    populate = self.edges_unique
    result = self._cache['edges_unique_inverse'].reshape((-1, 3))
    return result
For each face return which indexes in mesh.unique_edges constructs that face. Returns --------- faces_unique_edges : (len(self.faces), 3) int Indexes of self.edges_unique that construct self.faces Examples --------- In [0]: mesh.faces[0:2] Out[0]: TrackedArray([[ 1, 6946, 24224], [ 6946, 1727, 24225]]) In [1]: mesh.edges_unique[mesh.faces_unique_edges[0:2]] Out[1]: array([[[ 1, 6946], [ 6946, 24224], [ 1, 24224]], [[ 1727, 6946], [ 1727, 24225], [ 6946, 24225]]])
def _init_go_sources(self, go_sources_arg, go2obj_arg):
    """Return the subset of user GO IDs that are present in the GO DAG.

    When the 'children' keyword was given, the user set is first
    expanded with leaf children. IDs missing from the DAG are reported
    on stdout rather than raising.
    """
    gos_user = set(go_sources_arg)
    if 'children' in self.kws and self.kws['children']:
        gos_user |= get_leaf_children(gos_user, go2obj_arg)
    gos_godag = set(go2obj_arg)
    gos_source = gos_user.intersection(gos_godag)
    gos_missing = gos_user.difference(gos_godag)
    if not gos_missing:
        return gos_source
    sys.stdout.write("{N} GO IDs NOT FOUND IN GO DAG: {GOs}\n".format(
        N=len(gos_missing), GOs=" ".join([str(e) for e in gos_missing])))
    return gos_source
Return GO sources which are present in GODag.
def connect_db(config):
    """Open a connection to the configured SQLite database.

    Rows are returned as :class:`sqlite3.Row` so columns are accessible
    by name.
    """
    connection = sqlite3.connect(config["database"]["uri"])
    connection.row_factory = sqlite3.Row
    return connection
Connects to the specific database.
def _get_ctypes(self):
    """Return ContentType pks for all models related to this model.

    Direct subclasses of each related model are included as well (one
    level of ``__subclasses__`` only).
    """
    ctypes = []
    for related_object in self.model._meta.get_all_related_objects():
        # Prefer 'related_model' and fall back to 'model' -- presumably
        # for cross-version Django compatibility; confirm.
        model = getattr(related_object, 'related_model',
                        related_object.model)
        ctypes.append(ContentType.objects.get_for_model(model).pk)
        if model.__subclasses__():
            for child in model.__subclasses__():
                ctypes.append(ContentType.objects.get_for_model(child).pk)
    return ctypes
Returns all related objects for this model.
def argsize(self):
    """The total size in bytes of all the command arguments."""
    # sum() over an empty sequence is already 0, so the previous
    # explicit length check was redundant.
    return sum(arg.nbytes for arg in self.argdefns)
The total size in bytes of all the command arguments.
def validate(self, value):
    """Validate *value* against every child validator.

    NOTE(review): despite the "OrValidator" naming in the accompanying
    docstring, this raises if ANY child validator fails (AND
    semantics) -- confirm which behavior is intended.

    :raises ValidatorException: aggregated errors when validation fails
    :return: the (unchanged) value on success
    """
    errors = []
    self._used_validator = []
    for val in self._validators:
        try:
            val.validate(value)
            self._used_validator.append(val)
        except ValidatorException as e:
            errors.append(e)
        except Exception as e:
            # Wrap unexpected failures so callers only ever see
            # ValidatorException.
            errors.append(ValidatorException("Unknown Error", e))
    if len(errors) > 0:
        raise ValidatorException.from_list(errors)
    return value
validate function from the OrValidator. Returns: the validated value if validation succeeds; raises ValidatorException otherwise
def render(self, data, accepted_media_type=None, renderer_context=None):
    """Serialize *data* into a YAML string ('' when data is None)."""
    assert yaml, 'YAMLRenderer requires pyyaml to be installed'
    if data is None:
        return ''
    options = dict(
        stream=None,
        encoding=self.charset,
        Dumper=self.encoder,
        allow_unicode=not self.ensure_ascii,
        default_flow_style=self.default_flow_style,
    )
    return yaml.dump(data, **options)
Renders `data` into serialized YAML.
def set_exception(self, exception):
    """Record *exception* as this Future's result and fire callbacks."""
    # State must be set before callbacks run, so they observe the
    # completed Future.
    self._exception = exception
    self._result_set = True
    self._invoke_callbacks(self)
Set the Future's exception.
def tune_scale(acceptance, scale):
    """Tune the proposal scale for a Metropolis-Hastings sampler.

    Parameters
    ----------
    acceptance : float
        The most recent acceptance rate
    scale : float
        The current scale parameter

    Returns
    -------
    scale : float
        Adjusted scale; unchanged when acceptance falls in the
        untouched band (roughly (0.234, 0.4]).
    """
    # Bands mirror the original if/elif chain exactly, including the
    # deliberate no-op gap between 0.234 and 0.4.
    if acceptance > 0.8:
        return scale * 2.0
    if 0.4 < acceptance <= 0.8:
        return scale * 1.3
    if 0.1 < acceptance < 0.234:
        return scale * (1 / 1.3)
    if 0.05 < acceptance <= 0.1:
        return scale * 0.4
    if 0.01 < acceptance <= 0.05:
        return scale * 0.2
    if acceptance <= 0.01:
        return scale * 0.1
    return scale
Tunes scale for M-H algorithm Parameters ---------- acceptance : float The most recent acceptance rate scale : float The current scale parameter Returns ---------- scale : float An adjusted scale parameter Notes ---------- Ross : Initially did this by trial and error, then refined by looking at other implementations, so some credit here to PyMC3 which became a guideline for this.
def mesh_surface_area(mesh=None, verts=None, faces=None):
    r"""Calculate the surface area of a meshed region.

    Parameters
    ----------
    mesh : tuple
        The tuple returned from the ``mesh_region`` function
    verts : array
        An N-by-ND array containing the coordinates of each mesh vertex
    faces : array
        An N-by-ND array indicating which elements in ``verts`` form a
        mesh element.

    Returns
    -------
    surface_area : float
        Calculated by ``skimage.measure.mesh_surface_area``

    Notes
    -----
    Thin convenience wrapper around
    ``skimage.measure.mesh_surface_area`` that also accepts the tuple
    returned by ``mesh_region``.
    """
    # Fixed: a stray bare ``r`` expression (the orphaned prefix of the
    # raw docstring) was left in the body and raised NameError at call
    # time.
    if mesh:
        verts = mesh.verts
        faces = mesh.faces
    elif (verts is None) or (faces is None):
        raise Exception('Either mesh or verts and faces must be given')
    surface_area = measure.mesh_surface_area(verts, faces)
    return surface_area
r""" Calculates the surface area of a meshed region Parameters ---------- mesh : tuple The tuple returned from the ``mesh_region`` function verts : array An N-by-ND array containing the coordinates of each mesh vertex faces : array An N-by-ND array indicating which elements in ``verts`` form a mesh element. Returns ------- surface_area : float The surface area of the mesh, calculated by ``skimage.measure.mesh_surface_area`` Notes ----- This function simply calls ``scikit-image.measure.mesh_surface_area``, but it allows for the passing of the ``mesh`` tuple returned by the ``mesh_region`` function, entirely for convenience.
def split(mesh, only_watertight=True, adjacency=None, engine=None):
    """Split a mesh into multiple meshes from face connectivity.

    Parameters
    ----------
    mesh : Trimesh
    only_watertight : bool
        If True, only return watertight components; min_len=3 filters
        out components too small to be watertight.
    adjacency : (n, 2) list
        Face adjacency overriding the mesh's own.
    engine : str
        Graph backend: 'networkx', 'scipy', or 'graphtool'.

    Returns
    -------
    meshes : list of Trimesh objects
    """
    if adjacency is None:
        adjacency = mesh.face_adjacency
    if only_watertight:
        min_len = 3
    else:
        min_len = 1
    components = connected_components(edges=adjacency,
                                     nodes=np.arange(len(mesh.faces)),
                                     min_len=min_len,
                                     engine=engine)
    meshes = mesh.submesh(components, only_watertight=only_watertight)
    return meshes
Split a mesh into multiple meshes from face connectivity. If only_watertight is true, it will only return watertight meshes and will attempt single triangle/quad repairs. Parameters ---------- mesh: Trimesh only_watertight: if True, only return watertight components adjacency: (n,2) list of face adjacency to override using the plain adjacency calculated automatically. engine: str, which engine to use. ('networkx', 'scipy', or 'graphtool') Returns ---------- meshes: list of Trimesh objects
def expand(self, line, do_expand, force=False, vislevels=0, level=-1):
    """Multi-purpose expand method from the original STC class.

    Recursively shows/hides the lines of the fold whose header is at
    *line*, honoring nested fold headers.

    :param do_expand: whether child lines should be shown
    :param force: force visibility from *vislevels* regardless of the
        stored fold-expanded state
    :param vislevels: number of nesting levels to keep visible when
        forcing
    :param level: fold level of *line*; -1 means "query the control"
    :return: the first line after the processed fold
    """
    lastchild = self.GetLastChild(line, level)
    line += 1
    while line <= lastchild:
        if force:
            if vislevels > 0:
                self.ShowLines(line, line)
            else:
                self.HideLines(line, line)
        elif do_expand:
            self.ShowLines(line, line)
        if level == -1:
            level = self.GetFoldLevel(line)
        if level & stc.STC_FOLDLEVELHEADERFLAG:
            # Nested fold header: recurse with one less visible level.
            if force:
                self.SetFoldExpanded(line, vislevels - 1)
                line = self.expand(line, do_expand, force, vislevels - 1)
            else:
                expandsub = do_expand and self.GetFoldExpanded(line)
                line = self.expand(line, expandsub, force, vislevels - 1)
        else:
            line += 1
    return line
Multi-purpose expand method from original STC class
def uncomment_lines(lines):
    """Uncomment *lines* and return them joined into a single string.

    On each line, the first hash mark following any amount of leading
    whitespace is removed (via RE_LINE_SPLITTER_UNCOMMENT).
    """
    groups = (RE_LINE_SPLITTER_UNCOMMENT.match(text).groups()
              for text in lines)
    return ''.join(ws_prefix + rest for ws_prefix, rest, _ in groups)
Uncomment the given list of lines and return them. The first hash mark following any amount of whitespace will be removed on each line.
def remove_file(self):
    """Remove the archived file associated with this data product.

    Failures to delete are reported on stdout but not raised
    (best-effort cleanup).

    :raises RuntimeError: if the product has no path or is not archived
    """
    if not self.fullpath or not self.archived:
        # Fixed: the RuntimeError previously carried no message.
        raise RuntimeError(
            "cannot remove file: no path set or product not archived")
    try:
        os.remove(self.fullpath)
    except OSError as exc:
        # Fixed: the bare ``except`` also swallowed SystemExit and
        # KeyboardInterrupt; only filesystem errors are expected here.
        print("Error removing %s: %s" % (self.fullpath, exc))
Removes archived file associated with this DP
def _reset_bbox(self):
    """Recompute the viewer's bounding box after a pan or scale change.

    Internal use only.
    """
    scale_x, scale_y = self.get_scale_xy()
    pan_x, pan_y = self.get_pan(coord='data')[:2]
    win_wd, win_ht = self.get_window_size()
    # Guard against degenerate (zero-sized) windows.
    win_wd, win_ht = max(1, win_wd), max(1, win_ht)
    self._calc_bg_dimensions(scale_x, scale_y, pan_x, pan_y,
                             win_wd, win_ht)
This function should only be called internally. It resets the viewers bounding box based on changes to pan or scale.
def _imm_default_init(self, *args, **kwargs):
    """Default immutable initializer.

    Accepts any number of dicts followed by keyword arguments and sets
    each key/value pair as an attribute on *self*; later sources win.
    """
    # Fixed: the old comprehension iterated each dict directly
    # (which yields keys, not pairs) and tried to unpack the keys as
    # (k, v), failing for ordinary string keys -- including kwargs.
    merged = {}
    for dct in args + (kwargs,):
        merged.update(dct)
    for key, val in merged.items():
        setattr(self, key, val)
An immutable's default initialization function is to accept any number of dictionaries followed by any number of keyword args and to turn them all into the parameters of the immutable that is being created.
def _thumbnail_div(target_dir, src_dir, fname, snippet, is_backref=False,
                   check=True):
    """Generate reST to place a thumbnail in a gallery.

    :param target_dir: directory holding the generated gallery files
    :param src_dir: directory the emitted paths are made relative to
    :param fname: example filename (".py" stripped via fname[:-3])
    :param snippet: caption text (HTML-escaped here)
    :param is_backref: use the back-reference thumbnail template
    :param check: raise if the thumbnail image is missing
    """
    thumb, _ = _find_image_ext(
        os.path.join(target_dir, 'images', 'thumb',
                     'sphx_glr_%s_thumb.png' % fname[:-3]))
    if check and not os.path.isfile(thumb):
        raise RuntimeError('Could not find internal sphinx-gallery thumbnail '
                           'file:\n%s' % (thumb,))
    thumb = os.path.relpath(thumb, src_dir)
    full_dir = os.path.relpath(target_dir, src_dir)
    # Forward slashes are used in the emitted path even on Windows.
    thumb = thumb.replace(os.sep, "/")
    ref_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
    template = BACKREF_THUMBNAIL_TEMPLATE if is_backref else THUMBNAIL_TEMPLATE
    return template.format(snippet=escape(snippet), thumbnail=thumb,
                           ref_name=ref_name)
Generates RST to place a thumbnail in a gallery
def ping_interval(self, value):
    """Setter for the ping_interval property.

    :param int value: interval in seconds between two pings; must be a
        non-negative int.
    :raises TypeError: if value is not an int
    :raises ValueError: if value is negative
    """
    if not isinstance(value, int):
        raise TypeError("ping interval must be int")
    if value < 0:
        # Fixed: message said "greater then 0" although the check
        # actually permits 0 (and had a then/than typo).
        raise ValueError("ping interval must not be negative")
    self._ping_interval = value
Setter for ping_interval property. :param int value: interval in sec between two ping values.
def get_webhook(self):
    """Return the currently registered callback URL, if any.

    :return: the current webhook wrapped in a Webhook object
    """
    notifications_api = self._get_api(mds.NotificationsApi)
    return Webhook(notifications_api.get_webhook())
Get the current callback URL if it exists. :return: The currently set webhook
def parse_excel(file_path: str, entrez_id_header, log_fold_change_header,
                adjusted_p_value_header, entrez_delimiter,
                base_mean_header=None) -> List[Gene]:
    """Read an Excel file of differential-expression values as Gene objects.

    :param file_path: path to the differential expression file to parse
    :param entrez_id_header: name of the column holding Entrez gene ids
    :param log_fold_change_header: name of the log2 fold change column
    :param adjusted_p_value_header: name of the adjusted p-value column
    :param entrez_delimiter: delimiter between multiple Entrez ids
    :param base_mean_header: optional name of the base-mean column
    :return: a list of Gene objects
    """
    logger.info("In parse_excel()")
    df = pd.read_excel(file_path)
    return handle_dataframe(
        df,
        entrez_id_name=entrez_id_header,
        log2_fold_change_name=log_fold_change_header,
        adjusted_p_value_name=adjusted_p_value_header,
        entrez_delimiter=entrez_delimiter,
        base_mean=base_mean_header,
    )
Read an excel file on differential expression values as Gene objects. :param str file_path: The path to the differential expression file to be parsed. :param entrez_id_header: Name of the column containing Entrez gene ids. :param log_fold_change_header: Name of the column containing log2 fold changes. :param adjusted_p_value_header: Name of the column containing adjusted p-values. :param entrez_delimiter: Delimiter between multiple Entrez ids. :param base_mean_header: Optional name of the column containing base means. :return list: A list of Gene objects.
def role_get(auth=None, **kwargs):
    """Get a single keystone role.

    CLI Example:

    .. code-block:: bash

        salt '*' keystoneng.role_get name=role1
        salt '*' keystoneng.role_get name=role1 domain_id=b62e76fbeeff4e8fb77073f591cf211e
        salt '*' keystoneng.role_get name=1eb6edd5525e4ac39af571adee673559
    """
    cloud = get_operator_cloud(auth)
    cleaned = _clean_kwargs(**kwargs)
    return cloud.get_role(**cleaned)
Get a single role CLI Example: .. code-block:: bash salt '*' keystoneng.role_get name=role1 salt '*' keystoneng.role_get name=role1 domain_id=b62e76fbeeff4e8fb77073f591cf211e salt '*' keystoneng.role_get name=1eb6edd5525e4ac39af571adee673559
def selectNumber(self):
    """Select the numerical portion of the text for quick editing.

    With no suffix the whole text is selected; otherwise the selection
    stops at the first space, which separates value from suffix.
    """
    le = self.lineEdit()
    text = asUnicode(le.text())
    if self.opts['suffix'] == '':
        le.setSelection(0, len(text))
    else:
        try:
            index = text.index(' ')
        except ValueError:
            # No separator found; leave the selection untouched.
            return
        le.setSelection(0, index)
Select the numerical portion of the text to allow quick editing by the user.
def __threshold(self, ymx_i):
    """Difference threshold for a local maximum at normalized y *ymx_i*.

    Subtracts S times the mean spacing of the normalized x values.
    """
    mean_spacing = np.diff(self.xsn).mean()
    return ymx_i - self.S * mean_spacing
Calculates the difference threshold for a given difference local maximum. Parameters ----------- ymx_i : float The normalized y value of a local maximum.
def cont_r(self, percent=0.9, N=None):
    """Return the contribution of each row (to the first N factors).

    NOTE(review): the *percent* parameter is accepted but unused here --
    presumably kept for signature symmetry with related methods; confirm.
    """
    if not hasattr(self, 'F'):
        # Lazily compute the row factor scores first.
        self.fs_r(N=self.rank)
    return apply_along_axis(lambda _: _/self.L[:N], 1,
                            apply_along_axis(lambda _: _*self.r, 0,
                                             self.F[:, :N]**2))
Return the contribution of each row.
def get_cod_ids(self, formula):
    """Query the COD for all cod ids associated with a formula.

    Requires the mysql executable to be in the path.

    Args:
        formula (str): Formula.

    Returns:
        List of cod ids.
    """
    # COD stores formulas in Hill notation wrapped as "- ... -".
    sql = 'select file from data where formula="- %s -"' % \
        Composition(formula).hill_formula
    text = self.query(sql).split("\n")
    cod_ids = []
    for l in text:
        # Any run of digits in an output line is taken as a cod id.
        m = re.search(r"(\d+)", l)
        if m:
            cod_ids.append(int(m.group(1)))
    return cod_ids
Queries the COD for all cod ids associated with a formula. Requires mysql executable to be in the path. Args: formula (str): Formula. Returns: List of cod ids.
def dumps():
    """Return a string representation (via ``_dumps``) of FILTERS.

    Keys are converted to component names via ``dr.get_name`` and the
    filter sets become lists.
    """
    d = {}
    for k, v in FILTERS.items():
        d[dr.get_name(k)] = list(v)
    return _dumps(d)
Returns a string representation of the FILTERS dictionary.
def from_lt(rsize, ltm, ltv):
    """Compute corner location and pixel size in units of unbinned pixels.

    Translated from ``calacs/lib/fromlt.c``.

    Parameters
    ----------
    rsize : int
        Reference pixel size. Usually 1.
    ltm, ltv : tuple of float
        See ``get_lt``.

    Returns
    -------
    bin : tuple of int
        Pixel size in X and Y.
    corner : tuple of int
        Corner of subarray in X and Y.
    """
    dbinx = rsize / ltm[0]
    dbiny = rsize / ltm[1]
    dxcorner = (dbinx - rsize) - dbinx * ltv[0]
    dycorner = (dbiny - rsize) - dbiny * ltv[1]
    # Round to nearest integer via the project's _nint helper.
    bin = (_nint(dbinx), _nint(dbiny))
    corner = (_nint(dxcorner), _nint(dycorner))
    return bin, corner
Compute the corner location and pixel size in units of unbinned pixels. .. note:: Translated from ``calacs/lib/fromlt.c``. Parameters ---------- rsize : int Reference pixel size. Usually 1. ltm, ltv : tuple of float See :func:`get_lt`. Returns ------- bin : tuple of int Pixel size in X and Y. corner : tuple of int Corner of subarray in X and Y.
def dec2dec(dec):
    """Convert a sexagesimal Dec string into a float in degrees.

    Expected format is ``[+-]dd:mm[:ss.s]``; colons may instead be any
    whitespace. Seconds are optional and default to 0.
    """
    parts = dec.replace(':', ' ').split()
    if len(parts) == 2:
        parts.append(0.0)
    degrees = float(parts[0])
    minutes = float(parts[1])
    seconds = float(parts[2])
    # Check the string for a leading '-' as well, so "-00:30" keeps its
    # sign even though float("-00") is 0.
    if parts[0].startswith('-') or degrees < 0:
        return degrees - minutes / 60.0 - seconds / 3600.0
    return degrees + minutes / 60.0 + seconds / 3600.0
Convert a sexagesimal Dec string into a float in degrees. Parameters ---------- dec : string A string representing the Dec. Expected format is `[+- ]dd:mm[:ss.s]` Colons can be replaced with any white space character. Returns ------- dec : float The Dec in degrees.
def get_current(self):
    """Get the forecast for the current moment."""
    now = dt.now().timestamp()
    # Start and end of the window are both "now".
    request_url = build_url(self.api_key, self.spot_id, self.fields,
                            self.unit, now, now)
    return get_msw(request_url)
Get current forecast.
def get_uniprot_evidence_level(header):
    """Return UniProt protein-existence evidence for a FASTA header.

    UniProt PE levels run 1 (best) to 5; the value returned is
    ``5 - PE`` so that higher is better when sorting.

    :param header: FASTA header line
    :return: 5 - PE, or -1 when no parseable PE field is present
    """
    for field in header.split():
        parts = field.split('=')
        try:
            if parts[0] == 'PE':
                return 5 - int(parts[1])
        except (IndexError, ValueError):
            # Fixed: a malformed value such as "PE=" or "PE=x" used to
            # raise an uncaught ValueError; treat it like a missing
            # field instead.
            continue
    return -1
Returns uniprot protein existence evidence level for a fasta header. Evidence levels are 1-5, but we return 5 - x since sorting still demands that higher is better.
def get_entity_type(self, name, language_code=None,
                    retry=google.api_core.gapic_v1.method.DEFAULT,
                    timeout=google.api_core.gapic_v1.method.DEFAULT,
                    metadata=None):
    """Retrieve the specified entity type.

    Args:
        name (str): Required. Format:
            ``projects/<Project ID>/agent/entityTypes/<EntityType ID>``.
        language_code (str): Optional language for entity synonyms;
            defaults to the agent's default language.
        retry: retry policy; ``None`` disables retries.
        timeout (float): per-attempt timeout in seconds.
        metadata: additional metadata provided to the method.

    Returns:
        A ``google.cloud.dialogflow_v2.types.EntityType`` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: request failed.
        google.api_core.exceptions.RetryError: retries exhausted.
        ValueError: invalid parameters.
    """
    # Lazily wrap the transport method once and cache the wrapped call.
    if 'get_entity_type' not in self._inner_api_calls:
        self._inner_api_calls[
            'get_entity_type'] = google.api_core.gapic_v1.method.wrap_method(
                self.transport.get_entity_type,
                default_retry=self._method_configs['GetEntityType'].retry,
                default_timeout=self._method_configs['GetEntityType']
                .timeout,
                client_info=self._client_info,
            )
    request = entity_type_pb2.GetEntityTypeRequest(
        name=name,
        language_code=language_code,
    )
    return self._inner_api_calls['get_entity_type'](
        request, retry=retry, timeout=timeout, metadata=metadata)
Retrieves the specified entity type. Example: >>> import dialogflow_v2 >>> >>> client = dialogflow_v2.EntityTypesClient() >>> >>> name = client.entity_type_path('[PROJECT]', '[ENTITY_TYPE]') >>> >>> response = client.get_entity_type(name) Args: name (str): Required. The name of the entity type. Format: ``projects/<Project ID>/agent/entityTypes/<EntityType ID>``. language_code (str): Optional. The language to retrieve entity synonyms for. If not specified, the agent's default language is used. [More than a dozen languages](https://dialogflow.com/docs/reference/language) are supported. Note: languages must be enabled in the agent, before they can be used. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.dialogflow_v2.types.EntityType` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
def get_arg_names(target) -> typing.List[str]:
    """Return the named arguments of *target*, including *args/**kwargs.

    A leading ``self``/``cls`` parameter is dropped. An empty list is
    returned when the target has no ``__code__`` (e.g. builtins).

    :param target: function for which the argument names are retrieved
    """
    # Fixed: getattr without a default raised AttributeError for
    # objects lacking __code__, defeating the None check below.
    code = getattr(target, '__code__', None)
    if code is None:
        return []
    arg_count = code.co_argcount
    kwarg_count = code.co_kwonlyargcount
    args_index = get_args_index(target)
    kwargs_index = get_kwargs_index(target)
    arg_names = list(code.co_varnames[:arg_count])
    if args_index != -1:
        arg_names.append(code.co_varnames[args_index])
    # Keyword-only names follow the positional ones in co_varnames.
    arg_names += list(code.co_varnames[arg_count:(arg_count + kwarg_count)])
    if kwargs_index != -1:
        arg_names.append(code.co_varnames[kwargs_index])
    # Removed a dead `arg_count -= 1` that accompanied this pop.
    if len(arg_names) > 0 and arg_names[0] in ['self', 'cls']:
        arg_names.pop(0)
    return arg_names
Gets the list of named arguments for the target function :param target: Function for which the argument names will be retrieved
def load(uid=None):
    """Return a participant's stored experiment data as a JSON response.

    Falls back to a minimal record built from the participant row when
    the stored datastring is absent or not valid JSON.
    """
    app.logger.info("GET /sync route with id: %s" % uid)
    try:
        user = Participant.query.\
            filter(Participant.uniqueid == uid).\
            one()
    except exc.SQLAlchemyError:
        # NOTE(review): if this lookup fails, `user` is unbound and the
        # code below raises NameError -- pre-existing behavior kept.
        app.logger.error("DB error: Unique user not found.")
    try:
        resp = json.loads(user.datastring)
    except (TypeError, ValueError):
        # Fixed: narrowed the bare except. json.loads raises ValueError
        # (JSONDecodeError) for invalid JSON and TypeError for None.
        resp = {
            "condition": user.cond,
            "counterbalance": user.counterbalance,
            "assignmentId": user.assignmentid,
            "workerId": user.workerid,
            "hitId": user.hitid,
            "bonus": user.bonus
        }
    return jsonify(**resp)
Load experiment data, which should be a JSON object and will be stored after converting to string.
def value_to_datum(self, instance, value):
    """Convert a Python-side *value* to a MAAS-side datum.

    :param instance: the Object instance this field operates on
        (treated as read-only here).
    :param value: the Python-side value to validate and convert.
    :return: a datum derived from the given value.
    :raises TypeError: when value is not of the bound class
    :raises AttributeError: when the bound class defines no primary key
    """
    if value is None:
        return None
    bound = getattr(instance._origin, self.cls)
    if type(value) is bound:
        if self.use_data_setter:
            return value._data
        else:
            descriptors, alt_descriptors = get_pk_descriptors(bound)
            if len(descriptors) == 1:
                # Single primary key: pass the bare value.
                return getattr(value, descriptors[0][0])
            elif len(descriptors) > 1:
                # Composite primary key: pass a tuple of values.
                return tuple(
                    getattr(value, name)
                    for name, _ in descriptors
                )
            else:
                raise AttributeError(
                    "unable to perform set object no primary key "
                    "fields defined for %s" % self.cls)
    else:
        raise TypeError(
            "must be %s, not %s" % (self.cls, type(value).__name__))
Convert a given Python-side value to a MAAS-side datum. :param instance: The `Object` instance on which this field is currently operating. This method should treat it as read-only, for example to perform validation with regards to other fields. :param datum: The Python-side value to validate and convert into a MAAS-side datum. :return: A datum derived from the given value.
def create_open_msg(self):
    """Create a BGP `Open` message using current settings.

    Settings include capabilities, timers and ids. AS numbers rejected
    by ``is_valid_old_asn`` are replaced with AS_TRANS (presumably the
    4-byte-ASN mechanism -- confirm against the bgp module).
    """
    asnum = self.local_as
    if not is_valid_old_asn(asnum):
        asnum = bgp.AS_TRANS
    bgpid = self._common_conf.router_id
    holdtime = self._neigh_conf.hold_time

    def flatten(L):
        # Recursively yield the leaves of an arbitrarily nested list.
        if isinstance(L, list):
            for i in range(len(L)):
                for e in flatten(L[i]):
                    yield e
        else:
            yield L
    opts = list(flatten(
        list(self._neigh_conf.get_configured_capabilities().values())))
    open_msg = BGPOpen(
        my_as=asnum,
        bgp_identifier=bgpid,
        version=const.BGP_VERSION_NUM,
        hold_time=holdtime,
        opt_param=opts
    )
    return open_msg
Create `Open` message using current settings. Current setting include capabilities, timers and ids.
def create(cls, name, members=None, comment=None):
    """Create the TCP Service group.

    :param str name: name of tcp service group
    :param list members: tcp services by element or href
    :param str comment: optional comment
    :raises CreateElementFailed: element creation failed with reason
    :return: instance with meta
    """
    resolved = element_resolver(members) if members is not None else []
    payload = {'name': name, 'element': resolved, 'comment': comment}
    return ElementCreator(cls, payload)
Create the TCP Service group :param str name: name of tcp service group :param list element: tcp services by element or href :type element: list(str,Element) :raises CreateElementFailed: element creation failed with reason :return: instance with meta :rtype: TCPServiceGroup
def satisfiesExternal(cntxt: Context, n: Node, se: ShExJ.ShapeExternal, c: DebugContext) -> bool:
    """Evaluate a ShapeExternal by delegating to an externally supplied shape.

    Implementation-specific mechanisms (here ``cntxt.external_shape_for``)
    resolve the external shape; an unresolvable shape is a non-match and
    records a failure reason on the context.
    """
    if c.debug:
        print(f"id: {se.id}")
    extern_shape = cntxt.external_shape_for(se.id)
    if extern_shape:
        return satisfies(cntxt, n, extern_shape)
    cntxt.fail_reason = f"{se.id}: Shape is not in Schema"
    return False
Se is a ShapeExternal and implementation-specific mechansims not defined in this specification indicate success.
def _get_key_redis_key(bank, key):
    """Return the Redis key for *key* inside *bank*."""
    opts = _get_redis_keys_opts()
    prefix = opts['key_prefix']
    separator = opts['separator']
    return '{0}{1}{2}/{3}'.format(prefix, separator, bank, key)
Return the Redis key given the bank name and the key name.
def update(self, title=None, description=None, images=None, cover=None,
           layout=None, privacy=None):
    """Update the album's information.

    Arguments left as None retain their old values.

    :param title: the title of the album.
    :param description: a description of the album.
    :param images: list of images (Image objects, ids or a mix) the
        album should contain; images you cannot set fail silently.
    :param cover: the id of the cover image.
    :param layout: blog, grid, horizontal or vertical.
    :param privacy: public, hidden or secret.
    """
    url = (self._imgur._base_url + "/3/album/"
           "{0}".format(self._delete_or_id_hash))
    # NOTE: locals() deliberately captures all keyword arguments (plus
    # self and url) as the request parameters -- keep this call before
    # introducing any new local variables.
    is_updated = self._imgur._send_request(url, params=locals(),
                                           method='POST')
    if is_updated:
        self.title = title or self.title
        self.description = description or self.description
        self.layout = layout or self.layout
        self.privacy = privacy or self.privacy
        if cover is not None:
            self.cover = (cover if isinstance(cover, Image)
                          else Image({'id': cover}, self._imgur,
                                     has_fetched=False))
        if images:
            self.images = [img if isinstance(img, Image)
                           else Image({'id': img}, self._imgur, False)
                           for img in images]
    return is_updated
Update the album's information. Arguments with the value None will retain their old values. :param title: The title of the album. :param description: A description of the album. :param images: A list of the images we want the album to contain. Can be Image objects, ids or a combination of the two. Images that you cannot set (non-existing or not owned by you) will not cause exceptions, but fail silently. :param privacy: The albums privacy level, can be public, hidden or secret. :param cover: The id of the cover image. :param layout: The way the album is displayed, can be blog, grid, horizontal or vertical.
def show_plot_methods(self):
    """Print the plot methods of this instance, one per line."""
    print_func = PlotterInterface._print_func
    if print_func is None:
        # Fall back to the plain print function.
        print_func = six.print_
    s = "\n".join(
        "%s\n %s" % t for t in six.iteritems(self._plot_methods))
    return print_func(s)
Print the plotmethods of this instance
def close(self):
    """Perform a clean shutdown.

    Waits for all currently scheduled work to finish, stops the worker
    threads, and closes the session. Do not submit new work while the
    client is closing.
    """
    _LOGGER.debug("Joining submission queue")
    self._submission_queue.join()
    _LOGGER.debug("Joining cancel queue")
    self._cancel_queue.join()
    _LOGGER.debug("Joining poll queue")
    self._poll_queue.join()
    _LOGGER.debug("Joining load queue")
    self._load_queue.join()
    # Send one sentinel per worker so every thread wakes up and exits.
    for _ in self._submission_workers:
        self._submission_queue.put(None)
    for _ in self._cancel_workers:
        self._cancel_queue.put(None)
    for _ in self._poll_workers:
        # Poll queue entries are (priority, item) tuples.
        self._poll_queue.put((-1, None))
    for _ in self._load_workers:
        self._load_queue.put(None)
    for worker in chain(self._submission_workers, self._cancel_workers,
                        self._poll_workers, self._load_workers):
        worker.join()
    self.session.close()
Perform a clean shutdown. Waits for all the currently scheduled work to finish, kills the workers, and closes the connection pool. .. note:: Ensure your code does not submit new work while the connection is closing. Where possible, it is recommended you use a context manager (a :code:`with Client.from_config(...) as` construct) to ensure your code properly closes all resources. Examples: This example creates a client (based on an auto-detected configuration file), executes some code (represented by a placeholder comment), and then closes the client. >>> from dwave.cloud import Client >>> client = Client.from_config() >>> # code that uses client >>> client.close()
def is_causal_sink(graph: BELGraph, node: BaseEntity) -> bool:
    """Return True if *node* has causal in-edges but no causal out-edges."""
    if not has_causal_in_edges(graph, node):
        return False
    return not has_causal_out_edges(graph, node)
Return true if the node is a causal sink. - Does have causal in edge(s) - Doesn't have any causal out edge(s)
def verify_signature(public_key, signature, hash, hash_algo):
    """Verify *signature* is correct for the given pre-computed hash.

    Args:
        public_key (str): PEM encoded public key
        signature (bytes): signature to verify
        hash (bytes): hash of the data
        hash_algo (str): name of the hash algorithm used

    Returns:
        True if the signature is valid, False otherwise
    """
    hash_algo = _hash_algorithms[hash_algo]
    try:
        # PKCS1v15 padding; Prehashed tells the backend the data is
        # already hashed. verify() returns None on success and raises
        # InvalidSignature otherwise.
        return get_publickey(public_key).verify(
            signature,
            hash,
            padding.PKCS1v15(),
            utils.Prehashed(hash_algo),
        ) is None
    except InvalidSignature:
        return False
Verify the given signature is correct for the given hash and public key. Args: public_key (str): PEM encoded public key signature (bytes): signature to verify hash (bytes): hash of data hash_algo (str): hash algorithm used Returns: True if the signature is valid, False otherwise
def method2jpg(output, mx, raw=False):
    """Export a method to a jpg file.

    :param output: output filename
    :type output: string
    :param mx: the MethodAnalysis object
    :param raw: optional raw dot buffer used instead of generating one
    :type raw: string
    """
    dot_buffer = raw if raw else method2dot(mx)
    method2format(output, "jpg", mx, dot_buffer)
Export method to a jpg file format :param output: output filename :type output: string :param mx: specify the MethodAnalysis object :type mx: :class:`MethodAnalysis` object :param raw: use directly a dot raw buffer (optional) :type raw: string
def compute_search_volume_in_bins(found, total, ndbins, sim_to_bins_function):
    """Compute search-sensitive volume by integrating efficiency over distance.

    No cosmological corrections are applied: flat space is assumed. The
    first dimension of ndbins must be bins over injected distance;
    sim_to_bins_function must map an object to a tuple indexing ndbins.
    """
    eff, err = compute_search_efficiency_in_bins(
        found, total, ndbins, sim_to_bins_function)
    dx = ndbins[0].upper() - ndbins[0].lower()
    r = ndbins[0].centres()
    vol = bin_utils.BinnedArray(bin_utils.NDBins(ndbins[1:]))
    errors = bin_utils.BinnedArray(bin_utils.NDBins(ndbins[1:]))
    # Integrate efficiency * 4*pi*r^2 dr over the distance bins.
    vol.array = numpy.trapz(eff.array.T * 4. * numpy.pi * r**2, r, dx)
    # Propagate the per-bin efficiency errors in quadrature.
    errors.array = numpy.sqrt(
        ((4 * numpy.pi * r**2 * err.array.T * dx)**2).sum(axis=-1)
    )
    return vol, errors
Calculate search sensitive volume by integrating efficiency in distance bins No cosmological corrections are applied: flat space is assumed. The first dimension of ndbins must be bins over injected distance. sim_to_bins_function must maps an object to a tuple indexing the ndbins.
def reset_index(self, level=None, drop=False, name=None, inplace=False):
    """Generate a new DataFrame or Series with the index reset.

    Parameters
    ----------
    level : int, str, tuple, or list, optional
        For a MultiIndex, remove only the given levels (all by default).
    drop : bool, default False
        Just reset the index, without inserting it as a column.
    name : object, optional
        Column name for the original values (ignored when drop=True).
    inplace : bool, default False
        Modify the Series in place (requires drop=True).

    Returns
    -------
    Series or DataFrame, or None when ``inplace=True``.
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')
    if drop:
        new_index = ibase.default_index(len(self))
        if level is not None:
            if not isinstance(level, (tuple, list)):
                level = [level]
            level = [self.index._get_level_number(lev) for lev in level]
            if len(level) < self.index.nlevels:
                # Only some levels dropped: keep the remaining index.
                new_index = self.index.droplevel(level)
        if inplace:
            self.index = new_index
            # Preserve the existing name unless a new one is given.
            self.name = name or self.name
        else:
            return self._constructor(self._values.copy(),
                                     index=new_index).__finalize__(self)
    elif inplace:
        raise TypeError('Cannot reset_index inplace on a Series '
                        'to create a DataFrame')
    else:
        df = self.to_frame(name)
        return df.reset_index(level=level, drop=drop)
Generate a new DataFrame or Series with the index reset. This is useful when the index needs to be treated as a column, or when the index is meaningless and needs to be reset to the default before another operation. Parameters ---------- level : int, str, tuple, or list, default optional For a Series with a MultiIndex, only remove the specified levels from the index. Removes all levels by default. drop : bool, default False Just reset the index, without inserting it as a column in the new DataFrame. name : object, optional The name to use for the column containing the original Series values. Uses ``self.name`` by default. This argument is ignored when `drop` is True. inplace : bool, default False Modify the Series in place (do not create a new object). Returns ------- Series or DataFrame When `drop` is False (the default), a DataFrame is returned. The newly created columns will come first in the DataFrame, followed by the original Series values. When `drop` is True, a `Series` is returned. In either case, if ``inplace=True``, no value is returned. See Also -------- DataFrame.reset_index: Analogous function for DataFrame. Examples -------- >>> s = pd.Series([1, 2, 3, 4], name='foo', ... index=pd.Index(['a', 'b', 'c', 'd'], name='idx')) Generate a DataFrame with default index. >>> s.reset_index() idx foo 0 a 1 1 b 2 2 c 3 3 d 4 To specify the name of the new column use `name`. >>> s.reset_index(name='values') idx values 0 a 1 1 b 2 2 c 3 3 d 4 To generate a new Series with the default set `drop` to True. >>> s.reset_index(drop=True) 0 1 1 2 2 3 3 4 Name: foo, dtype: int64 To update the Series in place, without generating a new one set `inplace` to True. Note that it also requires ``drop=True``. >>> s.reset_index(inplace=True, drop=True) >>> s 0 1 1 2 2 3 3 4 Name: foo, dtype: int64 The `level` parameter is interesting for Series with a multi-level index. >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']), ... 
np.array(['one', 'two', 'one', 'two'])] >>> s2 = pd.Series( ... range(4), name='foo', ... index=pd.MultiIndex.from_arrays(arrays, ... names=['a', 'b'])) To remove a specific level from the Index, use `level`. >>> s2.reset_index(level='a') a foo b one bar 0 two bar 1 one baz 2 two baz 3 If `level` is not set, all levels are removed from the Index. >>> s2.reset_index() a b foo 0 bar one 0 1 bar two 1 2 baz one 2 3 baz two 3
def _setup_metric_group_definitions(self):
    """Build MetricGroupDefinition objects from 'metric-group-infos'.

    :return: dict of MetricGroupDefinition keyed by metric group name.
    """
    metric_group_definitions = dict()
    for mg_info in self.properties['metric-group-infos']:
        mg_name = mg_info['group-name']
        mg_def = MetricGroupDefinition(
            name=mg_name,
            resource_class=_resource_class_from_group(mg_name),
            metric_definitions=dict())
        # The enumerate index records each metric's position within the
        # group.
        for i, m_info in enumerate(mg_info['metric-infos']):
            m_name = m_info['metric-name']
            m_def = MetricDefinition(
                index=i,
                name=m_name,
                type=_metric_type(m_info['metric-type']),
                unit=_metric_unit_from_name(m_name))
            mg_def.metric_definitions[m_name] = m_def
        metric_group_definitions[mg_name] = mg_def
    return metric_group_definitions
Return the dict of MetricGroupDefinition objects for this metrics context, by processing its 'metric-group-infos' property.
def _ValidateValue(value, type_check): if inspect.isclass(type_check): return isinstance(value, type_check) if isinstance(type_check, tuple): return _ValidateTuple(value, type_check) elif callable(type_check): return type_check(value) else: raise TypeError("Invalid type check '%s'" % repr(type_check))
Validate a single value with type_check.
def getAspect(obj1, obj2, aspList):
    """Return an Aspect object for the aspect between two objects,
    considering a list of possible aspect types.

    When none of the requested aspect types applies, a NO_ASPECT
    placeholder dict (zero orb/separation) is used instead.
    """
    ap = _getActivePassive(obj1, obj2)
    aspDict = _aspectDict(ap['active'], ap['passive'], aspList)
    if not aspDict:
        aspDict = {
            'type': const.NO_ASPECT,
            'orb': 0,
            'separation': 0,
        }
    aspProp = _aspectProperties(ap['active'], ap['passive'], aspDict)
    return Aspect(aspProp)
Returns an Aspect object for the aspect between two objects considering a list of possible aspect types.
def get_formset(self, request, obj=None, **kwargs):
    """Return the formset, providing the current form language and,
    when inline tabs are enabled, the language tab metadata to it.
    """
    FormSet = super(TranslatableInlineModelAdmin, self).get_formset(request, obj, **kwargs)
    FormSet.language_code = self.get_form_language(request, obj)
    if self.inline_tabs:
        available_languages = self.get_available_languages(obj, FormSet)
        FormSet.language_tabs = self.get_language_tabs(
            request, obj, available_languages,
            css_class='parler-inline-language-tabs')
        # Tab deletion is only offered when the parent model itself is
        # translatable (otherwise there is no parent translation to remove).
        FormSet.language_tabs.allow_deletion = self._has_translatable_parent_model()
    return FormSet
Return the formset, and provide the language information to the formset.
def cmd(self, cmd, verbose=False):
    """Execute *cmd* on the remote host over ssh and return its stdout.

    The command must be format-safe ('{' and '}' doubled); the format
    word 'maildir' is replaced with self.directory, e.g.:
    ``echo {maildir}/{{cur,new}}``.

    :param cmd: format-safe command string
    :param verbose: if True, print the expanded command before running it
    :return: the raw stdout of the remote command (stderr is discarded)
    """
    command = cmd.format(maildir=self.directory)
    if verbose:
        print(command)
    # -T disables pseudo-tty allocation; the command is passed as a single
    # argument for the remote shell to interpret.
    p = Popen([
        "ssh",
        "-T",
        self.host,
        command
        ], stdin=PIPE, stdout=PIPE, stderr=PIPE)
    stdout,stderr = p.communicate()
    return stdout
Executes the specified command on the remote host. The cmd must be format safe, this means { and } must be doubled, thusly: echo /var/local/maildir/{{cur,new}} the cmd can include the format word 'maildir' to be replaced by self.directory. eg: echo {maildir}/{{cur,new}}
def put_pipeline_definition(pipeline_id, pipeline_objects,
                            parameter_objects=None, parameter_values=None,
                            region=None, key=None, keyid=None, profile=None):
    """Add tasks, schedules, and preconditions to the specified pipeline.

    Idempotent: replaces any existing definition.

    :return: dict with either 'result' (the raw API response) or 'error'
        (the API validation errors, or the stringified boto exception).
    """
    parameter_objects = parameter_objects or []
    parameter_values = parameter_values or []
    client = _get_client(region, key, keyid, profile)
    r = {}
    try:
        response = client.put_pipeline_definition(
            pipelineId=pipeline_id,
            pipelineObjects=pipeline_objects,
            parameterObjects=parameter_objects,
            parameterValues=parameter_values,
        )
        # The API reports validation failures in-band rather than raising.
        if response['errored']:
            r['error'] = response['validationErrors']
        else:
            r['result'] = response
    except (botocore.exceptions.BotoCoreError,
            botocore.exceptions.ClientError) as e:
        r['error'] = six.text_type(e)
    return r
Add tasks, schedules, and preconditions to the specified pipeline. This function is idempotent and will replace an existing definition. CLI example: .. code-block:: bash salt myminion boto_datapipeline.put_pipeline_definition my_pipeline_id my_pipeline_objects
def add_query_params(url, params):
    """Inject additional query parameters into an existing URL.

    Parameters that already exist with the same name are overwritten;
    parameters with empty (falsy) values are ignored.  Returns the
    modified URL as a string.
    """
    def encode(s):
        # Keys and values are byte-encoded with the configured charset.
        return force_bytes(s, settings.DEFAULT_CHARSET)
    params = dict([(encode(k), encode(v)) for k, v in params.items() if v])
    parts = list(urlparse(url))
    # parts[4] is the query component.  NOTE: dict(parse_qsl(...)) keeps
    # only the last value of any repeated query key.
    query = dict(parse_qsl(parts[4]))
    query.update(params)
    parts[4] = urlencode(query)
    return urlunparse(parts)
Inject additional query parameters into an existing URL. If parameters already exist with the same name, they will be overwritten. Parameters with empty values are ignored. Return the modified URL as a string.
def request(uri, method, data, token=''):
    """Send a request to the TonicDNS API.

    Arguments:
        uri:    TonicDNS API URI
        method: HTTP method (the Request object is patched to use it,
                so PUT/DELETE etc. work)
        data:   payload, JSON-encoded and sent as the request body
        token:  TonicDNS API authentication token (optional)

    On any HTTP/URL error, writes the error to stderr and exits the
    process with status 1.  Returns the open response object otherwise.
    """
    socket.setdefaulttimeout(__timeout__)
    obj = urllib.build_opener(urllib.HTTPHandler)
    encoded = json.JSONEncoder(object).encode(data)
    data_utf8 = encoded.encode('utf-8')
    req = urllib.Request(uri, data=data_utf8)
    if encoded:
        req.add_header('Content-Type', 'application/json')
    if token:
        req.add_header('x-authentication-token', token)
    req.get_method = lambda: method
    try:
        res = obj.open(req)
        return res
    except urllib.HTTPError as e:
        # HTTPError is a subclass of URLError, so it must be caught first;
        # with the original ordering this handler was unreachable.
        sys.stderr.write("ERROR: %s\n" % e)
        exit(1)
    except urllib.URLError as e:
        sys.stderr.write("ERROR: %s\n" % e)
        exit(1)
Request to TonicDNS API. Arguments: uri: TonicDNS API URI method: TonicDNS API request method data: Post data to TonicDNS API token: TonicDNS API authentication token
def stem_leaf_plot(data, vmin, vmax, bins, digit=1, title=None):
    """Generate a stem-and-leaf style histogram of *data* over
    [vmin, vmax] using *bins* bins, printed via asciiplot.

    :return: (bin_edges, hist) as computed by np.histogram, with only
        the left edge of each bin kept.
    """
    assert bins > 0
    # NOTE(review): 'range' shadows the builtin name inside this function.
    range = vmax - vmin
    step = range * 1. / bins
    # For integer ranges use integer-sized bins (at least 1 wide).
    if isinstance(range, int):
        step = int(ceil(step))
    step = step or 1
    bins = np.arange(vmin, vmax + step, step)
    hist, bin_edges = np.histogram(data, bins=bins)
    # np.histogram returns len(hist) + 1 edges; keep only the left edges.
    bin_edges = bin_edges[:len(hist)]
    asciiplot(bin_edges, hist, digit=digit, title=title)
    print("Last bin ends in {0}, inclusive.".format(vmax), file=sys.stderr)
    return bin_edges, hist
Generate stem and leaf plot given a collection of numbers
def requiredGPU_MB(self, n):
    """Check whether *n* MBytes of GPU memory are available.

    Returns True when darknet was built without CUDA support, or when
    the amount of free memory cannot be determined (-1); otherwise
    returns whether the free GPU memory is at least *n* MB.
    """
    from darknet.core import darknet_with_cuda
    if (darknet_with_cuda()):
        free = getFreeGPU_MB()
        print("Yolo: requiredGPU_MB: required, free", n, free)
        if (free == -1):
            # Free memory unknown -- optimistically report enough memory.
            return True
        return (free>=n)
    else:
        return True
Check whether at least ``n`` MBytes of GPU memory are free. Returns True when darknet was built without CUDA support, or when the amount of free memory cannot be determined.
def snippets(self):
    """Return the names of all snippets in this DAP.

    A snippet name is the matching file name with its '.yaml' suffix
    stripped.
    """
    return [strip_suffix(f, '.yaml') for f in self._stripped_files
            if self._snippets_pattern.match(f)]
Get all snippets in this DAP
def covariance(self):
    """The covariance matrix of the 2D Gaussian function that has the
    same second-order central moments as the source, in pix**2.

    Returns a 2x2 all-NaN matrix when the zeroth moment is zero.
    """
    mu = self.moments_central
    if mu[0, 0] != 0:
        # Normalize the central moments by the zeroth (total) moment.
        m = mu / mu[0, 0]
        covariance = self._check_covariance(
            np.array([[m[0, 2], m[1, 1]],
                      [m[1, 1], m[2, 0]]]))
        return covariance * u.pix**2
    else:
        return np.empty((2, 2)) * np.nan * u.pix**2
The covariance matrix of the 2D Gaussian function that has the same second-order moments as the source.
def _init_sub_dsp(self, dsp, fringe, outputs, no_call, initial_dist,
                  index, full_name):
    """Initialize the dispatcher as sub-dispatcher and update the fringe.

    :param dsp: the sub-dispatcher.
    :param fringe: heapq of closest available nodes (of the parent).
    :param outputs: ending data nodes.
    :param no_call: if True the data node estimation function is not used.
    :param initial_dist: distance offset applied to the sub-solution's
        fringe items when they are pushed onto the parent fringe.
    :param index: index offset appended to this solution's index.
    :param full_name: full name of the sub-dispatcher.
    :return: the initialized sub-solution.
    """
    sol = self.__class__(
        dsp, {}, outputs, False, None, None, no_call, False,
        wait_in=self._wait_in.get(dsp, None), index=self.index + index,
        full_name=full_name
    )
    sol.sub_sol = self.sub_sol
    # Re-key the sub-solution's fringe items (shifting their distance by
    # initial_dist) and merge them into the parent's fringe.
    for f in sol.fringe:
        item = (initial_dist + f[0], (2,) + f[1][1:], f[-1])
        heapq.heappush(fringe, item)
    return sol
Initialize the dispatcher as sub-dispatcher and update the fringe. :param fringe: Heapq of closest available nodes. :type fringe: list[(float | int, bool, (str, Dispatcher)] :param outputs: Ending data nodes. :type outputs: list[str], iterable :param no_call: If True data node estimation function is not used. :type no_call: bool
def node_from_nid(self, nid):
    """Return the node in the `Flow` with the given `nid` identifier.

    Raises ValueError if no node carries that identifier.
    """
    found = next((n for n in self.iflat_nodes() if n.node_id == nid), None)
    if found is None:
        raise ValueError("Cannot find node with node id: %s" % nid)
    return found
Return the node in the `Flow` with the given `nid` identifier
def showLicense():
    """Print the license if requested.

    Tries to download the license text from LICENSE_URL and print it in
    plain text.  On any failure a warning is printed instead of raising.
    """
    print("Trying to recover the contents of the license...\n")
    try:
        text = urllib.urlopen(LICENSE_URL).read()
        print("License retrieved from " + emphasis(LICENSE_URL) + ".")
        raw_input("\n\tPress " + emphasis("<ENTER>") + " to print it.\n")
        print(text)
    except Exception:
        # Was a bare 'except:', which also swallowed KeyboardInterrupt and
        # SystemExit; keep the best-effort behavior but let those propagate.
        print(warning("The license could not be downloaded and printed."))
Method that prints the license if requested. It tries to find the license online and manually download it. This method only prints its contents in plain text.
def _get_user_agent(self):
    """Retrieve the request's User-Agent, if available.

    Taken from Flask Login utils.py.
    """
    user_agent = request.headers.get('User-Agent')
    if user_agent:
        user_agent = user_agent.encode('utf-8')
    # NOTE(review): returns UTF-8 *bytes* when a User-Agent is present but
    # a str ('') when absent -- callers must tolerate both types.
    return user_agent or ''
Retrieve the request's User-Agent, if available. Taken from Flask Login utils.py.
def add_errors(self, *errors: Union[BaseSchemaError, SchemaErrorCollection]) -> None:
    """Add the given errors to this schema's error store."""
    for schema_error in errors:
        self._error_cache.add(schema_error)
Adds errors to the error store for the schema
def get_forum(self):
    """Return the considered forum, looked up from the URL kwargs.

    Returns None when the URL carries no forum pk.  The database lookup
    is cached on the view instance (``self._forum``).
    """
    pk = self.kwargs.get(self.forum_pk_url_kwarg, None)
    if not pk:
        return
    if not hasattr(self, '_forum'):
        self._forum = get_object_or_404(Forum, pk=pk)
    return self._forum
Returns the considered forum.
def _compute_inter_event_std(self, C, C_PGA, pga1100, mag, vs30):
    """Compute the inter-event standard deviation (equation 25, page 82).

    Combines the tau term of the IMT and of PGA with the partial
    derivative of the site amplification with respect to PGA on rock,
    correlated through the coefficient C['rho'].
    """
    tau_0 = self._compute_std_0(C['s3'], C['s4'], mag)
    tau_b_pga = self._compute_std_0(C_PGA['s3'], C_PGA['s4'], mag)
    delta_amp = self._compute_partial_derivative_site_amp(C, pga1100, vs30)
    std_inter = np.sqrt(tau_0 ** 2 + (delta_amp ** 2) * (tau_b_pga ** 2) +
                        2 * delta_amp * tau_0 * tau_b_pga * C['rho'])
    return std_inter
Compute inter event standard deviation, equation 25, page 82.
def init_backends():
    """Load all backends into the module-level registries.

    CFFI is optional and registered first when importable; ctypes is
    mandatory.  Note that NullBackend is appended only *after* the
    active set is snapshotted, so it is registered but not active by
    default -- presumably intentional (TODO confirm).
    """
    global _BACKENDS, _ACTIVE_BACKENDS
    try:
        from .cffi_backend import CFFIBackend
    except ImportError:
        pass
    else:
        _BACKENDS.append(CFFIBackend)
    from .ctypes_backend import CTypesBackend
    from .null_backend import NullBackend
    _BACKENDS.append(CTypesBackend)
    _ACTIVE_BACKENDS = _BACKENDS[:]
    _BACKENDS.append(NullBackend)
Loads all backends
def babel_extract(config, input, output, target, keywords):
    """Babel: extract and update all messages marked for translation.

    Runs ``pybabel extract`` with the given mapping config, keywords and
    output POT file, then ``pybabel update`` against *target*.
    """
    click.echo(
        click.style(
            "Starting Extractions config:{0} input:{1} output:{2} keywords:{3}".format(
                config, input, output, keywords
            ),
            fg="green",
        )
    )
    keywords = " -k ".join(keywords)
    # .read() blocks until the command finishes.  os.popen() alone returns
    # immediately, so without it the update below could run before the
    # extraction had completed.
    os.popen(
        "pybabel extract -F {0} -k {1} -o {2} {3}".format(
            config, keywords, output, input
        )
    ).read()
    click.echo(click.style("Starting Update target:{0}".format(target), fg="green"))
    os.popen("pybabel update -N -i {0} -d {1}".format(output, target)).read()
    click.echo(click.style("Finish, you can start your translations", fg="green"))
Babel, Extracts and updates all messages marked for translation
def _restore_counts_maps(self):
    """Revert counts maps to their state prior to injecting any
    simulated components.
    """
    for c in self.components:
        c.restore_counts_maps()
    if hasattr(self.like.components[0].logLike, 'setCountsMap'):
        # Newer ST interface: counts maps can be swapped in place, so only
        # the ROI model needs to be rebuilt.
        self._init_roi_model()
    else:
        # Older interface: rebuild each binned analysis through a
        # temporary XML round-trip.
        self.write_xml('tmp')
        self._like = SummedLikelihood()
        for i, c in enumerate(self._components):
            c._create_binned_analysis()
            self._like.addComponent(c.like)
        self._init_roi_model()
        self.load_xml('tmp')
Revert counts maps to their state prior to injecting any simulated components.
def getConfig(self):
    """Return the configuration of the city.

    :return: configuration of the city (name, intervals, last date,
        copied exclusion lists, locations).
    :rtype: dict
    """
    config = {}
    config["name"] = self.city
    config["intervals"] = self.__intervals
    config["last_date"] = self.__lastDay
    config["excludedUsers"] = []
    config["excludedLocations"] = []
    # The exclusion lists are copied element by element so callers cannot
    # mutate the internal lists through the returned dict.
    for e in self.__excludedUsers:
        config["excludedUsers"].append(e)
    for e in self.__excludedLocations:
        config["excludedLocations"].append(e)
    config["locations"] = self.__locations
    return config
Return the configuration of the city. :return: configuration of the city. :rtype: dict.
def compute_texptime(imageObjectList):
    """Add up the exposure time for all the members in the pattern,
    since 'drizzle' doesn't have the necessary information to correctly
    set this itself.

    :return: tuple (total exposure time, earliest start, latest end)
    """
    expnames = []
    exptimes = []
    start = []
    end = []
    for img in imageObjectList:
        expnames.extend(img.getKeywordList('_expname'))
        exptimes.extend(img.getKeywordList('_exptime'))
        start.extend(img.getKeywordList('_expstart'))
        end.extend(img.getKeywordList('_expend'))
    expstart = min(start)
    expend = max(end)
    # Count each exposure once: consecutive entries sharing the same
    # exposure name belong to the same exposure.
    exptime = 0.
    previous = None
    for name, secs in zip(expnames, exptimes):
        if name != previous:
            previous = name
            exptime += secs
    return (exptime, expstart, expend)
Add up the exposure time for all the members in the pattern, since 'drizzle' doesn't have the necessary information to correctly set this itself.
def _readintle(self, length, start): ui = self._readuintle(length, start) if not ui >> (length - 1): return ui tmp = (~(ui - 1)) & ((1 << length) - 1) return -tmp
Read bits and interpret as a little-endian signed int.
def load_robots_txt(self, url_info: URLInfo, text: str):
    """Parse *text* as a robots.txt document and cache the resulting
    parser under the key derived from *url_info*.
    """
    key = self.url_info_key(url_info)
    parser = robotexclusionrulesparser.RobotExclusionRulesParser()
    parser.parse(text)
    self._parsers[key] = parser
Load the robot.txt file.
def _list(self, foldername="INBOX", reverse=False, since=None):
    """Do structured list output.

    Sorts the folder's messages by date, possibly reversed, filtered
    from 'since'.  The returned list contains tuples of:
    (folder, message key, message object).
    """
    folder = self.folder \
        if foldername == "INBOX" \
        else self._getfolder(foldername)
    def sortcmp(d):
        # Best effort sort key: messages whose date cannot be read sort
        # first (bare except is deliberate here).
        try:
            return d[1].date
        except:
            return -1
    lst = folder.items() if not since else folder.items_since(since)
    sorted_lst = sorted(lst, key=sortcmp, reverse=1 if reverse else 0)
    itemlist = [(folder, key, msg) for key,msg in sorted_lst]
    return itemlist
Do structured list output. Sorts the list by date, possibly reversed, filtered from 'since'. The returned list is: foldername, message key, message object
def edge_val_set(self, graph, orig, dest, idx, key, branch, turn, tick, value):
    """Set this key of this edge to this value at the given time.

    Raises TimeError when a value has already been queued for the same
    (branch, turn, tick).
    """
    if (branch, turn, tick) in self._btts:
        raise TimeError
    self._btts.add((branch, turn, tick))
    # Identifiers and the value are packed for storage; idx and the time
    # triple are stored raw.
    graph, orig, dest, key, value = map(self.pack, (graph, orig, dest, key, value))
    self._edgevals2set.append(
        (graph, orig, dest, idx, key, branch, turn, tick, value)
    )
Set this key of this edge to this value.
def _render(template, callable_, args, data, as_unicode=False):
    """Create a Context and return the string output of the given
    template and template callable.
    """
    # Choose the output buffer: unicode buffer, plain StringIO when the
    # template passes bytestrings through, or an encoding buffer otherwise.
    if as_unicode:
        buf = util.FastEncodingBuffer(as_unicode=True)
    elif template.bytestring_passthrough:
        buf = compat.StringIO()
    else:
        buf = util.FastEncodingBuffer(
            as_unicode=as_unicode,
            encoding=template.output_encoding,
            errors=template.encoding_errors)
    context = Context(buf, **data)
    context._outputting_as_unicode = as_unicode
    context._set_with_template(template)
    _render_context(template, callable_, context,
                    *args, **_kwargs_for_callable(callable_, data))
    return context._pop_buffer().getvalue()
create a Context and return the string output of the given template and template callable.
def children(self, vertex):
    """Return the list of immediate children of *vertex*, i.e. the head
    of every edge leaving it.
    """
    return list(map(self.head, self.out_edges(vertex)))
Return the list of immediate children of the given vertex.
def help_cli_search(self):
    """Return the colorized help text for Workbench CLI search."""
    help = '%sSearch: %s returns sample_sets, a sample_set is a set/list of md5s.' % (color.Yellow, color.Green)
    help += '\n\n\t%sSearch for all samples in the database that are known bad pe files,' % (color.Green)
    help += '\n\t%sthis command returns the sample_set containing the matching items'% (color.Green)
    help += '\n\t%s> my_bad_exes = search([\'bad\', \'exe\'])' % (color.LightBlue)
    help += '\n\n\t%sRun workers on this sample_set:' % (color.Green)
    help += '\n\t%s> pe_outputs = pe_features(my_bad_exes) %s' % (color.LightBlue, color.Normal)
    help += '\n\n\t%sLoop on the generator (or make a DataFrame see >help dataframe)' % (color.Green)
    help += '\n\t%s> for output in pe_outputs: %s' % (color.LightBlue, color.Normal)
    help += '\n\t\t%s print output %s' % (color.LightBlue, color.Normal)
    return help
Help for Workbench CLI Search
def serialize_for_transport(obj_pyxb, pretty=False, strip_prolog=False, xslt_url=None):
    """Serialize a PyXB object to UTF-8 encoded XML ``bytes`` for
    machine usage (network transport, filesystem storage).

    :param obj_pyxb: PyXB object to serialize.
    :param pretty: True to pretty-print for human readability.
    :param strip_prolog: True to remove any XML prolog from the result.
    :param xslt_url: optional XSLT stylesheet URL added as a processing
        instruction.
    :return: UTF-8 encoded XML document (bytes).
    """
    return serialize_gen(obj_pyxb, 'utf-8', pretty, strip_prolog, xslt_url)
Serialize PyXB object to XML ``bytes`` with UTF-8 encoding for transport over the network, filesystem storage and other machine usage. Args: obj_pyxb: PyXB object PyXB object to serialize. pretty: bool True: Use pretty print formatting for human readability. strip_prolog: True: remove any XML prolog (e.g., ``<?xml version="1.0" encoding="utf-8"?>``), from the resulting XML doc. xslt_url: str If specified, add a processing instruction to the XML doc that specifies the download location for an XSLT stylesheet. Returns: bytes: UTF-8 encoded XML document See Also: ``serialize_for_display()``
def rounder(input_number, digit=5):
    """Round *input_number* and convert it to str.

    Tuples are rounded element-wise and formatted as "(a,b,...)";
    non-numeric values are passed through via str().

    :param input_number: number (or tuple) to round
    :param digit: digits kept right of the decimal point
    :return: rounded number as str
    """
    if isinstance(input_number, tuple):
        tuple_list = list(input_number)
        tuple_str = []
        for i in tuple_list:
            if isfloat(i):
                tuple_str.append(str(numpy.around(i, digit)))
            else:
                tuple_str.append(str(i))
        return "(" + ",".join(tuple_str) + ")"
    if isfloat(input_number):
        return str(numpy.around(input_number, digit))
    return str(input_number)
Round input number and convert to str. :param input_number: input number :type input_number : anything :param digit: scale (the number of digits to the right of the decimal point in a number.) :type digit : int :return: round number as str
def all_sharded_cluster_links(cluster_id, shard_id=None, router_id=None, rel_to=None):
    """Get a list of all links to be included with ShardedClusters.

    The link whose rel matches *rel_to* is marked as the self link.
    """
    return [
        sharded_cluster_link(rel, cluster_id, shard_id, router_id,
                             self_rel=(rel == rel_to))
        for rel in (
            'get-sharded-clusters', 'get-sharded-cluster-info',
            'sharded-cluster-command', 'delete-sharded-cluster',
            'add-shard', 'get-shards', 'get-configsvrs', 'get-routers',
            'add-router'
        )
    ]
Get a list of all links to be included with ShardedClusters.
def AllTypes():
    """Get a list of all available asset types.

    Returns:
        list: of AssetType items.
    """
    return [AssetType.CreditFlag, AssetType.DutyFlag,
            AssetType.GoverningToken, AssetType.UtilityToken,
            AssetType.Currency, AssetType.Share, AssetType.Invoice,
            AssetType.Token]
Get a list of all available asset types. Returns: list: of AssetType items.
def choose_form(self, number=None, xpath=None, name=None, **kwargs):
    """Set the default form.

    Exactly one selector must be supplied:

    :param number: number of form (starting from zero)
    :param id: value of the "id" attribute (keyword-only via kwargs)
    :param name: value of the "name" attribute
    :param xpath: XPath query
    :raises DataNotFound: if no form matches the selector
    :raises GrabMisuseError: if called without any selector

    The selected form becomes available via the `form` attribute.
    """
    # 'id' is a builtin, so it is taken out of kwargs rather than being a
    # named parameter.
    id_ = kwargs.pop('id', None)
    if id_ is not None:
        try:
            self._lxml_form = self.select('//form[@id="%s"]' % id_).node()
        except IndexError:
            raise DataNotFound("There is no form with id: %s" % id_)
    elif name is not None:
        try:
            self._lxml_form = self.select(
                '//form[@name="%s"]' % name).node()
        except IndexError:
            raise DataNotFound('There is no form with name: %s' % name)
    elif number is not None:
        try:
            self._lxml_form = self.tree.forms[number]
        except IndexError:
            raise DataNotFound('There is no form with number: %s' % number)
    elif xpath is not None:
        try:
            self._lxml_form = self.select(xpath).node()
        except IndexError:
            raise DataNotFound(
                'Could not find form with xpath: %s' % xpath)
    else:
        raise GrabMisuseError('choose_form methods requires one of '
                              '[number, id, name, xpath] arguments')
Set the default form. :param number: number of form (starting from zero) :param id: value of "id" attribute :param name: value of "name" attribute :param xpath: XPath query :raises: :class:`DataNotFound` if form not found :raises: :class:`GrabMisuseError` if method is called without parameters Selected form will be available via `form` attribute of `Grab` instance. All form methods will work with default form. Examples:: # Select second form g.choose_form(1) # Select by id g.choose_form(id="register") # Select by name g.choose_form(name="signup") # Select by xpath g.choose_form(xpath='//form[contains(@action, "/submit")]')
def _init_deferred_buffers(self):
    """Initialize or reinitialize all the deferred transfer buffers.

    Calling this method drops all pending transactions, so use with
    care.
    """
    self._transfer_list = collections.deque()
    self._crnt_cmd = _Command(self._packet_size)
    self._commands_to_read = collections.deque()
    self._command_response_buf = bytearray()
Initialize or reinitalize all the deferred transfer buffers Calling this method will drop all pending transactions so use with care.
def validate(self, value):
    """Validate that the value is, at least on paper, a valid email
    address.

    :param value: <str>
    :return: <bool>
    :raises orb.errors.ColumnValidationError: when the value is a string
        that does not match the column's email pattern.
    """
    if isinstance(value, (str, unicode)) and not re.match(self.__pattern, value):
        raise orb.errors.ColumnValidationError(self, 'The email provided is not valid.')
    else:
        # Non-string values (and matching strings) defer to the base
        # column validation.
        return super(EmailColumn, self).validate(value)
Validates the value provided is a valid email address, at least, on paper. :param value: <str> :return: <bool>
def publish(self, topic, message=None, qos=0, retain=False):
    """Publish a message to a topic with the specified qos and retained
    flag.  Requires a connection established via `Connect` first.

    `topic`   topic to which the message will be published
    `message` message payload to publish
    `qos`     qos of the message
    `retain`  retained flag
    """
    logger.info('Publish topic: %s, message: %s, qos: %s, retain: %s'
                % (topic, message, qos, retain))
    self._mid = -1
    self._mqttc.on_publish = self._on_publish
    result, mid = self._mqttc.publish(topic, message, int(qos), retain)
    if result != 0:
        raise RuntimeError('Error publishing: %s' % result)
    timer_start = time.time()
    # Spin the client's network loop until the on_publish callback
    # confirms our message id, or the loop timeout expires.
    while time.time() < timer_start + self._loop_timeout:
        if mid == self._mid:
            break;
        self._mqttc.loop()
    if mid != self._mid:
        logger.warn('mid wasn\'t matched: %s' % mid)
Publish a message to a topic with specified qos and retained flag. It is required that a connection has been established using `Connect` keyword before using this keyword. `topic` topic to which the message will be published `message` message payload to publish `qos` qos of the message `retain` retained flag Examples: | Publish | test/test | test message | 1 | ${false} |
def iterate(self, train=None, valid=None, max_updates=None, **kwargs):
    r'''Optimize a loss iteratively using a training and validation dataset.

    Yields (training monitors, validation monitors) dict pairs after
    every optimization step; the validation monitors may be stale when
    validation did not run this iteration.  Extra keyword arguments set
    global optimizer attributes.

    :param train: training dataset (sequence or Dataset).
    :param valid: validation dataset; defaults to the training data.
    :param max_updates: optional cap on gradient updates; when omitted,
        early stopping (patience) decides when to halt.
    '''
    self._compile(**kwargs)
    if valid is None:
        valid = train
    iteration = 0
    training = validation = None
    while max_updates is None or iteration < max_updates:
        # Re-validate every `validate_every` iterations (including the
        # very first one, since 0 % n == 0).
        if not iteration % self.validate_every:
            try:
                validation = self.evaluate(valid)
            except KeyboardInterrupt:
                util.log('interrupted!')
                break
            if self._test_patience(validation):
                util.log('patience elapsed!')
                break
        try:
            training = self._step(train)
        except KeyboardInterrupt:
            util.log('interrupted!')
            break
        iteration += 1
        self._log(training, iteration)
        yield training, validation
    # Restore the best parameters found during optimization.
    self.set_params('best')
r'''Optimize a loss iteratively using a training and validation dataset. This method yields a series of monitor values to the caller. After every optimization epoch, a pair of monitor dictionaries is generated: one evaluated on the training dataset during the epoch, and another evaluated on the validation dataset at the most recent validation epoch. The validation monitors might not be updated during every optimization iteration; in this case, the most recent validation monitors will be yielded along with the training monitors. Additional keyword arguments supplied here will set the global optimizer attributes. Parameters ---------- train : sequence or :class:`Dataset <downhill.dataset.Dataset>` A set of training data for computing updates to model parameters. valid : sequence or :class:`Dataset <downhill.dataset.Dataset>` A set of validation data for computing monitor values and determining when the loss has stopped improving. Defaults to the training data. max_updates : int, optional If specified, halt optimization after this many gradient updates have been processed. If not provided, uses early stopping to decide when to halt. Yields ------ train_monitors : dict A dictionary mapping monitor names to values, evaluated on the training dataset. valid_monitors : dict A dictionary containing monitor values evaluated on the validation dataset.
def _fill_untouched(idx, ret, fill_value): untouched = np.ones_like(ret, dtype=bool) untouched[idx] = False ret[untouched] = fill_value
Set any elements of ``ret`` not indexed by ``idx`` to ``fill_value``, modifying ``ret`` in place.
def GetAnalyzersInformation(cls):
    """Retrieve the analyzers information.

    Returns:
        list[tuple]: containing:
            str: analyzer name.
            str: analyzer description ('' when the class defines none).
    """
    return [
        (analyzer_class.NAME, getattr(analyzer_class, 'DESCRIPTION', ''))
        for _, analyzer_class in cls.GetAnalyzers()
    ]
Retrieves the analyzers information. Returns: list[tuple]: containing: str: analyzer name. str: analyzer description.
def _handle_tag_definetext2(self):
    """Handle the DefineText2 tag.

    Uses the generic DefineText parser with the RGBA struct reader (the
    colored variant of the tag).
    """
    obj = _make_object("DefineText2")
    self._generic_definetext_parser(obj, self._get_struct_rgba)
    return obj
Handle the DefineText2 tag.
def _update_length(self): action_length = 4 + len(self.field.pack()) overflow = action_length % 8 self.length = action_length if overflow: self.length = action_length + 8 - overflow
Update the length field of the struct.
def parse_properties(node):
    """Parse a Tiled xml node and return a dict that represents a tiled
    "property".

    Each <property> may carry a "type" attribute naming a python builtin
    (e.g. "int", "float"); when present and resolvable, the value is
    cast with it, otherwise the raw string value is kept.

    :param node: etree element
    :return: dict mapping property names to (optionally cast) values
    """
    d = dict()
    for child in node.findall('properties'):
        for subnode in child.findall('property'):
            cls = None
            type_name = subnode.get("type")
            if type_name is not None:
                try:
                    cls = getattr(importlib.import_module('builtins'), type_name)
                except AttributeError:
                    # Fixed: the original message contained a broken "[}"
                    # placeholder and never interpolated the type name.
                    logger.info("Type %s not a built-in type. "
                                "Defaulting to string-cast.", type_name)
            value = subnode.get('value')
            d[subnode.get('name')] = cls(value) if cls is not None else value
    return d
Parse a Tiled xml node and return a dict that represents a tiled "property" :param node: etree element :return: dict
def imports():
    """Install the import hook that loads python extensions from the
    app's lib folder for the duration of this context block.

    Preferred over `install` as it's faster than installing the hook
    permanently.
    """
    from .core.import_hooks import ExtensionImporter
    importer = ExtensionImporter()
    sys.meta_path.append(importer)
    yield
    # NOTE(review): if the with-body raises, this line never runs and the
    # importer stays installed; consider try/finally.
    sys.meta_path.remove(importer)
Install the import hook to load python extensions from app's lib folder during the context of this block. This method is preferred as it's faster than using install.