code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
async def parse_get_revoc_reg_def_response(get_revoc_ref_def_response: str) -> (str, str):
    """Parse a GET_REVOC_REG_DEF response into an Anoncreds-compatible
    Revocation Registry Definition.

    :param get_revoc_ref_def_response: response of a GET_REVOC_REG_DEF request.
    :return: (revocation registry definition id, revocation registry definition json).
    """
    logger = logging.getLogger(__name__)
    logger.debug("parse_get_revoc_reg_def_response: >>> get_revoc_ref_def_response: %r",
                 get_revoc_ref_def_response)

    # The native callback is created once and cached on the function object.
    if not hasattr(parse_get_revoc_reg_def_response, "cb"):
        logger.debug("parse_get_revoc_reg_def_response: Creating callback")
        parse_get_revoc_reg_def_response.cb = create_cb(
            CFUNCTYPE(None, c_int32, c_int32, c_char_p, c_char_p))

    encoded_response = c_char_p(get_revoc_ref_def_response.encode('utf-8'))

    rev_reg_id, rev_reg_json = await do_call('indy_parse_get_revoc_reg_def_response',
                                             encoded_response,
                                             parse_get_revoc_reg_def_response.cb)

    res = (rev_reg_id.decode(), rev_reg_json.decode())
    logger.debug("parse_get_revoc_reg_def_response: <<< res: %r", res)
    return res
Parse a GET_REVOC_REG_DEF response to get Revocation Registry Definition in the format compatible with Anoncreds API. :param get_revoc_ref_def_response: response of GET_REVOC_REG_DEF request. :return: Revocation Registry Definition Id and Revocation Registry Definition json. { "id": string - ID of the Revocation Registry, "revocDefType": string - Revocation Registry type (only CL_ACCUM is supported for now), "tag": string - Unique descriptive ID of the Registry, "credDefId": string - ID of the corresponding CredentialDefinition, "value": Registry-specific data { "issuanceType": string - Type of Issuance(ISSUANCE_BY_DEFAULT or ISSUANCE_ON_DEMAND), "maxCredNum": number - Maximum number of credentials the Registry can serve. "tailsHash": string - Hash of tails. "tailsLocation": string - Location of tails file. "publicKeys": <public_keys> - Registry's public key. }, "ver": string - version of revocation registry definition json. }
def update(self):
    """Destroy and recreate the container with the new settings.

    Preserves the console and aux handles across the rebuild and only
    restarts the container if it was running beforehand.
    """
    saved_console = self.console
    saved_aux = self.aux
    state = yield from self._get_container_state()
    yield from self.reset()
    yield from self.create()
    # Restore handles that reset/create may have replaced.
    self.console = saved_console
    self.aux = saved_aux
    if state == "running":
        yield from self.start()
Destroy and recreate the container with the new settings.
def new_method_return(self):
    """Create a new DBUS.MESSAGE_TYPE_METHOD_RETURN that is a reply to this Message.

    :raises CallFailed: if the underlying libdbus call returns a null result.
    """
    result = dbus.dbus_message_new_method_return(self._dbobj)
    # Identity comparison with None (was `== None`).
    if result is None:
        raise CallFailed("dbus_message_new_method_return")
    return type(self)(result)
creates a new DBUS.MESSAGE_TYPE_METHOD_RETURN that is a reply to this Message.
def hull_moving_average(data, period):
    """Hull Moving Average.

    Formula: HMA = WMA(2*WMA(n/2) - WMA(n)), over sqrt(n) periods.
    """
    catch_errors.check_for_period_error(data, period)
    half_period_wma = wma(data, int(period / 2))
    full_period_wma = wma(data, period)
    raw_hma = 2 * half_period_wma - full_period_wma
    return wma(raw_hma, int(np.sqrt(period)))
Hull Moving Average. Formula: HMA = WMA(2*WMA(n/2) - WMA(n)), sqrt(n)
def get_similar_items(self, items=None, k=10, verbose=False):
    """Get the k most similar items for each item in ``items``.

    :param items: SArray or list of item ids; None means all training items.
    :param k: number of similar items per item.
    :param verbose: show progress printing.
    :return: SFrame with columns item, similar, score and rank.
    """
    get_all_items = items is None
    if get_all_items:
        items = _SArray()
    elif isinstance(items, list):
        items = _SArray(items)

    def check_type(arg, arg_name, required_type, allowed_types):
        # Raise a descriptive TypeError when the argument type is wrong.
        if not isinstance(arg, required_type):
            raise TypeError("Parameter " + arg_name + " must be of type(s) "
                            + (", ".join(allowed_types))
                            + "; Type '" + str(type(arg)) + "' not recognized.")

    check_type(items, "items", _SArray, ["SArray", "list"])
    check_type(k, "k", int, ["int"])
    return self.__proxy__.get_similar_items(items, k, verbose, get_all_items)
Get the k most similar items for each item in items. Each type of recommender has its own model for the similarity between items. For example, the item_similarity_recommender will return the most similar items according to the user-chosen similarity; the factorization_recommender will return the nearest items based on the cosine similarity between latent item factors. Parameters ---------- items : SArray or list; optional An :class:`~turicreate.SArray` or list of item ids for which to get similar items. If 'None', then return the `k` most similar items for all items in the training set. k : int, optional The number of similar items for each item. verbose : bool, optional Progress printing is shown. Returns ------- out : SFrame A SFrame with the top ranked similar items for each item. The columns `item`, 'similar', 'score' and 'rank', where `item` matches the item column name specified at training time. The 'rank' is between 1 and `k` and 'score' gives the similarity score of that item. The value of the score depends on the method used for computing item similarities. Examples -------- >>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"], 'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"]}) >>> m = turicreate.item_similarity_recommender.create(sf) >>> nn = m.get_similar_items()
def create_disk(name, size):
    """Create a VMM disk with the specified name and size.

    size: size in megabytes, or with a suffix such as M, G, T.

    CLI Example:

    .. code-block:: bash

        salt '*' vmctl.create_disk /path/to/disk.img size=10G
    """
    cmd = 'vmctl create {0} -s {1}'.format(name, size)
    result = __salt__['cmd.run_all'](cmd,
                                     output_loglevel='trace',
                                     python_shell=False)
    if result['retcode'] != 0:
        # `changes` is False because nothing was created before the failure.
        raise CommandExecutionError(
            'Problem encountered creating disk image',
            info={'errors': [result['stderr']], 'changes': False}
        )
    return True
Create a VMM disk with the specified `name` and `size`. size: Size in megabytes, or use a specifier such as M, G, T. CLI Example: .. code-block:: bash salt '*' vmctl.create_disk /path/to/disk.img size=10G
def _sethex(self, hexstring):
    """Reset the bitstring to have the value given in hexstring."""
    hexstring = tidy_input_string(hexstring)
    hexstring = hexstring.replace('0x', '')
    length = len(hexstring)
    # Pad odd-length input with a trailing zero nibble so fromhex accepts it.
    if length % 2:
        hexstring += '0'
    try:
        try:
            data = bytearray.fromhex(hexstring)
        except TypeError:
            # NOTE(review): fallback presumably for old Python 2 fromhex —
            # unreachable on Python 3.
            data = bytearray.fromhex(unicode(hexstring))
    except ValueError:
        raise CreationError("Invalid symbol in hex initialiser.")
    # Each hex digit is 4 bits; length was taken before padding.
    self._setbytes_unsafe(data, length * 4, 0)
Reset the bitstring to have the value given in hexstring.
def get_encodings():
    """Return a list of string encodings to try.

    Starts with the salt system encoding, then the interpreter default,
    then common fallbacks, without duplicates.
    """
    encodings = [__salt_system_encoding__]
    try:
        sys_enc = sys.getdefaultencoding()
    except ValueError:
        sys_enc = None
    if sys_enc and sys_enc not in encodings:
        encodings.append(sys_enc)
    # Common fallbacks, appended only if not already present.
    for fallback in ['utf-8', 'latin-1']:
        if fallback not in encodings:
            encodings.append(fallback)
    return encodings
return a list of string encodings to try
def decrement(name, tags=None):
    """Function decorator for decrementing a statsd stat whenever a
    function is invoked.

    >>> from statsdecor.decorators import decrement
    >>> @decrement('my.metric')
    >>> def my_func():
    >>>     pass
    """
    def wrap(f):
        @wraps(f)
        def decorator(*args, **kwargs):
            stats = client()
            result = f(*args, **kwargs)
            # Stat is decremented only after a successful call.
            stats.decr(name, tags=tags)
            return result
        return decorator
    return wrap
Function decorator for decrementing a statsd stat whenever a function is invoked. >>> from statsdecor.decorators import decrement >>> @decrement('my.metric') >>> def my_func(): >>> pass
def get_credit_notes_per_page(self, per_page=1000, page=1, params=None):
    """Get credit notes per page.

    :param per_page: how many objects per page (default: 1000).
    :param page: which page (default: 1).
    :param params: search parameters (default: None).
    :return: list
    """
    return self._get_resource_per_page(
        resource=CREDIT_NOTES,
        per_page=per_page,
        page=page,
        params=params,
    )
Get credit notes per page :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :param params: Search parameters. Default: {} :return: list
def list_team_codes():
    """List team codes and names grouped per league."""
    cleanlist = sorted(TEAM_DATA, key=lambda k: (k["league"]["name"], k["code"]))
    leaguenames = sorted({team["league"]["name"] for team in cleanlist})
    for league in leaguenames:
        click.secho(league, fg="green", bold=True)
        for team in cleanlist:
            if team["league"]["name"] != league:
                continue
            # Teams without a code carry the literal string "null"; skip them.
            if team["code"] != "null":
                click.secho(u"{0}: {1}".format(team["code"], team["name"]),
                            fg="yellow")
        click.secho("")
List team names in alphabetical order of team code, per league.
def on_purchase_completed(self, mapping=None, convert=None, default=None):
    """Decorator routing a Connections.Response request to the wrapped function.

    The request is sent when Alexa completes the purchase flow.

    :param mapping: maps view-function parameters to request fields
        (defaults to payload/name/status/token identity mapping).
    :param convert: optional converters for mapped parameters.
    :param default: default values for mapped parameters.
    """
    # Avoid mutable default arguments: build fresh objects on every call.
    if mapping is None:
        mapping = {'payload': 'payload', 'name': 'name',
                   'status': 'status', 'token': 'token'}
    if convert is None:
        convert = {}
    if default is None:
        default = {}

    def decorator(f):
        self._intent_view_funcs['Connections.Response'] = f
        self._intent_mappings['Connections.Response'] = mapping
        self._intent_converts['Connections.Response'] = convert
        self._intent_defaults['Connections.Response'] = default

        @wraps(f)
        def wrapper(*args, **kwargs):
            self._flask_view_func(*args, **kwargs)
        return f
    return decorator
Decorator routes a Connections.Response to the wrapped function. Request is sent when Alexa completes the purchase flow. See https://developer.amazon.com/docs/in-skill-purchase/add-isps-to-a-skill.html#handle-results The wrapped view function may accept parameters from the Request. In addition to locale, requestId, timestamp, and type @ask.on_purchase_completed( mapping={'payload': 'payload','name':'name','status':'status','token':'token'}) def completed(payload, name, status, token): logger.info(payload) logger.info(name) logger.info(status) logger.info(token)
def related_to(self):
    """Return a list of all parameters that either constrain or are
    constrained by this parameter."""
    params = []
    # Copy before extending: the original appended is_constraint directly
    # onto self.in_constraints, mutating the attribute on every call.
    constraints = list(self.in_constraints)
    if self.is_constraint is not None:
        constraints.append(self.is_constraint)
    for constraint in constraints:
        for var in constraint._vars:
            param = var.get_parameter()
            # Skip duplicates and this parameter itself.
            if param not in params and param.uniqueid != self.uniqueid:
                params.append(param)
    return params
returns a list of all parameters that are either constrained by or constrain this parameter
def has_port_by_ref(self, port_ref):
    """Check if this component has a port matching the given reference to a
    CORBA PortService object.

    :param port_ref: the port reference to look up.
    :return: True if a matching port exists, else False.
    """
    with self._mutex:
        # Bug fix: get_port_by_ref is a bound method — the original passed
        # self twice (self.get_port_by_ref(self, port_ref)).
        return bool(self.get_port_by_ref(port_ref))
Check if this component has a port by the given reference to a CORBA PortService object.
def assure_image(fnc):
    """Decorator converting an image ID passed as the 'img' parameter into
    an Image object before invoking the wrapped function."""
    @wraps(fnc)
    def _wrapped(self, img, *args, **kwargs):
        # Resolve the ID through the manager unless it is already an Image.
        resolved = img if isinstance(img, Image) else self._manager.get(img)
        return fnc(self, resolved, *args, **kwargs)
    return _wrapped
Converts a image ID passed as the 'image' parameter to a image object.
def is_valid(self, name=None, debug=False):
    """Check whether the current xml path is to be processed.

    Walks self.current_tree through self.action_tree; the path is valid
    when every element is found (or matched by the ALL_TAGS wildcard) and
    the walk ends on a non-None subtree.

    :param name: label used only for debug output.
    :param debug: if True, print the validation result.
    :return: True if the current xml path should be processed.
    """
    valid_tags = self.action_tree
    invalid = False
    for item in self.current_tree:
        try:
            if item in valid_tags or self.ALL_TAGS in valid_tags:
                valid_tags = valid_tags[item if item in valid_tags else self.ALL_TAGS]
            else:
                valid_tags = None
                invalid = True
                break
        except (KeyError, TypeError):
            # valid_tags may be None or a non-mapping leaf: invalid path.
            invalid = True
            break
    if debug:
        # Fixed: the original used a Python 2 print statement.
        print(name, not invalid and valid_tags is not None)
    return not invalid and valid_tags is not None
Check to see if the current xml path is to be processed.
def update_status(self):
    """Get the current status of this task and return a new Task object.

    :raises TaskRunFailed: failed to update the task status.
    """
    response = self.make_request(TaskRunFailed, href=self.href)
    return Task(response)
Gets the current status of this task and returns a new task object. :raises TaskRunFailed: fail to update task status
def nmb_weights_hidden(self) -> int:
    """Number of hidden weights.

    >>> from hydpy import ANN
    >>> ann = ANN(None)
    >>> ann(nmb_inputs=2, nmb_neurons=(4, 3, 2), nmb_outputs=3)
    >>> ann.nmb_weights_hidden
    18
    """
    # Fully-connected weight count between each pair of consecutive layers.
    return sum(
        self.nmb_neurons[idx] * self.nmb_neurons[idx + 1]
        for idx in range(self.nmb_layers - 1)
    )
Number of hidden weights. >>> from hydpy import ANN >>> ann = ANN(None) >>> ann(nmb_inputs=2, nmb_neurons=(4, 3, 2), nmb_outputs=3) >>> ann.nmb_weights_hidden 18
def get_sampleS(self, res, DS=None, resMode='abs', ind=None, offsetIn=0.,
                Out='(X,Y,Z)', Ind=None):
    """Sample, with resolution res, the surface defined by DS or ind.

    :param res: desired resolution (float, or [dl, dXPhi]).
    :param DS: optional [D1, D2, D3] domain limits; None = whole surface.
    :param resMode: 'abs' (absolute) or 'rel' (relative) resolution.
    :param ind: optional point indices; DS is then ignored.
    :param offsetIn: offset from the surface (positive = inwards).
    :param Out: coordinate system of returned points, e.g. '(X,Y,Z)'.
    :param Ind: indices of entities to consider (multi-entity objects only).
    :return: (pts, dS, ind, reseff) — sample point coordinates, associated
        surfaces, point indices and effective resolutions.
    """
    if Ind is not None:
        assert self.dgeom['Multi']
    kwdargs = dict(
        DS=DS, dSMode=resMode, ind=ind, DIn=offsetIn,
        VIn=self.dgeom['VIn'], VType=self.Id.Type,
        VLim=np.ascontiguousarray(self.Lim), nVLim=self.noccur,
        Out=Out, margin=1.e-9,
        Multi=self.dgeom['Multi'], Ind=Ind,
    )
    args = [
        self.Poly,
        self.dgeom['P1Min'][0], self.dgeom['P1Max'][0],
        self.dgeom['P2Min'][1], self.dgeom['P2Max'][1],
        res,
    ]
    pts, dS, ind, reseff = _comp._Ves_get_sampleS(*args, **kwdargs)
    return pts, dS, ind, reseff
Sample, with resolution res, the surface defined by DS or ind An optionnal offset perpendicular to the surface can be used (offsetIn>0 => inwards) Parameters ---------- res : float / list of 2 floats Desired resolution of the surfacic sample float : same resolution for all directions of the sample list : [dl,dXPhi] where: dl : res. along polygon contours (cross-section) dXPhi : res. along axis (toroidal/linear direction) DS : None / list of 3 lists of 2 floats Limits of the domain in which the sample should be computed None : whole surface of the object list : [D1,D2,D3], where Di is a len()=2 list (increasing floats, setting limits along coordinate i) [DR,DZ,DPhi]: in toroidal geometry (self.Id.Type=='Tor') [DX,DY,DZ] : in linear geometry (self.Id.Type=='Lin') resMode : str Flag, specifies if res is absolute or relative to element sizes 'abs' : res is an absolute distance 'rel' : if res=0.1, each polygon segment is divided in 10, as is the toroidal/linear length ind : None / np.ndarray of int If provided, DS is ignored and the sample points corresponding to the provided indices are returned Example (assuming obj is a Ves object) > # We create a 5x5 cm2 sample of the whole surface > pts, dS, ind, reseff = obj.get_sample(0.05) > # Perform operations, save only the points indices (save space) > ... > # Retrieve the points from their indices (requires same res) > pts2, dS2, ind2, reseff2 = obj.get_sample(0.05, ind=ind) > np.allclose(pts,pts2) True offsetIn: float Offset distance from the actual surface of the object Inwards if positive Useful to avoid numerical errors Out : str Flag indicating the coordinate system of returned points e.g. : '(X,Y,Z)' or '(R,Z,Phi)' Ind : None / iterable of ints Array of indices of the entities to be considered (only when multiple entities, i.e.: self.nLim>1) Returns ------- pts : np.ndarray / list of np.ndarrays Sample points coordinates, as a (3,N) array. 
A list is returned if the object has multiple entities dS : np.ndarray / list of np.ndarrays The surface (in m^2) associated to each point ind : np.ndarray / list of np.ndarrays The index of each point reseff : np.ndarray / list of np.ndarrays Effective resolution in both directions after sample computation
def rolling_window(a, axis, window, center, fill_value):
    """Rolling window with padding along ``axis``.

    Pads ``a`` with ``fill_value`` so the window is defined at the array
    edges, then delegates to ``_rolling_window``.
    """
    pads = [(0, 0)] * a.ndim
    if center:
        # Split the padding around the window centre.
        start = int(window / 2)
        pads[axis] = (start, window - 1 - start)
    else:
        pads[axis] = (window - 1, 0)
    padded = np.pad(a, pads, mode='constant', constant_values=fill_value)
    return _rolling_window(padded, window, axis)
rolling window with padding.
def check_dimensions(self, dataset):
    """Check that the feature types of this dataset are consistent with a
    timeseries-profile-orthogonal dataset.

    :param netCDF4.Dataset dataset: an open netCDF dataset.
    :return: list of check results, one per geophysical variable.
    """
    results = []
    required_ctx = TestCtx(
        BaseCheck.HIGH,
        'All geophysical variables are timeseries-profile-orthogonal feature types')
    # Fixed typo in the user-facing message: "variabel" -> "variable".
    message = '{} must be a valid profile-orthogonal feature type. It must have dimensions of (station, time, z).'
    message += ' If it\'s a single station, it must have dimensions (time, z). x and y dimensions must be scalar or have'
    message += ' dimensions (station). time must be a coordinate variable with dimension (time) and z must be a'
    message += ' coordinate variable with dimension (z).'
    for variable in util.get_geophysical_variables(dataset):
        # Either the single-station or the multi-station layout is valid.
        is_valid = util.is_timeseries_profile_single_station(dataset, variable)
        is_valid = is_valid or util.is_timeseries_profile_multi_station(dataset, variable)
        required_ctx.assert_true(
            is_valid,
            message.format(variable)
        )
        results.append(required_ctx.to_result())
    return results
Checks that the feature types of this dataset are consistent with a timeseries-profile-orthogonal dataset. :param netCDF4.Dataset dataset: An open netCDF dataset
def tabLayout(self):
    """Advance the child window's grid cursor, wrapping to a new row once a
    row holds Layout.BUTTONS_NUMBER buttons."""
    child = self.childWindow
    child.column += 1
    # Wrap to the next row when the current one is full.
    if child.column > Layout.BUTTONS_NUMBER:
        child.column = 0
        child.row += 1
For all tabs, specify the number of buttons in a row
def add_mongo_config_simple(app, connection_string, collection_name):
    """Configure the app to use MongoDB.

    :param app: Flask application.
    :param connection_string: "host:port:database", "port:database" or just
        "database" (default database: sacred).
    :param collection_name: name of the MongoDB collection.
    """
    config = {"host": "localhost", "port": 27017, "db": "sacred"}
    parts = connection_string.split(":")
    # Fill the config from the right: db, then port, then host.
    if parts and len(parts[-1]) > 0:
        config["db"] = parts[-1]
    if len(parts) > 1:
        config["port"] = int(parts[-2])
    if len(parts) > 2:
        config["host"] = parts[-3]
    app.config["data"] = PyMongoDataAccess.build_data_access(
        config["host"], config["port"], config["db"], collection_name)
Configure the app to use MongoDB. :param app: Flask Application :type app: Flask :param connection_string: in format host:port:database or database (default: sacred) :type connection_string: str :param collection_name: Name of the collection :type collection_name: str
def draw(self):
    """Render and draw the world and robots."""
    from calysto.display import display, clear_output
    canvas = self.render()
    # Replace the previous frame instead of appending below it.
    clear_output(wait=True)
    display(canvas)
Render and draw the world and robots.
def unnest_collection(collection, df_list):
    """Unnest a collection, extracting all its datasets and converting them
    to pandas DataFrames.

    :param collection: JSON-stat collection deserialized to a python object.
    :param df_list: list accumulating the converted datasets.
    :return: None; results are appended to ``df_list``.
    """
    for item in collection['link']['item']:
        item_class = item['class']
        if item_class == 'dataset':
            df_list.append(Dataset.read(item['href']).write('dataframe'))
        elif item_class == 'collection':
            # Recurse into nested collections.
            unnest_collection(request(item['href']), df_list)
Unnest collection structure extracting all its datasets and converting \ them to Pandas Dataframes. Args: collection (OrderedDict): data in JSON-stat format, previously \ deserialized to a python object by \ json.load() or json.loads(), df_list (list): list variable which will contain the converted \ datasets. Returns: Nothing.
def handle_existing_user(self, provider, user, access, info):
    """Login the user and redirect to the post-login URL."""
    login(self.request, user)
    redirect_to = self.get_login_redirect(provider, user, access)
    return redirect(redirect_to)
Login user and redirect.
def calcChebyshev(coeffs, validDomain, freqs):
    """Evaluate a Chebyshev approximation from a set of coefficients.

    Used for CASA bandpass reading. ``coeffs`` and ``freqs`` are numpy
    arrays; each coefficient row holds the RR coefficients in its first
    half and the LL coefficients in its second half.

    :return: (rr, ll) arrays of evaluated polynomials.
    """
    logger = logging.getLogger(__name__)
    # Width of the valid domain, used to rescale freqs into [-1, 1].
    domain = (validDomain[1] - validDomain[0])[0]
    bins = -1 + 2 * n.array([(freqs[i] - validDomain[0, i]) / domain
                             for i in range(len(freqs))])
    # Integer division: the original `/` produced a float that fails as a
    # slice index on Python 3.
    ncoeffs = len(coeffs[0]) // 2
    rr = n.array([n.polynomial.chebyshev.chebval(bins[i], coeffs[i, :ncoeffs])
                  for i in range(len(coeffs))])
    ll = n.array([n.polynomial.chebyshev.chebval(bins[i], coeffs[i, ncoeffs:])
                  for i in range(len(coeffs))])
    return rr, ll
Given a set of coefficients, this method evaluates a Chebyshev approximation. Used for CASA bandpass reading. input coeffs and freqs are numpy arrays
def install(*pkgs, **kwargs):
    """Install one or more packages via nix.

    :param pkgs: packages to install.
    :param bool attributes: treat ``pkgs`` as attribute paths rather than
        package names (default: False).
    :return: installed packages, e.g. ``['gcc-3.3.2']``.
    :rtype: list(str)

    .. code-block:: bash

        salt '*' nix.install package [package2 ...]
        salt '*' nix.install attributes=True attr.name [attr.name2 ...]
    """
    attributes = kwargs.get('attributes', False)
    if not pkgs:
        # Fixed message: typo "Plese" and wrong verb "upgrade".
        return "Please specify a package or packages to install"
    cmd = _quietnix()
    cmd.append('--install')
    # Reuse the flag computed above (was fetched from kwargs twice).
    if attributes:
        # Interleave '--attr' before every attribute path.
        cmd.extend(_zip_flatten('--attr', pkgs))
    else:
        cmd.extend(pkgs)
    out = _run(cmd)
    # nix reports installs on stderr as lines like: installing 'pkg-1.0'
    installs = list(itertools.chain.from_iterable(
        s.split()[1:] for s in out['stderr'].splitlines()
        if s.startswith('installing')
    ))
    return [_strip_quotes(s) for s in installs]
Installs a single or multiple packages via nix :type pkgs: list(str) :param pkgs: packages to update :param bool attributes: Pass the list of packages or single package as attribues, not package names. default: False :return: Installed packages. Example element: ``gcc-3.3.2`` :rtype: list(str) .. code-block:: bash salt '*' nix.install package [package2 ...] salt '*' nix.install attributes=True attr.name [attr.name2 ...]
def toFilename(url):
    """Derive a safe filename from a URL.

    Characters outside word/space/dot/dash are replaced by dashes, runs of
    dashes and whitespace are collapsed, and the result is lowercased and
    truncated to its last 200 characters.
    """
    parsed = urlparse(url)
    path = parsed.path
    if not path:
        # No path component: fall back to a timestamped name.
        path = "file_{}".format(int(time.time()))
    value = re.sub(r'[^\w\s\.\-]', '-', path).strip().lower()
    collapsed = re.sub(r'[-\s]+', '-', value)
    return collapsed.strip("-")[-200:]
gets url and returns filename
def compute_positive_association(self, visible, hidden_probs, hidden_states):
    """Compute positive associations between visible and hidden units.

    :param visible: visible units.
    :param hidden_probs: hidden units probabilities.
    :param hidden_states: hidden units states.
    :return: dot(visible.T, hidden), using states for 'bin' visible units
        and probabilities for 'gauss'; None for any other unit type.
    """
    unit_type = self.visible_unit_type
    if unit_type == 'bin':
        return tf.matmul(tf.transpose(visible), hidden_states)
    if unit_type == 'gauss':
        return tf.matmul(tf.transpose(visible), hidden_probs)
    return None
Compute positive associations between visible and hidden units. :param visible: visible units :param hidden_probs: hidden units probabilities :param hidden_states: hidden units states :return: positive association = dot(visible.T, hidden)
def FromArchive(cls, path, actions_dict, resources_dict, temp_dir=None):
    """Create a RecipeObject from a .ship archive.

    The archive should have been generated by ``iotile-ship -a <yaml>`` or
    via iotile-build's autobuild_shiparchive().

    Args:
        path (str): path to the .ship archive to load.
        actions_dict (dict): named RecipeActionObject types used to look up
            the steps listed in the recipe file.
        resources_dict (dict): named RecipeResource types used to look up
            the shared resources listed in the recipe file.
        temp_dir (str): optional directory in which to unpack the archive;
            defaults to a fresh system temporary directory.
    """
    if not path.endswith(".ship"):
        raise ArgumentError("Attempted to unpack a recipe archive from a file that did not end in .ship", path=path)
    # Strip the ".ship" suffix to get the recipe name.
    name = os.path.basename(path)[:-5]
    if temp_dir is None:
        temp_dir = tempfile.mkdtemp()
    extract_path = os.path.join(temp_dir, name)
    # Context manager closes the archive handle (the original leaked it).
    with zipfile.ZipFile(path, "r") as archive:
        archive.extractall(extract_path)
    recipe_yaml = os.path.join(extract_path, 'recipe_script.yaml')
    return cls.FromFile(recipe_yaml, actions_dict, resources_dict, name=name)
Create a RecipeObject from a .ship archive. This archive should have been generated from a previous call to iotile-ship -a <path to yaml file> or via iotile-build using autobuild_shiparchive(). Args: path (str): The path to the recipe file that we wish to load actions_dict (dict): A dictionary of named RecipeActionObject types that is used to look up all of the steps listed in the recipe file. resources_dict (dict): A dictionary of named RecipeResource types that is used to look up all of the shared resources listed in the recipe file. file_format (str): The file format of the recipe file. Currently we only support yaml. temp_dir (str): An optional temporary directory where this archive should be unpacked. Otherwise a system wide temporary directory is used.
def absent(name, driver=None):
    """Ensure that a Docker volume is absent.

    .. versionadded:: 2015.8.4
    .. versionchanged:: 2017.7.0
        Renamed from **docker.volume_absent** to **docker_volume.absent**.

    name
        Name of the volume.
    """
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
    if not _find_volume(name):
        # Nothing to do: already in the desired state.
        ret['result'] = True
        ret['comment'] = 'Volume \'{0}\' already absent'.format(name)
        return ret
    try:
        ret['changes']['removed'] = __salt__['docker.remove_volume'](name)
        ret['result'] = True
    except Exception as exc:
        ret['comment'] = ('Failed to remove volume \'{0}\': {1}'
                          .format(name, exc))
    return ret
Ensure that a volume is absent. .. versionadded:: 2015.8.4 .. versionchanged:: 2017.7.0 This state was renamed from **docker.volume_absent** to **docker_volume.absent** name Name of the volume Usage Examples: .. code-block:: yaml volume_foo: docker_volume.absent
def hl_canvas2table_box(self, canvas, tag):
    """Highlight all masks inside a user-drawn box on the table."""
    self.treeview.clear_selection()

    # Only act on the rectangle the user just drew; remove it afterwards.
    cobj = canvas.get_object_by_tag(tag)
    if cobj.kind != 'rectangle':
        return
    canvas.delete_object_by_tag(tag, redraw=False)

    # Clear any existing highlight overlay.
    if self.maskhltag:
        try:
            canvas.delete_object_by_tag(self.maskhltag, redraw=True)
        except Exception:
            pass

    try:
        obj = canvas.get_object_by_tag(self.masktag)
    except Exception:
        return
    if obj.kind != 'compound':
        return
    if len(self._maskobjs) == 0:
        return

    # Highlight every mask that overlaps the drawn box.
    for i, mobj in enumerate(self._maskobjs):
        mask1 = self._rgbtomask(mobj)
        rgbimage = mobj.get_image()
        mask2 = rgbimage.get_shape_mask(cobj)
        if np.any(mask1 & mask2):
            self._highlight_path(self._treepaths[i])
Highlight all masks inside user drawn box on table.
def get_keyword_query(self, **kw):
    """Generate a query from the given keywords.

    Only known catalog indexes make it into the generated query.

    :returns: catalog query
    :rtype: dict
    """
    query = dict()
    indexes = self.catalog.get_indexes()
    # items() works on both Python 2 and 3 (original used py2-only iteritems()).
    for k, v in kw.items():
        # Normalize common keys to their canonical index names.
        if k.lower() == "uid":
            k = "UID"
        if k.lower() == "portal_type":
            if v:
                v = _.to_list(v)
        if k not in indexes:
            logger.warn("Skipping unknown keyword parameter '%s=%s'" % (k, v))
            continue
        if v is None:
            logger.warn("Skip None value in kw parameter '%s=%s'" % (k, v))
            continue
        logger.debug("Adding '%s=%s' to query" % (k, v))
        query[k] = v
    return query
Generates a query from the given keywords. Only known indexes make it into the generated query. :returns: Catalog query :rtype: dict
def copy_data_ext(self, model, field, dest=None, idx=None, astype=None):
    """Retrieve a field of another model and store it as a field of self.

    :param model: name of the source model or group.
    :param field: name of the field to retrieve.
    :param dest: destination field name in self (defaults to ``field``).
    :param idx: idx of elements to access.
    :param astype: optional type cast.
    :return: None
    """
    if not dest:
        dest = field
    # The destination must not shadow declared states or algebraic vars.
    assert dest not in self._states + self._algebs
    self.__dict__[dest] = self.read_data_ext(model, field, idx, astype=astype)
    # Link element-wise only when every local element is covered.
    if idx is not None and len(idx) == self.n:
        self.link_to(model, idx, self.idx)
Retrieve the field of another model and store it as a field. :param model: name of the source model being a model name or a group name :param field: name of the field to retrieve :param dest: name of the destination field in ``self`` :param idx: idx of elements to access :param astype: type cast :type model: str :type field: str :type dest: str :type idx: list, matrix :type astype: None, list, matrix :return: None
def to_bed(call, sample, work_dir, calls, data):
    """Create a simplified BED file from caller-specific input.

    Returns the path to the flattened BED file, or None when no converter
    exists for the caller or no output was produced.
    """
    out_file = os.path.join(work_dir, "%s-%s-flat.bed" % (sample, call["variantcaller"]))
    if call.get("vrn_file") and not utils.file_uptodate(out_file, call["vrn_file"]):
        with file_transaction(data, out_file) as tx_out_file:
            convert_fn = CALLER_TO_BED.get(call["variantcaller"])
            if convert_fn:
                vrn_file = call["vrn_file"]
                # Optionally subset calls by support from companion callers.
                if call["variantcaller"] in SUBSET_BY_SUPPORT:
                    ecalls = [x for x in calls
                              if x["variantcaller"] in SUBSET_BY_SUPPORT[call["variantcaller"]]]
                    if len(ecalls) > 0:
                        vrn_file = _subset_by_support(call["vrn_file"], ecalls, data)
                convert_fn(vrn_file, call["variantcaller"], tx_out_file)
    if utils.file_exists(out_file):
        return out_file
Create a simplified BED file from caller specific input.
def vlog(self, msg, *args):
    """Log a message only when verbose mode is enabled."""
    if not self.verbose:
        return
    self.log(msg, *args)
Logs a message to stderr only if verbose is enabled.
def calc_sasa(dssp_df):
    """Summarize solvent accessibility from a DSSP result table.

    :param dssp_df: DataFrame with ``exposure_asa`` and ``exposure_rsa``
        columns, one row per residue.
    :return: dict with total SASA, mean relative exposure and residue count.
    """
    return {
        'ssb_sasa': dssp_df.exposure_asa.sum(),
        'ssb_mean_rel_exposed': dssp_df.exposure_rsa.mean(),
        'ssb_size': len(dssp_df),
    }
Calculation of SASA utilizing the DSSP program. DSSP must be installed for biopython to properly call it. Install using apt-get on Ubuntu or from: http://swift.cmbi.ru.nl/gv/dssp/ Input: PDB or CIF structure file Output: SASA (integer) of structure
def update(self, sensor, reading):
    """Update callback used by sensors to notify observers of changes.

    Recalculates every parent aggregate of the changed sensor.

    :param sensor: the sensor whose value has changed.
    :param reading: (timestamp, status, value) tuple as returned by
        sensor.read().
    """
    # Snapshot the parent list in case recalculate mutates the mapping.
    for parent in list(self._child_to_parents[sensor]):
        self.recalculate(parent, (sensor,))
Update callback used by sensors to notify observers of changes. Parameters ---------- sensor : :class:`katcp.Sensor` object The sensor whose value has changed. reading : (timestamp, status, value) tuple Sensor reading as would be returned by sensor.read()
def random_id(length):
    """Generate a random alphanumeric ID of the given length."""
    alphabet = string.ascii_letters + string.digits
    return "".join(random.choice(alphabet) for _ in range(length))
Generates a random ID of given length
def free_cache(ctx, *elts):
    """Free properties bound to the given cached elements.

    If no elements are given, clear the whole cache.
    """
    for elt in elts:
        # Unhashable elements are cached by id() in a separate mapping.
        if isinstance(elt, Hashable):
            cache, key = __STATIC_ELEMENTS_CACHE__, elt
        else:
            cache, key = __UNHASHABLE_ELTS_CACHE__, id(elt)
        if key in cache:
            del cache[key]
    if not elts:
        __STATIC_ELEMENTS_CACHE__.clear()
        __UNHASHABLE_ELTS_CACHE__.clear()
Free properties bound to input cached elts. If empty, free the whole cache.
def maybe_inspect_zip(models):
    """Detect if models is a list of protocolbuffer files or a ZIP file.

    If the latter, unzip it and return the list of protocolbuffer files
    that were inside.
    """
    if not is_zip_file(models):
        return models
    # More than one entry cannot be a single ZIP archive.
    if len(models) > 1:
        return models
    if len(models) < 1:
        raise AssertionError('No models at all')
    return zipfile.ZipFile(models[0]).namelist()
r''' Detect if models is a list of protocolbuffer files or a ZIP file. If the latter, then unzip it and return the list of protocolbuffer files that were inside.
def after_model_change(self, form, User, is_created):
    """Send password reset instructions if requested on user creation."""
    if is_created and form.notification.data is True:
        send_reset_password_instructions(User)
Send password instructions if desired.
def create_relationship(manager, handle_id, other_handle_id, rel_type):
    """Make a relationship from node to other node, depending on the nodes'
    meta types.

    :return: the created relationship.
    :raises exceptions.NoRelationshipPossible: if the meta type combination
        allows no relationship.
    """
    # Dispatch on the source node's meta type.
    creators = {
        'Location': create_location_relationship,
        'Logical': create_logical_relationship,
        'Relation': create_relation_relationship,
        'Physical': create_physical_relationship,
    }
    meta_type = get_node_meta_type(manager, handle_id)
    creator = creators.get(meta_type)
    if creator is not None:
        return creator(manager, handle_id, other_handle_id, rel_type)
    other_meta_type = get_node_meta_type(manager, other_handle_id)
    raise exceptions.NoRelationshipPossible(handle_id, meta_type,
                                            other_handle_id, other_meta_type,
                                            rel_type)
Makes a relationship from node to other_node depending on which meta_type the nodes are. Returns the relationship or raises NoRelationshipPossible exception.
def highlightBlock(self, string):
    """Highlight a block of text."""
    prev_data = self.currentBlock().previous().userData()
    # Restore (or clear) the lexer state saved by the previous block so
    # multi-line constructs highlight correctly.
    if prev_data is not None:
        self._lexer._saved_state_stack = prev_data.syntax_stack
    elif hasattr(self._lexer, '_saved_state_stack'):
        del self._lexer._saved_state_stack

    index = 0
    for token, text in self._lexer.get_tokens(string):
        length = len(text)
        self.setFormat(index, length, self._get_format(token))
        index += length

    # Persist the lexer state for the next block, then drop it off the lexer.
    if hasattr(self._lexer, '_saved_state_stack'):
        data = PygmentsBlockUserData(
            syntax_stack=self._lexer._saved_state_stack)
        self.currentBlock().setUserData(data)
        del self._lexer._saved_state_stack
Highlight a block of text.
def get_alt_lengths(self):
    """Return the longest length change of the variant per individual.

    Deletions are negative, SNPs 0 and insertions positive. A None entry
    means there is no variant in the interval for that individual.
    """
    out = []
    for i in six.moves.range(len(self.genotype)):
        valid_alt = self.get_alt_length(individual=i)
        # No valid alt allele for this individual -> no variant here.
        out.append(None if not valid_alt else max(valid_alt) - len(self.ref))
    return out
Returns the longest length of the variant. For deletions, return is negative, SNPs return 0, and insertions are +. None return corresponds to no variant in interval for specified individual
def beautify_file(self, path):
    """Beautify a bash script file (or stdin when path is '-').

    :return: truthy when an error occurred (or, in check-only mode, when
        the file would be changed).
    """
    if path == '-':
        # Stdin mode: beautify and echo the result to stdout.
        data = sys.stdin.read()
        result, error = self.beautify_string(data, '(stdin)')
        sys.stdout.write(result)
        return error
    data = self.read_file(path)
    result, error = self.beautify_string(data, path)
    if data != result:
        if self.check_only:
            # A needed change counts as an error in check-only mode.
            if not error:
                error = (result != data)
        else:
            if self.backup:
                self.write_file(path + '.bak', data)
            self.write_file(path, result)
    return error
Beautify bash script file.
def __cloudflare_list_zones(self, *, account, **kwargs):
    """List all zones registered in the CloudFlare system, following
    pagination.

    :param account: a CloudFlareAccount object.
    :param kwargs: extra arguments passed to the API endpoint.
    :return: list of zone dicts.
    """
    zones = []
    page = 1
    while True:
        kwargs['page'] = page
        response = self.__cloudflare_request(account=account, path='/zones', args=kwargs)
        zones += response['result']
        info = response['result_info']
        # Stop when pagination info is absent or the last page was fetched.
        if 'total_pages' not in info or page == info['total_pages']:
            break
        page += 1
    return zones
Helper function to list all zones registered in the CloudFlare system. Returns a `list` of the zones Args: account (:obj:`CloudFlareAccount`): A CloudFlare Account object **kwargs (`dict`): Extra arguments to pass to the API endpoint Returns: `list` of `dict`
def _get_vm_by_name(name, allDetails=False):
    """Look up a Proxmox VM by name.

    Proxmox identifies VMs by id, so this filters the VM listing by name.

    :return: the VM entry, or False when no VM with that name exists.
    """
    vms = get_resources_vms(includeConfig=allDetails)
    if name in vms:
        return vms[name]
    log.info('VM with name "%s" could not be found.', name)
    return False
Since Proxmox works based op id's rather than names as identifiers this requires some filtering to retrieve the required information.
def network_lopf(network, snapshots=None, solver_name="glpk", solver_io=None,
                 skip_pre=False, extra_functionality=None, solver_logfile=None,
                 solver_options=None, keep_files=False, formulation="angles",
                 ptdf_tolerance=0., free_memory=None, extra_postprocessing=None):
    """Linear optimal power flow for a group of snapshots.

    :param snapshots: snapshots to optimise (defaults to network.snapshots).
    :param solver_name: pyomo solver name, e.g. "glpk" or "gurobi".
    :param solver_io: solver input-output option, e.g. "python".
    :param skip_pre: skip topology/dependent-value preliminary steps.
    :param extra_functionality: callable(network, snapshots) run after model
        building, before solving.
    :param solver_logfile: optional solver logfile path.
    :param solver_options: extra options passed to the solver (default {}).
    :param keep_files: keep the files pyomo constructs (e.g. the .lp file).
    :param formulation: one of "angles", "cycles", "kirchhoff", "ptdf".
    :param ptdf_tolerance: PTDF entries below this value are ignored.
    :param free_memory: subset of {'pypsa', 'pyomo'} to stash/free data
        (default: empty set).
    :param extra_postprocessing: callable(network, snapshots, duals) run
        after solving.
    :return: None
    """
    # Avoid mutable default arguments ({} shared across calls).
    if solver_options is None:
        solver_options = {}
    if free_memory is None:
        free_memory = {}
    snapshots = _as_snapshots(network, snapshots)
    network_lopf_build_model(network, snapshots, skip_pre=skip_pre,
                             formulation=formulation, ptdf_tolerance=ptdf_tolerance)
    if extra_functionality is not None:
        extra_functionality(network, snapshots)
    network_lopf_prepare_solver(network, solver_name=solver_name,
                                solver_io=solver_io)
    return network_lopf_solve(network, snapshots, formulation=formulation,
                              solver_logfile=solver_logfile,
                              solver_options=solver_options,
                              keep_files=keep_files, free_memory=free_memory,
                              extra_postprocessing=extra_postprocessing)
Linear optimal power flow for a group of snapshots. Parameters ---------- snapshots : list or index slice A list of snapshots to optimise, must be a subset of network.snapshots, defaults to network.snapshots solver_name : string Must be a solver name that pyomo recognises and that is installed, e.g. "glpk", "gurobi" solver_io : string, default None Solver Input-Output option, e.g. "python" to use "gurobipy" for solver_name="gurobi" skip_pre: bool, default False Skip the preliminary steps of computing topology, calculating dependent values and finding bus controls. extra_functionality : callable function This function must take two arguments `extra_functionality(network,snapshots)` and is called after the model building is complete, but before it is sent to the solver. It allows the user to add/change constraints and add/change the objective function. solver_logfile : None|string If not None, sets the logfile option of the solver. solver_options : dictionary A dictionary with additional options that get passed to the solver. (e.g. {'threads':2} tells gurobi to use only 2 cpus) keep_files : bool, default False Keep the files that pyomo constructs from OPF problem construction, e.g. .lp file - useful for debugging formulation : string Formulation of the linear power flow equations to use; must be one of ["angles","cycles","kirchhoff","ptdf"] ptdf_tolerance : float Value below which PTDF entries are ignored free_memory : set, default {'pyomo'} Any subset of {'pypsa', 'pyomo'}. Allows to stash `pypsa` time-series data away while the solver runs (as a pickle to disk) and/or free `pyomo` data after the solution has been extracted. extra_postprocessing : callable function This function must take three arguments `extra_postprocessing(network,snapshots,duals)` and is called after the model has solved and the results are extracted. It allows the user to extract further information about the solution, such as additional shadow prices. Returns ------- None
def get_fitness(self, chromosome):
    """Get the fitness score for a chromosome, using the cached value if
    available."""
    dna = chromosome.dna
    fitness = self.fitness_cache.get(dna)
    if fitness is None:
        # Cache miss: evaluate once and memoize by DNA key.
        fitness = self.eval_fitness(chromosome)
        self.fitness_cache[dna] = fitness
    return fitness
Get the fitness score for a chromosome, using the cached value if available.
def run_shell(args: dict) -> int:
    """Run the shell sub command.

    :param args: parsed command-line arguments; if 'project_directory'
        is set, the command runs in batch mode instead of interactively.
    :return: exit code (0 on normal interactive exit).
    """
    if args.get('project_directory'):
        # An explicit project directory means non-interactive batch mode.
        return run_batch(args)

    cauldron_shell = CauldronShell()
    if in_project_directory():
        # Auto-open the project found in the current working directory.
        here = os.path.realpath(os.curdir)
        cauldron_shell.cmdqueue.append('open "{}"'.format(here))
    cauldron_shell.cmdloop()
    return 0
Run the shell sub command
def timeout(limit, handler):
    """Decorator ensuring the decorated function's run time does not
    exceed *limit* seconds.

    Uses SIGALRM, so it only works in the main thread on POSIX systems.

    :param limit: the time limit in seconds
    :type limit: int
    :param handler: called as ``handler(limit, f, args, kwargs)`` when the
        decorated function times out; in that case the wrapper returns None.
    :type handler: callable
    """
    import functools

    def wrapper(f):
        @functools.wraps(f)  # fix: preserve the wrapped function's metadata
        def wrapped_f(*args, **kwargs):
            old_handler = signal.getsignal(signal.SIGALRM)
            # NOTE(review): `timeout_handler` is a free name — presumably a
            # module-level SIGALRM handler that raises Timeout; confirm.
            signal.signal(signal.SIGALRM, timeout_handler)
            signal.alarm(limit)
            try:
                res = f(*args, **kwargs)
            except Timeout:
                handler(limit, f, args, kwargs)
            else:
                return res
            finally:
                # Always restore the previous handler and cancel the alarm.
                signal.signal(signal.SIGALRM, old_handler)
                signal.alarm(0)
        return wrapped_f
    return wrapper
A decorator ensuring that the decorated function's run time does not exceed the limit argument. :args limit: the time limit :type limit: int :args handler: the handler function called when the decorated function times out. :type handler: callable Example: >>>def timeout_handler(limit, f, *args, **kwargs): ... print "{func} call timed out after {lim}s.".format( ... func=f.__name__, lim=limit) ... >>>@timeout(limit=5, handler=timeout_handler) ... def work(foo, bar, baz="spam"): ... time.sleep(10) >>>work("foo", "bar", "baz") # time passes... work call timed out after 5s. >>>
def find_parent_id_for_component(self, component_id):
    """Given the ID of a component, returns the parent component's ID.

    :param string component_id: The ID of the component.
    :return: A tuple containing:
        * The type of the parent record; RESOURCE or RESOURCE_COMPONENT.
        * The ID of the parent record.
    :rtype tuple:
    """
    cursor = self.db.cursor()
    sql = "SELECT parentResourceComponentId FROM ResourcesComponents WHERE resourceComponentId=%s"
    count = cursor.execute(sql, (component_id,))
    if count > 0:
        # fix: fetchone() returns a row tuple; unpack the single column so
        # the caller gets the bare ID, matching the RESOURCE branch below.
        return (ArchivistsToolkitClient.RESOURCE_COMPONENT, cursor.fetchone()[0])
    # No parent component row: the parent is the resource itself.
    return (
        ArchivistsToolkitClient.RESOURCE,
        self.find_resource_id_for_component(component_id),
    )
Given the ID of a component, returns the parent component's ID. :param string component_id: The ID of the component. :return: A tuple containing: * The type of the parent record; valid values are ArchivesSpaceClient.RESOURCE and ArchivesSpaceClient.RESOURCE_COMPONENT. * The ID of the parent record. :rtype tuple:
def unify_mp(b, partition_name):
    """Unify all of the segment partitions for a parent partition, then
    run stats on the MPR file.
    """
    message = "MP coalesce {}".format(partition_name)
    # Track the coalesce under a progress section for reporting.
    with b.progress.start('coalesce_mp', 0, message=message) as ps:
        return b.unify_partition(partition_name, None, ps)
Unify all of the segment partitions for a parent partition, then run stats on the MPR file
def combine_ctrlpts_weights(ctrlpts, weights=None):
    """Multiply control points by the weights to generate weighted control points.

    Dimension agnostic: control points may have any dimension, weights are 1D.
    Passing ``weights=None`` uses a vector of 1.0s (useful for converting a
    B-Spline basis to a NURBS basis).

    :param ctrlpts: unweighted control points
    :param weights: weights vector, or None for all-ones
    :return: weighted control points (each point extended with its weight)
    :rtype: list
    """
    if weights is None:
        weights = [1.0] * len(ctrlpts)
    # Each output point is (w*x, w*y, ..., w).
    return [[float(coord * w) for coord in pt] + [float(w)]
            for pt, w in zip(ctrlpts, weights)]
Multiplies control points by the weights to generate weighted control points. This function is dimension agnostic, i.e. control points can be in any dimension but weights should be 1D. The ``weights`` function parameter can be set to None to let the function generate a weights vector composed of 1.0 values. This feature can be used to convert B-Spline basis to NURBS basis. :param ctrlpts: unweighted control points :type ctrlpts: list, tuple :param weights: weights vector; if set to None, a weights vector of 1.0s will be automatically generated :type weights: list, tuple or None :return: weighted control points :rtype: list
def dump_stats(self, pattern):
    """Dumps VM statistics.

    in pattern of type str
        The selection pattern; a bit similar to filename globbing.
    """
    if isinstance(pattern, basestring):
        self._call("dumpStats", in_p=[pattern])
    else:
        raise TypeError("pattern can only be an instance of type basestring")
Dumps VM statistics. in pattern of type str The selection pattern. A bit similar to filename globbing.
def up_alpha_beta(returns, factor_returns, **kwargs):
    """Computes alpha and beta for periods when the benchmark return is positive.

    See documentation for `alpha_beta`.

    :return: (alpha, beta) floats.
    """
    kwargs['function'] = alpha_beta_aligned
    return up(returns, factor_returns, **kwargs)
Computes alpha and beta for periods when the benchmark return is positive. Parameters ---------- see documentation for `alpha_beta`. Returns ------- float Alpha. float Beta.
def put(self, item, block=True, timeout=None):
    """Put an item into the queue.

    If the queue was created with a maxsize and is currently full, blocks
    the calling coroutine until another coroutine gets an item (unless
    ``block`` is False, in which case :class:`Full` is raised immediately).

    :param item: the object to put into the queue, any type
    :param block: whether to block if the queue is full (default True)
    :param timeout: max seconds to block; None waits indefinitely
    :raises Full: if full and ``block`` is False, or if ``timeout`` expires
    """
    if self.full():
        if not block:
            raise Full()
        current = compat.getcurrent()
        # Absolute wall-clock deadline, or None for "wait forever".
        waketime = None if timeout is None else time.time() + timeout
        if timeout is not None:
            scheduler.schedule_at(waketime, current)
        self._waiters.append((current, waketime))
        # Yield to the scheduler; resumed either by a get() or the timer.
        scheduler.state.mainloop.switch()
        if timeout is not None:
            # If the timer could not be removed it already fired: we timed out.
            if not scheduler._remove_timer(waketime, current):
                self._waiters.remove((current, waketime))
                raise Full()
    if self._waiters and not self.full():
        # Hand the freed slot to the next waiting coroutine.
        scheduler.schedule(self._waiters.popleft()[0])
    if not self._open_tasks:
        # First outstanding task: mark the queue as having unfinished work.
        self._jobs_done.clear()
    self._open_tasks += 1
    self._put(item)
put an item into the queue .. note:: if the queue was created with a `maxsize` and it is currently :meth:`full`, this method will block the calling coroutine until another coroutine :meth:`get`\ s an item. :param item: the object to put into the queue, can be any type :param block: whether to block if the queue is already :meth:`full` (default ``True``) :type block: bool :param timeout: the maximum time in seconds to block waiting. with the default of ``None``, it can wait indefinitely. this is unused if `block` is ``False``. :type timeout: int, float or None :raises: :class:`Full` if the queue is :meth:`full` and `block` is ``False``, or if `timeout` expires.
def _execfile(filename, globals, locals=None): mode = 'rb' if sys.version_info < (2, 7): mode += 'U' with open(filename, mode) as stream: script = stream.read() if locals is None: locals = globals code = compile(script, filename, 'exec') exec(code, globals, locals)
Python 3 implementation of execfile.
def get_ip_reports(self, ips):
    """Retrieves the most recent VT info for a set of ips.

    Args:
        ips: list of IPs.
    Returns:
        A dict with the IP as key and the VT report as value.
    """
    api_name = 'virustotal-ip-address-reports'
    # Resolve cache hits first; only the misses are re-requested.
    all_responses, ips = self._bulk_cache_lookup(api_name, ips)
    responses = self._request_reports("ip", ips, 'ip-address/report')
    for ip_addr, report in zip(ips, responses):
        if self._cache:
            self._cache.cache_value(api_name, ip_addr, report)
        all_responses[ip_addr] = report
    return all_responses
Retrieves the most recent VT info for a set of ips. Args: ips: list of IPs. Returns: A dict with the IP as key and the VT report as value.
def parse_mtl(mtl):
    """Parse a loaded MTL file.

    Parameters
    -------------
    mtl : str or bytes
      Data from an MTL file

    Returns
    ------------
    mtllibs : list of dict
      Each dict has keys: newmtl, map_Kd, Kd
    """
    if hasattr(mtl, 'decode'):
        mtl = mtl.decode('utf-8')

    materials = []
    current = None
    for raw_line in str(mtl).strip().splitlines():
        tokens = raw_line.strip().split()
        if len(tokens) < 2:
            # Blank line, comment-only, or a keyword with no value.
            continue
        keyword = tokens[0]
        if keyword == 'newmtl':
            # Starting a new material: flush the previous one, if any.
            if current:
                materials.append(current)
            current = {'newmtl': tokens[1], 'map_Kd': None, 'Kd': None}
        elif keyword == 'map_Kd':
            current[keyword] = tokens[1]
        elif keyword == 'Kd':
            current[keyword] = [float(v) for v in tokens[1:]]
    if current:
        materials.append(current)
    return materials
Parse a loaded MTL file. Parameters ------------- mtl : str or bytes Data from an MTL file Returns ------------ mtllibs : list of dict Each dict has keys: newmtl, map_Kd, Kd
def spam(self, tag=None, fromdate=None, todate=None):
    """Gets a total count of recipients who have marked your email as spam."""
    params = dict(tag=tag, fromdate=fromdate, todate=todate)
    return self.call("GET", "/stats/outbound/spam", **params)
Gets a total count of recipients who have marked your email as spam.
def in_download_archive(track):
    """Returns True if a track_id exists in the download archive."""
    global arguments
    if not arguments['--download-archive']:
        # Archiving disabled: nothing can be "already downloaded".
        return

    archive_filename = arguments.get('--download-archive')
    track_id = '{0}'.format(track['id'])
    try:
        # 'a+' creates the archive on first use; seek back to read it.
        with open(archive_filename, 'a+', encoding='utf-8') as archive:
            logger.debug('Contents of {0}:'.format(archive_filename))
            archive.seek(0)
            for line in archive:
                logger.debug('"' + line.strip() + '"')
                if line.strip() == track_id:
                    return True
    except IOError as ioe:
        logger.error('Error trying to read download archive...')
        logger.debug(ioe)
    return False
Returns True if a track_id exists in the download archive
def select_time(da, **indexer):
    """Select entries according to a time period.

    Parameters
    ----------
    da : xarray.DataArray
      Input data.
    **indexer : {dim: indexer, }, optional
      Time attribute and values over which to subset the array, e.g.
      season='DJF', month=1 or month=[6,7,8]. With no indexer, all
      values are returned unchanged.

    Returns
    -------
    xr.DataArray
      Selected input values.
    """
    if not indexer:
        return da
    key, val = indexer.popitem()
    # Match on the requested datetime accessor (season, month, ...).
    selection = getattr(da.time.dt, key).isin(val)
    return da.sel(time=selection).dropna(dim='time')
Select entries according to a time period. Parameters ---------- da : xarray.DataArray Input data. **indexer : {dim: indexer, }, optional Time attribute and values over which to subset the array. For example, use season='DJF' to select winter values, month=1 to select January, or month=[6,7,8] to select summer months. If not indexer is given, all values are considered. Returns ------- xr.DataArray Selected input values.
def get_image_code(self, id_code, access_token=None, user_id=None):
    """Get the image of a code, by its id.

    Optionally refreshes the stored credential token/user before the call.

    :raises CredentialsError: if the current credentials are invalid.
    """
    credential = self.req.credential
    if access_token:
        credential.set_token(access_token)
    if user_id:
        credential.set_user_id(user_id)
    if not self.check_credentials():
        raise CredentialsError('credentials invalid')
    return self.req.get('/Codes/' + id_code + '/export/png/url')
Get the image of a code, by its id
def data_url(content, mimetype=None):
    """Returns content encoded as base64 Data URI.

    :param content: bytes or str or Path
    :param mimetype: mimetype; guessed from the filename for Path input,
        defaults to 'application/octet-stream' otherwise.
    :return: str (ASCII only)

    .. seealso:: https://en.wikipedia.org/wiki/Data_URI_scheme
    """
    if isinstance(content, pathlib.Path):
        if not mimetype:
            mimetype = guess_type(content.name)[0]
        with content.open('rb') as fp:
            content = fp.read()
    elif isinstance(content, text_type):
        content = content.encode('utf8')
    encoded = b64encode(content).decode()
    return "data:{0};base64,{1}".format(
        mimetype or 'application/octet-stream', encoded)
Returns content encoded as base64 Data URI. :param content: bytes or str or Path :param mimetype: mimetype for :return: str object (consisting only of ASCII, though) .. seealso:: https://en.wikipedia.org/wiki/Data_URI_scheme
def get_composition_mdata():
    """Return default mdata map for Composition."""
    def _display_text(text):
        # Shared shape for the DisplayText sub-maps below.
        return {
            'text': text,
            'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
            'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
            'formatTypeId': str(DEFAULT_FORMAT_TYPE),
        }

    return {
        'children': {
            'element_label': _display_text('children'),
            'instructions': _display_text('accepts an osid.id.Id[] object'),
            'required': False,
            'read_only': False,
            'linked': False,
            'array': True,
            'default_id_values': [],
            'syntax': 'ID',
            'id_set': [],
        },
    }
Return default mdata map for Composition
def run(plugin_name, *args, **kwargs):
    """Run a specific plugin.

    :param plugin_name: filename of the plugin inside the configured plugin_dir
    :param args: extra command-line arguments passed to the plugin
    :return: dict with 'stdout', 'stderr' and 'return_code'
    :raises ValueError: if the plugin file does not exist
    """
    plugindir = nago.settings.get_option('plugin_dir')
    plugin_path = plugindir + "/" + plugin_name
    if not os.path.isfile(plugin_path):
        raise ValueError("Plugin %s not found" % plugin_name)

    proc = subprocess.Popen(
        [plugin_path] + list(args),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    stdout, stderr = proc.communicate('through stdin to stdout')
    return {
        'stdout': stdout,
        'stderr': stderr,
        'return_code': proc.returncode,
    }
Run a specific plugin
def stop(self):
    """Stop listening.

    Clears the running flag and cancels the pending sleep task, if any.
    """
    self._running = False
    task = self._sleep_task
    if task:
        task.cancel()
        self._sleep_task = None
Stop listening.
def GameTypeEnum(ctx):
    """Game Type Enumeration."""
    mapping = dict(
        RM=0,
        Regicide=1,
        DM=2,
        Scenario=3,
        Campaign=4,
        KingOfTheHill=5,
        WonderRace=6,
        DefendTheWonder=7,
        TurboRandom=8,
    )
    return Enum(ctx, **mapping)
Game Type Enumeration.
def align(self, inputwords, outputwords):
    """For each inputword, provides the index of the outputword.

    Returns a list parallel to *inputwords*; entries are indices into
    *outputwords*, or None where no match was found at the cursor.
    """
    alignment = []
    cursor = 0
    n_out = len(outputwords)
    for word in inputwords:
        if cursor < n_out and outputwords[cursor] == word:
            # Exact match at the cursor.
            alignment.append(cursor)
            cursor += 1
        elif cursor + 1 < n_out and outputwords[cursor + 1] == word:
            # Match one position ahead: skip over the inserted output word.
            alignment.append(cursor + 1)
            cursor += 2
        else:
            alignment.append(None)
            cursor += 1
    return alignment
For each inputword, provides the index of the outputword
def inception_v3_parameters(weight_decay=0.00004, stddev=0.1, batch_norm_decay=0.9997, batch_norm_epsilon=0.001):
    """Yields the scope with the default parameters for inception_v3.

    Args:
      weight_decay: the weight decay for weights variables.
      stddev: standard deviation of the truncated guassian weight distribution.
      batch_norm_decay: decay for the moving average of batch_norm momentums.
      batch_norm_epsilon: small float added to variance to avoid dividing by zero.

    Yields:
      a arg_scope with the parameters needed for inception_v3.
    """
    # Weight decay applies to both conv and fully-connected weights.
    with scopes.arg_scope([ops.conv2d, ops.fc], weight_decay=weight_decay):
        # Conv layers additionally get truncated-normal init, ReLU and batch norm.
        with scopes.arg_scope([ops.conv2d], stddev=stddev, activation=tf.nn.relu, batch_norm_params={'decay': batch_norm_decay, 'epsilon': batch_norm_epsilon}) as arg_scope:
            # NOTE(review): this function is a generator ('yield'); presumably it
            # is decorated with @contextlib.contextmanager at the def site — confirm.
            yield arg_scope
Yields the scope with the default parameters for inception_v3. Args: weight_decay: the weight decay for weights variables. stddev: standard deviation of the truncated guassian weight distribution. batch_norm_decay: decay for the moving average of batch_norm momentums. batch_norm_epsilon: small float added to variance to avoid dividing by zero. Yields: a arg_scope with the parameters needed for inception_v3.
def get_registered(option_hooks=None, event_hooks=None, command_hooks=None,
                   root_access=None, task_active=True):
    """Returns registered plugins matching the given filters.

    `option_hooks` / `event_hooks` / `command_hooks`
        Booleans to include or exclude plugins with the respective hooks;
        None leaves the filter inactive.
    `root_access`
        Boolean to include or exclude root plugins; None disables filter.
    `task_active`
        Set to False to not filter by task-based plugins.

    Returns list of ``Plugin`` instances.
    """
    plugins = []
    for _, (plugin, type_info) in _registered:
        if task_active:
            # Skip plugins disabled for the active task.
            if type_info.get('disabled'):
                continue
        elif plugin.options or plugin.task_only:
            # No active task: skip plugins that need one.
            continue
        # `x is not None` instead of `not x is None` (PEP 8); guard chains
        # flattened into single-condition skips.
        if option_hooks is not None and option_hooks != bool(type_info.get('option')):
            continue
        if event_hooks is not None and event_hooks != bool(type_info.get('event')):
            continue
        if command_hooks is not None and command_hooks != bool(type_info.get('command')):
            continue
        if root_access is not None and root_access != plugin.needs_root:
            continue
        plugins.append(plugin)
    return plugins
Returns a generator of registered plugins matching filters. `option_hooks` Boolean to include or exclude plugins using option hooks. `event_hooks` Boolean to include or exclude task event plugins. `command_hooks` Boolean to include or exclude command plugins. `root_access` Boolean to include or exclude root plugins. `task_active` Set to ``False`` to not filter by task-based plugins. Returns list of ``Plugin`` instances.
def shortDescription(self):
    """Overrides property from Event base class.

    Returns the class description's shortDescription, falling back to its
    full description, or '' when no class description is attached.
    """
    cd = getattr(self, 'classDescription', None)
    if not cd:
        return ''
    short = getattr(cd, 'shortDescription', '')
    return short or getattr(cd, 'description', '')
Overrides property from Event base class.
def handler_for(obj):
    """Return the handler for the object type.

    First matches by instance, then by subclass when *obj* is itself a
    class; returns None implicitly when nothing matches.
    """
    for registered_type, handler in handlers.items():
        if isinstance(obj, registered_type):
            return handler
    try:
        for registered_type, handler in handlers.items():
            if issubclass(obj, registered_type):
                return handler
    except TypeError:
        # obj is not a class; no subclass-based handler applies.
        pass
return the handler for the object type
def discard(self, element):
    """Remove element from the RangeSet if it is a member.

    If the element is not a member, or cannot be interpreted as an
    integer, do nothing.
    """
    try:
        value = int(element)
    except ValueError:
        # Non-integer elements can never be members; silently ignore.
        return
    set.discard(self, value)
Remove element from the RangeSet if it is a member. If the element is not a member, do nothing.
def is_russian(self):
    """Checks if the file path is Russian.

    :return: True iff more than half of the reference Russian characters
        appear in the document name.
    """
    matches = sum(1 for char in RUSSIAN_CHARS if char in self.name)
    return matches > len(RUSSIAN_CHARS) / 2.0
Checks if the file path is Russian :return: True iff the document has a Russian name
def sync_db():
    """Runs the django syncdb command for every site/settings combination.

    Fabric task (Python 2): cds into the deployed sitesettings package and
    runs ``django-admin.py syncdb`` under the per-site user, inside the
    project's virtualenv.
    """
    # Work from the deployed sitesettings directory of this project version.
    with cd('/'.join([deployment_root(),'env',env.project_fullname,'project',env.project_package_name,'sitesettings'])):
        venv = '/'.join([deployment_root(),'env',env.project_fullname,'bin','activate'])
        sites = _get_django_sites()
        site_ids = sites.keys()
        site_ids.sort()
        for site in site_ids:
            # One syncdb run per sitesettings module per site.
            for settings_file in _sitesettings_files():
                site_settings = '.'.join([env.project_package_name,'sitesettings',settings_file.replace('.py','')])
                if env.verbosity:
                    print " * django-admin.py syncdb --noinput --settings=%s"% site_settings
                # Run inside the virtualenv as the dedicated site user.
                output = sudo(' '.join(['source',venv,'&&',"django-admin.py syncdb --noinput --settings=%s"% site_settings]), user='site_%s'% site)
                if env.verbosity:
                    print output
Runs the django syncdb command
def _set_repo_option(repo, option): if not option: return opt = option.split('=') if len(opt) != 2: return if opt[0] == 'trusted': repo['trusted'] = opt[1] == 'yes' else: repo[opt[0]] = opt[1]
Set the option to repo
def verify_axis_labels(self, expected, actual, source_name):
    """Verify that axis labels for a given source are as expected.

    Parameters
    ----------
    expected : tuple
        Expected axis labels (strings).
    actual : tuple or None
        Actual axis labels, or None if they could not be determined.
    source_name : str
        Name of the source; results are cached per source so each check
        runs only once.

    Notes
    -----
    Logs a warning when ``actual`` is None; raises
    :class:`AxisLabelsMismatchError` on other mismatches.
    """
    # Lazily create the per-source "already checked" cache.
    if not getattr(self, '_checked_axis_labels', False):
        self._checked_axis_labels = defaultdict(bool)
    if not self._checked_axis_labels[source_name]:
        if actual is None:
            # Can't verify: warn once rather than fail.
            log.warning("%s instance could not verify (missing) axis " "expected %s, got None", self.__class__.__name__, expected)
        else:
            if expected != actual:
                raise AxisLabelsMismatchError("{} expected axis labels " "{}, got {} instead".format( self.__class__.__name__, expected, actual))
        # Mark this source as checked so the check is not repeated.
        self._checked_axis_labels[source_name] = True
Verify that axis labels for a given source are as expected. Parameters ---------- expected : tuple A tuple of strings representing the expected axis labels. actual : tuple or None A tuple of strings representing the actual axis labels, or `None` if they could not be determined. source_name : str The name of the source being checked. Used for caching the results of checks so that the check is only performed once. Notes ----- Logs a warning in case of `actual=None`, raises an error on other mismatches.
def _handle_iorder(self, state):
    """Take a state and apply the iorder system.

    When ``state_auto_order`` is enabled, appends an incrementing
    ``{'order': n}`` argument to every state declaration that does not
    already carry an explicit ``order``, so states run in file order.
    """
    if self.opts['state_auto_order']:
        for name in state:
            for s_dec in state[name]:
                # Only real declaration names with list-style args qualify.
                if not isinstance(s_dec, six.string_types):
                    continue
                if not isinstance(state[name], dict):
                    continue
                if not isinstance(state[name][s_dec], list):
                    continue
                found = False
                if s_dec.startswith('_'):
                    # Private/internal keys are never ordered.
                    continue
                for arg in state[name][s_dec]:
                    # An existing non-empty dict arg keyed 'order' wins.
                    if isinstance(arg, dict):
                        if arg:
                            if next(six.iterkeys(arg)) == 'order':
                                found = True
                if not found:
                    if not isinstance(state[name][s_dec], list):
                        continue
                    # No explicit order: assign the next sequential one.
                    state[name][s_dec].append( {'order': self.iorder} )
                    self.iorder += 1
    return state
Take a state and apply the iorder system
def get_statements_noprior(self):
    """Return a list of all non-prior Statements in a single list.

    Returns
    -------
    stmts : list
        All the Statements in the model, excluding the 'prior' entry.
    """
    return [stmt
            for key, stmt_list in self.stmts.items() if key != 'prior'
            for stmt in stmt_list]
Return a list of all non-prior Statements in a single list. Returns ------- stmts : list[indra.statements.Statement] A list of all the INDRA Statements in the model (excluding the prior).
def create_fw_db(self, fw_id, fw_name, tenant_id):
    """Create FW dict and persist it via ``update_fw_dict``."""
    self.update_fw_dict({
        'fw_id': fw_id,
        'name': fw_name,
        'tenant_id': tenant_id,
    })
Create FW dict.
def build_dictionary(self):
    """Return a dictionary of characterIds to their defining tags.

    :raises ValueError: if two definition tags share a characterId.
    """
    by_id = {}
    for tag in self.all_tags_of_type(DefinitionTag, recurse_into_sprites=False):
        if tag.characterId in by_id:
            raise ValueError('illegal redefinition of character')
        by_id[tag.characterId] = tag
    return by_id
Return a dictionary of characterIds to their defining tags.
def virtualchain_set_opfields(op, **fields):
    """Pass along virtualchain-reserved fields to a virtualchain operation.

    Only reserved fields are copied onto *op*; any other field triggers a
    warning and is dropped. Returns *op* for convenience.
    """
    # Warn about every unsupported field first (matching original order)...
    unsupported = [f for f in fields if f not in indexer.RESERVED_KEYS]
    for f in unsupported:
        log.warning("Unsupported virtualchain field '%s'" % f)
    # ...then copy the reserved ones through.
    for f, value in fields.items():
        if f in indexer.RESERVED_KEYS:
            op[f] = value
    return op
Pass along virtualchain-reserved fields to a virtualchain operation. This layer of indirection is meant to help with future compatibility, so virtualchain implementations do not try to set operation fields directly.
def add(self, entity):
    """Adds the given entity to this cache.

    The entity is appended only when the newness check passes.
    """
    if self.__check_new(entity):
        self.__entities.append(entity)
Adds the given entity to this cache. :param entity: Entity to add. :type entity: Object implementing :class:`everest.interfaces.IEntity`. :raises ValueError: If the ID of the entity to add is ``None`` (unless the `allow_none_id` constructor argument was set).
def access_token(self):
    """Create an OAuth access token for an authorized client.

    Defaults to /access_token. Invoked by client applications.
    Returns the urlencoded token and token secret.
    """
    token = generate_token(length=self.access_token_length[1])
    token_secret = generate_token(self.secret_length)
    client_key = request.oauth.client_key
    self.save_access_token(client_key, token,
                           request.oauth.resource_owner_key,
                           secret=token_secret)
    return urlencode([(u'oauth_token', token),
                      (u'oauth_token_secret', token_secret)])
Create an OAuth access token for an authorized client. Defaults to /access_token. Invoked by client applications.
def _extract_centerdistance(image, mask = slice(None), voxelspacing = None): image = numpy.array(image, copy=False) if None == voxelspacing: voxelspacing = [1.] * image.ndim centers = [(x - 1) / 2. for x in image.shape] indices = numpy.indices(image.shape, dtype=numpy.float) for dim_indices, c, vs in zip(indices, centers, voxelspacing): dim_indices -= c dim_indices *= vs return numpy.sqrt(numpy.sum(numpy.square(indices), 0))[mask].ravel()
Internal, single-image version of `centerdistance`.
def reconfigure_log_level(self):
    """Reapply the configured log level to stream handlers and return a logger."""
    if Global.LOGGER:
        Global.LOGGER.debug('reconfiguring logger level')
    # Only plain StreamHandlers (exact type) are retargeted.
    for handler in self._logger_instance.handlers:
        if type(handler) is logging.StreamHandler:
            handler.level = Global.CONFIG_MANAGER.log_level
    return self.get_logger()
Returns a new standard logger instance
def getParameter(self, name):
    """Get the parameter with the corresponding name.

    Args:
        name: Name of the parameter to be found.

    Raises:
        TypeError: if the specified parameter does not exist.
    """
    def _fetch():
        # Wrap the underlying implementation object in the public type.
        return Parameter(self._impl.getParameter(name))

    return lock_and_call(_fetch, self._lock)
Get the parameter with the corresponding name. Args: name: Name of the parameter to be found. Raises: TypeError: if the specified parameter does not exist.
def mtr_tr_dense(sz):
    """Series of machine translation models.

    All models are trained on sequences of 256 tokens
    (e.g. translate_enfr_wmt32k_packed; 154000 steps = 3 epochs).

    Args:
      sz: an integer scaling exponent
    Returns:
      a hparams
    """
    scale = 2 ** sz
    hparams = mtf_bitransformer_base()
    hparams.d_model = 1024
    hparams.max_length = 256
    hparams.batch_size = 128
    # Feed-forward width and head count grow with the scale factor.
    hparams.d_ff = int(4096 * scale)
    hparams.d_kv = 128
    hparams.encoder_num_heads = int(8 * scale)
    hparams.decoder_num_heads = int(8 * scale)
    hparams.learning_rate_decay_steps = 51400
    hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model"
    hparams.mesh_shape = "batch:32"
    hparams.label_smoothing = 0.1
    dropout = 0.1
    hparams.layer_prepostprocess_dropout = dropout
    hparams.attention_dropout = dropout
    hparams.relu_dropout = dropout
    return hparams
Series of machine translation models. All models are trained on sequences of 256 tokens. You can use the dataset translate_enfr_wmt32k_packed. 154000 steps = 3 epochs. Args: sz: an integer Returns: a hparams
def get_conversations(self):
    """Returns list of Conversation objects built from the raw payload."""
    return [Conversation(c) for c in self.data["data"]]
Returns list of Conversation objects
def add_url_rule(self, host, rule_string, endpoint, **options):
    """Add a url rule to the app instance.

    The url rule is the same as with Flask/Werkzeug apps.

    :param host: the matched hostname, e.g. "www.python.org"
    :param rule_string: the matched path pattern, e.g. "/news/<int:id>"
    :param endpoint: the endpoint name used as a dispatching key
    """
    new_rule = Rule(rule_string, host=host, endpoint=endpoint, **options)
    self.url_map.add(new_rule)
Add a url rule to the app instance. The url rule is the same with Flask apps and other Werkzeug apps. :param host: the matched hostname. e.g. "www.python.org" :param rule_string: the matched path pattern. e.g. "/news/<int:id>" :param endpoint: the endpoint name as a dispatching key such as the qualified name of the object.
def fit(self, X, y=None, **kwargs):
    """Fit the PCA transformer and stash the fitted components.

    Parameters
    ----------
    X : ndarray or DataFrame of shape n x m
        A matrix of n instances with m features.
    y : ndarray or Series of length n
        Target or class values (unused by PCA itself).

    Returns
    -------
    self : visualizer
        Returns self for use in Pipelines.
    """
    super(PCADecomposition, self).fit(X=X, y=y, **kwargs)
    pipeline = self.pca_transformer
    pipeline.fit(X)
    # Expose the fitted component matrix for downstream drawing.
    self.pca_components_ = pipeline.named_steps['pca'].components_
    return self
Fits the PCA transformer, transforms the data in X, then draws the decomposition in either 2D or 3D space as a scatter plot. Parameters ---------- X : ndarray or DataFrame of shape n x m A matrix of n instances with m features. y : ndarray or Series of length n An array or series of target or class values. Returns ------- self : visualizer Returns self for use in Pipelines
def make_fitness(function, greater_is_better):
    """Make a fitness measure, a metric scoring the quality of a program's fit.

    Parameters
    ----------
    function : callable
        ``function(y, y_pred, sample_weight)`` returning a float.
    greater_is_better : bool
        Whether a higher value from `function` indicates a better fit.
    """
    if not isinstance(greater_is_better, bool):
        raise ValueError('greater_is_better must be bool, got %s'
                         % type(greater_is_better))
    if function.__code__.co_argcount != 3:
        raise ValueError('function requires 3 arguments (y, y_pred, w),'
                         ' got %d.' % function.__code__.co_argcount)
    # Probe with tiny arrays to confirm a numeric return.
    probe = function(np.array([1, 1]), np.array([2, 2]), np.array([1, 1]))
    if not isinstance(probe, numbers.Number):
        raise ValueError('function must return a numeric.')
    return _Fitness(function, greater_is_better)
Make a fitness measure, a metric scoring the quality of a program's fit. This factory function creates a fitness measure object which measures the quality of a program's fit and thus its likelihood to undergo genetic operations into the next generation. The resulting object is able to be called with NumPy vectorized arguments and return a resulting floating point score quantifying the quality of the program's representation of the true relationship. Parameters ---------- function : callable A function with signature function(y, y_pred, sample_weight) that returns a floating point number. Where `y` is the input target y vector, `y_pred` is the predicted values from the genetic program, and sample_weight is the sample_weight vector. greater_is_better : bool Whether a higher value from `function` indicates a better fit. In general this would be False for metrics indicating the magnitude of the error, and True for metrics indicating the quality of fit.
def eval(self):
    """Returns a filename to be used for script output.

    Precedence: explicit magic value, then the generated pattern when no
    filename is set, otherwise the stored path.
    """
    if self.magic:
        return self.magic
    if self.filename:
        return self.path
    return file_pattern.format(self.alias, self.ext)
Returns a filename to be used for script output.
def _receive(self):
    """Read and build one SBP message from the stream.

    Returns the (possibly dispatched) message, or None on EOF, an
    unexpected preamble byte, or a CRC mismatch.
    """
    preamble = self._read(1)
    if not preamble:
        # EOF / nothing available.
        return None
    elif ord(preamble) != SBP_PREAMBLE:
        # Not the start of a frame: skip this byte.
        if self._verbose:
            print("Host Side Unhandled byte: 0x%02x" % ord(preamble))
        return None
    # Header: u16 type, u16 sender, u8 payload length (little-endian).
    hdr = self._readall(5)
    msg_crc = crc16(hdr)
    msg_type, sender, msg_len = struct.unpack("<HHB", hdr)
    data = self._readall(msg_len)
    # CRC covers header + payload (preamble excluded).
    msg_crc = crc16(data, msg_crc)
    crc = self._readall(2)
    crc, = struct.unpack("<H", crc)
    if crc != msg_crc:
        if self._verbose:
            print("crc mismatch: 0x%04X 0x%04X" % (msg_crc, crc))
        return None
    msg = SBP(msg_type, sender, msg_len, data, crc)
    try:
        # Let registered handlers refine the raw message; best-effort.
        msg = self._dispatch(msg)
    except Exception as exc:
        warnings.warn("SBP dispatch error: %s" % (exc,))
    return msg
Read and build SBP message.
def list_nodes():
    """Return a list of registered VMs.

    CLI Example:

    .. code-block:: bash

        salt '*' vboxmanage.list_nodes
    """
    ret = {}
    for name, info in list_nodes_full().items():
        ret[name] = {
            'id': info['UUID'],
            'image': info['Guest OS'],
            'name': info['Name'],
            'state': None,
            'private_ips': [],
            'public_ips': [],
            'size': '{0} RAM, {1} CPU'.format(
                info['Memory size'],
                info['Number of CPUs'],
            ),
        }
    return ret
Return a list of registered VMs CLI Example: .. code-block:: bash salt '*' vboxmanage.list_nodes
async def connect_to_endpoints(self, *endpoints: ConnectionConfig) -> None:
    """Connect to the given endpoints and await until all connections are
    established.

    Raises whatever ``_throw_if_already_connected`` raises for endpoints
    that are already connected.
    """
    self._throw_if_already_connected(*endpoints)
    # The explicit ``loop=`` argument to asyncio.gather was deprecated in
    # Python 3.8 and removed in 3.10; inside a coroutine, gather always
    # uses the running event loop.
    await asyncio.gather(
        *(self._await_connect_to_endpoint(endpoint) for endpoint in endpoints)
    )
Connect to the given endpoints and await until all connections are established.