code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def islice(self, start=None, stop=None, reverse=False):
    """Return an iterator over the slice ``self[start:stop]``.

    `start` (inclusive) and `stop` (exclusive) default to None, which
    means the beginning and the end of the list.  When `reverse` is
    True, values are yielded in reverse order.
    """
    _len = self._len
    # Empty list: nothing to iterate.
    if not _len:
        return iter(())
    # Normalize None/negative bounds via the standard slice protocol.
    start, stop, step = self._slice(slice(start, stop))
    if start >= stop:
        return iter(())
    _pos = self._pos
    # Translate flat indices into (sublist, offset) coordinates.
    min_pos, min_idx = _pos(start)
    if stop == _len:
        # stop == length has no valid position; point one past the last item.
        max_pos = len(self._lists) - 1
        max_idx = len(self._lists[-1])
    else:
        max_pos, max_idx = _pos(stop)
    return self._islice(min_pos, min_idx, max_pos, max_idx, reverse)
Returns an iterator that slices `self` from `start` to `stop` index, inclusive and exclusive respectively. When `reverse` is `True`, values are yielded from the iterator in reverse order. Both `start` and `stop` default to `None` which is automatically inclusive of the beginning and end.
def resolve(self, ref, document=None):
    """Resolve the URI `ref` against the current resolution scope.

    If the resolved value is itself a ``{'$ref': ...}`` mapping,
    resolution recurses until something that is not a reference is
    reached.  Circular references raise :class:`SchemaError`.

    :param str ref: URI to resolve.
    :param dict document: optional schema to resolve the URI against
        (only the fragment part of the URI is used in that case).
    :returns: a tuple of the final resolved URI and the resolved value.
    :raises SchemaError: on resolution failure or circular references.
    """
    try:
        url = self._urljoin_cache(self.resolution_scope, ref)
        if document is None:
            resolved = self._remote_cache(url)
        else:
            _, fragment = urldefrag(url)
            resolved = self.resolve_fragment(document, fragment)
    except jsonschema.RefResolutionError as e:
        # Re-raise as SchemaError, annotated with the scope stack for context.
        message = e.args[0]
        if self._scopes_stack:
            message = '{} (from {})'.format(
                message, self._format_stack(self._scopes_stack))
        raise SchemaError(message)
    if isinstance(resolved, dict) and '$ref' in resolved:
        # A URL already on the scope stack means we looped back to it.
        if url in self._scopes_stack:
            raise SchemaError(
                'Circular reference in schema: {}'.format(
                    self._format_stack(self._scopes_stack + [url])))
        try:
            self.push_scope(url)
            return self.resolve(resolved['$ref'])
        finally:
            self.pop_scope()
    else:
        return url, resolved
Resolve a fragment within the schema. If the resolved value contains a $ref, it will attempt to resolve that as well, until it gets something that is not a reference. Circular references will raise a SchemaError. :param str ref: URI to resolve. :param dict document: Optional schema in which to resolve the URI. :returns: a tuple of the final, resolved URI (after any recursion) and resolved value in the schema that the URI references. :raises SchemaError:
def _validate_mandatory_keys(mandatory, validated, data, to_validate): errors = [] for key, sub_schema in mandatory.items(): if key not in data: errors.append('missing key: %r' % (key,)) continue try: validated[key] = sub_schema(data[key]) except NotValid as ex: errors.extend(['%r: %s' % (key, arg) for arg in ex.args]) to_validate.remove(key) return errors
Validate the mandatory keys.
async def unignore_all(self, ctx):
    """Unignore every text channel in the invoking server.

    Delegates to the ``unignore`` command with all of the server's text
    channels, so that command's permission checks apply.
    """
    # Only plain text channels; voice and other channel types are skipped.
    channels = [c for c in ctx.message.server.channels if c.type is discord.ChannelType.text]
    await ctx.invoke(self.unignore, *channels)
Unignores all channels in this server from being processed. To use this command you must have the Manage Channels permission or have the Bot Admin role.
def param_describe(params, quant=95, axis=0):
    """Summarize bootstrapped parameters.

    Returns the mean together with the lower and upper bounds of the
    `quant` percentile range, all taken along `axis`.
    """
    mean_params = np.mean(params, axis=axis)
    lo, up = perc(quant)
    lower = np.percentile(params, lo, axis=axis)
    upper = np.percentile(params, up, axis=axis)
    return mean_params, lower, upper
Get mean + quantile range from bootstrapped params.
def appendDatastore(self, store):
    """Append datastore `store` to this collection.

    :raises TypeError: when `store` is not a Datastore instance.
    """
    if isinstance(store, Datastore):
        self._stores.append(store)
    else:
        raise TypeError("stores must be of type %s" % Datastore)
Appends datastore `store` to this collection.
def chi_perp_from_mass1_mass2_xi2(mass1, mass2, xi2):
    """Return the in-plane spin computed from mass1, mass2, and xi2
    (per the surrounding docs, for the secondary mass)."""
    q = q_from_mass1_mass2(mass1, mass2)
    primary_coeff = 2 + 3 * q / 2
    secondary_coeff = 2 + 3 / (2 * q)
    # Same evaluation order as the original to keep identical rounding.
    return q ** 2 * secondary_coeff / primary_coeff * xi2
Returns the in-plane spin from mass1, mass2, and xi2 for the secondary mass.
def tail(self, path, tail_length=1024, append=False):
    """Yield the end of the file at `path` - default 1KB.

    :param path: path to read
    :param tail_length: bytes to read from the end of the file; must be
        positive and no larger than the HDFS block size
    :param append: currently not implemented
    :returns: generator yielding strings
    :raises InvalidInputException: on a missing path or an out-of-range
        ``tail_length``
    """
    if not path:
        raise InvalidInputException("tail: no path given")
    # The tail may not span more than one block.
    block_size = self.serverdefaults()['blockSize']
    if tail_length > block_size:
        raise InvalidInputException("tail: currently supports length up to the block size (%d)" % (block_size,))
    if tail_length <= 0:
        raise InvalidInputException("tail: tail_length cannot be less than or equal to zero")
    processor = lambda path, node: self._handle_tail(path, node, tail_length, append)
    for item in self._find_items([path], processor, include_toplevel=True,
                                 include_children=False, recurse=False):
        if item:
            yield item
Show the end of the file - default 1KB, supports up to the Hadoop block size. :param path: Path to read :type path: string :param tail_length: The length to read from the end of the file - default 1KB, up to block size. :type tail_length: int :param append: Currently not implemented :type append: bool :returns: a generator that yields strings
def raw_command(self, lun, netfn, raw_bytes):
    """Send the raw command data and return the raw response.

    lun: the logical unit number
    netfn: the network function
    raw_bytes: the raw message as bytestring

    Returns the response as bytestring.
    """
    response = self.interface.send_and_receive_raw(
        self.target, lun, netfn, raw_bytes)
    return response
Send the raw command data and return the raw response. lun: the logical unit number netfn: the network function raw_bytes: the raw message as bytestring Returns the response as bytestring.
def admin_tools_render_menu_css(context, menu=None):
    """Template tag that renders the menu css files.

    Takes an optional ``Menu`` instance as its only argument; when
    omitted, the menu is retrieved with ``get_admin_menu``.
    """
    menu = menu if menu is not None else get_admin_menu(context)
    context.update({
        'css_files': menu.Media.css,
        'template': 'admin_tools/menu/css.html',
    })
    return context
Template tag that renders the menu css files; it takes an optional ``Menu`` instance as its unique argument. If not given, the menu will be retrieved with the ``get_admin_menu`` function.
def _tree_view_builder(self, indent=0, is_root=True):
    """Build a text tree representing the package structure.

    :param indent: current nesting depth, used for indentation.
    :param is_root: True only for the outermost call; emits the
        site-packages header line once.
    :returns: the rendered tree as one newline-joined string.
    """
    def pad_text(indent):
        # Indentation prefix for one tree entry.
        return " " * indent + "|-- "
    lines = list()
    if is_root:
        lines.append(SP_DIR)
    lines.append(
        "%s%s (%s)" % (pad_text(indent), self.shortname, self.fullname)
    )
    indent += 1
    # Recurse into sub-packages first, then list this package's files.
    for pkg in self.sub_packages.values():
        lines.append(pkg._tree_view_builder(indent=indent, is_root=False))
    lines.append(
        "%s%s (%s)" % (
            pad_text(indent),
            "__init__.py",
            self.fullname,
        )
    )
    for mod in self.sub_modules.values():
        lines.append(
            "%s%s (%s)" % (
                pad_text(indent),
                mod.shortname + ".py",
                mod.fullname,
            )
        )
    return "\n".join(lines)
Build a text to represent the package structure.
def rename(args):
    """
    %prog rename in.gff3 switch.ids > reindexed.gff3

    Change the IDs within the gff3, remapping both ``ID`` and ``Parent``
    attributes according to the two-column mapping in ``switch.ids``.
    """
    p = OptionParser(rename.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    ingff3, switch = args
    switch = DictFile(switch)
    gff = Gff(ingff3)
    for g in gff:
        # Unpack the single ID value; IDs absent from the map are kept as-is.
        id, = g.attributes["ID"]
        newname = switch.get(id, id)
        g.attributes["ID"] = [newname]
        if "Parent" in g.attributes:
            parents = g.attributes["Parent"]
            g.attributes["Parent"] = [switch.get(x, x) for x in parents]
        g.update_attributes()
        print(g)
%prog rename in.gff3 switch.ids > reindexed.gff3 Change the IDs within the gff3.
def sort_values(expr, by, ascending=True):
    """Sort the collection by values. `sort` is an alias for `sort_values`.

    :param expr: collection
    :param by: field, callable, or list of these to sort by
    :param ascending: bool, or list of bools matching the length of `by`
    :return: sorted collection
    """
    if not isinstance(by, list):
        by = [by, ]
    # Callables are resolved against the expression first.
    by = [it(expr) if inspect.isfunction(it) else it for it in by]
    return SortedCollectionExpr(expr, _sorted_fields=by, _ascending=ascending,
                                _schema=expr._schema)
Sort the collection by values. `sort` is an alias name for `sort_values` :param expr: collection :param by: the sequence or sequences to sort :param ascending: Sort ascending vs. descending. Specify a list for multiple sort orders. If this is a list of bools, it must match the length of the by :return: Sorted collection :Example: >>> df.sort_values(['name', 'id'])  # 1 >>> df.sort(['name', 'id'], ascending=False)  # 2 >>> df.sort(['name', 'id'], ascending=[False, True])  # 3 >>> df.sort([-df.name, df.id])  # 4, equal to #3
def _get(self): c = self.theLookahead self.theLookahead = None if c == None: c = self.instream.read(1) if c >= ' ' or c == '\n': return c if c == '': return '\000' if c == '\r': return '\n' return ' '
return the next character from stdin. Watch out for lookahead. If the character is a control character, translate it to a space or linefeed.
def unwrap(self, value, session=None):
    """Validate and then unwrap ``value`` for object creation.

    :param value: list returned from the database; each element is
        unwrapped by the field at the same position in ``self.types``.
    :returns: the unwrapped values as a tuple.
    """
    self.validate_unwrap(value)
    ret = []
    # NOTE: izip is the Python-2 itertools name; the loop variable
    # deliberately re-binds `value` to each element in turn.
    for field, value in izip(self.types, value):
        ret.append(field.unwrap(value, session=session))
    return tuple(ret)
Validate and then unwrap ``value`` for object creation. :param value: list returned from the database.
def _load_significant_pathways_file(path_to_file): feature_pathway_df = pd.read_table( path_to_file, header=0, usecols=["feature", "side", "pathway"]) feature_pathway_df = feature_pathway_df.sort_values( by=["feature", "side"]) return feature_pathway_df
Read in the significant pathways file as a pandas.DataFrame.
def policy_net(rng_key, batch_observations_shape, num_actions,
               bottom_layers=None):
    """Build a policy network producing log-probabilities over actions.

    :param rng_key: PRNG key used to initialize the parameters.
    :param batch_observations_shape: shape of a batch of observations.
    :param num_actions: size of the output action distribution.
    :param bottom_layers: optional list of layers to run before the
        final Dense + LogSoftmax head; extended in place if provided.
    :returns: tuple of (initialized parameters, the network object).
    """
    if bottom_layers is None:
        bottom_layers = []
    # Head: project to action logits, then normalize to log-probabilities.
    bottom_layers.extend([layers.Dense(num_actions),
                          layers.LogSoftmax()])
    net = layers.Serial(*bottom_layers)
    return net.initialize(batch_observations_shape, rng_key), net
A policy net function.
def _times_to_hours_after_local_midnight(times):
    """Convert a local pandas DatetimeIndex to an array of float hours
    elapsed since local midnight."""
    # Drop the timezone so the arithmetic happens in local wall-clock time.
    times = times.tz_localize(None)
    # Nanosecond difference between each time and its own midnight
    # (``normalize`` floors to midnight), scaled to hours.
    hrs = 1 / NS_PER_HR * (
        times.astype(np.int64) - times.normalize().astype(np.int64))
    return np.array(hrs)
convert local pandas datetime indices to array of hours as floats
def on_open(self, info):
    """sockjs-tornado on_open handler: record the connection info and
    the client ip, then run the open hook."""
    self.request = info
    self.ip = info.ip
    self.open()
sockjs-tornado on_open handler
def strip_ip_port(ip_address):
    """Strip the port from an IPv4 or IPv6 address.

    Handles ``host:port`` for IPv4 and ``[host]:port`` for IPv6;
    anything else is returned unchanged.

    :param ip_address: address string, optionally carrying a port.
    :returns: the address without the port (and without IPv6 brackets).
    """
    # Check the bracketed IPv6 form FIRST: an IPv6 address with an
    # embedded IPv4 part (e.g. '[::ffff:1.2.3.4]:80') contains dots and
    # was previously mangled by the IPv4 branch running first.
    if ']:' in ip_address:
        # '[addr]:port' -> 'addr'
        cleaned_ip = ip_address.rpartition(':')[0][1:-1]
    elif '.' in ip_address:
        cleaned_ip = ip_address.split(':')[0]
    else:
        cleaned_ip = ip_address
    return cleaned_ip
Strips the port from an IPv4 or IPv6 address, returns a unicode object.
def get_attribute(self):
    """Get the module attribute name for the collection matching the
    context's element type.

    Functions and subroutines live in 'executables', derived types in
    'types', and everything else in 'members'.
    """
    # Use the attribute names directly instead of indexing into a
    # throwaway list, which obscured which name was being selected.
    if self.context.el_type in [Function, Subroutine]:
        attribute = 'executables'
    elif self.context.el_type == CustomType:
        attribute = 'types'
    else:
        attribute = 'members'
    return attribute
Gets the appropriate module attribute name for a collection corresponding to the context's element type.
def insert_text(self, s, from_undo=False):
    """Accept natural-number input only: strip every non-digit
    character before delegating to the parent widget."""
    digits_only = ''.join(c for c in s if c in '0123456789')
    return super().insert_text(digits_only, from_undo)
Natural numbers only.
def currentSelected(self):
    """Return the currently selected delimiter character.

    Returns:
        str: one of ``,``, ``;``, ``\\t``, or the custom separator text;
        None when nothing is selected.
    """
    fixed_choices = (
        (self.commaRadioButton, ','),
        (self.semicolonRadioButton, ';'),
        (self.tabRadioButton, '\t'),
    )
    for button, delimiter in fixed_choices:
        if button.isChecked():
            return delimiter
    if self.otherRadioButton.isChecked():
        return self.otherSeparatorLineEdit.text()
    return None
Returns the currently selected delimiter character. Returns: str: One of `,`, `;`, `\t`, `*other*`.
def cli(env):
    """Print various bits of information about an account."""
    summary = AccountManager(env.client).get_summary()
    env.fout(get_snapshot_table(summary))
Prints some various bits of information about an account
def process(self):
    """Process the data from the clipboard and store it on the dialog.

    Reads the variable name from the name line-edit and, depending on
    the selected import mode, stores the clipboard contents as parsed
    table data, as evaluated code, or as plain text, then accepts the
    dialog.
    """
    var_name = self.name_edt.text()
    try:
        self.var_name = str(var_name)
    except UnicodeEncodeError:
        # Non-ASCII name (Python 2): keep it as unicode text.
        self.var_name = to_text_string(var_name)
    if self.text_widget.get_as_data():
        self.clip_data = self._get_table_data()
    elif self.text_widget.get_as_code():
        self.clip_data = try_to_eval(
            to_text_string(self._get_plain_text()))
    else:
        self.clip_data = to_text_string(self._get_plain_text())
    self.accept()
Process the data from clipboard
def parts(self):
    """Sequence-like access to the components of the filesystem path,
    computed once and cached on the instance."""
    if not hasattr(self, '_pparts'):
        self._pparts = tuple(self._parts)
    return self._pparts
An object providing sequence-like access to the components in the filesystem path.
def find_parent_split(node, orientation):
    """Walk up from `node` and return the first split container with the
    desired orientation and more than one child, or None when a
    workspace (or the end of the tree) is reached first."""
    current = node
    while True:
        if (current and current.orientation == orientation
                and len(current.children) > 1):
            return current
        if not current or current.type == "workspace":
            return None
        current = current.parent
Find the first parent split relative to the given node according to the desired orientation
def filtre(liste_base, criteres) -> groups.Collection:
    """Return a filtered collection based on `criteres`.

    :param liste_base: access list to filter.
    :param criteres: criteria mapping { attribute: [allowed values, ...] }
    """
    def keeps(entry):
        # Every criterion must admit the entry's value for its attribute.
        return all(entry[attribut] in valeurs
                   for attribut, valeurs in criteres.items())
    return groups.Collection(entry for entry in liste_base if keeps(entry))
Return a filter list, bases on criteres :param liste_base: Acces list :param criteres: Criteria { `attribut`:[valeurs,...] }
def _schemaPrepareInsert(self, store): for name, atr in self.getSchema(): atr.prepareInsert(self, store)
Prepare each attribute in my schema for insertion into a given store, either by upgrade or by creation. This makes sure all references point to this store and all relative paths point to this store's files directory.
def set(self, value: Optional[bool]):
    """Set the current status of the check and wake all registered
    events when the status actually changed.

    :param value: ``True`` (healthy), ``False`` (unhealthy), or
        ``None`` (unknown).
    """
    changed = self._value != value
    self._value = value
    if changed:
        for event in self._events:
            event.set()
Sets current status of a check :param value: ``True`` (healthy), ``False`` (unhealthy), or ``None`` (unknown)
def mapping(self):
    """Return the constructed mappings as a tuple:
    (user id map, user feature map, item id map, item feature map).

    Invert these to map internal indices back to external ids.
    """
    maps = (
        self._user_id_mapping,
        self._user_feature_mapping,
        self._item_id_mapping,
        self._item_feature_mapping,
    )
    return maps
Return the constructed mappings. Invert these to map internal indices to external ids. Returns ------- (user id map, user feature map, item id map, item id map): tuple of dictionaries
def pmag_angle(D1, D2):
    """Find the pairwise angles (in degrees) between two lists of
    directions ``D1`` and ``D2``.

    Only the first two components of each direction are used; any extra
    columns are stripped.
    """
    D1 = numpy.array(D1)
    if len(D1.shape) > 1:
        D1 = D1[:, 0:2]  # keep only the first two columns
    else:
        D1 = D1[:2]
    D2 = numpy.array(D2)
    if len(D2.shape) > 1:
        D2 = D2[:, 0:2]
    else:
        D2 = D2[:2]
    # Convert to Cartesian vectors; the angle follows from the dot product.
    X1 = dir2cart(D1)
    X2 = dir2cart(D2)
    angles = []
    for k in range(X1.shape[0]):
        angle = numpy.arccos(numpy.dot(X1[k], X2[k])) * 180. / numpy.pi
        # arccos already yields values in [0, 180]; this modulo is a
        # defensive no-op.
        angle = angle % 360.
        angles.append(angle)
    return numpy.array(angles)
finds the angle between lists of two directions D1,D2
def get_schema_type_name(node, context):
    """Return the GraphQL type name recorded for `node`'s query path.

    :raises AssertionError: when the query path has no location info.
    """
    query_path = node.query_path
    path_to_info = context.query_path_to_location_info
    if query_path not in path_to_info:
        raise AssertionError(
            u'Unable to find type name for query path {} with context {}.'.format(
                query_path, context))
    return path_to_info[query_path].type.name
Return the GraphQL type name of a node.
def timedelta_to_days(td):
    """Convert a `datetime.timedelta` object to a total number of days.

    Parameters
    ----------
    td : `datetime.timedelta` instance

    Returns
    -------
    days : float
        Total number of days in the `datetime.timedelta` object.

    Examples
    --------
    >>> td = datetime.timedelta(4.5)
    >>> timedelta_to_days(td)
    4.5
    """
    seconds_in_day = 24. * 3600.
    # BUG FIX: the original multiplied microseconds by 10.e6; converting
    # microseconds to seconds requires DIVIDING by 1e6.
    days = td.days + (td.seconds + td.microseconds / 1e6) / seconds_in_day
    return days
Convert a `datetime.timedelta` object to a total number of days. Parameters ---------- td : `datetime.timedelta` instance Returns ------- days : float Total number of days in the `datetime.timedelta` object. Examples -------- >>> td = datetime.timedelta(4.5) >>> td datetime.timedelta(4, 43200) >>> timedelta_to_days(td) 4.5
def h5ToDict(h5, readH5pyDataset=True):
    """Read a hdf5 file into a dictionary.

    :param h5: path of the hdf5 file to read.
    :param readH5pyDataset: when True, datasets are read into memory and
        the file is closed; when False, lazy h5py datasets are returned,
        so the file handle must stay open and is NOT closed here.
    """
    h = h5py.File(h5, "r")
    ret = unwrapArray(h, recursive=True, readH5pyDataset=readH5pyDataset)
    if readH5pyDataset:
        h.close()
    return ret
Read a hdf5 file into a dictionary
def _is_valid_api_url(self, url):
    """Callback for is_valid_api_url.

    Fetches `url` (through any configured proxies) and treats it as a
    valid API endpoint when the JSON response contains ``ok == 1``.
    Any network or parsing failure is logged and reported as invalid.
    """
    data = {}
    try:
        r = requests.get(url, proxies=self.proxy_servers)
        content = to_text_string(r.content, encoding='utf-8')
        data = json.loads(content)
    except Exception as error:
        # Deliberately broad: any failure just means "not a valid API url".
        logger.error(str(error))
    return data.get('ok', 0) == 1
Callback for is_valid_api_url.
def pagerank_lazy_push(s, r, w_i, a_i, push_node, rho, lazy):
    """Perform one lazy pagerank push step with a self-loop.

    Introduced in: Andersen, R., Chung, F., & Lang, K. (2006, October).
    Local graph partitioning using pagerank vectors. FOCS'06.

    :param s: solution vector, updated in place.
    :param r: residual vector, updated in place.
    :param w_i: presumably the edge weights toward ``a_i`` — confirm
        against the caller.
    :param a_i: presumably the neighbor indices of ``push_node``.
    :param push_node: node whose residual mass is pushed.
    :param rho: fraction of the residual absorbed into ``s``.
    :param lazy: fraction of the remainder kept on the node itself.
    """
    # Split the node's residual three ways:
    A = rho * r[push_node]                        # absorbed into s
    B = (1 - rho) * (1 - lazy) * r[push_node]     # pushed to neighbors
    C = (1 - rho) * lazy * (r[push_node])         # kept (lazy self-loop)
    s[push_node] += A
    r[push_node] = C
    r[a_i] += B * w_i
Performs a random step with a self-loop. Introduced in: Andersen, R., Chung, F., & Lang, K. (2006, October). Local graph partitioning using pagerank vectors. In Foundations of Computer Science, 2006. FOCS'06. 47th Annual IEEE Symposium on (pp. 475-486). IEEE.
def compose(*functions):
    """Compose a series of functions right-to-left:
    ``compose(f, g, h)(x) == f(g(h(x)))`` — as a unix pipeline,
    ``h | g | f``.

    From https://mathieularose.com/function-composition-in-python/.
    """
    composed = identity
    for func in functions:
        composed = (lambda outer, inner: lambda x: outer(inner(x)))(composed, func)
    return composed
Function composition on a series of functions. Remember that function composition runs right-to-left: `f . g . h = f(g(h(x)))`. As a unix pipeline, it would be written: `h | g | f`. From https://mathieularose.com/function-composition-in-python/.
def get_slice(self, thin_start=None, thin_interval=None, thin_end=None):
    """Format a slice, usable for retrieving a thinned array from an
    InferenceFile.

    Each parameter defaults to the corresponding ``thin_*`` attribute
    when None.  NOTE(review): there is an asymmetry — an explicitly
    passed ``thin_interval`` is rounded up with ``ceil``, while the
    attribute value is used as-is; confirm this is intended.

    :returns: a ``slice`` object.
    """
    if thin_start is None:
        thin_start = int(self.thin_start)
    else:
        thin_start = int(thin_start)
    if thin_interval is None:
        thin_interval = self.thin_interval
    else:
        # Guard against fractional intervals from the caller.
        thin_interval = int(numpy.ceil(thin_interval))
    if thin_end is None:
        thin_end = self.thin_end
    else:
        thin_end = int(thin_end)
    return slice(thin_start, thin_end, thin_interval)
Formats a slice using the given arguments that can be used to retrieve a thinned array from an InferenceFile. Parameters ---------- thin_start : int, optional The starting index to use. If None, will use the ``thin_start`` attribute. thin_interval : int, optional The interval to use. If None, will use the ``thin_interval`` attribute. thin_end : int, optional The end index to use. If None, will use the ``thin_end`` attribute. Returns ------- slice : The slice needed.
def _request_address(self): if not self._request_address_val: template = ( 'https://sb-ssl.google.com/safebrowsing/api/lookup' '?client={0}&key={1}&appver={2}&pver={3}' ) self._request_address_val = template.format( self.client_name, self.api_key, self.app_version, self.protocol_version ) return self._request_address_val
Get address of a POST request to the service.
def connect(self, timeout=None):
    """Connect to the KATCP interface, starting whatever is needed.

    Parameters
    ----------
    timeout : float or None
        Time to wait until connected.  No waiting if None.

    Raises
    ------
    :class:`tornado.gen.TimeoutError` if the connect timeout expires.
    """
    assert not self._running
    # One deadline shared across the successive wait steps below.
    maybe_timeout = future_timeout_manager(timeout)
    self._logger.debug('Starting katcp client')
    self.katcp_client.start()
    try:
        yield maybe_timeout(self.katcp_client.until_running())
        self._logger.debug('Katcp client running')
    except tornado.gen.TimeoutError:
        # Undo the start if the client never came up.
        self.katcp_client.stop()
        raise
    if timeout:
        yield maybe_timeout(self.katcp_client.until_connected())
        self._logger.debug('Katcp client connected')
    self._running = True
    self._state_loop()
Connect to KATCP interface, starting what is needed Parameters ---------- timeout : float, None Time to wait until connected. No waiting if None. Raises ------ :class:`tornado.gen.TimeoutError` if the connect timeout expires
def _invert_all(self):
    """Invert every bit (flip every byte of the backing datastore)."""
    # Bind the bound methods locally for speed inside the loop.
    set = self._datastore.setbyte
    get = self._datastore.getbyte
    # NOTE: xrange implies Python 2.
    for p in xrange(self._datastore.byteoffset,
                    self._datastore.byteoffset + self._datastore.bytelength):
        # 256 + ~b == 255 - b: bitwise NOT constrained to one byte.
        set(p, 256 + ~get(p))
Invert every bit.
def get_entry_text(self, fname):
    """Retrieve the raw text from a file, transparently decompressing
    gzip files.

    :param fname: path of the file to read.
    :returns: the file contents as text.
    """
    # endswith is clearer and safer than comparing the last
    # dot-separated chunk (which also matched a file literally named 'gz').
    if fname.endswith('.gz'):
        with gz.open(fname, 'rt') as f:
            filetext = f.read()
    else:
        with codecs.open(fname, 'r') as f:
            filetext = f.read()
    return filetext
Retrieve the raw text from a file.
def generate_megaman_data(sampling=2):
    """Generate 2D point data sampled from the megaman image.

    :param sampling: samples per pixel along each axis.
    :returns: tuple of (N x 2 array of point coordinates, array of pixel
        values looked up from the image for each point).
    """
    data = get_megaman_image()
    # Sub-pixel grid covering the image, `sampling` points per pixel.
    x = np.arange(sampling * data.shape[1]) / float(sampling)
    y = np.arange(sampling * data.shape[0]) / float(sampling)
    X, Y = map(np.ravel, np.meshgrid(x, y))
    # Flip Y so image row 0 ends up at the top of the point cloud.
    C = data[np.floor(Y.max() - Y).astype(int),
             np.floor(X).astype(int)]
    return np.vstack([X, Y]).T, C
Generate 2D point data of the megaman image
def remove(self, workflow_id):
    """Remove the document with the given id from the data store.

    All GridFS documents associated with the workflow are deleted first.

    Args:
        workflow_id (str): id of the document representing a workflow run.

    Raises:
        DataStoreNotConnected: if the data store is not connected.
    """
    try:
        db = self._client[self.database]
        fs = GridFSProxy(GridFS(db.unproxied_object))
        # Purge every GridFS file belonging to this workflow.
        for grid_doc in fs.find({"workflow_id": workflow_id},
                                no_cursor_timeout=True):
            fs.delete(grid_doc._id)
        col = db[WORKFLOW_DATA_COLLECTION_NAME]
        return col.delete_one({"_id": ObjectId(workflow_id)})
    except ConnectionFailure:
        raise DataStoreNotConnected()
Removes a document specified by its id from the data store. All associated GridFs documents are deleted as well. Args: workflow_id (str): The id of the document that represents a workflow run. Raises: DataStoreNotConnected: If the data store is not connected to the server.
def get_home_position(boatd=None):
    """Get the configured home position from boatd.

    :returns: the home position as a ``Point``, or None when unset.
    """
    client = boatd if boatd is not None else Boatd()
    waypoints = client.get('/waypoints')
    home = waypoints.get('home', None)
    if home is None:
        return None
    lat, lon = home
    return Point(lat, lon)
Get the current home position from boatd. :returns: The configured home position :rtype: Point
def block_stat(self, multihash, **kwargs):
    """Return a dict with the size of the block with the given hash.

    .. code-block:: python

        >>> c.block_stat('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D')
        {'Key': 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D',
         'Size': 258}

    :param multihash: base58 multihash of an existing block to stat.
    :returns: information about the requested block as a dict.
    """
    return self._client.request(
        '/block/stat', (multihash,), decoder='json', **kwargs)
Returns a dict with the size of the block with the given hash. .. code-block:: python >>> c.block_stat('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D') {'Key': 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D', 'Size': 258} Parameters ---------- multihash : str The base58 multihash of an existing block to stat Returns ------- dict : Information about the requested block
def bootstrap_css():
    """Return HTML for the Bootstrap CSS link tag(s).

    Renders the main stylesheet url and, when a theme url is configured,
    the theme stylesheet as well.
    """
    rendered_urls = [render_link_tag(bootstrap_css_url())]
    if bootstrap_theme_url():
        rendered_urls.append(render_link_tag(bootstrap_theme_url()))
    # Join the list directly; the identity list-comprehension added nothing.
    return mark_safe("".join(rendered_urls))
Return HTML for Bootstrap CSS. Adjust url in settings. If no url is returned, we don't want this statement to return any HTML. This is intended behavior. Default value: ``None`` This value is configurable, see Settings section **Tag name**:: bootstrap_css **Usage**:: {% bootstrap_css %} **Example**:: {% bootstrap_css %}
def _convert_from(data):
    """Object hook for the native ``json.loads``.

    Looks up the class named in ``data['__class__']`` and hands
    ``data['__value__']`` to the matching registered deserializer.
    Mappings without the expected keys, or whose class cannot be
    imported, are returned unchanged.
    """
    try:
        module, klass_name = data['__class__'].rsplit('.', 1)
        klass = getattr(import_module(module), klass_name)
    except (ImportError, AttributeError, KeyError):
        # Not one of ours: pass through untouched.
        return data
    return deserialize(klass, data['__value__'])
Internal function that will be hooked to the native `json.loads` Find the right deserializer for a given value, taking into account the internal deserializer registry.
def format_usage(self, usage):
    """Format the usage string with a leading newline and indentation,
    so exactly one newline separates usage from the first heading when
    there is no description."""
    dedented = textwrap.dedent(usage)
    return '\nUsage: %s\n' % self.indent_lines(dedented, "  ")
Ensure there is only one newline between usage and the first heading if there is no description.
def set_is_valid_rss(self):
    """Record whether this is actually a valid RSS feed: it must have a
    title, a link, and a description."""
    # bool() collapses the redundant if/else; behavior is unchanged.
    self.is_valid_rss = bool(self.title and self.link and self.description)
Check if this is actually a valid RSS feed
def execute(self, command):
    """Execute a DDE command.

    Sends `command` asynchronously over the open DDE conversation and
    raises :class:`DDEError` if the transaction could not be queued.
    """
    pData = c_char_p(command)
    # +1 for the terminating NUL expected by the DDE API.
    cbData = DWORD(len(command) + 1)
    hDdeData = DDE.ClientTransaction(pData, cbData, self._hConv, HSZ(),
                                     CF_TEXT, XTYP_EXECUTE, TIMEOUT_ASYNC,
                                     LPDWORD())
    if not hDdeData:
        raise DDEError("Unable to send command", self._idInst)
    DDE.FreeDataHandle(hDdeData)
Execute a DDE command.
def inflect(self):
    """Return a lazily-created, cached instance of ``inflect.engine``."""
    if self._inflect is None:
        # Imported on first use to avoid the dependency at module load.
        import inflect
        self._inflect = inflect.engine()
    return self._inflect
Return instance of inflect.
def primitives():
    """Perform primitive pairing-group operations for profiling.

    Exercises G1/G2/Gt arithmetic, hashing, and the pairing; all results
    are intentionally discarded.
    """
    z = randomZ(orderG1())
    # G1: addition, scalar multiplication, generator multiplication, hash.
    P, Q = randomG1(), randomG1()
    R = generatorG1()
    g1Add = P + Q
    g1ScalarMultiply = z * P
    g1GeneratorMultiply = z * R
    g1Hash = hashG1(hash_in)
    # G2: the same set of operations.
    P, Q = randomG2(), randomG2()
    R = generatorG2()
    g2Add = P + Q
    g2ScalarMultiply = z * P
    g2GeneratorMultiply = z * R
    g2hash = hashG2(hash_in)
    # Gt: multiplication and exponentiation.
    P = randomGt()
    Q = randomGt()
    gtMult = P * Q
    gtExp = P ** z
    # Pairing of a random G1/G2 pair.
    x, y = (randomG1(), randomG2())
    R = pair(x, y)
Perform primitive operations for profiling
def pipeline_name(self):
    """Get the pipeline name of the current job instance.

    Because job instances can be created in different ways which yield
    different data, try the possible locations in order: this job's own
    data, the stage's pipeline object, then the stage's raw data.

    :return: pipeline name.
    """
    if 'pipeline_name' in self.data and self.data.pipeline_name:
        return self.data.get('pipeline_name')
    elif self.stage.pipeline is not None:
        return self.stage.pipeline.data.name
    else:
        return self.stage.data.pipeline_name
Get pipeline name of current job instance. Because instantiating job instance could be performed in different ways and those return different results, we have to check where from to get name of the pipeline. :return: pipeline name.
def save(self, path):
    """Pickle the model weights (as a plain dict) to `path`."""
    with io.open(path, 'wb') as fout:
        weights = dict(self.weights)
        return pickle.dump(weights, fout)
Save the pickled model weights.
def _validate_edata(self, edata):
    """Validate the `edata` argument of the ``raise_exception_if`` method.

    Valid values are None, a single dict, or an iterable of dicts where
    every dict has a string ``"field"`` entry and a ``"value"`` entry.

    :returns: True when `edata` is valid, False otherwise.
    """
    if edata is None:
        return True
    if not (isinstance(edata, dict) or _isiterable(edata)):
        return False
    # Normalize a single dict into a one-element list.
    edata = [edata] if isinstance(edata, dict) else edata
    for edict in edata:
        if (not isinstance(edict, dict)) or (
            isinstance(edict, dict)
            and (
                ("field" not in edict)
                or ("field" in edict and (not isinstance(edict["field"], str)))
                or ("value" not in edict)
            )
        ):
            return False
    return True
Validate edata argument of raise_exception_if method.
def lte(max_value):
    """Build a validator asserting a field value is less than or equal
    to `max_value`; on failure the validator returns an error via ``e``,
    otherwise it returns None."""
    def validate(value):
        if value > max_value:
            return e("{} is not less than or equal to {}", value, max_value)
        return None
    return validate
Validates that a field value is less than or equal to the value given to this validator.
def disconnect(self, func=None):
    """Disconnect a function from the signal; with no argument, all
    connections are dropped."""
    if func is not None:
        self._connections.remove(func)
    else:
        self._connections = []
Disconnect a function call to the signal. If None, all connections are disconnected
def blend(self, other, percent=0.5):
    """Blend this color with the other one.

    Args:
        other: the grapefruit.Color to blend with this one.
        percent: weight of this color in the mix (the other color gets
            ``1 - percent``).

    Returns:
        A grapefruit.Color instance which is the result of blending this
        color with the other one.
    """
    other_weight = 1.0 - percent
    blended_channels = []
    for own, theirs in zip(self.__rgb, other.__rgb):
        blended_channels.append((own * percent) + (theirs * other_weight))
    blended_alpha = (self.__a * percent) + (other.__a * other_weight)
    return Color(tuple(blended_channels), 'rgb', blended_alpha, self.__wref)
blend this color with the other one. Args: :other: the grapefruit.Color to blend with this one. Returns: A grapefruit.Color instance which is the result of blending this color on the other one. >>> c1 = Color.from_rgb(1, 0.5, 0, 0.2) >>> c2 = Color.from_rgb(1, 1, 1, 0.6) >>> c3 = c1.blend(c2) >>> c3 Color(1.0, 0.75, 0.5, 0.4)
def in_chain(cls, client, chain_id, expiration_dates=None):
    """Fetch all option instruments in an options chain, following
    pagination.

    :param client: API client with a ``get`` method.
    :param chain_id: id of the options chain.
    :param expiration_dates: optional list of expiration dates used to
        narrow the results.
    :returns: list of option instrument dicts.
    """
    # A mutable default argument ([]) is shared across calls; use the
    # None sentinel instead.
    if expiration_dates is None:
        expiration_dates = []
    request_url = "https://api.robinhood.com/options/instruments/"
    params = {
        "chain_id": chain_id,
        "expiration_dates": ",".join(expiration_dates)
    }
    data = client.get(request_url, params=params)
    results = data['results']
    # Follow the 'next' links until the listing is exhausted.
    while data['next']:
        data = client.get(data['next'])
        results.extend(data['results'])
    return results
fetch all option instruments in an options chain - expiration_dates = optionally scope
def _special_value_rows(em):
    """Handle the "rows" special attribute, which differs depending on
    whether the tag is a textarea or something else (e.g. a frameset).

    For a textarea the value is clamped to an integer >= 1 (default 2);
    for any other tag the raw attribute value (default '') is returned.
    """
    if em.tagName == 'textarea':
        return convertToIntRange(em.getAttribute('rows', 2), minValue=1,
                                 maxValue=None, invalidDefault=2)
    else:
        return em.getAttribute('rows', '')
_special_value_rows - Handle "rows" special attribute, which differs if tagName is a textarea or frameset
def _constrain_L2_grad(op, grad):
    """Gradient for constrained optimization on an L2 unit ball.

    Projects the gradient onto the ball when the input sits on (or
    outside) the boundary, but leaves it untouched inside the ball.

    Args:
        op: the tensorflow op we're computing the gradient for.
        grad: gradient we need to backprop.

    Returns:
        (projected if necessary) gradient.
    """
    inp = op.inputs[0]
    inp_norm = tf.norm(inp)
    unit_inp = inp / inp_norm
    # Component of the gradient parallel to the input direction.
    grad_projection = dot(unit_inp, grad)
    parallel_grad = unit_inp * grad_projection
    # Keep the full gradient when inside the ball, or when the gradient
    # points back inward; otherwise remove the outward radial component.
    is_in_ball = tf.less_equal(inp_norm, 1)
    is_pointed_inward = tf.less(grad_projection, 0)
    allow_grad = tf.logical_or(is_in_ball, is_pointed_inward)
    clip_grad = tf.logical_not(allow_grad)
    clipped_grad = tf.cond(clip_grad, lambda: grad - parallel_grad,
                           lambda: grad)
    return clipped_grad
Gradient for constrained optimization on an L2 unit ball. This function projects the gradient onto the ball if you are on the boundary (or outside!), but leaves it untouched if you are inside the ball. Args: op: the tensorflow op we're computing the gradient for. grad: gradient we need to backprop Returns: (projected if necessary) gradient.
def thumbnail_url(source, alias):
    """Return the thumbnail url for a source file using an aliased set
    of thumbnail options, or '' when no matching alias is found.

    Example usage::

        <img src="{{ person.photo|thumbnail_url:'small' }}" alt="">
    """
    try:
        thumb = get_thumbnailer(source)[alias]
    except Exception:
        # Template-filter contract: never raise, just render nothing.
        return ''
    return thumb.url
Return the thumbnail url for a source file using an aliased set of thumbnail options. If no matching alias is found, returns an empty string. Example usage:: <img src="{{ person.photo|thumbnail_url:'small' }}" alt="">
def load_weight(weight_file: str,
                weight_name: str,
                weight_file_cache: Dict[str, Dict]) -> np.ndarray:
    """Load a weight from a file, or from the cache if the file was
    loaded before.

    :param weight_file: Weight file (.npy, .npz, or MXNet param file).
    :param weight_name: Weight name (ignored for .npy files).
    :param weight_file_cache: Cache of loaded files, updated in place.
    :return: Loaded weight as a numpy array.
    """
    logger.info('Loading input weight file: %s', weight_file)
    if weight_file.endswith(".npy"):
        # Single-array file: the name is irrelevant and nothing is cached.
        return np.load(weight_file)
    elif weight_file.endswith(".npz"):
        if weight_file not in weight_file_cache:
            weight_file_cache[weight_file] = np.load(weight_file)
        return weight_file_cache[weight_file][weight_name]
    else:
        # MXNet format stores parameters under an 'arg:' prefix.
        if weight_file not in weight_file_cache:
            weight_file_cache[weight_file] = mx.nd.load(weight_file)
        return weight_file_cache[weight_file]['arg:%s' % weight_name].asnumpy()
Load wight fron a file or the cache if it was loaded before. :param weight_file: Weight file. :param weight_name: Weight name. :param weight_file_cache: Cache of loaded files. :return: Loaded weight.
def _getResourceClass(self):
    """Return the concrete subclass of Resource that's appropriate for
    auto-deploying this module.

    :raises AssertionError: when the path exists but is neither a file
        nor a directory, or does not exist at all.
    """
    if self.fromVirtualEnv:
        subcls = VirtualEnvResource
    elif os.path.isdir(self._resourcePath):
        subcls = DirectoryResource
    elif os.path.isfile(self._resourcePath):
        subcls = FileResource
    elif os.path.exists(self._resourcePath):
        # Exists but is something exotic (socket, device, ...).
        raise AssertionError("Neither a file or a directory: '%s'"
                             % self._resourcePath)
    else:
        raise AssertionError("No such file or directory: '%s'"
                             % self._resourcePath)
    return subcls
Return the concrete subclass of Resource that's appropriate for auto-deploying this module.
def merge_list_members(self, list_, record_data, merge_rule):
    """Responsys.mergeListMembers call.

    Accepts:
        InteractObject list_
        RecordData record_data
        ListMergeRule merge_rule

    Returns a MergeResult wrapping the raw API response.
    """
    soap_list = list_.get_soap_object(self.client)
    soap_records = record_data.get_soap_object(self.client)
    soap_rule = merge_rule.get_soap_object(self.client)
    response = self.call('mergeListMembers', soap_list, soap_records, soap_rule)
    return MergeResult(response)
Responsys.mergeListMembers call Accepts: InteractObject list_ RecordData record_data ListMergeRule merge_rule Returns a MergeResult
def _doIdRes(self, message, endpoint, return_to):
    """Handle id_res responses that are not cancellations of immediate
    mode requests.

    @param message: the response parameters.
    @param endpoint: the discovered endpoint object.  May be None.
    @param return_to: the return URL the response must match.
    @raises ProtocolError: if the message contents are not well-formed
        according to the OpenID specification (missing fields, bad
        return_to, bad signature...).
    @raises DiscoveryFailure: if re-discovery during endpoint
        verification fails.
    @returntype: L{SuccessResponse}
    """
    self._idResCheckForFields(message)
    if not self._checkReturnTo(message, return_to):
        raise ProtocolError(
            "return_to does not match return URL. Expected %r, got %r"
            % (return_to, message.getArg(OPENID_NS, 'return_to')))
    # May perform discovery; returns the verified endpoint to use.
    endpoint = self._verifyDiscoveryResults(message, endpoint)
    logging.info("Received id_res response from %s using association %s"
                 % (endpoint.server_url,
                    message.getArg(OPENID_NS, 'assoc_handle')))
    self._idResCheckSignature(message, endpoint.server_url)
    # Replay protection.
    self._idResCheckNonce(message, endpoint)
    # 'signed' is required; no_default turns a missing field into an error.
    signed_list_str = message.getArg(OPENID_NS, 'signed', no_default)
    signed_list = signed_list_str.split(',')
    signed_fields = ["openid." + s for s in signed_list]
    return SuccessResponse(endpoint, message, signed_fields)
Handle id_res responses that are not cancellations of immediate mode requests. @param message: the response paramaters. @param endpoint: the discovered endpoint object. May be None. @raises ProtocolError: If the message contents are not well-formed according to the OpenID specification. This includes missing fields or not signing fields that should be signed. @raises DiscoveryFailure: If the subject of the id_res message does not match the supplied endpoint, and discovery on the identifier in the message fails (this should only happen when using OpenID 2) @returntype: L{Response}
def remove(self, *values):
    """Remove the given auxiliary file names and/or variables.

    Each argument (or element of an iterable argument) may be either a
    registered file name or a registered variable; every matching entry
    is deleted from the internal mapping.

    :raises ValueError: (wrapped with context by ``augment_excmessage``)
        when an argument matches neither a registered filename nor a
        registered variable.
    """
    # Flatten the arguments into file-name strings and Variable objects.
    for value in objecttools.extract(values, (str, variabletools.Variable)):
        try:
            deleted_something = False
            # Copy to lists because the dicts are mutated while iterating.
            for fn2var in list(self._type2filename2variable.values()):
                for fn_, var in list(fn2var.items()):
                    if value in (fn_, var):
                        del fn2var[fn_]
                        deleted_something = True
            if not deleted_something:
                raise ValueError(
                    f'`{repr(value)}` is neither a registered '
                    f'filename nor a registered variable.')
        except BaseException:
            objecttools.augment_excmessage(
                f'While trying to remove the given object `{value}` '
                f'of type `{objecttools.classname(value)}` from the '
                f'actual Variable2AuxFile object')
Remove the defined variables. The variables to be removed can be selected in two ways. But the first example shows that passing nothing or an empty iterable to method |Variable2Auxfile.remove| does not remove any variable: >>> from hydpy import dummies >>> v2af = dummies.v2af >>> v2af.remove() >>> v2af.remove([]) >>> from hydpy import print_values >>> print_values(v2af.filenames) file1, file2 >>> print_values(v2af.variables, width=30) eqb(5000.0), eqb(10000.0), eqd1(100.0), eqd2(50.0), eqi1(2000.0), eqi2(1000.0) The first option is to pass auxiliary file names: >>> v2af.remove('file1') >>> print_values(v2af.filenames) file2 >>> print_values(v2af.variables) eqb(10000.0), eqd1(100.0), eqd2(50.0) The second option is, to pass variables of the correct type and value: >>> v2af = dummies.v2af >>> v2af.remove(v2af.eqb[0]) >>> print_values(v2af.filenames) file1, file2 >>> print_values(v2af.variables) eqb(10000.0), eqd1(100.0), eqd2(50.0), eqi1(2000.0), eqi2(1000.0) One can pass multiple variables or iterables containing variables at once: >>> v2af = dummies.v2af >>> v2af.remove(v2af.eqb, v2af.eqd1, v2af.eqd2) >>> print_values(v2af.filenames) file1 >>> print_values(v2af.variables) eqi1(2000.0), eqi2(1000.0) Passing an argument that equals neither a registered file name or a registered variable results in the following exception: >>> v2af.remove('test') Traceback (most recent call last): ... ValueError: While trying to remove the given object `test` of type \ `str` from the actual Variable2AuxFile object, the following error occurred: \ `'test'` is neither a registered filename nor a registered variable.
def add_effect(self, effect):
    """Add an Effect to the Frame.

    The effect is registered with this frame's scene before being
    tracked in the frame's effect list.

    :param effect: The Effect to be added.
    """
    scene = self._scene
    effect.register_scene(scene)
    self._effects.append(effect)
Add an Effect to the Frame. :param effect: The Effect to be added.
def clear_repository_helper(reserve_fn, clear_fn, retry=5, reservation=None):
    """Start a repository erasure and wait until it finishes.

    Used by ``clear_sel`` and ``clear_sdr_repository``.

    :param reserve_fn: callable that obtains a repository reservation.
    :param clear_fn: callable that issues the clear command.
    :param retry: retry count passed through to the clear steps.
    :param reservation: existing reservation to reuse; a new one is
        obtained when None.
    """
    if reservation is None:
        reservation = reserve_fn()
    # Kick off the erase, then poll its status after a short delay.
    reservation = _clear_repository(reserve_fn, clear_fn, INITIATE_ERASE,
                                    retry, reservation)
    time.sleep(0.5)
    reservation = _clear_repository(reserve_fn, clear_fn, GET_ERASE_STATUS,
                                    retry, reservation)
Helper function to start repository erasure and wait until finish. This helper is used by clear_sel and clear_sdr_repository.
def wait_for_ajax_calls_to_complete(self, timeout=5):
    """Wait until there are no active or pending jQuery ajax requests.

    :param timeout: seconds to wait for silence (default: 5)
    :raises TimeoutException: should silence not be had in time.
    :return: None
    """
    # Imported here so selenium stays an optional dependency.
    from selenium.webdriver.support.ui import WebDriverWait
    WebDriverWait(self.driver, timeout).until(
        lambda s: s.execute_script("return jQuery.active === 0"))
Waits until there are no active or pending ajax requests. Raises TimeoutException should silence not be had. :param timeout: time to wait for silence (default: 5 seconds) :return: None
def url_param(param, default=None):
    """Read a URL or POST parameter for use in a SQL Lab query.

    Checks the request query string first, then the ``url_params`` entry
    of the posted ``form_data`` JSON blob.

    :param param: the parameter to look up
    :param default: value returned in the absence of the parameter
    """
    if request.args.get(param):
        return request.args.get(param, default)
    if request.form.get('form_data'):
        form_data = json.loads(request.form.get('form_data'))
        url_params = form_data.get('url_params') or {}
        return url_params.get(param, default)
    return default
Read a url or post parameter and use it in your SQL Lab query When in SQL Lab, it's possible to add arbitrary URL "query string" parameters, and use those in your SQL code. For instance you can alter your url and add `?foo=bar`, as in `{domain}/superset/sqllab?foo=bar`. Then if your query is something like SELECT * FROM foo = '{{ url_param('foo') }}', it will be parsed at runtime and replaced by the value in the URL. As you create a visualization form this SQL Lab query, you can pass parameters in the explore view as well as from the dashboard, and it should carry through to your queries. :param param: the parameter to lookup :type param: str :param default: the value to return in the absence of the parameter :type default: str
def sign_message(body: ByteString, secret: Text) -> Text:
    """Compute a message's signature.

    :param body: raw message bytes to sign
    :param secret: shared secret used as the HMAC key
    :return: signature in ``sha1=<hexdigest>`` form
    """
    digest = hmac.new(secret.encode(), body, sha1).hexdigest()
    return 'sha1=' + digest
def register_lookup_handler(lookup_type, handler_or_path):
    """Register a lookup handler.

    Args:
        lookup_type (str): Name to register the handler under.
        handler_or_path (OneOf[func, str]): a function/class, or a dotted
            path to one which is then loaded.
    """
    handler = handler_or_path
    # Dotted-path strings are resolved to the object they name.
    if isinstance(handler_or_path, basestring):
        handler = load_object_from_string(handler_or_path)
    LOOKUP_HANDLERS[lookup_type] = handler
    # New-style lookups are classes; warn on legacy plain functions.
    # Bug fix: `type(handler) != type` wrongly warned for classes with a
    # custom metaclass; isinstance(handler, type) accepts any class.
    if not isinstance(handler, type):
        logger = logging.getLogger(__name__)
        # Lazy %-args instead of eager string formatting.
        logger.warning("Registering lookup `%s`: Please upgrade to use the "
                       "new style of Lookups.", lookup_type)
        warnings.warn(
            "Lookup `%s`: Please upgrade to use the new style of Lookups"
            "." % lookup_type,
            DeprecationWarning,
            stacklevel=2,
        )
def edit(self, layer, item, delete=False):
    """Edit model.

    :param layer: Layer of model to edit
    :type layer: str
    :param item: Items to edit.
    :type item: dict
    :param delete: Flag to return the layer object so the caller can
        delete items from it.
    :type delete: bool
    :raises AttributeError: if the layer or an item in it is missing
    """
    if hasattr(self, layer):
        layer_obj = getattr(self, layer)
    else:
        # Bug fix: format the message; the original passed the format
        # string and the value as two separate exception args.
        raise AttributeError('missing layer: %s' % layer)
    if delete:
        return layer_obj
    # .items() instead of the Python-2-only iteritems().
    for k, v in item.items():
        if k in layer_obj.layer:
            layer_obj.edit(k, v)
        else:
            raise AttributeError('missing layer item: %s' % k)
        if k in self.model[layer]:
            self.model[layer][k].update(v)
        else:
            raise AttributeError('missing model layer item: %s' % k)
def delete(args):
    """cdstarcat delete OID

    Delete an object specified by OID from CDSTAR.
    """
    with _catalog(args) as cat:
        size_before = len(cat)
        cat.delete(args.args[0])
        removed = size_before - len(cat)
        args.log.info('{0} objects deleted'.format(removed))
        return removed
def find_executable(name):
    """Find the actual path to a named command.

    The first one on $PATH wins.

    :param name: command name to look for
    :return: full path to the command, or None when not found
    """
    # os.pathsep instead of a hard-coded ':' so this also works on
    # platforms where PATH entries are separated differently (Windows).
    for directory in os.environ.get('PATH', '').split(os.pathsep):
        candidate = os.path.join(directory, name)
        if os.path.exists(candidate):
            return candidate
    return None
def _add_filestore_resources(self, filestore_resources, create_default_views, hxl_update):
    """Helper method to create files in filestore by updating resources.

    Args:
        filestore_resources (List[hdx.data.Resource]): resources that use the filestore
        create_default_views (bool): whether to call package_create_default_resource_views
        hxl_update (bool): whether to call package_hxl_update

    Returns:
        None
    """
    for resource in filestore_resources:
        # Match each filestore resource to the server-created resource of
        # the same name so server-assigned metadata is carried over.
        for created_resource in self.data['resources']:
            if resource['name'] == created_resource['name']:
                merge_two_dictionaries(resource.data, created_resource)
                # Dropping the url before update_in_hdx triggers the file
                # upload to the filestore instead of pointing at a url.
                # NOTE(review): assumes resource['url'] always exists at
                # this point - confirm with callers.
                del resource['url']
                resource.update_in_hdx()
                # Copy the post-update state back onto the created resource.
                merge_two_dictionaries(created_resource, resource.data)
                break
    # Refresh local resource state after the server round-trips.
    self.init_resources()
    self.separate_resources()
    if create_default_views:
        self.create_default_views()
    if hxl_update:
        self.hxl_update()
def get_ploidy(items, region=None):
    """Retrieve ploidy of a region, handling special cases.

    Mitochondria, X and Y chromosomes are resolved against the configured
    sample genders; everything else uses the configured default ploidy.
    """
    chrom_name = None
    if isinstance(region, (list, tuple)):
        chrom_name = region[0]
    chrom = chromosome_special_cases(chrom_name)
    ploidy = _configured_ploidy(items)
    sexes = _configured_genders(items)
    if chrom == "mitochondrial":
        return ploidy.get("mitochondrial", 1)
    if chrom == "X":
        if "female" in sexes or "f" in sexes:
            return ploidy.get("female", ploidy["default"])
        if "male" in sexes or "m" in sexes:
            return ploidy.get("male", 1)
        # No configured gender: treat as female.
        return ploidy.get("female", ploidy["default"])
    if chrom == "Y":
        # Single copy of Y regardless of configuration.
        return 1
    return ploidy["default"]
def custom_object_prefix_strict(instance):
    """Ensure custom objects follow strict naming style conventions.

    Yields a JSONError for any object type that is neither a known type,
    a reserved object, nor matches the strict custom-type prefix pattern.
    """
    obj_type = instance['type']
    is_known = (obj_type in enums.TYPES or
                obj_type in enums.RESERVED_OBJECTS)
    if not is_known and not CUSTOM_TYPE_PREFIX_RE.match(obj_type):
        yield JSONError("Custom object type '%s' should start with 'x-' "
                        "followed by a source unique identifier (like a "
                        "domain name with dots replaced by hyphens), a hyphen "
                        "and then the name." % obj_type,
                        instance['id'], 'custom-prefix')
def upsert(self, insert_index, val, fn=None):
    """Insert or update an existing index within the vector.

    Args:
        - insert_index (int): The index at which the element should be
          inserted.
        - val (int|float): The value to be inserted into the vector.
        - fn (callable, optional): Callable taking the current value and
          the passed value, producing the stored value on collision.
          Defaults to keeping the passed value.
    """
    if fn is None:
        fn = lambda current, passed: passed
    # Any mutation invalidates the cached magnitude.
    self._magnitude = 0
    position = self.position_for_index(insert_index)
    index_exists = (position < len(self.elements)
                    and self.elements[position] == insert_index)
    if index_exists:
        self.elements[position + 1] = fn(self.elements[position + 1], val)
    else:
        # Insert the value first, then the index in front of it, so the
        # pair ends up stored as (index, value).
        self.elements.insert(position, val)
        self.elements.insert(position, insert_index)
def peek(self, session, address, width):
    """Read an 8, 16, 32, or 64-bit value from the specified address.

    Corresponds to viPeek* functions of the VISA library.

    :param session: Unique logical identifier to a session.
    :param address: Source address to read the value.
    :param width: Number of bits to read.
    :return: Data read from bus, return value of the library call.
    :rtype: bytes, :class:`pyvisa.constants.StatusCode`
    """
    # Dispatch table instead of an if/elif chain.
    readers = {
        8: self.peek_8,
        16: self.peek_16,
        32: self.peek_32,
        64: self.peek_64,
    }
    if width not in readers:
        raise ValueError('%s is not a valid size. Valid values are 8, 16, 32 or 64' % width)
    return readers[width](session, address)
def _auth(url, user, passwd, realm):
    """Return an opener handling both basic and digest HTTP authentication."""
    handlers = []
    for handler_cls in (_HTTPBasicAuthHandler, _HTTPDigestAuthHandler):
        handler = handler_cls()
        handler.add_password(realm=realm, uri=url, user=user, passwd=passwd)
        handlers.append(handler)
    return _build_opener(*handlers)
def process_alt(header, ref, alt_str):
    """Process alternative value using Header in ``header``.

    Dispatches on ALT string syntax: breakends (``[``/``]``), single
    breakends (leading/trailing ``.``), symbolic alleles (``<...>``) and
    plain sequence substitutions.
    """
    if "]" in alt_str or "[" in alt_str:
        return record.BreakEnd(*parse_breakend(alt_str))
    # (The length checks below are redundant after indexing - kept for
    # parity with the original behaviour.)
    if alt_str[0] == "." and len(alt_str) > 0:
        return record.SingleBreakEnd(record.FORWARD, alt_str[1:])
    if alt_str[-1] == "." and len(alt_str) > 0:
        return record.SingleBreakEnd(record.REVERSE, alt_str[:-1])
    if alt_str[0] == "<" and alt_str[-1] == ">":
        symbol = alt_str[1:-1]
        return record.SymbolicAllele(symbol)
    return process_sub(ref, alt_str)
def isscalar(cls, dataset, dim):
    """Tests if dimension is scalar in each subpath."""
    if not dataset.data:
        return True
    template = cls._inner_dataset_template(dataset)
    flags = []
    # Re-use one template dataset, swapping each subpath's data in.
    for subpath_data in dataset.data:
        template.data = subpath_data
        flags.append(template.interface.isscalar(template, dim))
    return all(flags)
def get_requires(self, profile=None):
    """Get filtered list of Require objects in this Feature.

    :param str profile: A Require with no profile always passes the
        filter; a Require with a profile passes only when it equals this
        value.
    :return: list of Require objects
    """
    return [req for req in self.requires
            if not req.profile or req.profile == profile]
def view_dupl_sources_time(token, dstore):
    """Display the time spent computing duplicated sources."""
    info = dstore['source_info']
    grouped = sorted(group_array(info.value, 'source_id').items())
    rows = []
    dupl_time = 0
    for source_id, records in grouped:
        # A source_id with more than one record is duplicated.
        if len(records) > 1:
            calc_time = records['calc_time'].sum()
            dupl_time += calc_time + records['split_time'].sum()
            rows.append((source_id, calc_time, len(records)))
    if not (rows and info.attrs.get('has_dupl_sources')):
        return 'There are no duplicated sources'
    total = info['calc_time'].sum() + info['split_time'].sum()
    percent = dupl_time / total * 100
    msg = '\nTotal time in duplicated sources: %d/%d (%d%%)' % (
        dupl_time, total, percent)
    return rst_table(rows, ['source_id', 'calc_time', 'num_dupl']) + msg
def add_children(d, key, **kwarg):
    """Add a child node under ``key`` with the given attributes.

    If the child already EXISTS it is OVERWRITTEN.  Attributes, when
    given, are stored under the ``_meta`` key of the new child dict::

        >>> DT.add_children(d, "VA", name="virginia", population=100*1000)
    """
    child = {}
    if kwarg:
        child[_meta] = kwarg
    d[key] = child
def _responsify(api_spec, error, status):
    """Build a Flask error response.

    Serializes a bravado-core error model to JSON and wraps it in a
    Flask response carrying the given HTTP status code.
    """
    response = jsonify(api_spec.model_to_json(error))
    response.status_code = status
    return response
def dbfpack(fn, usecols=None):
    """Return a ChannelPack instance loaded with dbf data file fn.

    This is a lazy function to get a loaded instance, using the pulldbf
    module.

    :param fn: path of the dbf file to load
    :param usecols: optional column subset passed through to pulldbf
    """
    pack = ChannelPack(pulldbf.dbf_asdict)
    pack.load(fn, usecols)
    pack.set_channel_names(pulldbf.channel_names(fn, usecols))
    return pack
def call(self, additional_fields, restriction, shape, depth, max_items, offset):
    """Find subfolders of a folder.

    :param additional_fields: the extra fields that should be returned with
        the folder, as FieldPath objects
    :param restriction: restriction to apply to the search
    :param shape: The set of attributes to return
    :param depth: How deep in the folder structure to search for folders
    :param max_items: The maximum number of items to return
    :param offset: the offset relative to the first item in the item
        collection. Usually 0.
    :return: generator of Folder instances built from the matching XML
        elements; Exception instances from the paged call are yielded
        through to the caller rather than raised
    """
    from .folders import Folder
    # All folders in one request must share a single root hierarchy.
    roots = {f.root for f in self.folders}
    if len(roots) != 1:
        raise ValueError('FindFolder must be called with folders in the same root hierarchy (%r)' % roots)
    root = roots.pop()
    for elem in self._paged_call(payload_func=self.get_payload, max_items=max_items, **dict(
            additional_fields=additional_fields,
            restriction=restriction,
            shape=shape,
            depth=depth,
            page_size=self.chunk_size,
            offset=offset,
    )):
        # Per-page errors are yielded, not raised, so the caller decides
        # how to handle partial failures.
        if isinstance(elem, Exception):
            yield elem
            continue
        yield Folder.from_xml(elem=elem, root=root)
def edit_block(object):
    """Handles edit blocks undo states.

    :param object: Object to decorate.
    :type object: object
    :return: Object.
    :rtype: object
    """
    @functools.wraps(object)
    def edit_block_wrapper(*args, **kwargs):
        cursor = None
        if args:
            # First positional argument is expected to expose textCursor().
            cursor = foundations.common.get_first_item(args).textCursor()
            cursor.beginEditBlock()
        value = None
        try:
            value = object(*args, **kwargs)
        finally:
            # Always close the edit block, even when the call raises.
            if cursor is not None:
                cursor.endEditBlock()
        return value
    return edit_block_wrapper
def downsample_completeness_table(comp_table, sample_width=0.1, mmax=None):
    """Re-sample the completeness table to a specified sample_width.

    :param comp_table: 2-column array of (year, completeness magnitude) rows
    :param float sample_width: magnitude spacing of the resampled table
    :param float mmax: optional maximum magnitude to extend the last row to
    :return: resampled completeness table as a numpy array
    """
    new_comp_table = []
    for i in range(comp_table.shape[0] - 1):
        # Fill magnitudes between consecutive completeness rows.
        # Bug fix: the original referenced an undefined name `d_m`
        # (NameError) and never used the `sample_width` parameter.
        mvals = np.arange(comp_table[i, 1], comp_table[i + 1, 1],
                          sample_width)
        new_comp_table.extend([[comp_table[i, 0], mval] for mval in mvals])
    if mmax and (mmax > comp_table[-1, 1]):
        # Extend the final completeness year up to (and including) mmax.
        new_comp_table.extend(
            [[comp_table[-1, 0], mval]
             for mval in np.arange(comp_table[-1, 1], mmax + sample_width,
                                   sample_width)])
    return np.array(new_comp_table)
def find_cuda():
    """Locate the CUDA environment on the system.

    Searches $PATH for the nvcc compiler and derives the CUDA root and
    its lib64 directory from it.

    :return: (cuda_path, lcuda_path) tuple, or None when nvcc is absent
    """
    # Bug fix: the original used Python-2 print statements (a syntax
    # error on Python 3); messages are kept but emitted via print().
    cuda_path = None
    for folder in os.environ.get('PATH', '').split(os.pathsep):
        nvcc = os.path.join(folder, 'nvcc')
        if os.path.exists(nvcc):
            # nvcc lives in <cuda_root>/bin, so strip two components.
            cuda_path = os.path.dirname(os.path.dirname(nvcc))
            break
    if cuda_path is None:
        print('w> nvcc compiler could not be found from the PATH!')
        return None
    lcuda_path = os.path.join(cuda_path, 'lib64')
    if 'LD_LIBRARY_PATH' in os.environ:
        if lcuda_path in os.environ['LD_LIBRARY_PATH'].split(os.pathsep):
            print('i> found CUDA lib64 in LD_LIBRARY_PATH: %s' % lcuda_path)
        elif os.path.isdir(lcuda_path):
            print('i> found CUDA lib64 in : %s' % lcuda_path)
        else:
            print('w> folder for CUDA library (64-bit) could not be found!')
    return cuda_path, lcuda_path
def _fail_early(message, **kwds): import json output = dict(kwds) output.update({ 'msg': message, 'failed': True, }) print(json.dumps(output)) sys.exit(1)
The module arguments are dynamically generated based on the Opsview version. This means that fail_json isn't available until after the module has been properly initialized and the schemas have been loaded.
def _search(self, limit, format):
    """Run one Bing video search request and package the results.

    Returns a list of VideoResult objects and advances
    ``self.current_offset`` for the next page.

    :param limit: maximum number of results to request (capped at 50)
    :param format: response format passed into the query URL
    """
    url = self.QUERY_URL.format(requests.utils.quote("'{}'".format(self.query)), min(50, limit), self.current_offset, format)
    r = requests.get(url, auth=("", self.api_key))
    try:
        json_results = r.json()
    except ValueError as vE:
        if not self.safe:
            raise PyBingVideoException("Request returned with code %s, error msg: %s" % (r.status_code, r.text))
        else:
            # NOTE(review): in this "safe" path json_results is never
            # assigned, so the list comprehension below raises NameError
            # after the sleep; presumably a retry was intended - confirm.
            print ("[ERROR] Request returned with code %s, error msg: %s. \nContinuing in 5 seconds." % (r.status_code, r.text))
            time.sleep(5)
    packaged_results = [VideoResult(single_result_json) for single_result_json in json_results['d']['results']]
    # Advance the paging offset by however many results we actually got.
    self.current_offset += min(50, limit, len(packaged_results))
    return packaged_results
def presets_dir():
    """Return presets directory, creating it when missing.

    The location can be overridden through the environment variable named
    by ``BE_PRESETSDIR``; otherwise ``~/.be/presets`` is used.
    """
    fallback = os.path.join(os.path.expanduser("~"), ".be", "presets")
    path = os.environ.get(BE_PRESETSDIR) or fallback
    if not os.path.exists(path):
        os.makedirs(path)
    return path
def build_reduce(function: Callable[[Any, Any], Any] = None, *, init: Any = NONE):
    """Decorator to wrap a function to return a Reduce operator.

    :param function: function to be wrapped
    :param init: optional initialization for state
    """
    default_init = init

    def _build_reduce(function: Callable[[Any, Any], Any]):
        @wraps(function)
        def _wrapper(init=NONE) -> Reduce:
            # Fall back to the decoration-time default when no explicit
            # init is supplied at call time.
            chosen = default_init if init is NONE else init
            if chosen is NONE:
                raise TypeError('init argument has to be defined')
            return Reduce(function, init=chosen)
        return _wrapper

    # Support both bare @build_reduce and @build_reduce(init=...) usage.
    if function is not None:
        return _build_reduce(function)
    return _build_reduce
def get_servo_angle(self):
    """Gets the current angle of the servo in degrees.

    Args:
        none
    Returns:
        int : the current servo angle
    """
    raw_position = self.get_servo_position()
    # Model-specific mapping from raw position counts to degrees.
    if self.servomodel in (0x06, 0x04):
        return scale(raw_position, 10627, 22129, -159.9, 159.6)
    return scale(raw_position, 21, 1002, -150, 150)