def shiftImage(u, v, t, img, interpolation=cv2.INTER_LANCZOS4):
    ny, nx = u.shape
    sy, sx = np.mgrid[:float(ny):1, :float(nx):1]
    sx += u * t
    sy += v * t
    return cv2.remap(img.astype(np.float32),
                     sx.astype(np.float32), sy.astype(np.float32),
                     interpolation)
Remap an image using a velocity field.
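A minimal usage sketch for the pair above, assuming numpy and OpenCV are installed and shiftImage is in scope; the image and the constant velocity field are invented for illustration:

import cv2
import numpy as np

img = np.random.rand(64, 64)
u = np.full((64, 64), 2.0)   # shift 2 px right per unit time
v = np.zeros((64, 64))       # no vertical motion
shifted = shiftImage(u, v, t=1.0, img=img)
print(shifted.shape)         # (64, 64)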
def plot(self, entity):
    df = self._binary_df[[entity]]
    resampled = df.resample("s").ffill()
    resampled.columns = ["value"]
    fig, ax = plt.subplots(1, 1, figsize=(16, 2))
    ax.fill_between(resampled.index, y1=0, y2=1, facecolor="royalblue", label="off")
    ax.fill_between(
        resampled.index,
        y1=0,
        y2=1,
        where=(resampled["value"] > 0),
        facecolor="red",
        label="on",
    )
    ax.set_title(entity)
    ax.set_xlabel("Date")
    ax.set_frame_on(False)
    ax.set_yticks([])
    plt.legend(loc=(1.01, 0.7))
    plt.show()
    return
Basic plot of a single binary sensor's data.

Parameters
----------
entity : string
    The entity to plot
def absolute_url(self):
    if self.is_root():
        return utils.concat_urls(self.url)
    return utils.concat_urls(self.parent.absolute_url, self.url)
Get the absolute url of ``self``.

Returns:
    str: the absolute url.
def set_hash_key(self, file):
    filehasher = hashlib.md5()
    while True:
        data = file.read(8192)
        if not data:
            break
        filehasher.update(data)
    file.seek(0)
    self.hash_key = filehasher.hexdigest()
Calculate and store hash key for file.
def getParent(self, returned_properties=None):
    parent_tag = ("rtc_cm:com.ibm.team.workitem.linktype."
                  "parentworkitem.parent")
    rp = returned_properties
    parent = (self.rtc_obj
              ._get_paged_resources("Parent",
                                    workitem_id=self.identifier,
                                    customized_attr=parent_tag,
                                    page_size="5",
                                    returned_properties=rp))
    if parent:
        return parent[0]
    return None
Get the parent workitem of this workitem.

If no parent, None will be returned.

:param returned_properties: the returned properties that you want.
    Refer to :class:`rtcclient.client.RTCClient` for more explanations
:return: a :class:`rtcclient.workitem.Workitem` object
:rtype: rtcclient.workitem.Workitem
def setval(self, varname, value):
    if varname in self:
        self[varname]['value'] = value
    else:
        self[varname] = Variable(self.default_type, value=value)
Set the value of the variable with the given name.
def generate(data, format="auto"):
    try:
        # Treat `data` as a file path first
        with open(data) as in_file:
            if format == 'auto':
                format = data.split('.')[-1]
            data = in_file.read()
    except Exception:
        # Fall back to treating `data` as a raw string
        if format == 'auto':
            format = 'smi'
    return format_converter.convert(data, format, 'json')
Converts input chemical formats to json and optimizes structure.

Args:
    data: A string or file representing a chemical
    format: The format of the `data` variable (default is 'auto')

The `format` can be any value specified by Open Babel
(http://openbabel.org/docs/2.3.1/FileFormats/Overview.html). The 'auto'
option uses the extension for files (i.e. my_file.mol -> mol) and
defaults to SMILES (smi) for strings.
def print_result_for_plain_cgi_script(contenttype: str,
                                      headers: TYPE_WSGI_RESPONSE_HEADERS,
                                      content: bytes,
                                      status: str = '200 OK') -> None:
    headers = [
        ("Status", status),
        ("Content-Type", contenttype),
        ("Content-Length", str(len(content))),
    ] + headers
    sys.stdout.write("\n".join([h[0] + ": " + h[1] for h in headers]) +
                     "\n\n")
    sys.stdout.flush()
    # content is bytes, so it must go to the underlying binary buffer
    sys.stdout.buffer.write(content)
Writes HTTP request result to stdout.
def get_cached_source_variable(self, source_id, variable, default=None):
    source_id = int(source_id)
    try:
        return self._retrieve_cached_source_variable(
            source_id, variable)
    except UncachedVariable:
        return default
Get the cached value of a source variable.

If the variable is not cached, return the default value.
def CurrentNode(self):
    ret = libxml2mod.xmlTextReaderCurrentNode(self._o)
    if ret is None:
        raise treeError('xmlTextReaderCurrentNode() failed')
    __tmp = xmlNode(_obj=ret)
    return __tmp
Hacking interface allowing to get the xmlNodePtr corresponding to the
current node being accessed by the xmlTextReader. This is dangerous
because the underlying node may be destroyed on the next Read.
def to_hostnames_list(ref, tab):
    res = []
    for host in tab:
        if hasattr(host, 'host_name'):
            res.append(host.host_name)
    return res
Convert Host list into a list of host_name

:param ref: Not used
:type ref:
:param tab: Host list
:type tab: list[alignak.objects.host.Host]
:return: host_name list
:rtype: list
def list_to_tree(cls, files):
    def attach(branch, trunk):
        parts = branch.split('/', 1)
        if len(parts) == 1:
            trunk[FILE_MARKER].append(parts[0])
        else:
            node, others = parts
            if node not in trunk:
                trunk[node] = defaultdict(dict, ((FILE_MARKER, []), ))
            attach(others, trunk[node])

    tree = defaultdict(dict, ((FILE_MARKER, []), ))
    for line in files:
        attach(line, tree)
    return tree
Converts a list of filenames into a directory tree structure.
def base64_user_pass(self):
    if self._username is None or self._password is None:
        return None
    hash_ = base64.urlsafe_b64encode(bytes_("{username}:{password}".format(
        username=self._username,
        password=self._password
    )))
    return "Basic {0}".format(unicode_(hash_))
Composes a basic http auth string, suitable for use with the
_replicator database, and other places that need it.

:returns: Basic http authentication string
def json_obj(self, method, params=None, auth=True):
    if params is None:
        params = {}
    obj = {
        'jsonrpc': '2.0',
        'method': method,
        'params': params,
        'auth': self.__auth if auth else None,
        'id': self.id,
    }
    return json.dumps(obj)
Return JSON object expected by the Zabbix API
def get_annotation(self, key, result_format='list'):
    value = self.get('_annotations_by_key', {}).get(key)
    if not value:
        return value
    if result_format == 'one':
        return value[0]
    return value
Convenience method for accessing annotations on models that have them.
def preprocess_async(train_dataset, output_dir, eval_dataset=None, checkpoint=None, cloud=None):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        if cloud is None:
            return _local.Local.preprocess(train_dataset, output_dir, eval_dataset, checkpoint)

        if not isinstance(cloud, dict):
            cloud = {}
        return _cloud.Cloud.preprocess(train_dataset, output_dir, eval_dataset, checkpoint, cloud)
Preprocess data. Produce output that can be used by training efficiently.

Args:
    train_dataset: training data source to preprocess. Can be CsvDataset or
        BigQueryDataSet. If eval_dataset is None, the pipeline will randomly
        split train_dataset into train/eval set with 7:3 ratio.
    output_dir: The output directory to use. Preprocessing will create a sub
        directory under it for each run, and also update "latest" file which
        points to the latest preprocessed directory. Users are responsible for
        cleanup. Can be local or GCS path.
    eval_dataset: evaluation data source to preprocess. Can be CsvDataset or
        BigQueryDataSet. If specified, it will be used for evaluation during
        training, and train_dataset will be completely used for training.
    checkpoint: the Inception checkpoint to use. If None, a default checkpoint
        is used.
    cloud: a DataFlow pipeline option dictionary such as {'num_workers': 3}.
        If not None, the pipeline runs in the cloud; otherwise, it runs locally.

Returns:
    A google.datalab.utils.Job object that can be used to query state from or wait.
def compare(sig1, sig2):
    if isinstance(sig1, six.text_type):
        sig1 = sig1.encode("ascii")
    if isinstance(sig2, six.text_type):
        sig2 = sig2.encode("ascii")

    if not isinstance(sig1, six.binary_type):
        raise TypeError(
            "First argument must be of string, unicode or bytes type not "
            "'%s'" % type(sig1)
        )
    if not isinstance(sig2, six.binary_type):
        raise TypeError(
            "Second argument must be of string, unicode or bytes type not "
            "'%r'" % type(sig2)
        )

    res = binding.lib.fuzzy_compare(sig1, sig2)
    if res < 0:
        raise InternalError("Function returned an unexpected error code")

    return res
Computes the match score between two fuzzy hash signatures.

Returns a value from zero to 100 indicating the match score of the two
signatures. A match score of zero indicates the signatures did not match.

:param Bytes|String sig1: First fuzzy hash signature
:param Bytes|String sig2: Second fuzzy hash signature
:return: Match score (0-100)
:rtype: Integer
:raises InternalError: If lib returns an internal error
:raises TypeError: If sig is not String, Unicode or Bytes
def _assert_keys_match(keys1, keys2):
    if set(keys1) != set(keys2):
        raise ValueError('{} {}'.format(list(keys1), list(keys2)))
Ensure the two lists of keys match.
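A quick usage sketch, assuming the helper above is in scope; comparison is set-based, so ordering is irrelevant, and the second call raises because the key sets differ:

_assert_keys_match({'a': 1, 'b': 2}.keys(), ['b', 'a'])  # OK: same set

try:
    _assert_keys_match(['a'], ['a', 'b'])
except ValueError as e:
    print(e)  # ['a'] ['a', 'b']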
def _init(self):
    self._entry_points = {}
    for entry_point in self.raw_entry_points:
        if entry_point.dist.project_name != self.reserved.get(
                entry_point.name, entry_point.dist.project_name):
            logger.error(
                "registry '%s' for '%s' is reserved for package '%s'",
                entry_point.name, self.registry_name,
                self.reserved[entry_point.name],
            )
            continue

        if self.get_record(entry_point.name):
            logger.warning(
                "registry '%s' for '%s' is already registered.",
                entry_point.name, self.registry_name,
            )
            existing = self._entry_points[entry_point.name]
            logger.debug(
                "registered '%s' from '%s'", existing, existing.dist)
            logger.debug(
                "discarded '%s' from '%s'", entry_point, entry_point.dist)
            continue

        logger.debug(
            "recording '%s' from '%s'", entry_point, entry_point.dist)
        self._entry_points[entry_point.name] = entry_point
Turn the records into actual usable keys.
def __build_markable_token_mapper(self, coreference_layer=None,
                                  markable_layer=None):
    tok2markables = defaultdict(set)
    markable2toks = defaultdict(list)
    markable2chains = defaultdict(list)

    coreference_chains = get_pointing_chains(self.docgraph,
                                             layer=coreference_layer)
    for chain_id, chain in enumerate(coreference_chains):
        for markable_node_id in chain:
            markable2chains[markable_node_id].append(chain_id)

    singleton_id = len(coreference_chains)
    for markable_node_id in select_nodes_by_layer(self.docgraph,
                                                  markable_layer):
        span = get_span(self.docgraph, markable_node_id)
        markable2toks[markable_node_id] = span
        for token_node_id in span:
            tok2markables[token_node_id].add(markable_node_id)
        if markable_node_id not in markable2chains:
            markable2chains[markable_node_id] = [singleton_id]
            singleton_id += 1

    return tok2markables, markable2toks, markable2chains
Creates mappings from tokens to the markable spans they belong to and
the coreference chains these markables are part of.

Returns
-------
tok2markables : dict (str -> set of str)
    Maps from a token (node ID) to all the markables (node IDs)
    it is part of.
markable2toks : dict (str -> list of str)
    Maps from a markable (node ID) to all the tokens (node IDs)
    that belong to it.
markable2chains : dict (str -> list of int)
    Maps from a markable (node ID) to all the chains (chain ID)
    it belongs to.
def read(self, domain, type_name, search_command, body=None):
    return self._request(domain, type_name, search_command, 'GET', body)
Read entry in ThreatConnect Data Store

Args:
    domain (string): One of 'local', 'organization', or 'system'.
    type_name (string): This is a free form index type name. The
        ThreatConnect API will use this resource verbatim.
    search_command (string): Search command to pass to ES.
    body (str): JSON body
def get_node_host(name, region=None, key=None, keyid=None, profile=None):
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    if not conn:
        return None
    try:
        cc = conn.describe_cache_clusters(name, show_cache_node_info=True)
    except boto.exception.BotoServerError as e:
        msg = 'Failed to get config for cache cluster {0}.'.format(name)
        log.error(msg)
        log.debug(e)
        return {}
    cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult']
    host = cc['CacheClusters'][0]['CacheNodes'][0]['Endpoint']['Address']
    return host
Get hostname from cache node

CLI example::

    salt myminion boto_elasticache.get_node_host myelasticache
def check_that_operator_can_be_applied_to_produces_items(op, g1, g2):
    g1_tmp_copy = g1.spawn()
    g2_tmp_copy = g2.spawn()
    sample_item_1 = next(g1_tmp_copy)
    sample_item_2 = next(g2_tmp_copy)
    try:
        op(sample_item_1, sample_item_2)
    except TypeError:
        raise TypeError(f"Operator '{op.__name__}' cannot be applied to items produced by {g1} and {g2} "
                        f"(which have type {type(sample_item_1)} and {type(sample_item_2)}, respectively)")
Helper function to check that the operator `op` can be applied to items produced by g1 and g2.
async def list(self) -> List[str]:
    LOGGER.debug('NodePoolManager.list >>>')

    rv = [p['pool'] for p in await pool.list_pools()]

    LOGGER.debug('NodePoolManager.list <<< %s', rv)
    return rv
Return list of pool names configured, empty list for none.

:return: list of pool names.
def get(context, request, key=None):
    registry_records = api.get_registry_records_by_keyword(key)

    # `req` is assumed to be a module-level request helper (imported as
    # `req`), distinct from the `request` argument
    size = req.get_batch_size()
    start = req.get_batch_start()
    batch = api.make_batch(registry_records, size, start)

    return {
        "pagesize": batch.get_pagesize(),
        "next": batch.make_next_url(),
        "previous": batch.make_prev_url(),
        "page": batch.get_pagenumber(),
        "pages": batch.get_numpages(),
        "count": batch.get_sequence_length(),
        "items": [registry_records],
        "url": api.url_for("senaite.jsonapi.v1.registry", key=key),
    }
Return all registry items if key is None, otherwise try to fetch the
registry key
def get_biome_color_based_on_elevation(world, elev, x, y, rng):
    v = world.biome_at((x, y)).name()
    biome_color = _biome_satellite_colors[v]

    noise = (0, 0, 0)

    if world.is_land((x, y)):
        noise = rng.randint(-NOISE_RANGE, NOISE_RANGE, size=3)

        if elev > HIGH_MOUNTAIN_ELEV:
            noise = add_colors(noise, HIGH_MOUNTAIN_NOISE_MODIFIER)
            biome_color = average_colors(biome_color, MOUNTAIN_COLOR)
        elif elev > MOUNTAIN_ELEV:
            noise = add_colors(noise, MOUNTAIN_NOISE_MODIFIER)
            biome_color = average_colors(biome_color, MOUNTAIN_COLOR)
        elif elev > HIGH_HILL_ELEV:
            noise = add_colors(noise, HIGH_HILL_NOISE_MODIFIER)
        elif elev > HILL_ELEV:
            noise = add_colors(noise, HILL_NOISE_MODIFIER)

    modification_amount = int(elev / BASE_ELEVATION_INTENSITY_MODIFIER)
    base_elevation_modifier = (modification_amount, modification_amount,
                               modification_amount)

    this_tile_color = add_colors(biome_color, noise, base_elevation_modifier)
    return this_tile_color
This is the "business logic" for determining the base biome color in
satellite view. This includes generating some "noise" at each spot in a
pixel's rgb value, potentially modifying the noise based on elevation, and
finally incorporating this with the base biome color.

The basic rules regarding noise generation are:
- Oceans have no noise added
- land tiles start with noise somewhere inside (-NOISE_RANGE, NOISE_RANGE)
  for each rgb value
- land tiles with high elevations further modify the noise by set amounts
  (to drain some of the color and make the map look more like mountains)

The biome's base color may be interpolated with a predefined mountain brown
color if the elevation is high enough.

Finally, the noise plus the biome color are added and returned.

rng refers to an instance of a random number generator used to draw the
random samples needed by this function.
def use_federated_book_view(self):
    self._book_view = FEDERATED
    for session in self._get_provider_sessions():
        try:
            session.use_federated_book_view()
        except AttributeError:
            pass
Pass through to provider CommentLookupSession.use_federated_book_view
def add(self, private_key):
    if not isinstance(private_key, PaillierPrivateKey):
        raise TypeError("private_key should be of type PaillierPrivateKey, "
                        "not %s" % type(private_key))
    self.__keyring[private_key.public_key] = private_key
Add a key to the keyring.

Args:
    private_key (PaillierPrivateKey): a key to add to this keyring.
def execute_pending_service_agreements(storage_path, account, actor_type, did_resolver_fn):
    keeper = Keeper.get_instance()
    for (agreement_id, did, _, price, files, start_time, _) in get_service_agreements(storage_path):
        ddo = did_resolver_fn(did)
        for service in ddo.services:
            if service.type != 'Access':
                continue

            consumer_provider_tuple = keeper.escrow_access_secretstore_template.get_agreement_data(
                agreement_id)
            if not consumer_provider_tuple:
                continue

            consumer, provider = consumer_provider_tuple
            did = ddo.did
            service_agreement = ServiceAgreement.from_service_dict(service.as_dictionary())
            condition_ids = service_agreement.generate_agreement_condition_ids(
                agreement_id, did, consumer, provider, keeper)

            if actor_type == 'consumer':
                assert account.address == consumer
                process_agreement_events_consumer(
                    provider, agreement_id, did, service_agreement,
                    price, account, condition_ids, None)
            else:
                assert account.address == provider
                process_agreement_events_publisher(
                    account, agreement_id, did, service_agreement,
                    price, consumer, condition_ids)
Iterates over pending service agreements recorded in the local storage,
fetches their service definitions, and subscribes to service agreement
events.

:param storage_path: storage path for the internal db, str
:param account:
:param actor_type:
:param did_resolver_fn:
:return:
def unfollow(self, login):
    resp = False
    if login:
        url = self._build_url('user', 'following', login)
        resp = self._boolean(self._delete(url), 204, 404)
    return resp
Make the authenticated user stop following login

:param str login: (required)
:returns: bool
def _passes_cortex_depth(line, min_depth):
    parts = line.split("\t")
    cov_index = parts[8].split(":").index("COV")
    passes_depth = False
    for gt in parts[9:]:
        cur_cov = gt.split(":")[cov_index]
        cur_depth = sum(int(x) for x in cur_cov.split(","))
        if cur_depth >= min_depth:
            passes_depth = True
    return passes_depth
Do any genotypes in the cortex_var VCF line pass the minimum depth
requirement?
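A small usage sketch with a made-up cortex_var-style VCF line; the FORMAT column (GT:COV) and the per-allele coverage values are invented for illustration:

line = "\t".join([
    "1", "100", ".", "A", "T", ".", "PASS", ".",  # fixed VCF columns
    "GT:COV",                                     # FORMAT declares COV
    "0/1:3,4",                                    # one sample: coverage 3 + 4 = 7
])
print(_passes_cortex_depth(line, 5))   # True  (7 >= 5)
print(_passes_cortex_depth(line, 10))  # False (7 < 10)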
def make_layer_stack(layers=gin.REQUIRED, num_layers=6):
    return LayerStack([cls() for cls in layers] * num_layers)
Configurable layer stack.

Args:
    layers: a list of subclasses of TransformerLayer
    num_layers: an integer

Returns:
    a LayerStack
def convert(cls, value, from_base, to_base):
    return cls.convert_from_int(
        cls.convert_to_int(value, from_base),
        to_base
    )
Convert value from a base to a base.

:param value: the value to convert
:type value: sequence of int
:param int from_base: base of value
:param int to_base: base of result
:returns: the conversion result
:rtype: list of int
:raises ConvertError: if from_base is less than 2
:raises ConvertError: if to_base is less than 2
:raises ConvertError: if elements in value outside bounds

Preconditions:
  * all integers in value must be no less than 0
  * from_base, to_base must be at least 2

Complexity: O(len(value))
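A minimal standalone sketch of the two helpers this method composes, assuming digits are most-significant-first lists of non-negative ints (as the docstring's preconditions suggest); the names mirror the calls above, but the bodies are illustrative, not the library's own:

def convert_to_int(value, from_base):
    # Horner's rule over a most-significant-first digit list
    result = 0
    for digit in value:
        result = result * from_base + digit
    return result

def convert_from_int(number, to_base):
    if number == 0:
        return [0]
    digits = []
    while number > 0:
        number, remainder = divmod(number, to_base)
        digits.append(remainder)
    return digits[::-1]

# [1, 0, 1, 0] (base 2) -> 10 -> [1, 0] (base 10)
print(convert_from_int(convert_to_int([1, 0, 1, 0], 2), 10))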
def to_0d_object_array(value: Any) -> np.ndarray:
    result = np.empty((), dtype=object)
    result[()] = value
    return result
Given a value, wrap it in a 0-D numpy.ndarray with dtype=object.
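A brief usage sketch; wrapping keeps an arbitrary value (even a list) intact inside a scalar object array instead of letting numpy broadcast it:

import numpy as np

wrapped = to_0d_object_array([1, 2, 3])
print(wrapped.shape, wrapped.dtype)  # () object
print(wrapped[()])                   # [1, 2, 3] -- the original list, not an array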
def install(self, binder, module):
    ModuleAdapter(module, self._injector).configure(binder)
Add another module's bindings to a binder.
def crpss(self):
    crps_f = self.crps()
    crps_c = self.crps_climo()
    return 1.0 - float(crps_f) / float(crps_c)
Calculate the continuous ranked probability skill score from existing data.
def console_get_default_background(con: tcod.console.Console) -> Color:
    return Color._new_from_cdata(
        lib.TCOD_console_get_default_background(_console(con))
    )
Return this console's default background color.

.. deprecated:: 8.5
    Use :any:`Console.default_bg` instead.
def AddComment(self, comment):
    if not comment:
        return

    if not self.comment:
        self.comment = comment
    else:
        self.comment = ''.join([self.comment, comment])
Adds a comment to the event tag.

Args:
    comment (str): comment.
def get_app_metadata(template_dict):
    if SERVERLESS_REPO_APPLICATION in template_dict.get(METADATA, {}):
        app_metadata_dict = template_dict.get(METADATA).get(SERVERLESS_REPO_APPLICATION)
        return ApplicationMetadata(app_metadata_dict)

    raise ApplicationMetadataNotFoundError(
        error_message='missing {} section in template Metadata'.format(SERVERLESS_REPO_APPLICATION))
Get the application metadata from a SAM template.

:param template_dict: SAM template as a dictionary
:type template_dict: dict
:return: Application metadata as defined in the template
:rtype: ApplicationMetadata
:raises ApplicationMetadataNotFoundError
def _getJavaStorageLevel(self, storageLevel):
    if not isinstance(storageLevel, StorageLevel):
        raise Exception("storageLevel must be of type pyspark.StorageLevel")

    newStorageLevel = self._jvm.org.apache.spark.storage.StorageLevel
    return newStorageLevel(storageLevel.useDisk,
                           storageLevel.useMemory,
                           storageLevel.useOffHeap,
                           storageLevel.deserialized,
                           storageLevel.replication)
Returns a Java StorageLevel based on a pyspark.StorageLevel.
def condensed_coords(i, j, n):
    if i == j or i >= n or j >= n or i < 0 or j < 0:
        raise ValueError('invalid coordinates: %s, %s' % (i, j))

    i, j = sorted([i, j])
    x = i * ((2 * n) - i - 1) / 2
    ix = x + j - i - 1
    return int(ix)
Transform square distance matrix coordinates to the corresponding index
into a condensed, 1D form of the matrix.

Parameters
----------
i : int
    Row index.
j : int
    Column index.
n : int
    Size of the square matrix (length of first or second dimension).

Returns
-------
ix : int
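A quick usage sketch; for a 4x4 matrix the upper-triangle pairs map to condensed indices (0,1)->0, (0,2)->1, (0,3)->2, (1,2)->3, (1,3)->4, (2,3)->5, which should match the ordering scipy's squareform uses:

print(condensed_coords(1, 2, 4))  # 3
print(condensed_coords(2, 1, 4))  # 3 -- coordinates are sorted, so order is irrelevant
print(condensed_coords(3, 0, 4))  # 2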
def reversal_circuit(qubits: Qubits) -> Circuit:
    N = len(qubits)
    circ = Circuit()
    for n in range(N // 2):
        circ += SWAP(qubits[n], qubits[N - 1 - n])
    return circ
Returns a circuit to reverse qubits
def get_keywords(self, entry):
    keyword_objects = []

    for keyword in entry.iterfind("./keyword"):
        identifier = keyword.get('id')
        name = keyword.text
        keyword_hash = hash(identifier)

        if keyword_hash not in self.keywords:
            self.keywords[keyword_hash] = models.Keyword(**{'identifier': identifier, 'name': name})
        keyword_objects.append(self.keywords[keyword_hash])

    return keyword_objects
get list of models.Keyword objects from XML node entry

:param entry: XML node entry
:return: list of :class:`pyuniprot.manager.models.Keyword` objects
def after_start_check(self):
    try:
        conn = HTTPConnection(self.host, self.port)
        conn.request('HEAD', self.url.path)
        status = str(conn.getresponse().status)

        if status == self.status or self.status_re.match(status):
            conn.close()
            return True
    except (HTTPException, socket.timeout, socket.error):
        return False
Check if defined URL returns expected status to a HEAD request.
def _apply_discrete_colormap(arr, cmap):
    res = np.zeros((arr.shape[1], arr.shape[2], 3), dtype=np.uint8)
    for k, v in cmap.items():
        res[arr[0] == k] = v
    return np.transpose(res, [2, 0, 1])
Apply discrete colormap.

Attributes
----------
arr : numpy.ndarray
    1D image array to convert.
color_map: dict
    Discrete ColorMap dictionary,
    e.g.: { 1: [255, 255, 255], 2: [255, 0, 0] }

Returns
-------
arr: numpy.ndarray
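A small usage sketch, assuming numpy is available and the helper above is in scope; the input is a single-band (1, H, W) array and the output is a band-first (3, H, W) RGB array:

import numpy as np

arr = np.array([[[1, 2],
                 [2, 1]]])                       # shape (1, 2, 2), one band
cmap = {1: [255, 255, 255], 2: [255, 0, 0]}

rgb = _apply_discrete_colormap(arr, cmap)
print(rgb.shape)        # (3, 2, 2)
print(rgb[:, 0, 1])     # [255   0   0] -- pixel with value 2 maps to red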
def interface(enode, portlbl, addr=None, up=None, shell=None):
    assert portlbl
    port = enode.ports[portlbl]

    if addr is not None:
        assert ip_interface(addr)
        cmd = 'ip addr add {addr} dev {port}'.format(addr=addr, port=port)
        response = enode(cmd, shell=shell)
        assert not response

    if up is not None:
        cmd = 'ip link set dev {port} {state}'.format(
            port=port, state='up' if up else 'down'
        )
        response = enode(cmd, shell=shell)
        assert not response
Configure an interface.

All parameters left as ``None`` are ignored and thus no configuration
action is taken for that parameter (left "as-is").

:param enode: Engine node to communicate with.
:type enode: topology.platforms.base.BaseNode
:param str portlbl: Port label to configure. Port label will be mapped to
    real port automatically.
:param str addr: IPv4 or IPv6 address to add to the interface:
    - IPv4 address and netmask to assign to the interface in the form
      ``'192.168.20.20/24'``.
    - IPv6 address and subnets to assign to the interface in the form
      ``'2001::1/120'``.
:param bool up: Bring up or down the interface.
:param str shell: Shell name to execute commands. If ``None``, use the
    Engine Node default shell.
def process_bind_param(self, value, dialect):
    if self.__use_json(dialect) or value is None:
        return value
    return self.__json_codec.dumps(value, ensure_ascii=not self.__enforce_unicode)
Encode data, if required.
def register_connection(self, alias, api_key, base_url, timeout=5):
    if not base_url.endswith('/'):
        base_url += '/'
    self._connections[alias] = Connection(api_key, base_url, timeout)
Create and register a new connection.

:param alias: The alias of the connection. If not changed with
    `switch_connection`, the connection with default 'alias' is used by
    the resources.
:param api_key: The private api key.
:param base_url: The api url including protocol, host, port (optional)
    and location.
:param timeout: The time in seconds to wait for 'connect' and 'read'
    respectively. Use a tuple to set these values separately or None to
    wait forever.
:return:
def makeAla(segID, N, CA, C, O, geo):
    CA_CB_length = geo.CA_CB_length
    C_CA_CB_angle = geo.C_CA_CB_angle
    N_C_CA_CB_diangle = geo.N_C_CA_CB_diangle

    carbon_b = calculateCoordinates(N, C, CA, CA_CB_length, C_CA_CB_angle,
                                    N_C_CA_CB_diangle)
    CB = Atom("CB", carbon_b, 0.0, 1.0, " ", " CB", 0, "C")

    res = Residue((' ', segID, ' '), "ALA", ' ')
    res.add(N)
    res.add(CA)
    res.add(C)
    res.add(O)
    res.add(CB)
    return res
Creates an Alanine residue
def get_status(self):
    yield self._manager.poll_sensor(self._name)
    raise Return(self._reading.status)
Get a fresh sensor status from the KATCP resource

Returns
-------
reply : tornado Future resolving with :class:`KATCPSensorReading` object

Note
----
As a side-effect this will update the reading stored in this object,
and result in registered listeners being called.
def close(self, connection, reason='Closed via management api'):
    close_payload = json.dumps({
        'name': connection,
        'reason': reason
    })
    connection = quote(connection, '')
    return self.http_client.delete(API_CONNECTION % connection,
                                   payload=close_payload,
                                   headers={'X-Reason': reason})
Close Connection.

:param str connection: Connection name
:param str reason: Reason for closing connection.

:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.

:rtype: None
def db_create_table(self, table_name, columns):
    formatted_columns = ''
    for col in set(columns):
        formatted_columns += '"{}" text, '.format(col.strip('"').strip('\''))
    formatted_columns = formatted_columns.strip(', ')

    create_table_sql = 'CREATE TABLE IF NOT EXISTS {} ({});'.format(
        table_name, formatted_columns
    )
    try:
        cr = self.db_conn.cursor()
        cr.execute(create_table_sql)
    except sqlite3.Error as e:
        self.handle_error(e)
Create a temporary DB table.

Arguments:
    table_name (str): The name of the table.
    columns (list): List of columns to add to the DB.
def get_attachment_types(self):
    bika_setup_catalog = api.get_tool("bika_setup_catalog")
    attachment_types = bika_setup_catalog(portal_type='AttachmentType',
                                          is_active=True,
                                          sort_on="sortable_title",
                                          sort_order="ascending")
    return attachment_types
Returns a list of available attachment types
def listen(self, listener):
    for message in listener.listen():
        try:
            data = json.loads(message['data'])
            if data['event'] in ('canceled', 'lock_lost', 'put'):
                self.kill(data['jid'])
        except Exception:
            logger.exception('Pubsub error')
Listen for events that affect our ownership of a job
def get_all(cls, include_disabled=True):
    if cls == BaseAccount:
        raise InquisitorError('get_all on BaseAccount is not supported')

    account_type_id = db.AccountType.find_one(account_type=cls.account_type).account_type_id
    qry = db.Account.order_by(desc(Account.enabled), Account.account_type_id, Account.account_name)

    if not include_disabled:
        qry = qry.filter(Account.enabled == 1)

    accounts = qry.find(Account.account_type_id == account_type_id)
    return {res.account_id: cls(res) for res in accounts}
Returns all accounts of a given type

Args:
    include_disabled (`bool`): Include disabled accounts. Default: `True`

Returns:
    dict of account objects, keyed by account ID
def derivativeZ(self, x, y, z):
    xShift = self.lowerBound(y)
    dfdz_out = self.func.derivativeZ(x - xShift, y, z)
    return dfdz_out
Evaluate the first derivative with respect to z of the function at given
state space points.

Parameters
----------
x : np.array
    First input values.
y : np.array
    Second input values; should be of same shape as x.
z : np.array
    Third input values; should be of same shape as x.

Returns
-------
dfdz_out : np.array
    First derivative of function with respect to the third input,
    evaluated at (x,y,z), of same shape as inputs.
def create_session(self, user_agent, remote_address, client_version):
    self.session_counter += 1
    self.sessions[self.session_counter] = session = self.session_class()
    session.user_agent = user_agent
    session.remote_address = remote_address
    session.client_version = client_version

    invoke_hooks(self.hooks, "session_created", self.session_counter)
    return self.session_counter
Create a new session.

:param str user_agent: Client user agent
:param str remote_address: Remote address of client
:param str client_version: Remote client version
:return: The new session id
:rtype: int
def parse(self, rrstr):
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('PL record already initialized!')

    (su_len, su_entry_version_unused, parent_log_block_num_le,
     parent_log_block_num_be) = struct.unpack_from('=BBLL', rrstr[:12], 2)
    if su_len != RRPLRecord.length():
        raise pycdlibexception.PyCdlibInvalidISO('Invalid length on rock ridge extension')
    if parent_log_block_num_le != utils.swab_32bit(parent_log_block_num_be):
        raise pycdlibexception.PyCdlibInvalidISO('Little endian block num does not equal big endian; corrupt ISO')
    self.parent_log_block_num = parent_log_block_num_le

    self._initialized = True
Parse a Rock Ridge Parent Link record out of a string.

Parameters:
    rrstr - The string to parse the record out of.
Returns:
    Nothing.
def describe_api_stage(restApiId, stageName, region=None, key=None, keyid=None, profile=None):
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        stage = conn.get_stage(restApiId=restApiId, stageName=stageName)
        return {'stage': _convert_datetime_str(stage)}
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
Get API stage for a given apiID and stage name

CLI Example:

.. code-block:: bash

    salt myminion boto_apigateway.describe_api_stage restApiId stageName
def should_trigger(self, dt):
    return self.composer(
        self.first.should_trigger,
        self.second.should_trigger,
        dt
    )
Composes the two rules with a lazy composer.
def get(self, *args, **kwargs):
    self.before_get(args, kwargs)

    qs = QSManager(request.args, self.schema)
    obj = self.get_object(kwargs, qs)

    self.before_marshmallow(args, kwargs)

    schema = compute_schema(self.schema,
                            getattr(self, 'get_schema_kwargs', dict()),
                            qs,
                            qs.include)

    result = schema.dump(obj).data

    final_result = self.after_get(result)
    return final_result
Get object details
def items(self, *keys):
    if keys:
        d = []
        for key in keys:
            try:
                i = self.index(key)
            except KeyError:
                d.append((key, None))
            else:
                d.append((self.__keys[i], self[i]))
        return d
    return list((self.__keys[i], super(Record, self).__getitem__(i))
                for i in range(len(self)))
Return the fields of the record as a list of key and value tuples

:return:
def clip_or_fit_solutions(self, pop, idx):
    for k in idx:
        self.repair_genotype(pop[k])
Make sure that solutions fit the sample distribution; this interface will
probably change. In particular, the frequency of long vectors appearing in
pop[idx] - self.mean is limited.
def get_list(
    self,
    search='',
    start=0,
    limit=0,
    order_by='',
    order_by_dir='ASC',
    published_only=False,
    minimal=False
):
    parameters = {}
    args = ['search', 'start', 'limit', 'minimal']
    for arg in args:
        if arg in locals() and locals()[arg]:
            parameters[arg] = locals()[arg]
    if order_by:
        parameters['orderBy'] = order_by
    if order_by_dir:
        parameters['orderByDir'] = order_by_dir
    if published_only:
        parameters['publishedOnly'] = 'true'

    response = self._client.session.get(
        self.endpoint_url, params=parameters
    )
    return self.process_response(response)
Get a list of items

:param search: str
:param start: int
:param limit: int
:param order_by: str
:param order_by_dir: str
:param published_only: bool
:param minimal: bool
:return: dict|str
def Group(expressions, final_function, inbetweens, name=""):
    lengths = []
    functions = []
    regex = ""
    i = 0
    for expression in expressions:
        regex += inbetweens[i]
        regex += "(?:" + expression.regex + ")"
        lengths.append(sum(expression.group_lengths))
        functions.append(expression.run)
        i += 1
    regex += inbetweens[i]
    return Expression(regex, functions, lengths, final_function, name)
Group expressions together with ``inbetweens`` and with the output of a
``final_function``.
def from_array(array):
    if array is None or not array:
        return None
    assert_type_or_raise(array, dict, parameter_name="array")

    from pytgbot.api_types.receivable.media import Location
    from pytgbot.api_types.receivable.peer import User

    data = {}
    data['id'] = u(array.get('id'))
    data['from_peer'] = User.from_array(array.get('from'))
    data['query'] = u(array.get('query'))
    data['offset'] = u(array.get('offset'))
    data['location'] = Location.from_array(array.get('location')) if array.get('location') is not None else None
    data['_raw'] = array
    return InlineQuery(**data)
Deserialize a new InlineQuery from a given dictionary.

:return: new InlineQuery instance.
:rtype: InlineQuery
def do(stream, action, key, default=None, dump=yaml_dump, loader=ShyamlSafeLoader):
    at_least_one_content = False
    for content in yaml.load_all(stream, Loader=loader):
        at_least_one_content = True
        value = traverse(content, key, default=default)
        yield act(action, value, dump=dump)

    if at_least_one_content is False:
        value = traverse(None, key, default=default)
        yield act(action, value, dump=dump)
Return string representations of target value in stream YAML

The key is used for traversal of the YAML structure to target
the value that will be dumped.

:param stream: file like input yaml content
:param action: string identifying one of the possible supported actions
:param key: string dotted expression to traverse yaml input
:param default: optional default value in case of missing end value
    when traversing input yaml. (default is ``None``)
:param dump: callable that will be given python object to dump in yaml
    (default is ``yaml_dump``)
:param loader: PyYAML's *Loader subclass to parse YAML
    (default is ShyamlSafeLoader)

:return: generator of string representation of target value per
    YAML docs in the given stream.

:raises ActionTypeError: when there's a type mismatch between the
    action selected and the type of the targeted value.
    (ie: action 'key-values' on non-struct)
:raises InvalidAction: when selected action is not a recognised valid
    action identifier.
:raises InvalidPath: upon nonexistent content when traversing YAML
    input following the key specification.
def get_lang_tags(index_page):
    dom = dhtmlparser.parseString(index_page)

    lang_tags = [
        get_html_lang_tags(dom),
        get_dc_lang_tags(dom),
        [detect_language(dom)],
        get_html_tag_lang_params(dom),
    ]

    return list(sorted(set(
        SourceString(normalize(lang), source=lang.source)
        for lang in sum(lang_tags, [])
    )))
Collect information about the language of the page from HTML and Dublin
core tags and langdetect guesses.

Args:
    index_page (str): HTML content of the page you wish to analyze.

Returns:
    list: List of :class:`.SourceString` objects.
def apply_pre_filters(instance, html):
    for post_func in appsettings.PRE_FILTER_FUNCTIONS:
        html = post_func(instance, html)
    return html
Perform optimizations in the HTML source code.

:type instance: fluent_contents.models.ContentItem
:raise ValidationError: when one of the filters detects a problem.
def init_edge_number(self) -> int:
    return len(frozenset(frozenset(edge) for edge in self.initial_edges()))
Return the number of edges present in the non-compressed graph
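A standalone illustration of the counting trick above: turning each edge into a frozenset makes (a, b) and (b, a) compare equal, so the outer frozenset counts each undirected edge once:

edges = [(0, 1), (1, 0), (1, 2)]  # (0, 1) and (1, 0) are the same undirected edge
print(len(frozenset(frozenset(edge) for edge in edges)))  # 2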
def generate_trajectory(group_membership, num_levels=4):
    delta = compute_delta(num_levels)

    num_params = group_membership.shape[0]
    num_groups = group_membership.shape[1]

    B = np.tril(np.ones([num_groups + 1, num_groups], dtype=int), -1)
    P_star = generate_p_star(num_groups)
    J = np.ones((num_groups + 1, num_params))
    D_star = np.diag([rd.choice([-1, 1]) for _ in range(num_params)])
    x_star = generate_x_star(num_params, num_levels)

    B_star = compute_b_star(J, x_star, delta, B, group_membership,
                            P_star, D_star)
    return B_star
Return a single trajectory

Return a single trajectory of size :math:`(g+1)`-by-:math:`k` where
:math:`g` is the number of groups, and :math:`k` is the number of factors,
both implied by the dimensions of `group_membership`

Arguments
---------
group_membership : np.ndarray
    a k-by-g matrix which notes factor membership of groups
num_levels : int, default=4
    The number of levels in the grid

Returns
-------
np.ndarray
def try_read(self, address, size):
    value = 0x0
    for i in range(0, size):
        addr = address + i
        if addr in self._memory:
            value |= self._read_byte(addr) << (i * 8)
        else:
            return False, None
    return True, value
Try to read memory content at specified address. If any location was not written before, it returns a tuple (False, None). Otherwise, it returns (True, memory content).
def edit_command(self, payload):
    key = payload['key']
    command = payload['command']
    if self.queue[key]:
        if self.queue[key]['status'] in ['queued', 'stashed']:
            self.queue[key]['command'] = command
            answer = {'message': 'Command updated', 'status': 'success'}
        else:
            answer = {'message': "Entry is not 'queued' or 'stashed'",
                      'status': 'error'}
    else:
        answer = {'message': 'No entry with this key', 'status': 'error'}
    return answer
Edit the command of a specific entry.
def _cast_page(val):
    try:
        val = int(val)
        if val < 0:
            raise ValueError
        return val
    except (TypeError, ValueError):
        raise ValueError
Convert the page limit & offset into ints & type-check.
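A quick usage sketch; negative, non-numeric, and None values are all rejected with ValueError:

print(_cast_page("3"))  # 3

for bad in (-1, "abc", None):
    try:
        _cast_page(bad)
    except ValueError:
        print("rejected:", bad)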
def get_agent(self):
    agent_id = self.get_agent_id()
    return Agent(identifier=agent_id.identifier,
                 namespace=agent_id.namespace,
                 authority=agent_id.authority)
Gets the ``Agent`` identified in this authentication credential.

:return: the ``Agent``
:rtype: ``osid.authentication.Agent``
:raise: ``OperationFailed`` -- unable to complete request

*compliance: mandatory -- This method must be implemented.*
def analyze(self,
            features,
            text=None,
            html=None,
            url=None,
            clean=None,
            xpath=None,
            fallback_to_raw=None,
            return_analyzed_text=None,
            language=None,
            limit_text_characters=None,
            **kwargs):
    if features is None:
        raise ValueError('features must be provided')
    features = self._convert_model(features, Features)

    headers = {}
    if 'headers' in kwargs:
        headers.update(kwargs.get('headers'))
    sdk_headers = get_sdk_headers('natural-language-understanding', 'V1',
                                  'analyze')
    headers.update(sdk_headers)

    params = {'version': self.version}

    data = {
        'features': features,
        'text': text,
        'html': html,
        'url': url,
        'clean': clean,
        'xpath': xpath,
        'fallback_to_raw': fallback_to_raw,
        'return_analyzed_text': return_analyzed_text,
        'language': language,
        'limit_text_characters': limit_text_characters
    }

    url = '/v1/analyze'
    response = self.request(
        method='POST',
        url=url,
        headers=headers,
        params=params,
        json=data,
        accept_json=True)
    return response
Analyze text.

Analyzes text, HTML, or a public webpage for the following features:
- Categories
- Concepts
- Emotion
- Entities
- Keywords
- Metadata
- Relations
- Semantic roles
- Sentiment
- Syntax (Experimental).

:param Features features: Specific features to analyze the document for.
:param str text: The plain text to analyze. One of the `text`, `html`, or
    `url` parameters is required.
:param str html: The HTML file to analyze. One of the `text`, `html`, or
    `url` parameters is required.
:param str url: The webpage to analyze. One of the `text`, `html`, or
    `url` parameters is required.
:param bool clean: Set this to `false` to disable webpage cleaning. To
    learn more about webpage cleaning, see the [Analyzing
    webpages](https://cloud.ibm.com/docs/services/natural-language-understanding/analyzing-webpages.html)
    documentation.
:param str xpath: An [XPath
    query](https://cloud.ibm.com/docs/services/natural-language-understanding/analyzing-webpages.html#xpath)
    to perform on `html` or `url` input. Results of the query will be
    appended to the cleaned webpage text before it is analyzed. To analyze
    only the results of the XPath query, set the `clean` parameter to
    `false`.
:param bool fallback_to_raw: Whether to use raw HTML content if text
    cleaning fails.
:param bool return_analyzed_text: Whether or not to return the analyzed
    text.
:param str language: ISO 639-1 code that specifies the language of your
    text. This overrides automatic language detection. Language support
    differs depending on the features you include in your analysis. See
    [Language
    support](https://www.bluemix.net/docs/services/natural-language-understanding/language-support.html)
    for more information.
:param int limit_text_characters: Sets the maximum number of characters
    that are processed by the service.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP
    status code.
:rtype: DetailedResponse
def is_valid_query(self, query):
    if not query:
        return False
    if len(query) < self.get_query_size_min():
        return False
    return True
Return True if the search query is valid.

e.g.:
* not empty,
* not too short,
def nested_insert(self, item_list):
    if len(item_list) == 1:
        self[item_list[0]] = LIVVDict()
    elif len(item_list) > 1:
        if item_list[0] not in self:
            self[item_list[0]] = LIVVDict()
        self[item_list[0]].nested_insert(item_list[1:])
Create a series of nested LIVVDicts given a list
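A short usage sketch, assuming LIVVDict is a dict subclass as the recursion implies; each list element becomes one nesting level:

d = LIVVDict()
d.nested_insert(["model", "run1", "metrics"])
print(d)  # -> {'model': {'run1': {'metrics': {}}}} (repr may vary)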
def _update(self, datapoints):
    if len(datapoints) == 1:
        timestamp, value = datapoints[0]
        whisper.update(self.path, value, timestamp)
    else:
        whisper.update_many(self.path, datapoints)
This method stores the datapoints in the current database.

:datapoints: is a list of tuples with the epoch timestamp and value
    [(1368977629, 10)]
def get_found_includes(self, env, scanner, path):
    memo_key = (id(env), id(scanner), path)
    try:
        memo_dict = self._memo['get_found_includes']
    except KeyError:
        memo_dict = {}
        self._memo['get_found_includes'] = memo_dict
    else:
        try:
            return memo_dict[memo_key]
        except KeyError:
            pass

    if scanner:
        result = [n.disambiguate() for n in scanner(self, env, path)]
    else:
        result = []

    memo_dict[memo_key] = result
    return result
Return the included implicit dependencies in this file. Cache results so we only scan the file once per path regardless of how many times this information is requested.
def gzip_file(self, target_path, html):
    logger.debug("Gzipping to {}{}".format(self.fs_name, target_path))

    data_buffer = six.BytesIO()
    kwargs = dict(
        filename=path.basename(target_path),
        mode='wb',
        fileobj=data_buffer
    )
    if float(sys.version[:3]) >= 2.7:
        kwargs['mtime'] = 0
    with gzip.GzipFile(**kwargs) as f:
        f.write(six.binary_type(html))

    with self.fs.open(smart_text(target_path), 'wb') as outfile:
        outfile.write(data_buffer.getvalue())
        outfile.close()
Zips up the provided HTML as a companion for the provided path.

Intended to take advantage of the peculiarities of Amazon S3's GZIP
service. mtime, an option that writes a timestamp to the output file,
is set to 0 to avoid having s3cmd do unnecessary uploads because of
differences in the timestamp.
def start(self):
    self.update_device_info()
    self.get_device_status(0)
    self.hook()
    self.thread = threading.Thread(target=self._run)
    self.thread.start()
    self.running = True
Start running in the background.
def withdict(parser, token):
    bits = token.split_contents()
    if len(bits) != 2:
        raise TemplateSyntaxError("{% withdict %} expects one argument")

    nodelist = parser.parse(('endwithdict',))
    parser.delete_first_token()

    return WithDictNode(
        nodelist=nodelist,
        context_expr=parser.compile_filter(bits[1])
    )
Take a complete context dict as extra layer.
def run_hmmbuild(self):
    for alignment in self.alignment_list:
        print('building HMM for', alignment)
        alignment_full_path = self.alignment_dir + alignment
        query_name = alignment.split("_")[0]
        self.query_names.append(query_name)
        new_hmm = self.hmm_dir + query_name + ".hmm"
        hmmbuild_output = subprocess.call(["hmmbuild", new_hmm,
                                           alignment_full_path])
    print('hmmbuild complete for', self.query_names)
Generate hmm with hmmbuild, output to file. Also stores query names.
def merge(self, resource_type, resource_properties):
    if resource_type not in self.template_globals:
        return resource_properties

    global_props = self.template_globals[resource_type]
    return global_props.merge(resource_properties)
Adds global properties to the resource, if necessary. This method is a
no-op if there are no global properties for this resource type

:param string resource_type: Type of the resource (Ex: AWS::Serverless::Function)
:param dict resource_properties: Properties of the resource that need to be merged
:return dict: Merged properties of the resource
def _import_model(models, crumbs):
    logger_jsons.info("enter import_model: {}".format(crumbs))
    _models = OrderedDict()
    try:
        for _idx, model in enumerate(models):
            if "summaryTable" in model:
                model["summaryTable"] = _idx_table_by_name(
                    model["summaryTable"], "{}{}{}".format(crumbs, _idx, "summary"))
            if "ensembleTable" in model:
                model["ensembleTable"] = _idx_table_by_name(
                    model["ensembleTable"], "{}{}{}".format(crumbs, _idx, "ensemble"))
            if "distributionTable" in model:
                model["distributionTable"] = _idx_table_by_name(
                    model["distributionTable"], "{}{}{}".format(crumbs, _idx, "distribution"))
            _table_name = "{}{}".format(crumbs, _idx)
            _models[_table_name] = model
    except Exception as e:
        logger_jsons.error("import_model: {}".format(e))
        print("Error: import_model: {}".format(e))
    logger_jsons.info("exit import_model: {}".format(crumbs))
    return _models
Change the nested items of the paleoModel data. Overwrite the data in-place.

:param list models: Metadata
:param str crumbs: Crumbs
:return dict _models: Metadata
def get(path):
    file_path = __get_docker_file_path(path)
    if file_path is None:
        return __standardize_result(False,
                                    'Path {} is not present'.format(path),
                                    None, None)
    salt_result = __read_docker_compose_file(file_path)
    if not salt_result['status']:
        return salt_result
    project = __load_project(path)
    if isinstance(project, dict):
        salt_result['return']['valid'] = False
    else:
        salt_result['return']['valid'] = True
    return salt_result
Get the content of the docker-compose file into a directory

path
    Path where the docker-compose file is stored on the server

CLI Example:

.. code-block:: bash

    salt myminion dockercompose.get /path/where/docker-compose/stored
def set_ptr(self, ptr):
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('Directory Record not yet initialized')

    self.ptr = ptr
A method to set the Path Table Record associated with this Directory
Record.

Parameters:
    ptr - The path table record to associate with this Directory Record.
Returns:
    Nothing.
def checkPos(self):
    soup = BeautifulSoup(self.css1(path['movs-table']).html, 'html.parser')
    poss = []
    for label in soup.find_all("tr"):
        pos_id = label['id']
        pos_list = [x for x in self.positions if x.id == pos_id]
        if pos_list:
            pos = pos_list[0]
            pos.update(label)
        else:
            pos = self.new_pos(label)
        pos.get_gain()
        poss.append(pos)
    self.positions.clear()
    self.positions.extend(poss)
    logger.debug("%d positions updated" % len(poss))
    return self.positions
Check all positions.
def add(self, callback_type, callback):
    with self.lock:
        self.callbacks[callback_type].append(callback)
Add a new listener
def addItemTag(self, item, tag):
    if self.inItemTagTransaction:
        if tag not in self.addTagBacklog:
            self.addTagBacklog[tag] = []
        self.addTagBacklog[tag].append({'i': item.id, 's': item.parent.id})
        return "OK"
    else:
        return self._modifyItemTag(item.id, 'a', tag)
Add a tag to an individual item. tag string must be in form
"user/-/label/[tag]"
def authorization_error_class(response):
    message = response.headers.get("www-authenticate")
    if message:
        error = message.replace('"', "").rsplit("=", 1)[1]
    else:
        error = response.status_code
    return _auth_error_mapping[error](response)
Return an exception instance that maps to the OAuth Error.

:param response: The HTTP response containing a www-authenticate error.
def list_user_permissions(name, runas=None):
    if runas is None and not salt.utils.platform.is_windows():
        runas = salt.utils.user.get_user()
    res = __salt__['cmd.run_all'](
        [RABBITMQCTL, 'list_user_permissions', name, '-q'],
        reset_system_locale=False,
        runas=runas,
        python_shell=False)
    return _output_to_dict(res)
List permissions for a user via rabbitmqctl list_user_permissions

CLI Example:

.. code-block:: bash

    salt '*' rabbitmq.list_user_permissions user
def _DoubleDecoder():
    local_unpack = struct.unpack

    def InnerDecode(buffer, pos):
        new_pos = pos + 8
        double_bytes = buffer[pos:new_pos]

        if ((double_bytes[7:8] in b'\x7F\xFF') and
                (double_bytes[6:7] >= b'\xF0') and
                (double_bytes[0:7] != b'\x00\x00\x00\x00\x00\x00\xF0')):
            return (_NAN, new_pos)

        result = local_unpack('<d', double_bytes)[0]
        return (result, new_pos)

    return _SimpleDecoder(wire_format.WIRETYPE_FIXED64, InnerDecode)
Returns a decoder for a double field. This code works around a bug in struct.unpack for not-a-number.
def is_allowed_view(perm):
    for view in ACL_EXCLUDED_VIEWS:
        module, separator, view_name = view.partition('*')
        if view and perm.startswith(module):
            return False
    for view in ACL_ALLOWED_VIEWS:
        module, separator, view_name = view.partition('*')
        if separator and not module and not view_name:
            return True
        elif separator and module and perm.startswith(module):
            return True
        elif separator and view_name and perm.endswith(view_name):
            return True
        elif not separator and view == perm:
            return True
    return False
Check if permission is in acl list.
def state_machine_selection_changed(self, state_machine_m, signal_name, signal_msg):
    if self.CORE_ELEMENT_CLASS in signal_msg.arg.affected_core_element_classes:
        self.update_selection_sm_prior()
Notify tree view about state machine selection
def check_origin(self, origin: str) -> bool:
    parsed_origin = urlparse(origin)
    origin = parsed_origin.netloc
    origin = origin.lower()

    host = self.request.headers.get("Host")

    return origin == host
Override to enable support for allowing alternate origins.

The ``origin`` argument is the value of the ``Origin`` HTTP header, the
url responsible for initiating this request. This method is not called
for clients that do not send this header; such requests are always
allowed (because all browsers that implement WebSockets support this
header, and non-browser clients do not have the same cross-site security
concerns).

Should return ``True`` to accept the request or ``False`` to reject it.
By default, rejects all requests with an origin on a host other than
this one.

This is a security protection against cross site scripting attacks on
browsers, since WebSockets are allowed to bypass the usual same-origin
policies and don't use CORS headers.

.. warning::

    This is an important security measure; don't disable it without
    understanding the security implications. In particular, if your
    authentication is cookie-based, you must either restrict the origins
    allowed by ``check_origin()`` or implement your own XSRF-like
    protection for websocket connections. See `these
    <https://www.christian-schneider.net/CrossSiteWebSocketHijacking.html>`_
    `articles <https://devcenter.heroku.com/articles/websocket-security>`_
    for more.

To accept all cross-origin traffic (which was the default prior to
Tornado 4.0), simply override this method to always return ``True``::

    def check_origin(self, origin):
        return True

To allow connections from any subdomain of your site, you might do
something like::

    def check_origin(self, origin):
        parsed_origin = urllib.parse.urlparse(origin)
        return parsed_origin.netloc.endswith(".mydomain.com")

.. versionadded:: 4.0
def save_signal(self, filename=None):
    if filename is None:
        filename = os.path.join(self.folder, 'trsig.pkl')
    self.trsig.save(filename)
Saves TransitSignal.

Calls :func:`TransitSignal.save`; default filename is ``trsig.pkl``
in ``self.folder``.
def activateAaPdpContextReject(ProtocolConfigurationOptions_presence=0):
    a = TpPd(pd=0x8)
    b = MessageType(mesType=0x52)
    c = SmCause()
    packet = a / b / c
    if ProtocolConfigurationOptions_presence == 1:
        d = ProtocolConfigurationOptions(ieiPCO=0x27)
        packet = packet / d
    return packet
ACTIVATE AA PDP CONTEXT REJECT Section 9.5.12
def get_userinfo(self, access_token, id_token, payload):
    user_response = requests.get(
        self.OIDC_OP_USER_ENDPOINT,
        headers={
            'Authorization': 'Bearer {0}'.format(access_token)
        },
        verify=self.get_settings('OIDC_VERIFY_SSL', True))
    user_response.raise_for_status()
    return user_response.json()
Return user details dictionary.

The id_token and payload are not used in the default implementation, but
may be used when overriding this method