positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def getNode(self, name, **context):
    """Look up and return the tree node with the given name.

    Matches this node itself first; otherwise delegates to the branch
    responsible for *name* and recurses into it.
    """
    if name == self.name:
        return self
    owning_branch = self.getBranch(name, **context)
    return owning_branch.getNode(name, **context)
Return tree node found by name
def check_and_generate_next_task(self): """ check if the previous task is done and proceed to fire another task :return: """ # if self.task_future is None: # # there's no any ongoing task # self._update_status(False) # self.generate_next_task() # return if self.task_future is None or self.task_future.done(): task_success = False task_result = self.get_task_result(self.task_future) self.task_future = None if task_result is not None and task_result.get_exception() is None: task_success = True if isinstance(task_result, InitTaskResult): # maintain check points assert self.consumer_status == ConsumerStatus.INITIALIZING, \ ClientWorkerException("get init task result, but status is: " + str(self.consumer_status)) init_result = task_result self.next_fetch_cursor = init_result.get_cursor() self.fetch_end_cursor = init_result.end_cursor self.checkpoint_tracker.set_memory_check_point(self.next_fetch_cursor) if init_result.is_cursor_persistent(): self.checkpoint_tracker.set_persistent_check_point(self.next_fetch_cursor) elif isinstance(task_result, ProcessTaskResult): # maintain check points process_task_result = task_result roll_back_checkpoint = process_task_result.get_rollback_check_point() if roll_back_checkpoint: self.last_fetch_log_group = None self.logger.info("user defined to roll-back check-point, cancel current fetching task") self.cancel_current_fetch() self.next_fetch_cursor = roll_back_checkpoint # log task status self._sample_log_error(task_result) # update status basing on task results self._update_status(task_success) # self._generate_next_task()
check if the previous task is done and proceed to fire another task :return:
def get_active_topics_count(last_seen_timestamp=None):
    """
    Return the count of topics modified since *last_seen_timestamp*,
    defaulting to one day ago when no timestamp is given.

    {% get_active_topics_count as active_topic_count %}
    """
    since = last_seen_timestamp or yesterday_timestamp()
    return Topic.objects.filter(modified_int__gt=since).count()
Returns count of new topics since last visit, or one day. {% get_active_topics_count as active_topic_count %}
def find_minimum_spanning_tree(graph):
    """Calculate a minimum spanning tree for a graph.

    Returns a list of edges defining the tree; an empty list when the
    graph has no nodes or no edges.  Raises DisconnectedGraphError when
    the graph has more than one connected component.
    """
    # Trivial graphs have an empty spanning tree.
    if graph.num_nodes() == 0 or graph.num_edges() == 0:
        return []
    if len(get_connected_components(graph)) > 1:
        raise DisconnectedGraphError
    return kruskal_mst(graph)
Calculates a minimum spanning tree for a graph. Returns a list of edges that define the tree. Returns an empty list for an empty graph.
def get_anki_phrases(lang='english', limit=None):
    """ Retrieve as many anki paired-statement corpora as you can for the requested language

    If `ankis` (requested languages) is more than one, then get the english texts associated with those languages.

    TODO: improve modularity: def function that takes a single language and call it recursively if necessary

    >>> get_anki_phrases('afr')[:2]
    ["'n Groen piesang is nie ryp genoeg om te eet nie.", "'n Hond het agter die kat aan gehardloop."]
    """
    code = lang.strip().lower()[:3]
    # Fall back to the 2-letter → anki mapping for unknown 3-letter codes.
    if code not in ANKI_LANGUAGES:
        code = LANG2ANKI[code[:2]]
    if code[:2] == 'en':
        return get_anki_phrases_english(limit=limit)
    return sorted(get_data(code).iloc[:, -1].str.strip().values)
Retrieve as many anki paired-statement corpora as you can for the requested language If `ankis` (requested languages) is more than one, then get the english texts associated with those languages. TODO: improve modularity: def function that takes a single language and call it recursively if necessary >>> get_anki_phrases('afr')[:2] ["'n Groen piesang is nie ryp genoeg om te eet nie.", "'n Hond het agter die kat aan gehardloop."]
def print_port(self, stream=sys.stdout):
    """
    Print port this EPC server runs on.

    As Emacs client reads port number from STDOUT, you need to call
    this just before calling :meth:`serve_forever`.

    :type stream: text stream
    :arg  stream: A stream object to write port on.
                  Default is :data:`sys.stdout`.
    """
    port_number = self.server_address[1]
    stream.write(str(port_number))
    stream.write("\n")
    stream.flush()
Print port this EPC server runs on. As Emacs client reads port number from STDOUT, you need to call this just before calling :meth:`serve_forever`. :type stream: text stream :arg stream: A stream object to write port on. Default is :data:`sys.stdout`.
def mechanism(self, mechanism):
    """
    Sets the mechanism of this DeviceData — the ID of the channel used
    to communicate with the device.

    :param mechanism: The mechanism of this DeviceData.
    :type: str
    :raises ValueError: if *mechanism* is not one of the allowed values.
    """
    choices = ["connector", "direct"]
    if mechanism not in choices:
        raise ValueError(
            "Invalid value for `mechanism` ({0}), must be one of {1}"
            .format(mechanism, choices))
    self._mechanism = mechanism
Sets the mechanism of this DeviceData. The ID of the channel used to communicate with the device. :param mechanism: The mechanism of this DeviceData. :type: str
def default_profiler(f, _type, _value):
    '''
    Inspect an input frame and pretty print the following:

        <src-path>:<src-line> -> <function-name>
        <source-code>
        <local-variables>
        ----------------------------------------

    Best-effort: any failure while rendering the frame is swallowed so
    profiling never breaks the profiled program.  `_type` and `_value`
    are accepted for the tracer signature but unused here.
    '''
    try:
        profile_print(
            '\n'.join([
                get_frame_src(f),
                get_locals(f),
                '----------------------------------------'
            ])
        )
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed while keeping the best-effort intent.
        pass
inspects an input frame and pretty prints the following: <src-path>:<src-line> -> <function-name> <source-code> <local-variables> ----------------------------------------
def _has_app(self, app, webpage): """ Determine whether the web page matches the app signature. """ # Search the easiest things first and save the full-text search of the # HTML for last for regex in app['url']: if regex.search(webpage.url): return True for name, regex in app['headers'].items(): if name in webpage.headers: content = webpage.headers[name] if regex.search(content): return True for regex in app['script']: for script in webpage.scripts: if regex.search(script): return True for name, regex in app['meta'].items(): if name in webpage.meta: content = webpage.meta[name] if regex.search(content): return True for regex in app['html']: if regex.search(webpage.html): return True
Determine whether the web page matches the app signature.
def get_cutout(self, clearance=0):
    """Return the cutout solid for the shaft.

    The cutout is a cylinder of the shaft diameter plus *clearance*,
    extruded 10 units from the XY plane at the origin.
    """
    radius = (self.diam / 2) + clearance
    workplane = cq.Workplane('XY', origin=(0, 0, 0))
    return workplane.circle(radius).extrude(10)
get the cutout for the shaft
def _unwrap_func(cls, decorated_func):
    '''
    Return the innermost wrapped function of *decorated_func*.

    Recursively follows ``__wrapped__`` (and, when click is available,
    ``click.Command.callback``, since click.command() does not set
    ``__wrapped__``).  May become unnecessary with Python 3.4's
    inspect.unwrap().
    '''
    if click is not None and isinstance(decorated_func, click.Command):
        # Workaround: unwrap the command's callback instead.
        return cls._unwrap_func(decorated_func.callback)
    if hasattr(decorated_func, '__wrapped__'):
        # Still decorated — keep unwrapping.
        return cls._unwrap_func(decorated_func.__wrapped__)
    # Nothing left to unwrap.
    return decorated_func
This unwraps a decorated func, returning the inner wrapped func. This may become unnecessary with Python 3.4's inspect.unwrap().
def add_user(self, username, password, full_name=None, trusted=False, readonly=False):
    """ Add user to SQLite database.

    * `username` [string]
        Username of new user.
    * `password` [string]
        Password of new user (stored salted and hashed).
    * `full_name` [string]
        Full name of new user.
    * `trusted` [boolean]
        Whether the new user should be trusted or not.
    * `readonly` [boolean]
        Whether the new user can only read or not.

    Raises AuthError on SQL failure (e.g. duplicate username).
    """
    # Generate an 8-character alphanumeric salt.
    alphabet = string.ascii_letters + string.digits
    salt = ''.join(random.choice(alphabet) for _ in range(8))
    sql = '''INSERT INTO user (username, pwd_salt, pwd_hash, full_name, trusted, readonly) VALUES (?, ?, ?, ?, ?, ?)'''
    row = (username, salt, self._gen_hash(password, salt),
           full_name, trusted or False, readonly or False)
    try:
        self._db_curs.execute(sql, row)
        self._db_conn.commit()
    except (sqlite3.OperationalError, sqlite3.IntegrityError) as error:
        raise AuthError(error)
Add user to SQLite database. * `username` [string] Username of new user. * `password` [string] Password of new user. * `full_name` [string] Full name of new user. * `trusted` [boolean] Whether the new user should be trusted or not. * `readonly` [boolean] Whether the new user can only read or not
def evtx_chunk_xml_view(chunk):
    """
    Generate XML representations of the records in an EVTX chunk.

    Does not include the XML <?xml... header.  Records are ordered by
    chunk.records().

    Args:
      chunk (Evtx.Chunk): the chunk to render.

    Yields:
      tuple[str, Evtx.Record]: the rendered XML document and the raw record.
    """
    for rec in chunk.records():
        yield evtx_record_xml_view(rec), rec
Generate XML representations of the records in an EVTX chunk. Does not include the XML <?xml... header. Records are ordered by chunk.records() Args: chunk (Evtx.Chunk): the chunk to render. Yields: tuple[str, Evtx.Record]: the rendered XML document and the raw record.
def get(self, attr, default=None):
    """Return the value of a session attribute, or *default* when unset."""
    attributes = self.body.get('attributes')
    if not attributes:
        # Missing or falsy 'attributes' entry — treat as empty mapping.
        attributes = {}
    return attributes.get(attr, default)
Get an attribute defined by this session
def rollout(policy, env, timestep_limit=None, add_noise=False, offset=0):
    """Do a rollout.

    If add_noise is True, the rollout will take noisy actions with noise
    drawn from that stream.  Otherwise, no action noise will be added.

    Parameters
    ----------
    policy: tf object
        policy from which to draw actions
    env: GymEnv
        environment from which to draw rewards, done, and next state
    timestep_limit: int, optional
        steps after which to end the rollout
    add_noise: bool, optional
        indicates whether exploratory action noise should be added
    offset: int, optional
        value to subtract from the reward. For example, survival bonus
        from humanoid
    """
    env_limit = env.spec.max_episode_steps
    if timestep_limit is None:
        limit = env_limit
    else:
        limit = min(timestep_limit, env_limit)
    rewards = []
    steps = 0
    obs = env.reset()
    # `limit or 999999` guards against a None/0 episode-step spec.
    for _ in range(limit or 999999):
        action = policy.compute(obs, add_noise=add_noise, update=True)[0]
        obs, reward, done, _ = env.step(action)
        rewards.append(reward - np.abs(offset))
        steps += 1
        if done:
            break
    return np.array(rewards, dtype=np.float32), steps
Do a rollout. If add_noise is True, the rollout will take noisy actions with noise drawn from that stream. Otherwise, no action noise will be added. Parameters ---------- policy: tf object policy from which to draw actions env: GymEnv environment from which to draw rewards, done, and next state timestep_limit: int, optional steps after which to end the rollout add_noise: bool, optional indicates whether exploratory action noise should be added offset: int, optional value to subtract from the reward. For example, survival bonus from humanoid
def sort_sam(sam, sort):
    """
    Optionally sort a SAM file by read name and return an open handle to it.

    :param sam: path to the SAM file, or '-' to read from stdin
    :param sort: when True, shell out to `sort -k1` before opening
    :return: open file object (or sys.stdin when sam == '-' and sort is False)

    NOTE(review): relies on a module-level `sbuffer` (sort memory buffer
    size in GB) defined elsewhere in the module — confirm it exists.
    """
    # Use the SAM file's own directory for sort's temporary files.
    tempdir = '%s/' % (os.path.abspath(sam).rsplit('/', 1)[0])
    if sort is True:
        mapping = '%s.sorted.sam' % (sam.rsplit('.', 1)[0])
        if sam != '-':
            # Re-use a previously sorted copy when one already exists.
            if os.path.exists(mapping) is False:
                os.system("\
                    sort -k1 --buffer-size=%sG -T %s -o %s %s\
                    " % (sbuffer, tempdir, mapping, sam))
        else:
            # Data arrives on stdin: sort the stream into a named file.
            mapping = 'stdin-sam.sorted.sam'
            p = Popen("sort -k1 --buffer-size=%sG -T %s -o %s" \
                % (sbuffer, tempdir, mapping), stdin = sys.stdin, shell = True)
            p.communicate()
        mapping = open(mapping)
    else:
        if sam == '-':
            mapping = sys.stdin
        else:
            mapping = open(sam)
    return mapping
sort sam file
def y(self):
    '''
    np.array: The grid points in y.

    Empty when any of y_min/y_max/y_step is None or the range collapses
    (y_min == y_max).
    '''
    have_bounds = None not in (self.y_min, self.y_max, self.y_step)
    if have_bounds and self.y_min != self.y_max:
        # Shrink the stop slightly so a stop that lands exactly on a
        # grid point is still included despite float rounding.
        stop = self.y_max - self.y_step * 0.1
        return np.arange(self.y_min, stop, self.y_step)
    return np.array([])
np.array: The grid points in y.
def language_list(
        maintenance_db,
        user=None,
        host=None,
        port=None,
        password=None,
        runas=None):
    '''
    .. versionadded:: 2016.3.0

    Return a list of languages in a database.

    CLI Example:

    .. code-block:: bash

        salt '*' postgres.language_list dbname

    maintenance_db
        The database to check

    user
        database username if different from config or default

    password
        user password if any password for a specified user

    host
        Database host if different from config or default

    port
        Database port if different from config or default

    runas
        System user all operations should be performed on behalf of
    '''
    query = 'SELECT lanname AS "Name" FROM pg_language'
    rows = psql_query(
        query,
        runas=runas,
        host=host,
        user=user,
        port=port,
        maintenance_db=maintenance_db,
        password=password)
    # Map each language name to itself, matching the historical shape.
    return {row['Name']: row['Name'] for row in rows}
.. versionadded:: 2016.3.0 Return a list of languages in a database. CLI Example: .. code-block:: bash salt '*' postgres.language_list dbname maintenance_db The database to check user database username if different from config or default password user password if any password for a specified user host Database host if different from config or default port Database port if different from config or default runas System user all operations should be performed on behalf of
def get(self, task_id=None, params=None):
    """
    Retrieve information for a particular task.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html>`_

    :arg task_id: Return the task with specified id (node_id:task_number)
    :arg wait_for_completion: Wait for the matching tasks to complete
        (default: false)
    :arg timeout: Maximum waiting time for `wait_for_completion`
    """
    url = _make_path('_tasks', task_id)
    return self.transport.perform_request('GET', url, params=params)
Retrieve information for a particular task. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html>`_ :arg task_id: Return the task with specified id (node_id:task_number) :arg wait_for_completion: Wait for the matching tasks to complete (default: false) :arg timeout: Maximum waiting time for `wait_for_completion`
def top_k_accuracy(input:Tensor, targs:Tensor, k:int=5)->Rank0Tensor:
    "Computes the Top-k accuracy (target is in the top k predictions)."
    topk_indices = input.topk(k=k, dim=-1)[1]
    expanded_targs = targs.unsqueeze(dim=-1).expand_as(topk_indices)
    hit = (topk_indices == expanded_targs).max(dim=-1)[0]
    return hit.float().mean()
Computes the Top-k accuracy (target is in the top k predictions).
def set_options(self, **kwargs):
    """
    Set options.
    @param kwargs: keyword arguments.
    @see: L{Options}
    """
    options = Unskin(self.options)
    options.update(kwargs)
Set options. @param kwargs: keyword arguments. @see: L{Options}
def _cancel_callback(self, request_id): """Construct a cancellation callback for the given request ID.""" def callback(future): if future.cancelled(): self.notify(CANCEL_METHOD, {'id': request_id}) future.set_exception(JsonRpcRequestCancelled()) return callback
Construct a cancellation callback for the given request ID.
def check(self, value):
    """
    Check a value against the configured [_min, _max] thresholds.

    arguments:
        value: the value to check

    NOTE(review): the two branches return opposite results for in-range
    values — with _inclusive=True an in-range value yields False, while
    with _inclusive=False an in-range value yields True.  One branch
    looks inverted; confirm the intended semantics with callers before
    changing.
    """
    if self._inclusive:
        # Returns True only when value lies OUTSIDE [_min, _max].
        return False if self._min <= value <= self._max else True
    else:
        # Returns True only when value lies INSIDE [_min, _max].
        return False if value > self._max or value < self._min else True
check if a value is correct according to threshold arguments: value: the value to check
def imshow(image, auto_subplot=False, **kwargs):
    """
    Displays an image.

    Parameters
    ----------
    image : :obj:`perception.Image`
        image to display
    auto_subplot : bool
        whether or not to automatically subplot for multi-channel images
        e.g. rgbd
    """
    if isinstance(image, (BinaryImage, GrayscaleImage)):
        plt.imshow(image.data, cmap=plt.cm.gray, **kwargs)
    elif isinstance(image, (ColorImage, SegmentationImage)):
        plt.imshow(image.data, **kwargs)
    elif isinstance(image, DepthImage):
        # Reversed gray map: near is dark, far is light.
        plt.imshow(image.data, cmap=plt.cm.gray_r, **kwargs)
    elif isinstance(image, RgbdImage):
        if auto_subplot:
            plt.subplot(1, 2, 1)
            plt.imshow(image.color.data, **kwargs)
            plt.axis('off')
            plt.subplot(1, 2, 2)
            plt.imshow(image.depth.data, cmap=plt.cm.gray_r, **kwargs)
        else:
            plt.imshow(image.color.data, **kwargs)
    elif isinstance(image, GdImage):
        if auto_subplot:
            plt.subplot(1, 2, 1)
            plt.imshow(image.gray.data, cmap=plt.cm.gray, **kwargs)
            plt.axis('off')
            plt.subplot(1, 2, 2)
            plt.imshow(image.depth.data, cmap=plt.cm.gray_r, **kwargs)
        else:
            plt.imshow(image.gray.data, cmap=plt.cm.gray, **kwargs)
    plt.axis('off')
Displays an image. Parameters ---------- image : :obj:`perception.Image` image to display auto_subplot : bool whether or not to automatically subplot for multi-channel images e.g. rgbd
def compute(self, xt1, yt1, xt, yt, theta1t1, theta2t1, theta1, theta2):
    """
    Accumulate one observation: the (x, y) position delta between t-1
    and t plus the two joint angles at t, stored until maxPoints samples
    have been collected.  theta1t1/theta2t1 are accepted but unused.

    NOTE(review): uses Python 2 `print >>` syntax — this block is
    Python 2 only.
    """
    dx = xt - xt1
    dy = yt - yt1
    if self.numPoints < self.maxPoints:
        self.dxValues[self.numPoints,0] = dx
        self.dxValues[self.numPoints,1] = dy
        self.thetaValues[self.numPoints,0] = theta1
        self.thetaValues[self.numPoints,1] = theta2
        self.numPoints += 1
    elif self.numPoints == self.maxPoints:
        # Warn exactly once: numPoints is bumped past maxPoints so this
        # branch never matches again.
        print >> sys.stderr,"Max points exceeded, analyzing ",self.maxPoints,"points only"
        self.numPoints += 1
Accumulate the various inputs.
def is_public(self):
    """``True`` if this is a public key, otherwise ``False``"""
    key = self._key
    # A Private key may subclass Public, so exclude it explicitly.
    return isinstance(key, Public) and not isinstance(key, Private)
``True`` if this is a public key, otherwise ``False``
def sumaclust_denovo_cluster(seq_path=None,
                             result_path=None,
                             shortest_len=True,
                             similarity=0.97,
                             threads=1,
                             exact=False,
                             HALT_EXEC=False
                             ):
    """ Launch the SumaClust de novo OTU picker.

        Parameters: seq_path, filepath to reads;
                    result_path, filepath to output OTU map;
                    shortest_len, boolean;
                    similarity, the similarity threshold (between (0,1]);
                    threads, number of threads to use;
                    exact, boolean to perform exact matching

        Return: clusters, list of lists
    """
    # The FASTA input is mandatory and must exist.
    if seq_path is None or not exists(seq_path):
        raise ValueError("Error: FASTA query sequence filepath is "
                         "mandatory input.")
    # The output OTU map's directory must exist.
    if result_path is None or not isdir(dirname(realpath(result_path))):
        raise ValueError("Error: output directory is mandatory input.")

    sumaclust = Sumaclust(HALT_EXEC=HALT_EXEC)
    sumaclust.Parameters['-O'].on(result_path)
    if similarity is not None:
        sumaclust.Parameters['-t'].on(similarity)
    if exact:
        sumaclust.Parameters['-e'].on()
    # '-l' (reference = shortest sequence) is on by default.
    if not shortest_len:
        sumaclust.Parameters['-l'].off()
    if threads > 0:
        sumaclust.Parameters['-p'].on(threads)
    else:
        raise ValueError("Number of threads must be positive.")

    # Run SumaClust on the reads and parse the OTU map: each line is
    # "<otu-id>\t<seq-id>..."; keep only the sequence ids.
    app_result = sumaclust(seq_path)
    f_otumap = app_result['OtuMap']
    return [line.strip().split('\t')[1:] for line in f_otumap]
Function : launch SumaClust de novo OTU picker Parameters: seq_path, filepath to reads; result_path, filepath to output OTU map; shortest_len, boolean; similarity, the similarity threshold (between (0,1]); threads, number of threads to use; exact, boolean to perform exact matching Return : clusters, list of lists
def distances_indices_sorted(self, points, sign=False):
    """
    Computes the signed distances from the plane to each of the points,
    together with indices sorting the points from closest to furthest.

    Positive distances are on the side of the plane's normal, negative
    distances on the other side.

    :param points: Points for which distances are computed
    :param sign: Whether to add sign information in the indices sorting
                 the points distances
    :return: (distances, indices); when sign is True the sorting list
             holds (index, sign) tuples instead of bare indices.
    """
    distances = [np.dot(self.normal_vector, point) + self.d
                 for point in points]
    order = sorted(range(len(distances)),
                   key=lambda idx: np.abs(distances[idx]))
    if sign:
        order = [(idx, int(np.sign(distances[idx]))) for idx in order]
    return distances, order
Computes the distances from the plane to each of the points. Positive distances are on the side of the normal of the plane while negative distances are on the other side. Indices sorting the points from closest to furthest is also computed. :param points: Points for which distances are computed :param sign: Whether to add sign information in the indices sorting the points distances :return: Distances from the plane to the points (positive values on the side of the normal to the plane, negative values on the other side), as well as indices of the points from closest to furthest. For the latter, when the sign parameter is True, items of the sorting list are given as tuples of (index, sign).
def MultiWritePathHistory(self, client_path_histories):
    """Writes a collection of hash and stat entries observed for given paths.

    Args (per the iteration below):
      client_path_histories: mapping of client path -> history object
          carrying `stat_entries` and `hash_entries` timestamp maps.

    Raises:
      db.UnknownClientError: if a client path references an unknown client.
      db.AtLeastOneUnknownPathError: if a path record does not exist.
    """
    for client_path, client_path_history in iteritems(client_path_histories):
        # Reject writes for clients that were never registered.
        if client_path.client_id not in self.metadatas:
            raise db.UnknownClientError(client_path.client_id)
        path_info = rdf_objects.PathInfo(
            path_type=client_path.path_type, components=client_path.components)
        for timestamp, stat_entry in iteritems(client_path_history.stat_entries):
            # set_default=False: the path must already exist.
            path_record = self._GetPathRecord(
                client_path.client_id, path_info, set_default=False)
            if path_record is None:
                # TODO(hanuszczak): Provide more details about paths that caused that.
                raise db.AtLeastOneUnknownPathError([])
            path_record.AddStatEntry(stat_entry, timestamp)
        for timestamp, hash_entry in iteritems(client_path_history.hash_entries):
            path_record = self._GetPathRecord(
                client_path.client_id, path_info, set_default=False)
            if path_record is None:
                # TODO(hanuszczak): Provide more details about paths that caused that.
                raise db.AtLeastOneUnknownPathError([])
            path_record.AddHashEntry(hash_entry, timestamp)
Writes a collection of hash and stat entries observed for given paths.
def selectisinstance(table, field, value, complement=False):
    """Select rows where the given field is an instance of the given type.

    Thin wrapper around selectop with isinstance as the test predicate.
    """
    return selectop(table, field, value, isinstance,
                    complement=complement)
Select rows where the given field is an instance of the given type.
def get_role_config_group(self, name):
    """
    Get a role configuration group in the service by name.

    @param name: The name of the role config group.
    @return: An ApiRoleConfigGroup object.
    @since: API v3
    """
    resource_root = self._get_resource_root()
    cluster_name = self._get_cluster_name()
    return role_config_groups.get_role_config_group(
        resource_root, self.name, name, cluster_name)
Get a role configuration group in the service by name. @param name: The name of the role config group. @return: An ApiRoleConfigGroup object. @since: API v3
def refresh_token(self, client_id, client_secret, refresh_token, grant_type='refresh_token'):
    """Calls oauth/token endpoint with refresh token grant type.

    Use this endpoint to refresh an access token, using the refresh
    token you got during authorization.

    Args:
        grant_type (str): Denotes the flow you're using. For refresh
            token use refresh_token
        client_id (str): your application's client Id
        client_secret (str): your application's client Secret
        refresh_token (str): The refresh token returned from the
            initial token request.

    Returns:
        access_token, id_token
    """
    url = 'https://{}/oauth/token'.format(self.domain)
    payload = {
        'client_id': client_id,
        'client_secret': client_secret,
        'refresh_token': refresh_token,
        'grant_type': grant_type,
    }
    return self.post(url, data=payload,
                     headers={'Content-Type': 'application/json'})
Calls oauth/token endpoint with refresh token grant type Use this endpoint to refresh an access token, using the refresh token you got during authorization. Args: grant_type (str): Denotes the flow you're using. For refresh token use refresh_token client_id (str): your application's client Id client_secret (str): your application's client Secret refresh_token (str): The refresh token returned from the initial token request. Returns: access_token, id_token
def effective_n(mcmc):
    """
    Compute the effective sample size (ESS) for each variable using
    Geyer's initial positive / initial monotone sequence estimator.

    Args:
        mcmc (MCMCResults): Pre-sliced MCMC samples to compute
            diagnostics for.

    Returns:
        pd.DataFrame with an 'effective_n' value per variable, indexed
        by mcmc.levels.

    Raises:
        ValueError: if fewer than two chains are available.
    """
    if mcmc.n_chains < 2:
        raise ValueError(
            'Calculation of effective sample size requires multiple chains '
            'of the same length.')

    def get_neff(x):
        """Compute the effective sample size for a 2D array.

        Assumes x is (samples, chains) — transposed below; confirm
        against mcmc.data layout.
        """
        trace_value = x.T
        nchain, n_samples = trace_value.shape
        # Per-chain autocovariance; `autocov` comes from module scope.
        acov = np.asarray([autocov(trace_value[chain])
                           for chain in range(nchain)])
        chain_mean = trace_value.mean(axis=1)
        # Bias-corrected within-chain variance (lag 0) and lag-1 autocov.
        chain_var = acov[:, 0] * n_samples / (n_samples - 1.)
        acov_t = acov[:, 1] * n_samples / (n_samples - 1.)
        mean_var = np.mean(chain_var)
        # Marginal posterior variance estimate: within + between chains.
        var_plus = mean_var * (n_samples - 1.) / n_samples
        var_plus += np.var(chain_mean, ddof=1)
        rho_hat_t = np.zeros(n_samples)
        rho_hat_even = 1.
        rho_hat_t[0] = rho_hat_even
        rho_hat_odd = 1. - (mean_var - np.mean(acov_t)) / var_plus
        rho_hat_t[1] = rho_hat_odd
        # Geyer's initial positive sequence
        max_t = 1
        t = 1
        while t < (n_samples - 2) and (rho_hat_even + rho_hat_odd) >= 0.:
            rho_hat_even = 1. - (mean_var - np.mean(acov[:, t + 1])) / var_plus
            rho_hat_odd = 1. - (mean_var - np.mean(acov[:, t + 2])) / var_plus
            if (rho_hat_even + rho_hat_odd) >= 0:
                rho_hat_t[t + 1] = rho_hat_even
                rho_hat_t[t + 2] = rho_hat_odd
                max_t = t + 2
            t += 2
        # Geyer's initial monotone sequence
        t = 3
        while t <= max_t - 2:
            if (rho_hat_t[t + 1] + rho_hat_t[t + 2]) > (rho_hat_t[t - 1] + rho_hat_t[t]):
                rho_hat_t[t + 1] = (rho_hat_t[t - 1] + rho_hat_t[t]) / 2.
                rho_hat_t[t + 2] = rho_hat_t[t + 1]
            t += 2
        ess = nchain * n_samples
        ess = ess / (-1. + 2. * np.sum(rho_hat_t))
        return ess

    nvar = mcmc.data.shape[-1]
    n_eff = [get_neff(mcmc.data[:, :, i]) for i in range(nvar)]
    return pd.DataFrame({'effective_n': n_eff}, index=mcmc.levels)
Args: mcmc (MCMCResults): Pre-sliced MCMC samples to compute diagnostics for.
def delete(self, id):
    """ Delete a time entry by id. """
    make_path = partial(_path, self.adapter)
    return self._delete(make_path(id))
delete a time entry.
def copy_workspace(self, uri, new_name):
    '''
    Copy the current workspace.

    Args:
        - uri (dict): the uri of the workspace being copied. Needs to
          have a did and wid key.
        - new_name (str): the new name of the copied workspace.

    Returns:
        - requests.Response: Onshape response data
    '''
    payload = {
        'isPublic': True,
        'newName': new_name
    }
    endpoint = '/api/documents/' + uri['did'] + '/workspaces/' + uri['wvm'] + '/copy'
    return self._api.request('post', endpoint, body=payload)
Copy the current workspace. Args: - uri (dict): the uri of the workspace being copied. Needs to have a did and wid key. - new_name (str): the new name of the copied workspace. Returns: - requests.Response: Onshape response data
def Tensors(self, run, tag):
    """Retrieve the tensor events associated with a run and tag.

    Args:
      run: A string name of the run for which values are retrieved.
      tag: A string name of the tag for which values are retrieved.

    Raises:
      KeyError: If the run is not found, or the tag is not available for
        the given run.

    Returns:
      An array of `event_accumulator.TensorEvent`s.
    """
    return self.GetAccumulator(run).Tensors(tag)
Retrieve the tensor events associated with a run and tag. Args: run: A string name of the run for which values are retrieved. tag: A string name of the tag for which values are retrieved. Raises: KeyError: If the run is not found, or the tag is not available for the given run. Returns: An array of `event_accumulator.TensorEvent`s.
def get(self, request, *args, **kwargs):
    """ Retrieve the list of nodes of the specified layer. """
    # Resolves (and validates) the layer before fetching its nodes.
    self.get_layer()
    nodes = self.get_nodes(request, *args, **kwargs)
    return Response(nodes)
Retrieve list of nodes of the specified layer
def as_list(callable):
    """Convert a scalar validator into a validator over a list of values."""
    @wraps(callable)
    def wrapper(value_iter):
        return [callable(item) for item in value_iter]
    return wrapper
Convert a scalar validator in a list validator
def GetRelativePath(self, path_spec):
    """Returns the relative path based on a resolved path specification.

    The relative path is the location of the upper most path specification.
    The location of the mount point is stripped off if relevant.

    Args:
      path_spec (PathSpec): path specification.

    Returns:
      str: corresponding relative path or None if the relative path could
          not be determined.

    Raises:
      PathSpecError: if the path specification is incorrect.
    """
    location = getattr(path_spec, 'location', None)
    if location is None:
        raise errors.PathSpecError('Path specification missing location.')
    if path_spec_factory.Factory.IsSystemLevelTypeIndicator(
            self._file_system.type_indicator):
        # System-level specs carry the mount point inside the location string.
        if not location.startswith(self._mount_point.location):
            raise errors.PathSpecError(
                'Path specification does not contain mount point.')
    else:
        # Otherwise the mount point must be the direct parent path spec.
        if not hasattr(path_spec, 'parent'):
            raise errors.PathSpecError('Path specification missing parent.')
        if path_spec.parent != self._mount_point:
            raise errors.PathSpecError(
                'Path specification does not contain mount point.')
    path_segments = self._file_system.SplitPath(location)
    if path_spec_factory.Factory.IsSystemLevelTypeIndicator(
            self._file_system.type_indicator):
        # Strip the mount point's leading segments from the location.
        mount_point_path_segments = self._file_system.SplitPath(
            self._mount_point.location)
        path_segments = path_segments[len(mount_point_path_segments):]
    return '{0:s}{1:s}'.format(
        self._file_system.PATH_SEPARATOR,
        self._file_system.PATH_SEPARATOR.join(path_segments))
Returns the relative path based on a resolved path specification. The relative path is the location of the upper most path specification. The the location of the mount point is stripped off if relevant. Args: path_spec (PathSpec): path specification. Returns: str: corresponding relative path or None if the relative path could not be determined. Raises: PathSpecError: if the path specification is incorrect.
def monitor_session_span_command_dest_tengigabitethernet(self, **kwargs):
    """Auto Generated Code

    Builds the brocade-span monitor/session config element with the
    given session-number and dest-tengigabitethernet, then passes it to
    the callback (default: self._callback).
    """
    config = ET.Element("config")
    monitor = ET.SubElement(config, "monitor",
                            xmlns="urn:brocade.com:mgmt:brocade-span")
    session = ET.SubElement(monitor, "session")
    session_number = ET.SubElement(session, "session-number")
    session_number.text = kwargs.pop('session_number')
    span_command = ET.SubElement(session, "span-command")
    dest_interface = ET.SubElement(span_command, "dest-tengigabitethernet")
    dest_interface.text = kwargs.pop('dest_tengigabitethernet')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def volume_montecarlo(found_d, missed_d, found_mchirp, missed_mchirp,
                      distribution_param, distribution, limits_param,
                      min_param=None, max_param=None):
    """
    Compute sensitive volume and standard error via direct Monte Carlo integral

    Injections should be made over a range of distances such that sensitive
    volume due to signals closer than D_min is negligible, and efficiency at
    distances above D_max is negligible
    TODO : Replace this function by Collin's formula given in Usman et al.?
    OR get that coded as a new function?

    Parameters
    -----------
    found_d: numpy.ndarray
        The distances of found injections
    missed_d: numpy.ndarray
        The distances of missed injections
    found_mchirp: numpy.ndarray
        Chirp mass of found injections
    missed_mchirp: numpy.ndarray
        Chirp mass of missed injections
    distribution_param: string
        Parameter D of the injections used to generate a distribution over
        distance, may be 'distance', 'chirp_distance'.
    distribution: string
        form of the distribution over the parameter, may be
        'log' (uniform in log D)
        'uniform' (uniform in D)
        'distancesquared' (uniform in D**2)
        'volume' (uniform in D**3)
    limits_param: string
        Parameter Dlim specifying limits inside which injections were made
        may be 'distance', 'chirp distance'
    min_param: float
        minimum value of Dlim at which injections were made; only used for
        log distribution, then if None the minimum actually injected value
        will be used
    max_param: float
        maximum value of Dlim out to which injections were made; if None
        the maximum actually injected value will be used

    Returns
    --------
    volume: float
        Volume estimate
    volume_error: float
        The standard error in the volume
    """
    # Powers of distance (and of mchirp, for chirp-distance distributions)
    # used to weight each injection so the sampled distribution is rescaled
    # to the uniform-in-volume target measure.
    d_power = {
        'log'             : 3.,
        'uniform'         : 2.,
        'distancesquared' : 1.,
        'volume'          : 0.
    }[distribution]
    mchirp_power = {
        'log'             : 0.,
        'uniform'         : 5. / 6.,
        'distancesquared' : 5. / 3.,
        'volume'          : 15. / 6.
    }[distribution]

    # establish maximum physical distance: first for chirp distance distribution
    if limits_param == 'chirp_distance':
        # chirp mass of a 1.4/1.4 Msun binary, used as the chirp-distance
        # reference mass
        mchirp_standard_bns = 1.4 * 2.**(-1. / 5.)
        all_mchirp = numpy.concatenate((found_mchirp, missed_mchirp))
        max_mchirp = all_mchirp.max()
        if max_param is not None:
            # use largest actually injected mchirp for conversion
            max_distance = max_param * \
                (max_mchirp / mchirp_standard_bns)**(5. / 6.)
        else:
            max_distance = max(found_d.max(), missed_d.max())
    elif limits_param == 'distance':
        if max_param is not None:
            max_distance = max_param
        else:
            # if no max distance given, use max distance actually injected
            max_distance = max(found_d.max(), missed_d.max())
    else:
        raise NotImplementedError("%s is not a recognized parameter"
                                  % limits_param)

    # volume of sphere
    montecarlo_vtot = (4. / 3.) * numpy.pi * max_distance**3.

    # arrays of weights for the MC integral
    if distribution_param == 'distance':
        found_weights = found_d ** d_power
        missed_weights = missed_d ** d_power
    elif distribution_param == 'chirp_distance':
        # weight by a power of mchirp to rescale injection density to the
        # target mass distribution
        found_weights = found_d ** d_power * \
                        found_mchirp ** mchirp_power
        missed_weights = missed_d ** d_power * \
                         missed_mchirp ** mchirp_power
    else:
        raise NotImplementedError("%s is not a recognized distance parameter"
                                  % distribution_param)

    all_weights = numpy.concatenate((found_weights, missed_weights))

    # measured weighted efficiency is w_i for a found inj and 0 for missed
    # MC integral is volume of sphere * (sum of found weights)/(sum of all
    # weights) over injections covering the sphere
    mc_weight_samples = numpy.concatenate((found_weights, 0 * missed_weights))
    mc_sum = sum(mc_weight_samples)

    if limits_param == 'distance':
        mc_norm = sum(all_weights)
    elif limits_param == 'chirp_distance':
        # if injections are made up to a maximum chirp distance, account for
        # extra missed injections that would occur when injecting up to
        # maximum physical distance : this works out to a 'chirp volume' factor
        mc_norm = sum(all_weights * (max_mchirp / all_mchirp) ** (5. / 2.))

    # take out a constant factor
    mc_prefactor = montecarlo_vtot / mc_norm

    # count the samples
    if limits_param == 'distance':
        Ninj = len(mc_weight_samples)
    elif limits_param == 'chirp_distance':
        # find the total expected number after extending from maximum chirp
        # dist up to maximum physical distance
        if distribution == 'log':
            # only need minimum distance in this one case
            if min_param is not None:
                min_distance = min_param * \
                    (numpy.min(all_mchirp) / mchirp_standard_bns) ** (5. / 6.)
            else:
                min_distance = min(numpy.min(found_d), numpy.min(missed_d))
            logrange = numpy.log(max_distance / min_distance)
            Ninj = len(mc_weight_samples) + (5. / 6.) * \
                sum(numpy.log(max_mchirp / all_mchirp) / logrange)
        else:
            Ninj = sum((max_mchirp / all_mchirp) ** mchirp_power)

    # sample variance of efficiency: mean of the square - square of the mean
    mc_sample_variance = sum(mc_weight_samples ** 2.) / Ninj - \
        (mc_sum / Ninj) ** 2.

    # return MC integral and its standard deviation; variance of mc_sum scales
    # relative to sample variance by Ninj (Bienayme' rule)
    vol = mc_prefactor * mc_sum
    vol_err = mc_prefactor * (Ninj * mc_sample_variance) ** 0.5
    return vol, vol_err
Compute sensitive volume and standard error via direct Monte Carlo integral Injections should be made over a range of distances such that sensitive volume due to signals closer than D_min is negligible, and efficiency at distances above D_max is negligible TODO : Replace this function by Collin's formula given in Usman et al .. ? OR get that coded as a new function? Parameters ----------- found_d: numpy.ndarray The distances of found injections missed_d: numpy.ndarray The distances of missed injections found_mchirp: numpy.ndarray Chirp mass of found injections missed_mchirp: numpy.ndarray Chirp mass of missed injections distribution_param: string Parameter D of the injections used to generate a distribution over distance, may be 'distance', 'chirp_distance'. distribution: string form of the distribution over the parameter, may be 'log' (uniform in log D) 'uniform' (uniform in D) 'distancesquared' (uniform in D**2) 'volume' (uniform in D***3) limits_param: string Parameter Dlim specifying limits inside which injections were made may be 'distance', 'chirp distance' min_param: float minimum value of Dlim at which injections were made; only used for log distribution, then if None the minimum actually injected value will be used max_param: float maximum value of Dlim out to which injections were made; if None the maximum actually injected value will be used Returns -------- volume: float Volume estimate volume_error: float The standard error in the volume
def montage(data, ref_chan=None, ref_to_avg=False, bipolar=None,
            method='average'):
    """Apply linear transformation to the channels.

    Parameters
    ----------
    data : instance of DataRaw
        the data to filter
    ref_chan : list of str
        list of channels used as reference
    ref_to_avg : bool
        if re-reference to average or not
    bipolar : float
        distance in mm to consider two channels as neighbors and then
        compute the bipolar montage between them.
    method : str
        'average' or 'regression'. 'average' takes the average across the
        channels selected as reference (it can be all) and subtract it from
        each channel. 'regression' keeps the residuals after regressing out
        the mean across channels.

    Returns
    -------
    filtered_data : instance of DataRaw
        filtered data

    Notes
    -----
    If you don't change anything, it returns the same instance of data.
    """
    # ref_to_avg and an explicit reference list are mutually exclusive
    if ref_to_avg and ref_chan is not None:
        raise TypeError('You cannot specify reference to the average and '
                        'the channels to use as reference')

    if ref_chan is not None:
        if (not isinstance(ref_chan, (list, tuple)) or
                not all(isinstance(x, str) for x in ref_chan)):
            raise TypeError('chan should be a list of strings')

    if ref_chan is None:
        ref_chan = []  # TODO: check bool for ref_chan

    if bipolar:
        if not data.attr['chan']:
            raise ValueError('Data should have Chan information in attr')
        _assert_equal_channels(data.axis['chan'])
        chan_in_data = data.axis['chan'][0]
        chan = data.attr['chan']
        # NOTE(review): Chan appears to be callable with a predicate that
        # filters channels to those present in the data -- confirm against
        # the Chan class API.
        chan = chan(lambda x: x.label in chan_in_data)
        chan, trans = create_bipolar_chan(chan, bipolar)
        data.attr['chan'] = chan

    if ref_to_avg or ref_chan or bipolar:
        # work on a copy so the input object is left untouched
        mdata = data._copy()
        idx_chan = mdata.index_of('chan')

        for i in range(mdata.number_of('trial')):
            if ref_to_avg or ref_chan:
                if ref_to_avg:
                    # use every channel of this trial as the reference
                    ref_chan = data.axis['chan'][i]

                ref_data = data(trial=i, chan=ref_chan)
                if method == 'average':
                    mdata.data[i] = (data(trial=i) -
                                     mean(ref_data, axis=idx_chan))
                elif method == 'regression':
                    mdata.data[i] = compute_average_regress(data(trial=i),
                                                            idx_chan)

            elif bipolar:
                if not data.index_of('chan') == 0:
                    raise ValueError('For matrix multiplication to work, '
                                     'the first dimension should be chan')
                # apply the bipolar transformation matrix computed above
                mdata.data[i] = dot(trans, data(trial=i))
                mdata.axis['chan'][i] = asarray(chan.return_label(),
                                                dtype='U')
    else:
        # nothing requested: return the very same instance (see Notes)
        mdata = data

    return mdata
Apply linear transformation to the channels. Parameters ---------- data : instance of DataRaw the data to filter ref_chan : list of str list of channels used as reference ref_to_avg : bool if re-reference to average or not bipolar : float distance in mm to consider two channels as neighbors and then compute the bipolar montage between them. method : str 'average' or 'regression'. 'average' takes the average across the channels selected as reference (it can be all) and subtract it from each channel. 'regression' keeps the residuals after regressing out the mean across channels. Returns ------- filtered_data : instance of DataRaw filtered data Notes ----- If you don't change anything, it returns the same instance of data.
def cast_problem(problem):
    """Cast a problem object with a known interface as an OptProblem.

    Parameters
    ----------
    problem : Object
    """
    # Already an OptProblem: return it unchanged.
    if isinstance(problem, OptProblem):
        return problem

    # "Type Base" problems either have no G matrix at all, or have a G that
    # is the identity: square, one nonzero per row, all entries on the
    # diagonal and equal to 1.
    identity_G = (
        not hasattr(problem, 'G') or
        (problem.G.shape[0] == problem.G.shape[1] and
         problem.G.shape[0] == problem.G.nnz and
         np.all(problem.G.row == problem.G.col) and
         np.all(problem.G.data == 1.)))

    if identity_G:
        return create_problem_from_type_base(problem)
    # Otherwise treat it as a "Type A" problem.
    return create_problem_from_type_A(problem)
Casts problem object with known interface as OptProblem. Parameters ---------- problem : Object
def iteritems(self):
    """Yield ``(key, value)`` pairs from ``self.__dict__``.

    Keys listed in ``self._printable_exclude`` are skipped.  Kept for
    backwards compatibility with callers written against the Python 2
    style API (``items`` had to be overridden when moving to Python 3).
    """
    excluded = self._printable_exclude
    for key, val in six.iteritems(self.__dict__):
        if key not in excluded:
            yield (key, val)
Wow this class is messed up. I had to overwrite items when moving to python3, just because I hadn't called it yet
def retro_schema(schema):
    """
    CONVERT SCHEMA FROM 5.x to 1.x
    :param schema: Elasticsearch 5.x index schema (``mappings`` plus
        ``settings``)
    :return: equivalent 1.x-style schema, wrapped
    """
    output = wrap({
        "mappings": {
            typename: {
                "dynamic_templates": [
                    # each template is a single-entry mapping; unpack its one
                    # (name, spec) pair and convert it
                    # NOTE(review): t.items()[0] assumes items() returns an
                    # indexable list (Python 2 / mo-dots) -- verify under
                    # Python 3
                    retro_dynamic_template(*(t.items()[0]))
                    for t in details.dynamic_templates
                ],
                "properties": retro_properties(details.properties)
            }
            for typename, details in schema.mappings.items()
        },
        # settings are passed through unchanged
        "settings": schema.settings
    })
    return output
CONVERT SCHEMA FROM 5.x to 1.x :param schema: :return:
def file_upload(self, local_path, remote_path, l_st):
    """Upload local_path to remote_path and set permission and mtime.

    :param local_path: path of the local file to upload
    :param remote_path: destination path on the remote host
    :param l_st: local stat result whose mode/times are copied to the
        remote file via _match_modes
    """
    self.sftp.put(local_path, remote_path)
    self._match_modes(remote_path, l_st)
Upload local_path to remote_path and set permission and mtime.
def _add_channel(self, chn, color_min, color_max):
    """Append *chn* to ``self.channels``, linearly scaled so that
    ``color_min`` maps to 0 and ``color_max`` maps to 1.

    Masked arrays keep their mask; plain arrays get no mask.
    """
    if isinstance(chn, np.ma.core.MaskedArray):
        values, mask = chn.data, chn.mask
    else:
        values, mask = np.array(chn), False

    span = color_max - color_min
    scaled = (values - color_min) * 1.0 / span
    self.channels.append(np.ma.array(scaled, mask=mask))
Adds a channel to the image object
def get_selection(self):
    """
    Read text from the X selection

    Usage: C{clipboard.get_selection()}

    @return: text contents of the mouse selection
    @rtype: C{str}
    @raise Exception: if no text was found in the selection
    """
    Gdk.threads_enter()
    try:
        # wait_for_text() may raise; the finally block guarantees the GDK
        # lock is released so the UI thread cannot deadlock (the original
        # code skipped threads_leave() on an exception).
        text = self.selection.wait_for_text()
    finally:
        Gdk.threads_leave()
    if text is None:
        raise Exception("No text found in X selection")
    return text
Read text from the X selection Usage: C{clipboard.get_selection()} @return: text contents of the mouse selection @rtype: C{str} @raise Exception: if no text was found in the selection
def mounts():
    """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
    system_mounts = []
    with open('/proc/mounts') as f:
        for line in f:
            fields = line.strip().split()
            # /proc/mounts lists "device mountpoint ..."; reverse the first
            # two fields so each entry is [mountpoint, device]
            system_mounts.append(fields[1::-1])
    return system_mounts
Get a list of all mounted volumes as [[mountpoint,device],[...]]
def read(self, file_path: str) -> Iterable[Instance]:
    """
    Returns an ``Iterable`` containing all the instances
    in the specified dataset.

    If ``self.lazy`` is False, this calls ``self._read()``,
    ensures that the result is a list, then returns the resulting list.

    If ``self.lazy`` is True, this returns an object whose
    ``__iter__`` method calls ``self._read()`` each iteration.
    In this case your implementation of ``_read()`` must also be lazy
    (that is, not load all instances into memory at once), otherwise
    you will get a ``ConfigurationError``.

    In either case, the returned ``Iterable`` can be iterated
    over multiple times. It's unlikely you want to override this function,
    but if you do your result should likewise be repeatedly iterable.
    """
    lazy = getattr(self, 'lazy', None)
    if lazy is None:
        # lazy is normally set in the superclass constructor; a subclass
        # that skipped super().__init__() leaves it unset
        logger.warning("DatasetReader.lazy is not set, "
                       "did you forget to call the superclass constructor?")
    # Resolve the on-disk cache location for this dataset, if caching is on.
    if self._cache_directory:
        cache_file = self._get_cache_location_for_file_path(file_path)
    else:
        cache_file = None
    if lazy:
        # _LazyInstances re-invokes _read() (or replays the cache) on every
        # iteration, keeping the result repeatedly iterable without holding
        # all instances in memory.
        return _LazyInstances(lambda: self._read(file_path),
                              cache_file,
                              self.deserialize_instance,
                              self.serialize_instance)
    else:
        # First we read the instances, either from a cache or from the original file.
        if cache_file and os.path.exists(cache_file):
            instances = self._instances_from_cache_file(cache_file)
        else:
            instances = self._read(file_path)

        # Then some validation.
        if not isinstance(instances, list):
            instances = [instance for instance in Tqdm.tqdm(instances)]
        if not instances:
            raise ConfigurationError("No instances were read from the given filepath {}. "
                                     "Is the path correct?".format(file_path))

        # And finally we write to the cache if we need to.
        if cache_file and not os.path.exists(cache_file):
            logger.info(f"Caching instances to {cache_file}")
            with open(cache_file, 'w') as cache:
                # one serialized instance per line
                for instance in Tqdm.tqdm(instances):
                    cache.write(self.serialize_instance(instance) + '\n')
        return instances
Returns an ``Iterable`` containing all the instances in the specified dataset. If ``self.lazy`` is False, this calls ``self._read()``, ensures that the result is a list, then returns the resulting list. If ``self.lazy`` is True, this returns an object whose ``__iter__`` method calls ``self._read()`` each iteration. In this case your implementation of ``_read()`` must also be lazy (that is, not load all instances into memory at once), otherwise you will get a ``ConfigurationError``. In either case, the returned ``Iterable`` can be iterated over multiple times. It's unlikely you want to override this function, but if you do your result should likewise be repeatedly iterable.
def make_chunk(chunk_type, chunk_data):
    """Create a raw chunk by composing chunk type and data.

    The 4-byte big-endian length field (covering the data only) and the
    CRC32 (covering type + data) are calculated for you.

    :arg str chunk_type: PNG chunk type.
    :arg bytes chunk_data: PNG chunk data, **excluding chunk length, type,
        and CRC**.
    :rtype: bytes
    """
    length = struct.pack("!I", len(chunk_data))
    body = chunk_type.encode("latin-1") + chunk_data
    crc = struct.pack("!I", binascii.crc32(body) & 0xffffffff)
    return length + body + crc
Create a raw chunk by composing chunk type and data. It calculates chunk length and CRC for you. :arg str chunk_type: PNG chunk type. :arg bytes chunk_data: PNG chunk data, **excluding chunk length, type, and CRC**. :rtype: bytes
def get(self, id):  # pylint: disable=invalid-name,redefined-builtin
    """Get a package.

    :param id: Package ID as an int.
    :return: :class:`packages.Package <packages.Package>` object
    :rtype: packages.Package
    """
    schema = PackageSchema()
    # fetch the raw service response, then decode it into a Package
    response = self.service.get_id(self.base, id)
    return self.service.decode(schema, response)
Get a package. :param id: Package ID as an int. :return: :class:`packages.Package <packages.Package>` object :rtype: packages.Package
def set_type(self, value):
    """Setter for the ``type`` attribute.

    When the source's ``action`` is ``"remove"``, the only legal type is
    ``"probes"``; anything else raises MalFormattedSource.
    """
    invalid_remove = self.action == "remove" and value != "probes"
    if invalid_remove:
        raise MalFormattedSource(
            "Sources field 'type' when action is remove should always be 'probes'.")
    self._type = value
Setter for type attribute
def simxGetDistanceHandle(clientID, distanceObjectName, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    handle = ct.c_int()
    # the remote API C layer expects bytes; encode str names on Python 3
    if (sys.version_info[0] == 3) and (type(distanceObjectName) is str):
        distanceObjectName = distanceObjectName.encode('utf-8')
    ret = c_GetDistanceHandle(clientID, distanceObjectName,
                              ct.byref(handle), operationMode)
    return ret, handle.value
Please have a look at the function description/documentation in the V-REP user manual
def _get_registerd_func(name_or_func):
    """Resolve *name_or_func* (a str key or a callable) to a registered
    cross-correlation function."""
    if callable(name_or_func):
        func = register_array_xcorr(name_or_func)
    else:
        # empty/None falls back to the default registered function
        func = XCOR_FUNCS[name_or_func or 'default']
    assert callable(func), 'func is not callable'
    if not hasattr(func, 'registered'):
        # make sure the resolved callable carries the added helper methods
        func = register_array_xcorr(func)
    return func
get a xcorr function from a str or callable.
def task(self):
    """
    Find the task for this build.  Wraps the getTaskInfo RPC.

    :returns: deferred that when fired returns the Task object, or None
              if we could not determine the task for this build.
    """
    if not self.task_id:
        # no task recorded for this build: resolve immediately with None
        return defer.succeed(None)
    return self.connection.getTaskInfo(self.task_id)
Find the task for this build. Wraps the getTaskInfo RPC. :returns: deferred that when fired returns the Task object, or None if we could not determine the task for this build.
def _inner_func_anot(func):
    """must be applied to all inner functions that return contexts.

    Wraps all instances of pygame.Surface in the input in Surface before
    delegating to *func*."""
    @wraps(func)
    def wrapper(*args):
        wrapped_args = _lmap(_wrap_surface, args)
        return func(*wrapped_args)
    return wrapper
must be applied to all inner functions that return contexts. Wraps all instances of pygame.Surface in the input in Surface
def events_for_balanceproof(
        channelidentifiers_to_channels: ChannelMap,
        transfers_pair: List[MediationPairState],
        pseudo_random_generator: random.Random,
        block_number: BlockNumber,
        secret: Secret,
        secrethash: SecretHash,
) -> List[Event]:
    """ While it's safe do the off-chain unlock.

    Walks the mediation pairs in reverse and, for every pair whose payee
    already knows the secret, has not been paid yet, and whose payer lock is
    still far enough from expiry, emits the unlock event returned by
    ``channel.send_unlock`` plus an ``EventUnlockSuccess``.
    """
    events: List[Event] = list()
    for pair in reversed(transfers_pair):
        payee_knows_secret = pair.payee_state in STATE_SECRET_KNOWN
        payee_payed = pair.payee_state in STATE_TRANSFER_PAID

        payee_channel = get_payee_channel(channelidentifiers_to_channels, pair)
        payee_channel_open = (
            payee_channel and
            channel.get_status(payee_channel) == CHANNEL_STATE_OPENED
        )

        payer_channel = get_payer_channel(channelidentifiers_to_channels, pair)

        # The mediator must not send to the payee a balance proof if the lock
        # is in the danger zone, because the payer may not do the same and the
        # on-chain unlock may fail. If the lock is nearing it's expiration
        # block, then on-chain unlock should be done, and if successful it can
        # be unlocked off-chain.
        is_safe_to_send_balanceproof = False
        if payer_channel:
            is_safe_to_send_balanceproof, _ = is_safe_to_wait(
                pair.payer_transfer.lock.expiration,
                payer_channel.reveal_timeout,
                block_number,
            )

        should_send_balanceproof_to_payee = (
            payee_channel_open and
            payee_knows_secret and
            not payee_payed and
            is_safe_to_send_balanceproof
        )

        if should_send_balanceproof_to_payee:
            # At this point we are sure that payee_channel exists due to the
            # payee_channel_open check above. So let mypy know about this
            assert payee_channel
            payee_channel = cast(NettingChannelState, payee_channel)
            pair.payee_state = 'payee_balance_proof'

            message_identifier = message_identifier_from_prng(
                pseudo_random_generator)
            unlock_lock = channel.send_unlock(
                channel_state=payee_channel,
                message_identifier=message_identifier,
                payment_identifier=pair.payee_transfer.payment_identifier,
                secret=secret,
                secrethash=secrethash,
            )

            unlock_success = EventUnlockSuccess(
                pair.payer_transfer.payment_identifier,
                pair.payer_transfer.lock.secrethash,
            )
            events.append(unlock_lock)
            events.append(unlock_success)

    return events
While it's safe do the off-chain unlock.
def _pfp__width(self):
    """Return the width of the field (sizeof).

    Serializes the field into an in-memory bit-wrapped stream and counts
    the bytes produced.
    """
    raw = six.BytesIO()
    stream = bitwrap.BitwrappedStream(raw)
    self._pfp__build(stream)
    stream.flush()
    return len(raw.getvalue())
Return the width of the field (sizeof)
def drawing_update(self):
    '''update line drawing'''
    from MAVProxy.modules.mavproxy_map import mp_slipmap
    if self.draw_callback is None:
        return
    self.draw_line.append(self.click_position)
    if len(self.draw_line) < 2:
        # need at least two points before a polygon can be drawn
        return
    polygon = mp_slipmap.SlipPolygon('drawing', self.draw_line,
                                     layer='Drawing', linewidth=2,
                                     colour=(128, 128, 255))
    self.mpstate.map.add_object(polygon)
update line drawing
def DEFINE_integer_list(self, name, default, help, constant=False):
    """A helper for defining lists of integer options."""
    option = type_info.List(name=name,
                            default=default,
                            description=help,
                            validator=type_info.Integer())
    self.AddOption(option, constant=constant)
A helper for defining lists of integer options.
def gaussian_window(t, params):
    """
    Calculates a Gaussian window function in the time domain which will
    broaden peaks in the frequency domain by params["line_broadening"]
    Hertz.

    :param t: time axis samples the window is evaluated on
    :param params: dict with a "line_broadening" entry, in Hertz
    :return: window values, normalised so the first point is 1
    """
    window = suspect.basis.gaussian(t, 0, 0, params["line_broadening"])
    # the above gaussian function returns an area 1 fid, for a windowing
    # function we need to be area preserving (first point must be 1)
    return window / window[0]
Calculates a Gaussian window function in the time domain which will broaden peaks in the frequency domain by params["line_broadening"] Hertz. :param t: :param params: :return:
def merge_groundings(stmts_in):
    """Gather and merge original grounding information from evidences.

    Each Statement's evidences are traversed to find original grounding
    information. These groundings are then merged into an overall consensus
    grounding dict with as much detail as possible.

    The current implementation is only applicable to Statements whose
    concept/agent roles are fixed. Complexes, Associations and Conversions
    cannot be handled correctly.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of INDRA Statements whose groundings should be merged. These
        Statements are meant to have been preassembled and potentially have
        multiple pieces of evidence.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        The list of Statements now with groundings merged at the Statement
        level.
    """
    def surface_grounding(stmt):
        # Find the "best" grounding for a given concept and its evidences
        # and surface that
        for idx, concept in enumerate(stmt.agent_list()):
            if concept is None:
                continue
            aggregate_groundings = {}
            # collect every raw grounding recorded for this agent position
            # across all pieces of evidence
            for ev in stmt.evidence:
                if 'agents' in ev.annotations:
                    groundings = ev.annotations['agents']['raw_grounding'][idx]
                    for ns, value in groundings.items():
                        if ns not in aggregate_groundings:
                            aggregate_groundings[ns] = []
                        if isinstance(value, list):
                            aggregate_groundings[ns] += value
                        else:
                            aggregate_groundings[ns].append(value)
            best_groundings = get_best_groundings(aggregate_groundings)
            concept.db_refs = best_groundings

    def get_best_groundings(aggregate_groundings):
        best_groundings = {}
        for ns, values in aggregate_groundings.items():
            # There are 3 possibilities here
            # 1. All the entries in the list are scored in which case we
            # get unique entries and sort them by score
            if all([isinstance(v, (tuple, list)) for v in values]):
                best_groundings[ns] = []
                for unique_value in {v[0] for v in values}:
                    scores = [v[1] for v in values if v[0] == unique_value]
                    best_groundings[ns].append((unique_value, max(scores)))
                best_groundings[ns] = \
                    sorted(best_groundings[ns], key=lambda x: x[1],
                           reverse=True)
            # 2. All the entries in the list are unscored in which case we
            # get the highest frequency entry
            elif all([not isinstance(v, (tuple, list)) for v in values]):
                best_groundings[ns] = max(set(values), key=values.count)
            # 3. There is a mixture, which can happen when some entries were
            # mapped with scores and others had no scores to begin with.
            # In this case, we again pick the highest frequency non-scored
            # entry assuming that the unmapped version is more reliable.
            else:
                unscored_vals = [v for v in values
                                 if not isinstance(v, (tuple, list))]
                best_groundings[ns] = max(set(unscored_vals),
                                          key=unscored_vals.count)
        return best_groundings

    stmts_out = []
    for stmt in stmts_in:
        # Complex/Conversion agent roles are not fixed (see docstring), so
        # their groundings are left untouched
        if not isinstance(stmt, (Complex, Conversion)):
            surface_grounding(stmt)
        stmts_out.append(stmt)
    return stmts_out
Gather and merge original grounding information from evidences. Each Statement's evidences are traversed to find original grounding information. These groundings are then merged into an overall consensus grounding dict with as much detail as possible. The current implementation is only applicable to Statements whose concept/agent roles are fixed. Complexes, Associations and Conversions cannot be handled correctly. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of INDRA Statements whose groundings should be merged. These Statements are meant to have been preassembled and potentially have multiple pieces of evidence. Returns ------- stmts_out : list[indra.statements.Statement] The list of Statements now with groundings merged at the Statement level.
def network_sub_create_notif(self, tenant_id, tenant_name, cidr):
    """Network create notification.

    Forwarded to network_create_notif only once the firewall has been
    initialised; otherwise it is a no-op.
    """
    if self.fw_init:
        self.network_create_notif(tenant_id, tenant_name, cidr)
Network create notification.
def block_by_command(cls, command):
    """Return the block with the given :attr:`command`.

    Returns None if the block is not found.
    """
    matches = (block for block in cls.blocks if block.has_command(command))
    return next(matches, None)
Return the block with the given :attr:`command`. Returns None if the block is not found.
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types): """ See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values. """ # extract dictionaries of coefficients specific to required # intensity measure type and for PGA C = self.COEFFS[imt] C_PGA = self.COEFFS[PGA()] # compute median pga on rock (vs30=1100), needed for site response # term calculation # For spectral accelerations at periods between 0.0 and 0.25 s, Sa (T) # cannot be less than PGA on soil, therefore if the IMT is in this # period range it is necessary to calculate PGA on soil if imt.name == 'SA' and imt.period > 0.0 and imt.period < 0.25: get_pga_site = True else: get_pga_site = False pga1100, pga_site = self._compute_imt1100(C_PGA, sites, rup, dists, get_pga_site) # Get the median ground motion mean = (self._compute_magnitude_term(C, rup.mag) + self._compute_distance_term(C, rup, dists) + self._compute_style_of_faulting_term(C, rup) + self._compute_hanging_wall_term(C, rup, dists) + self._compute_shallow_site_response(C, sites, pga1100) + self._compute_basin_response_term(C, sites.z2pt5)) # If it is necessary to ensure that Sa(T) >= PGA (see previous comment) if get_pga_site: idx = mean < np.log(pga_site) mean[idx] = np.log(pga_site[idx]) stddevs = self._get_stddevs(C, sites, pga1100, C_PGA['s_lny'], stddev_types) return mean, stddevs
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
def put_user(self, username, body, params=None):
    """
    `<https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-user.html>`_

    :arg username: The username of the User
    :arg body: The user to add
    :arg refresh: If `true` (the default) then refresh the affected shards
        to make this operation visible to search, if `wait_for` then wait
        for a refresh to make this operation visible to search, if `false`
        then do nothing with refreshes., valid choices are: 'true',
        'false', 'wait_for'
    """
    # both username and body are required path/body arguments
    if username in SKIP_IN_PATH or body in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument.")
    return self.transport.perform_request(
        "PUT",
        _make_path("_security", "user", username),
        params=params,
        body=body,
    )
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-user.html>`_ :arg username: The username of the User :arg body: The user to add :arg refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes., valid choices are: 'true', 'false', 'wait_for'
def _covar_mstep_spherical(*args):
    """Performing the covariance M step for spherical cases.

    Takes the diagonal-covariance solution and replaces each row by its
    mean, tiled back to the original width.
    """
    diag_cv = _covar_mstep_diag(*args)
    row_means = diag_cv.mean(axis=1)[:, np.newaxis]
    return np.tile(row_means, (1, diag_cv.shape[1]))
Performing the covariance M step for spherical cases
def modprobe(module, persist=True):
    """Load a kernel module and configure for auto-load on reboot."""
    log('Loading kernel module %s' % module, level=INFO)
    subprocess.check_call(['modprobe', module])
    if persist:
        # record the module so it is loaded again after a reboot
        persistent_modprobe(module)
Load a kernel module and configure for auto-load on reboot.
def create_namespace(
        name,
        **kwargs):
    '''
    Creates a namespace with the specified name.

    CLI Example:
        salt '*' kubernetes.create_namespace salt
        salt '*' kubernetes.create_namespace name=salt
    '''
    body = kubernetes.client.V1Namespace(
        metadata=kubernetes.client.V1ObjectMeta(name=name))
    body.metadata.name = name

    cfg = _setup_conn(**kwargs)

    try:
        api_instance = kubernetes.client.CoreV1Api()
        return api_instance.create_namespace(body).to_dict()
    except (ApiException, HTTPError) as exc:
        # a 404 from the API is treated as "nothing created"
        if isinstance(exc, ApiException) and exc.status == 404:
            return None
        log.exception(
            'Exception when calling '
            'CoreV1Api->create_namespace'
        )
        raise CommandExecutionError(exc)
    finally:
        _cleanup(**cfg)
Creates a namespace with the specified name. CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt
def split_lists(d, split_keys, new_name='split',
                check_length=True, deepcopy=True):
    """split_lists key:list pairs into dicts for each item in the lists

    NB: will only split if all split_keys are present

    Parameters
    ----------
    d : dict
    split_keys : list
        keys to split
    new_name : str
        top level key for split items
    check_length : bool
        if true, raise error if any lists are of a different length
    deepcopy: bool
        deepcopy values

    Examples
    --------

    >>> from pprint import pprint

    >>> d = {'path_key':{'x':[1,2],'y':[3,4],'a':1}}
    >>> new_d = split_lists(d,['x','y'])
    >>> pprint(new_d)
    {'path_key': {'a': 1, 'split': [{'x': 1, 'y': 3}, {'x': 2, 'y': 4}]}}

    >>> split_lists(d,['x','a'])
    Traceback (most recent call last):
    ...
    ValueError: "a" data at the following path is not a list ('path_key',)

    >>> d2 = {'path_key':{'x':[1,7],'y':[3,4,5]}}
    >>> split_lists(d2,['x','y'])
    Traceback (most recent call last):
    ...
    ValueError: lists at the following path do not have the same size ('path_key',)

    """  # noqa: E501
    # Work on a flat {path_tuple: leaf_dict} view so arbitrarily nested
    # dicts are handled uniformly; unflatten restores the nesting at the end.
    flattened = flatten2d(d)
    new_d = {}
    for key, value in flattened.items():
        # Only split at this level when every requested key is present.
        if set(split_keys).issubset(value.keys()):
            # combine_d = {}
            combine_d = []   # one dict per zipped element of the split lists
            sub_d = {}       # keys at this level that are not being split
            length = None    # length of the first split list encountered
            for subkey, subvalue in value.items():
                if subkey in split_keys:
                    if not isinstance(subvalue, list):
                        raise ValueError(
                            '"{0}" data at the following path is not a list '
                            '{1}'.format(subkey, key))
                    if check_length and length is not None:
                        if len(subvalue) != length:
                            raise ValueError(
                                'lists at the following path '
                                'do not have the same size {0}'.format(key))
                    if length is None:
                        # First split key: seed one dict per list element.
                        combine_d = [{subkey: v} for v in subvalue]
                    else:
                        # Subsequent split keys: zip values into the
                        # already-seeded dicts, element by element.
                        for item, val in zip(combine_d, subvalue):
                            item[subkey] = val
                    length = len(subvalue)
                    # new_combine = {k:{subkey:v}
                    #                for k,v in enumerate(subvalue)}
                    # combine_d = merge([combine_d,new_combine])
                else:
                    sub_d[subkey] = subvalue
            try:
                # merge raises ValueError if new_name collides with an
                # existing key at this level.
                new_d[key] = merge([sub_d, {new_name: combine_d}])
            except ValueError:
                raise ValueError(
                    'split data key: {0}, already exists at '
                    'this level for {1}'.format(new_name, key))
        else:
            # Not all split keys present: keep the leaf dict unchanged.
            new_d[key] = value

    return unflatten(new_d, deepcopy=deepcopy)
split_lists key:list pairs into dicts for each item in the lists NB: will only split if all split_keys are present Parameters ---------- d : dict split_keys : list keys to split new_name : str top level key for split items check_length : bool if true, raise error if any lists are of a different length deepcopy: bool deepcopy values Examples -------- >>> from pprint import pprint >>> d = {'path_key':{'x':[1,2],'y':[3,4],'a':1}} >>> new_d = split_lists(d,['x','y']) >>> pprint(new_d) {'path_key': {'a': 1, 'split': [{'x': 1, 'y': 3}, {'x': 2, 'y': 4}]}} >>> split_lists(d,['x','a']) Traceback (most recent call last): ... ValueError: "a" data at the following path is not a list ('path_key',) >>> d2 = {'path_key':{'x':[1,7],'y':[3,4,5]}} >>> split_lists(d2,['x','y']) Traceback (most recent call last): ... ValueError: lists at the following path do not have the same size ('path_key',)
def children(self, alias, bank_id):
    """
    URL for getting or setting child relationships for the specified bank

    :param alias:
    :param bank_id:
    :return:
    """
    # Sanitize the alias, then assemble the path segments.
    safe_alias = self._safe_alias(alias)
    return '{0}{1}/child/ids/{2}'.format(self._root, safe_alias, str(bank_id))
URL for getting or setting child relationships for the specified bank :param alias: :param bank_id: :return:
def unlink_from(self, provider):
    """Unlink this user from the given third-party platform.

    :param provider: name of the third-party platform to unlink
    :raises TypeError: if ``provider`` is not a string
    :return: ``self``, for call chaining
    """
    # isinstance (rather than an exact type comparison) also accepts
    # str subclasses, which behave like plain strings here.
    if not isinstance(provider, str):
        raise TypeError('input should be a string')
    # Unlinking is implemented as linking with empty auth data.
    self.link_with(provider, None)
    # self._sync_auth_data(provider)
    return self
解绑特定第三方平台
def EvalGeneric(self, hashers=None):
    """Causes the entire file to be hashed by the given hash functions.

    This sets up a 'finger' for fingerprinting, where the entire file
    is passed through a pre-defined (or user defined) set of hash
    functions.

    Args:
      hashers: An iterable of hash classes (e.g. out of hashlib) which
               will be instantiated for use. If hashers is not provided,
               or is provided as 'None', the default hashers will get
               used. To invoke this without hashers, provide an empty
               list.

    Returns:
      Always True, as all files are 'generic' files.
    """
    # Fall back to the default hasher set only when the argument was
    # omitted entirely; an explicit empty list disables hashing.
    chosen = Fingerprinter.GENERIC_HASH_CLASSES if hashers is None else hashers
    finger = Finger(
        [cls() for cls in chosen],
        [Range(0, self.filelength)],
        {'name': 'generic'},
    )
    self.fingers.append(finger)
    return True
Causes the entire file to be hashed by the given hash functions. This sets up a 'finger' for fingerprinting, where the entire file is passed through a pre-defined (or user defined) set of hash functions. Args: hashers: An iterable of hash classes (e.g. out of hashlib) which will be instantiated for use. If hashers is not provided, or is provided as 'None', the default hashers will get used. To invoke this without hashers, provide an empty list. Returns: Always True, as all files are 'generic' files.
def count_args(node, results):
    # type: (Node, Dict[str, Base]) -> Tuple[int, bool, bool, bool]
    """Count arguments and check for self and *args, **kwds.

    Return (count, selfish, star, starstar) where:
    - count is total number of args (including *args, **kwds)
    - selfish is True if the initial arg is named 'self' or 'cls'
    - star is True iff *args is found
    - starstar is True iff **kwds is found
    """
    count = 0
    selfish = False
    star = False
    starstar = False
    args = results.get('args')
    # 'args' may be a multi-child Node, a single Leaf, or absent entirely.
    if isinstance(args, Node):
        children = args.children
    elif isinstance(args, Leaf):
        children = [args]
    else:
        children = []
    # Interpret children according to the following grammar:
    # (('*'|'**')? NAME ['=' expr] ','?)*
    skip = False
    previous_token_is_star = False
    for child in children:
        if skip:
            # This child is the default-value expression following '='.
            skip = False
        elif isinstance(child, Leaf):
            # A single '*' indicates the rest of the arguments are keyword only
            # and shouldn't be counted as a `*`.
            if child.type == token.STAR:
                previous_token_is_star = True
            elif child.type == token.DOUBLESTAR:
                starstar = True
            elif child.type == token.NAME:
                if count == 0:
                    if child.value in ('self', 'cls'):
                        selfish = True
                count += 1
                # '*' immediately followed by a NAME is a real *args param.
                if previous_token_is_star:
                    star = True
            elif child.type == token.EQUAL:
                skip = True
            if child.type != token.STAR:
                previous_token_is_star = False
    return count, selfish, star, starstar
Count arguments and check for self and *args, **kwds. Return (count, selfish, star, starstar) where: - count is total number of args (including *args, **kwds) - selfish is True if the initial arg is named 'self' or 'cls' - star is True iff *args is found - starstar is True iff **kwds is found
def cache_file(package, mode):
    """
    Yields a file-like object for the purpose of writing to or
    reading from the cache.

    The code:

    with cache_file(...) as f:
        # do stuff with f

    is guaranteed to convert any exceptions to warnings (*), both in the
    cache_file(...) call and the 'do stuff with f' block.

    The file is automatically closed upon exiting the with block.

    If getting an actual file fails, yields a DummyFile.

    :param package: the name of the package being checked as a string
    :param mode: the mode to open the file in, either 'r' or 'w'
    """
    f = DummyFile()
    # We have to wrap the whole function body in this block to guarantee
    # catching all exceptions. In particular the yield needs to be inside
    # to catch exceptions coming from the with block.
    with exception_to_warning('use cache while checking for outdated package',
                              OutdatedCacheFailedWarning):
        try:
            cache_path = os.path.join(tempfile.gettempdir(),
                                      get_cache_filename(package))
            # Opening for read only makes sense when the file exists;
            # writing always creates the file.
            if mode == 'w' or os.path.exists(cache_path):
                f = open(cache_path, mode)
        finally:
            # Putting the yield in the finally section ensures that exactly
            # one thing is yielded once, otherwise @contextmanager would
            # raise an exception.
            with f:  # closes the file afterwards
                yield f
Yields a file-like object for the purpose of writing to or reading from the cache. The code: with cache_file(...) as f: # do stuff with f is guaranteed to convert any exceptions to warnings (*), both in the cache_file(...) call and the 'do stuff with f' block. The file is automatically closed upon exiting the with block. If getting an actual file fails, yields a DummyFile. :param package: the name of the package being checked as a string :param mode: the mode to open the file in, either 'r' or 'w'
def timestamp_to_local_time_str(
        timestamp, timezone_name, fmt="yyyy-MM-dd HH:mm:ss"):
    """Convert an epoch timestamp to a localized datetime string.

    Arguments
    ---------
    timestamp : int
        The timestamp to convert.
    timezone_name : datetime.timezone
        The timezone of the desired local time.
    fmt : str
        The format of the output string.

    Returns
    -------
    str
        The localized datetime string.
    """
    # Localize first, then render with the requested pattern.
    localized = timestamp_to_local_time(timestamp, timezone_name)
    return localized.format_datetime(fmt)
Convert epoch timestamp to a localized datetime string. Arguments --------- timestamp : int The timestamp to convert. timezone_name : datetime.timezone The timezone of the desired local time. fmt : str The format of the output string. Returns ------- str The localized datetime string.
def output(self, mode='file', forced=False, context=None):
    """
    The general output method, override in subclass if you need to do
    any custom modification. Calls other mode specific methods or simply
    returns the content directly.
    """
    content = '\n'.join(self.filter_input(forced, context=context))
    if not content:
        return ''
    # Without compression (and no force flag) the raw content is returned.
    if not (settings.COMPRESS_ENABLED or forced):
        return content
    filtered = self.filter_output(content)
    return self.handle_output(mode, filtered, forced)
The general output method, override in subclass if you need to do any custom modification. Calls other mode specific methods or simply returns the content directly.
def replace(self, re_text, replace_str, text):
    """Substitute every regex match in a text.

    :param re_text: regular expression pattern to search for
    :param replace_str: replacement string
    :param text: text to search in
    :return: the text with all matches replaced
    """
    pattern = re.compile(re_text)
    return pattern.sub(replace_str, text)
正则表达式替换 :param re_text: 正则表达式 :param replace_str: 替换字符串 :param text: 搜索文档 :return: 替换后的字符串
def parsePositionFile(filename):
    """Parse an Android GPS logger CSV file.

    :param filename: path to the CSV file to read
    :return: list of dicts, one per CSV row; each dict additionally gains
        a 'strtime' key holding the row's 'time' value rendered as e.g.
        "01 Jan 2019, 12:00 UTC"
    """
    rows = []
    # csv.DictReader requires a text-mode file on Python 3 (the previous
    # "rb" mode would raise). newline='' lets the csv module handle any
    # embedded line endings itself, per the csv docs.
    with open(filename, "r", newline="") as theFile:
        reader = csv.DictReader(theFile)
        for line in reader:
            # Convert the time string to something
            # a bit more human readable
            mytime = dateparser.parse(line['time'])
            line['strtime'] = mytime.strftime("%d %b %Y, %H:%M UTC")
            rows.append(line)
    return rows
Parses Android GPS logger csv file and returns list of dictionaries
def set_trapezoidal_integration(self, n):
    """Set the code to use trapezoidal integration.

    **Call signature**

    *n*
      Use this many nodes

    Returns *self* for convenience in chaining.
    """
    if not n >= 2:
        raise ValueError('must have n >= 2; got %r' % (n,))
    # The stored value is offset by one from the node count.
    self.in_vals[IN_VAL_INTEG_METH] = n + 1
    return self
Set the code to use trapezoidal integration. **Call signature** *n* Use this many nodes Returns *self* for convenience in chaining.
def generateNamespaceChildrenString(self, nspace):
    '''
    Helper method for :func:`~exhale.graph.ExhaleRoot.generateSingleNamespace`, and
    :func:`~exhale.graph.ExhaleRoot.generateFileNodeDocuments`.  Builds the body text
    for the namespace node document that links to all of the child namespaces,
    structs, classes, functions, typedefs, unions, and variables associated with
    this namespace.

    :Parameters:
        ``nspace`` (ExhaleNode)
            The namespace node we are generating the body text for.

    :Return (str):
        The string to be written to the namespace node's reStructuredText document.
    '''
    # sort the children into per-kind buckets
    nsp_namespaces = []
    nsp_nested_class_like = []
    nsp_enums = []
    nsp_functions = []
    nsp_typedefs = []
    nsp_unions = []
    nsp_variables = []
    for child in nspace.children:
        # Skip children whose names were requested to be explicitly ignored.
        should_exclude = False
        for exclude in configs._compiled_listing_exclude:
            if exclude.match(child.name):
                should_exclude = True
        if should_exclude:
            continue

        if child.kind == "namespace":
            nsp_namespaces.append(child)
        elif child.kind == "struct" or child.kind == "class":
            # Class-like children also contribute their own nested members
            # (classes, enums, unions) to the namespace-level listings.
            child.findNestedClassLike(nsp_nested_class_like)
            child.findNestedEnums(nsp_enums)
            child.findNestedUnions(nsp_unions)
        elif child.kind == "enum":
            nsp_enums.append(child)
        elif child.kind == "function":
            nsp_functions.append(child)
        elif child.kind == "typedef":
            nsp_typedefs.append(child)
        elif child.kind == "union":
            nsp_unions.append(child)
        elif child.kind == "variable":
            nsp_variables.append(child)

    # generate their headings if they exist (no Defines...that's not a C++ thing...)
    children_stream = StringIO()
    self.generateSortedChildListString(children_stream, "Namespaces", nsp_namespaces)
    self.generateSortedChildListString(children_stream, "Classes", nsp_nested_class_like)
    self.generateSortedChildListString(children_stream, "Enums", nsp_enums)
    self.generateSortedChildListString(children_stream, "Functions", nsp_functions)
    self.generateSortedChildListString(children_stream, "Typedefs", nsp_typedefs)
    self.generateSortedChildListString(children_stream, "Unions", nsp_unions)
    self.generateSortedChildListString(children_stream, "Variables", nsp_variables)
    # read out the buffer contents, close it and return the desired string
    children_string = children_stream.getvalue()
    children_stream.close()
    return children_string
Helper method for :func:`~exhale.graph.ExhaleRoot.generateSingleNamespace`, and :func:`~exhale.graph.ExhaleRoot.generateFileNodeDocuments`. Builds the body text for the namespace node document that links to all of the child namespaces, structs, classes, functions, typedefs, unions, and variables associated with this namespace. :Parameters: ``nspace`` (ExhaleNode) The namespace node we are generating the body text for. :Return (str): The string to be written to the namespace node's reStructuredText document.
def cycle(arrays, descs=None, cadence=0.6, toworlds=None,
          drawoverlay=None, yflip=False, tostatuses=None, run_main=True,
          save_after_viewing=None):
    """Interactively display a series of 2D data arrays.

    arrays
      An iterable of 2D arrays (a 3D array works).
    descs
      An iterable of text descriptions, one for each array
    cadence
      The time delay before the next array is shown, in seconds.
    tostatuses
      An iterable of functions that convert cursor positions to a textual
      status output corresponding to that position. FIXME details needed.
    toworlds
      An iterable of functions that convert cursor positions to a
      latitude/longitude pair that is displayed in the status output.
      The `tostatuses` keyword is a more generic version of this.
      FIXME details needed.
    drawoverlay
      An optional function that draws an overlay on the display after
      the underlying data image is presented. FIXME details needed.
    yflip
      If true, have the numerical *y* coordinates have 0 refer to the
      bottom of the image. Note that the data array is still drawn such
      that its first row appears at the top!
    run_main
      If true, run the Gtk mainloop explicitly so that the function does
      not return until the window is closed. If false, no mainloop is
      run. If the application happens to already be running a mainloop
      in the background, the window will appear and the user will be
      able to interact with it while this thread continues executing.
    save_after_viewing
      If set to a string containing an integer percent-formatting
      specifier, the data will be written to a series of PNG files after
      the window is closed.

    """
    n = len(arrays)
    amin = amax = h = w = None

    if toworlds is not None and tostatuses is not None:
        raise ValueError('only one of "toworlds" and "tostatuses" may be given')

    if descs is None:
        descs = [''] * n

    # First pass: validate shapes and compute the global data range,
    # ignoring non-finite values when taking the extrema.
    for array in arrays:
        thish, thisw = array.shape
        thismin, thismax = array.min(), array.max()
        if not np.isfinite(thismin):
            thismin = array[np.ma.where(np.isfinite(array))].min()
        if not np.isfinite(thismax):
            thismax = array[np.ma.where(np.isfinite(array))].max()
        if amin is None:
            w, h, amin, amax = thisw, thish, thismin, thismax
        else:
            if thisw != w:
                raise ValueError('array widths not all equal')
            if thish != h:
                raise ValueError('array heights not all equal')
            amin = min(amin, thismin)
            amax = max(amax, thismax)

    stride = cairo.ImageSurface.format_stride_for_width(cairo.FORMAT_ARGB32, w)
    assert stride % 4 == 0  # stride is in bytes

    imgdata = np.empty((n, h, stride // 4), dtype=np.uint32)
    fixed = np.empty((n, h, w), dtype=np.int32)
    antimask = np.empty((n, h, w), dtype=np.bool_)
    surfaces = [None] * n
    imgdata.fill(0xFF000000)  # opaque alpha channel

    for i, array in enumerate(arrays):
        surfaces[i] = cairo.ImageSurface.create_for_data(
            imgdata[i], cairo.FORMAT_ARGB32, w, h, stride)

        if np.ma.is_masked(array):
            filled = array.filled(amin)
            antimask[i] = ~array.mask
        else:
            filled = array
            antimask[i].fill(True)

        # Rescale data into a fixed-point range for fast tuning updates.
        fixed[i] = (filled - amin) * (0x0FFFFFF0 / (amax - amin))

    def getn():
        return n

    def getshapei(i):
        return w, h

    def getdesci(i):
        return descs[i]

    clipped = np.zeros((h, w), dtype=np.int32)    # scratch arrays -- two needed
    clipped2 = np.zeros((h, w), dtype=np.uint32)  # to make numpy ufunc casting happy

    def settuningi(i, tunerx, tunery):
        # Rebuild the greyscale channels for image i given the current
        # black/white tuner fractions.
        np.bitwise_and(imgdata[i], 0xFF000000, imgdata[i])

        fmin = int(0x0FFFFFF0 * tunerx)
        fmax = int(0x0FFFFFF0 * tunery)

        if fmin == fmax:
            # Degenerate range: hard threshold.
            np.add(imgdata[i], 255 * (fixed[i] > fmin).astype(np.uint32),
                   imgdata[i])
        else:
            np.clip(fixed[i], fmin, fmax, clipped)
            np.subtract(clipped, fmin, clipped)
            np.multiply(clipped, 255. / (fmax - fmin), clipped2,
                        casting='unsafe')
            np.add(imgdata[i], clipped2, imgdata[i])

        # Zero out masked pixels.
        np.multiply(imgdata[i], antimask[i], imgdata[i])

    def getsurfacei(i, xoffset, yoffset, width, height):
        return surfaces[i], xoffset, yoffset

    # see comment in view()
    nomasks = [not np.ma.is_masked(a) or a.mask is np.ma.nomask
               for a in arrays]

    if tostatuses is None:
        if toworlds is None:
            tostatuses = [None] * n
        else:
            from .astutil import fmthours, fmtdeglat

            def make_status_func(toworld):
                def status(y_and_x):
                    lat, lon = toworld(y_and_x)
                    return 'lat=%s lon=%s' % (fmtdeglat(lat), fmthours(lon))
                # BUGFIX: the closure must be returned; previously the
                # factory implicitly returned None, so world-coordinate
                # statuses were silently never displayed.
                return status

            tostatuses = [make_status_func(toworlds[i]) for i in range(n)]

    def fmtstatusi(i, x, y):
        s = ''
        row = int(np.floor(y + 0.5))
        col = int(np.floor(x + 0.5))
        if row >= 0 and col >= 0 and row < h and col < w:
            if nomasks[i] or not arrays[i].mask[row, col]:
                s += '%g ' % arrays[i][row, col]
        if yflip:
            y = h - 1 - y
            row = h - 1 - row
        s += '[%d,%d] x=%.1f y=%.1f' % (row, col, x, y)
        if tostatuses[i] is not None:
            s += ' ' + tostatuses[i](np.array([y, x]))
        return s

    cycler = Cycler()
    cycler.set_n_getter(getn)
    cycler.set_shape_getter(getshapei)
    cycler.set_desc_getter(getdesci)
    cycler.set_tuning_setter(settuningi)
    cycler.set_surface_getter(getsurfacei)
    cycler.set_status_formatter(fmtstatusi)
    cycler.set_overlay_drawer(drawoverlay)
    cycler.win.show_all()

    if run_main:
        cycler.win.connect('destroy', Gtk.main_quit)
        Gtk.main()
    else:
        cycler.win.connect('destroy', lambda e: cycler.win.destroy())

    if save_after_viewing is not None:
        for i in range(n):
            filename = save_after_viewing % (i,)
            settuningi(i, cycler.last_tunerx, cycler.last_tunery)
            surface, xoffset, yoffset = getsurfacei(i, 0, 0, w, h)
            surface.write_to_png(filename)
Interactively display a series of 2D data arrays. arrays An iterable of 2D arrays (a 3D array works). descs An iterable of text descriptions, one for each array cadence The time delay before the next array is shown, in seconds. tostatuses An iterable of functions that convert cursor positions to a textual status output corresponding to that position. FIXME details needed. toworlds An iterable of functions that convert cursor positions to a latitude/longitude pair that is displayed in the status output. The `tostatuses` keyword is a more generic version of this. FIXME details needed. drawoverlay An optional function that draws an overlay on the display after the underlying data image is presented. FIXME details needed. yflip If true, have the numerical *y* coordinates have 0 refer to the bottom of the image. Note that the data array is still drawn such that its first row appears at the top! run_main If true, run the Gtk mainloop explicitly so that the function does not return until the window is closed. If false, no mainloop is run. If the application happens to already be running a mainloop in the background, the window will appear and the user will be able to interact with it while this thread continues executing. save_after_viewing If set to a string containing an integer percent-formatting specifier, the data will be written to a series of PNG files after the window is closed.
def pubkey(self, identity, ecdh=False):
    """Return the public key for *identity*, optionally for ECDH use."""
    # Bail out early when the request is not supported for this identity.
    _verify_support(identity, ecdh)
    # Delegate to the base Trezor implementation.
    return trezor.Trezor.pubkey(self, identity=identity, ecdh=ecdh)
Return public key.
def send_registration_mail(email, *, request, **kwargs):
    """send_registration_mail(email, *, request, **kwargs)
    Sends the registration mail

    * ``email``: The email address where the registration link should be
      sent to.
    * ``request``: A HTTP request instance, used to construct the complete
      URL (including protocol and domain) for the registration link.
    * Additional keyword arguments for ``get_confirmation_url``
      respectively ``get_confirmation_code``.

    The mail is rendered using the following two templates:

    * ``registration/email_registration_email.txt``: The first line of this
      template will be the subject, the third to the last line the body of
      the email.
    * ``registration/email_registration_email.html``: The body of the HTML
      version of the mail. This template is **NOT** available by default
      and is not required either.
    """
    url = get_confirmation_url(email, request, **kwargs)
    message = render_to_mail(
        "registration/email_registration_email",
        {"url": url},
        to=[email],
    )
    message.send()
send_registration_mail(email, *, request, **kwargs) Sends the registration mail * ``email``: The email address where the registration link should be sent to. * ``request``: A HTTP request instance, used to construct the complete URL (including protocol and domain) for the registration link. * Additional keyword arguments for ``get_confirmation_url`` respectively ``get_confirmation_code``. The mail is rendered using the following two templates: * ``registration/email_registration_email.txt``: The first line of this template will be the subject, the third to the last line the body of the email. * ``registration/email_registration_email.html``: The body of the HTML version of the mail. This template is **NOT** available by default and is not required either.
def setEnv(self, name, value=None):
    """
    Set an environment variable for the worker process before it is launched.

    The worker process will typically inherit the environment of the machine
    it is running on but this method makes it possible to override specific
    variables in that inherited environment before the worker is launched.
    Note that this mechanism is different to the one used by the worker
    internally to set up the environment of a job. A call to this method
    affects all jobs issued after this method returns. Note to implementors:
    This means that you would typically need to copy the variables before
    enqueuing a job.

    If no value is provided it will be looked up from the current environment.

    NB: Only the Mesos and single-machine batch systems support passing
    environment variables. On other batch systems, this method has no effect.
    See https://github.com/BD2KGenomics/toil/issues/547.

    :param str name: the environment variable to be set on the worker.

    :param str value: if given, the environment variable given by name will
           be set to this value. if None, the variable's current value will
           be used as the value on the worker

    :raise RuntimeError: if value is None and the name cannot be found in
           the environment
    """
    if value is None:
        try:
            value = os.environ[name]
        except KeyError:
            # Format the message eagerly: unlike logging calls,
            # RuntimeError does not apply %-style arguments, so the
            # previous form raised with an unformatted message tuple.
            raise RuntimeError(
                "%s does not exist in current environment" % name)
    self.environment[name] = value
Set an environment variable for the worker process before it is launched. The worker process will typically inherit the environment of the machine it is running on but this method makes it possible to override specific variables in that inherited environment before the worker is launched. Note that this mechanism is different to the one used by the worker internally to set up the environment of a job. A call to this method affects all jobs issued after this method returns. Note to implementors: This means that you would typically need to copy the variables before enqueuing a job. If no value is provided it will be looked up from the current environment. NB: Only the Mesos and single-machine batch systems support passing environment variables. On other batch systems, this method has no effect. See https://github.com/BD2KGenomics/toil/issues/547. :param str name: the environment variable to be set on the worker. :param str value: if given, the environment variable given by name will be set to this value. if None, the variable's current value will be used as the value on the worker :raise RuntimeError: if value is None and the name cannot be found in the environment
def _read_para_cert(self, code, cbit, clen, *, desc, length, version):
    """Read HIP CERT parameter.

    Structure of HIP CERT parameter [RFC 7401]:

         0                   1                   2                   3
         0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        |             Type              |             Length            |
        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        |  CERT group   |  CERT count   |    CERT ID    |   CERT type   |
        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        |                          Certificate                          /
        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        /                               |   Padding (variable length)   |
        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

        Octets      Bits        Name                    Description
          0           0     cert.type                 Parameter Type
          1          15     cert.critical             Critical Bit
          2          16     cert.length               Length of Contents
          4          32     cert.group                CERT Group
          5          40     cert.count                CERT Count
          6          48     cert.id                   CERT ID
          7          56     cert.cert_type            CERT Type
          8          64     cert.certificate          Certificate
          ?           ?     -                         Padding

    """
    _ctgp = self._read_unpack(1)   # CERT group
    _ctct = self._read_unpack(1)   # CERT count
    _ctid = self._read_unpack(1)   # CERT ID
    _cttp = self._read_unpack(1)   # CERT type
    _ctdt = self._read_fileng(clen-4)   # certificate payload (clen minus header)

    cert = dict(
        type=desc,
        critical=cbit,
        length=clen,
        group=_GROUP_ID.get(_ctgp, 'Unassigned'),
        count=_ctct,
        id=_ctid,
        cert_type=_CERT_TYPE.get(_cttp, 'Unassigned'),
        certificate=_ctdt,
    )

    # Consume trailing padding so the stream stays aligned with the
    # declared parameter length.
    _plen = length - clen
    if _plen:
        self._read_fileng(_plen)

    return cert
Read HIP CERT parameter. Structure of HIP CERT parameter [RFC 7401]: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Type | Length | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | CERT group | CERT count | CERT ID | CERT type | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Certificate / +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ / | Padding (variable length) | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 cert.type Parameter Type 1 15 cert.critical Critical Bit 2 16 cert.length Length of Contents 4 32 cert.group CERT Group 5 40 cert.count CERT Count 6 48 cert.id CERT ID 7 56 cert.cert_type CERT Type 8 64 cert.certificate Certificate ? ? - Padding
def load_vocab(fin):
    """Load a vocabulary file created by word2vec's ``-save-vocab <file>``
    option.

    Args:
        fin (File): File-like object to read from; each line holds a word
            and its count, separated by whitespace.

    Returns:
        OrderedDict: Mapping from a word to the number of appearances in
        the original text (``int``), preserving the order of the original
        vocab file.
    """
    vocab = OrderedDict()
    for raw_line in fin:
        word, count = raw_line.strip().split()
        vocab[word] = int(count)
    return vocab
Load vocabulary from vocab file created by word2vec with ``-save-vocab <file>`` option. Args: fin (File): File-like object to read from. Returns: OrderedDict: Mapping from a word to the number of appearances in the original text (``int``). Order is preserved from the original vocab file.
def subsol(datetime): """Finds subsolar geocentric latitude and longitude. Parameters ========== datetime : :class:`datetime.datetime` Returns ======= sbsllat : float Latitude of subsolar point sbsllon : float Longitude of subsolar point Notes ===== Based on formulas in Astronomical Almanac for the year 1996, p. C24. (U.S. Government Printing Office, 1994). Usable for years 1601-2100, inclusive. According to the Almanac, results are good to at least 0.01 degree latitude and 0.025 degrees longitude between years 1950 and 2050. Accuracy for other years has not been tested. Every day is assumed to have exactly 86400 seconds; thus leap seconds that sometimes occur on December 31 are ignored (their effect is below the accuracy threshold of the algorithm). After Fortran code by A. D. Richmond, NCAR. Translated from IDL by K. Laundal. """ # convert to year, day of year and seconds since midnight year = datetime.year doy = datetime.timetuple().tm_yday ut = datetime.hour * 3600 + datetime.minute * 60 + datetime.second if not 1601 <= year <= 2100: raise ValueError('Year must be in [1601, 2100]') yr = year - 2000 nleap = int(np.floor((year - 1601.0) / 4.0)) nleap -= 99 if year <= 1900: ncent = int(np.floor((year - 1601.0) / 100.0)) ncent = 3 - ncent nleap = nleap + ncent l0 = -79.549 + (-0.238699 * (yr - 4.0 * nleap) + 3.08514e-2 * nleap) g0 = -2.472 + (-0.2558905 * (yr - 4.0 * nleap) - 3.79617e-2 * nleap) # Days (including fraction) since 12 UT on January 1 of IYR: df = (ut / 86400.0 - 1.5) + doy # Mean longitude of Sun: lmean = l0 + 0.9856474 * df # Mean anomaly in radians: grad = np.radians(g0 + 0.9856003 * df) # Ecliptic longitude: lmrad = np.radians(lmean + 1.915 * np.sin(grad) + 0.020 * np.sin(2.0 * grad)) sinlm = np.sin(lmrad) # Obliquity of ecliptic in radians: epsrad = np.radians(23.439 - 4e-7 * (df + 365 * yr + nleap)) # Right ascension: alpha = np.degrees(np.arctan2(np.cos(epsrad) * sinlm, np.cos(lmrad))) # Declination, which is also the subsolar latitude: sslat = 
np.degrees(np.arcsin(np.sin(epsrad) * sinlm)) # Equation of time (degrees): etdeg = lmean - alpha nrot = round(etdeg / 360.0) etdeg = etdeg - 360.0 * nrot # Subsolar longitude: sslon = 180.0 - (ut / 240.0 + etdeg) # Earth rotates one degree every 240 s. nrot = round(sslon / 360.0) sslon = sslon - 360.0 * nrot return sslat, sslon
Finds subsolar geocentric latitude and longitude. Parameters ========== datetime : :class:`datetime.datetime` Returns ======= sbsllat : float Latitude of subsolar point sbsllon : float Longitude of subsolar point Notes ===== Based on formulas in Astronomical Almanac for the year 1996, p. C24. (U.S. Government Printing Office, 1994). Usable for years 1601-2100, inclusive. According to the Almanac, results are good to at least 0.01 degree latitude and 0.025 degrees longitude between years 1950 and 2050. Accuracy for other years has not been tested. Every day is assumed to have exactly 86400 seconds; thus leap seconds that sometimes occur on December 31 are ignored (their effect is below the accuracy threshold of the algorithm). After Fortran code by A. D. Richmond, NCAR. Translated from IDL by K. Laundal.
def circos_radius(n_nodes, node_r):
    """
    Automatically computes the origin-to-node centre radius of the Circos
    plot using the triangle equality sine rule.

    a / sin(A) = b / sin(B) = c / sin(C)

    :param n_nodes: the number of nodes in the plot.
    :type n_nodes: int
    :param node_r: the radius of each node.
    :type node_r: float
    :returns: Origin-to-node centre radius.
    """
    # Central angle subtended by one node, and each of the two equal base
    # angles of the isosceles triangle formed with the origin.
    central_angle = 2 * np.pi / n_nodes
    base_angle = (np.pi - central_angle) / 2
    chord = 2 * node_r
    return chord * np.sin(base_angle) / np.sin(central_angle)
Automatically computes the origin-to-node centre radius of the Circos plot using the triangle equality sine rule. a / sin(A) = b / sin(B) = c / sin(C) :param n_nodes: the number of nodes in the plot. :type n_nodes: int :param node_r: the radius of each node. :type node_r: float :returns: Origin-to-node centre radius.
async def send_contact(self, phone_number: base.String, first_name: base.String,
                       last_name: typing.Union[base.String, None] = None,
                       disable_notification: typing.Union[base.Boolean, None] = None,
                       reply_markup=None, reply=True) -> Message:
    """
    Deprecated helper that forwards a contact to this message's chat.

    Source: https://core.telegram.org/bots/api#sendcontact

    :param phone_number: Contact's phone number
    :type phone_number: :obj:`base.String`
    :param first_name: Contact's first name
    :type first_name: :obj:`base.String`
    :param last_name: Contact's last name
    :type last_name: :obj:`typing.Union[base.String, None]`
    :param disable_notification: Sends the message silently. Users will receive a notification with no sound.
    :type disable_notification: :obj:`typing.Union[base.Boolean, None]`
    :param reply_markup: Additional interface options.
    :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup,
        types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]`
    :param reply: fill 'reply_to_message_id' with this message's id
    :return: On success, the sent Message is returned.
    :rtype: :obj:`types.Message`
    """
    warn_deprecated('"Message.send_contact" method will be removed in 2.2 version.\n'
                    'Use "Message.reply_contact" instead.',
                    stacklevel=8)
    # Reply to this message only when the caller asked for it.
    reply_to = self.message_id if reply else None
    return await self.bot.send_contact(chat_id=self.chat.id,
                                       phone_number=phone_number,
                                       first_name=first_name,
                                       last_name=last_name,
                                       disable_notification=disable_notification,
                                       reply_to_message_id=reply_to,
                                       reply_markup=reply_markup)
Use this method to send phone contacts. Source: https://core.telegram.org/bots/api#sendcontact :param phone_number: Contact's phone number :type phone_number: :obj:`base.String` :param first_name: Contact's first name :type first_name: :obj:`base.String` :param last_name: Contact's last name :type last_name: :obj:`typing.Union[base.String, None]` :param disable_notification: Sends the message silently. Users will receive a notification with no sound. :type disable_notification: :obj:`typing.Union[base.Boolean, None]` :param reply_markup: Additional interface options. :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]` :param reply: fill 'reply_to_message_id' :return: On success, the sent Message is returned. :rtype: :obj:`types.Message`
def rlzs(self):
    """
    :returns: a numpy structured array of realizations, one record per
        realization with fields (ordinal, uid, weight)
    """
    # Flatten each realization into an (ordinal, uid, weight) tuple;
    # NOTE(review): r.weight appears to be a mapping keyed by 'weight' —
    # confirm against the realization class.
    tups = [(r.ordinal, r.uid, r.weight['weight'])
            for r in self.get_rlzs_assoc().realizations]
    # rlz_dt is assumed to be a module-level structured dtype matching the
    # tuple layout above — TODO confirm.
    return numpy.array(tups, rlz_dt)
:returns: an array of realizations
def init_parser(self):
    """
    Build the command line parser for the ROCA fingerprinter.

    :return: configured :class:`argparse.ArgumentParser`
    """
    parser = argparse.ArgumentParser(description='ROCA Fingerprinter')

    # General processing / output options
    parser.add_argument('--tmp', dest='tmp_dir', default='.',
                        help='Temporary dir for subprocessing (e.g. APK parsing scratch)')
    parser.add_argument('--debug', dest='debug', default=False, action='store_const', const=True,
                        help='Debugging logging')
    parser.add_argument('--dump', dest='dump', default=False, action='store_const', const=True,
                        help='Dump all processed info')
    parser.add_argument('--flatten', dest='flatten', default=False, action='store_const', const=True,
                        help='Flatten the dump')
    parser.add_argument('--indent', dest='indent', default=False, action='store_const', const=True,
                        help='Indent the dump')
    parser.add_argument('--old', dest='old', default=False, action='store_const', const=True,
                        help='Old fingerprinting algorithm - moduli detector')
    parser.add_argument('--base64-stdin', dest='base64stdin', default=False, action='store_const', const=True,
                        help='Decode STDIN as base64')

    # Flags forcing a particular input-file format (mutually independent)
    parser.add_argument('--file-pem', dest='file_pem', default=False, action='store_const', const=True,
                        help='Force read as PEM encoded file')
    parser.add_argument('--file-der', dest='file_der', default=False, action='store_const', const=True,
                        help='Force read as DER encoded file')
    parser.add_argument('--file-pgp', dest='file_pgp', default=False, action='store_const', const=True,
                        help='Force read as PGP ASC encoded file')
    parser.add_argument('--file-ssh', dest='file_ssh', default=False, action='store_const', const=True,
                        help='Force read as SSH public key file')
    parser.add_argument('--file-mod', dest='file_mod', default=False, action='store_const', const=True,
                        help='Force read as One modulus per line')
    parser.add_argument('--file-json', dest='file_json', default=False, action='store_const', const=True,
                        help='Force read as JSON file')
    parser.add_argument('--file-ldiff', dest='file_ldiff', default=False, action='store_const', const=True,
                        help='Force read as LDIFF file')
    parser.add_argument('--file-pkcs7', dest='file_pkcs7', default=False, action='store_const', const=True,
                        help='Force read as PKCS7 file')

    # Encoding of per-line modulus inputs (used with --file-mod)
    parser.add_argument('--key-fmt-base64', dest='key_fmt_base64', default=False, action='store_const', const=True,
                        help='Modulus per line, base64 encoded')
    parser.add_argument('--key-fmt-hex', dest='key_fmt_hex', default=False, action='store_const', const=True,
                        help='Modulus per line, hex encoded')
    parser.add_argument('--key-fmt-dec', dest='key_fmt_dec', default=False, action='store_const', const=True,
                        help='Modulus per line, dec encoded')

    # Java keystore support
    parser.add_argument('--jks-pass-file', dest='jks_pass_file', default=None,
                        help='Password file for JKS, one per line')

    # Positional arguments: zero or more input files
    parser.add_argument('files', nargs=argparse.ZERO_OR_MORE, default=[],
                        help='files to process')
    return parser
Init command line parser :return:
def sha1_digest(instr):
    '''
    Return the hex-encoded SHA-1 digest of the given string.
    '''
    # On Python 3 the input must be converted to bytes before hashing;
    # on Python 2 a str is already a byte string.
    if six.PY3:
        return hashlib.sha1(salt.utils.stringutils.to_bytes(instr)).hexdigest()
    return hashlib.sha1(instr).hexdigest()
Generate an sha1 hash of a given string.
def get_value(self, default=None):
    """Get list from widget text.

    Parameters
    ----------
    default : list
        value to fall back on when the widget text cannot be parsed

    Returns
    -------
    list
        list that might contain int or str or float etc (a non-list
        literal in the widget is passed through unchanged, matching the
        original behavior)
    """
    if default is None:
        default = []
    # Read the widget text once so the error path can report exactly what
    # failed to parse (previously the except branch referenced the unbound
    # local `text`, raising NameError).
    raw = self.text()
    try:
        text = literal_eval(raw)
        if not isinstance(text, list):
            # Deliberately tolerated: non-list literals fall through.
            pass  # raise ValueError
    except (ValueError, SyntaxError):
        # literal_eval raises ValueError for malformed nodes but
        # SyntaxError for empty/unparseable text (e.g. an empty
        # line-edit), so both must be caught to fall back to the default.
        lg.debug('Cannot convert "' + str(raw) + '" to list. ' +
                 'Using default ' + str(default))
        text = default

    # Write the (possibly defaulted) value back so the widget display
    # stays consistent with what callers receive.
    self.set_value(text)
    return text
Get list from widget. Parameters ---------- default : list default list to fall back on when the widget text cannot be parsed Returns ------- list list that might contain int or str or float etc
def qsize(self):
    """
    Return the number of items currently in the queue.

    :return: Integer containing size of the queue
    :raises QueueNotConnectedError: if the queue is not connected
        (note: the original docstring said ConnectionError)
    :raises redis.ConnectionError: if the underlying Redis call fails
    """
    if not self.connected:
        raise QueueNotConnectedError("Queue is not Connected")

    try:
        # LLEN on the backing Redis list gives the queue length.
        size = self.__db.llen(self._key)
    except redis.ConnectionError as e:
        # Re-raised with repr(e) as the message; NOTE(review): this drops
        # the original exception context/traceback chaining.
        raise redis.ConnectionError(repr(e))

    return size
Returns the number of items currently in the queue :return: Integer containing size of the queue :exception: ConnectionError if queue is not connected
def routerify(obj):
    """
    Build a Router from every attribute of ``obj`` that matches a route
    signature, store it on the object, and return it.

    Args:
        obj (object): The object (with attributes) from which to setup
            a router

    Returns:
        Router: The router created from attributes in the object.
    """
    router = Router()
    # Register each route-shaped attribute discovered on the object.
    for route_info in get_routing_attributes(obj):
        router.add_route(*route_info)
    # Cache the router on the object for later lookup.
    obj.__growler_router = router
    return router
Scan through attributes of object parameter looking for any which match a route signature. A router will be created and added to the object with parameter. Args: obj (object): The object (with attributes) from which to setup a router Returns: Router: The router created from attributes in the object.
def view_graph(graph_str, parent=None, prune_to=None):
    """Render a dot graph to an image and show it in an image viewer dialog.

    Results are cached per (graph_str, prune_to) so the image is only
    written once per session.
    """
    # Imported lazily to avoid pulling in GUI/config machinery at module load.
    from rezgui.dialogs.ImageViewerDialog import ImageViewerDialog
    from rez.config import config

    # check for already written tempfile
    h = hash((graph_str, prune_to))
    filepath = graph_file_lookup.get(h)
    if filepath and not os.path.exists(filepath):
        # Cached file was deleted out from under us; regenerate below.
        filepath = None

    # write graph to tempfile
    if filepath is None:
        suffix = ".%s" % config.dot_image_format
        fd, filepath = tempfile.mkstemp(suffix=suffix, prefix="rez-graph-")
        os.close(fd)

        dlg = WriteGraphDialog(graph_str, filepath, parent, prune_to=prune_to)
        if not dlg.write_graph():
            # User cancelled or rendering failed; nothing to display.
            return

    # display graph
    graph_file_lookup[h] = filepath
    dlg = ImageViewerDialog(filepath, parent)
    dlg.exec_()
View a graph.
def get_capacity_vol(self, min_voltage=None, max_voltage=None,
                     use_overall_normalization=True):
    """
    Get the volumetric capacity of the electrode.

    Args:
        min_voltage (float): The minimum allowable voltage for a given
            step.
        max_voltage (float): The maximum allowable voltage for a given
            step.
        use_overall_normalization (bool): If False, normalize by the
            discharged state of only the voltage pairs matching the
            voltage criteria. If True, use default normalization of the
            full electrode path.

    Returns:
        Volumetric capacity in mAh/cc across the insertion path (a
        subset of the path can be chosen by the optional arguments)
    """
    pairs = self._select_in_voltage_range(min_voltage, max_voltage)

    # Normalize by the full path volume unless the caller asked for the
    # matching subset (and that subset is non-empty).
    if use_overall_normalization or not pairs:
        norm_vol = self.normalization_volume
    else:
        norm_vol = pairs[-1].vol_discharge

    total_mAh = sum(pair.mAh for pair in pairs)
    # 1e24 / N_A converts the per-formula-unit charge to mAh/cc.
    return total_mAh / norm_vol * 1e24 / N_A
Get the volumetric capacity of the electrode. Args: min_voltage (float): The minimum allowable voltage for a given step. max_voltage (float): The maximum allowable voltage for a given step. use_overall_normalization (bool): If False, normalize by the discharged state of only the voltage pairs matching the voltage criteria. If True, use default normalization of the full electrode path. Returns: Volumetric capacity in mAh/cc across the insertion path (a subset of the path can be chosen by the optional arguments)
def partial_trace(self, qubits: Qubits) -> 'Channel':
    """Return a new Channel with the partial trace taken over ``qubits``."""
    reduced = self.vec.partial_trace(qubits)
    return Channel(reduced.tensor, reduced.qubits)
Return the partial trace over the specified qubits