code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def show(self, func_name, values, labels=None):
    """Print nice representations of the given values.

    :param func_name: name of the calling frame; '<module>' is displayed
        as '<console>' when running interactively
    :param values: values to display
    :param labels: optional labels, zipped with ``values`` when given
    """
    s = self.Stanza(self.indent)
    # Module-level frames read better as '<console>' in an interactive session.
    if func_name == '<module>' and self.in_console:
        func_name = '<console>'
    s.add([func_name + ': '])
    reprs = map(self.safe_repr, values)
    if labels:
        sep = ''
        for label, repr in zip(labels, reprs):
            s.add([label + '=', self.CYAN, repr, self.NORMAL], sep)
            sep = ', '  # comma-separate every entry after the first
    else:
        sep = ''
        for repr in reprs:
            s.add([self.CYAN, repr, self.NORMAL], sep)
            sep = ', '
    self.writer.write(s.chunks)
Prints out nice representations of the given values.
def copy_to_tmp(source):
    """Copy ``source`` into a fresh temporary directory.

    :param source: path to a file or directory
    :returns: path of the copy inside the new temporary directory; a file
        source yields a file path, a directory source a directory path
    """
    staging_root = tempfile.mkdtemp()
    # Fall back to 'temp' when the path has no final component (e.g. '/').
    base_name = pathlib.Path(source).name or 'temp'
    destination = os.path.join(staging_root, base_name)
    copier = shutil.copytree if os.path.isdir(source) else shutil.copy2
    copier(source, destination)
    return destination
Copies ``source`` to a temporary directory, and returns the copied location. If source is a file, the copied location is also a file.
def send_document(self, document, caption="", **options):
    """Send a general file to this chat.

    :param document: object containing the document data
    :param str caption: document caption (optional)
    :param options: additional sendDocument options (see
        https://core.telegram.org/bots/api#senddocument)
    :returns: result of the ``sendDocument`` Bot API call
    """
    return self.bot.api_call(
        "sendDocument",
        chat_id=str(self.id),
        document=document,
        caption=caption,
        **options
    )
Send a general file. :param document: Object containing the document data :param str caption: Document caption (optional) :param options: Additional sendDocument options (see https://core.telegram.org/bots/api#senddocument) :Example: >>> with open("file.doc", "rb") as f: ...     await chat.send_document(f)
def coldesc(self, columnname, actual=True):
    """Make the description object of the given column.

    Builds the object via :func:`makecoldesc` from the description
    returned by :func:`getcoldesc`.
    """
    # Imported locally -- presumably to avoid an import cycle at module
    # load time; confirm before hoisting.
    import casacore.tables.tableutil as pt
    return pt.makecoldesc(columnname, self.getcoldesc(columnname, actual))
Make the description of a column. Make the description object of the given column as :func:`makecoldesc` is doing with the description given by :func:`getcoldesc`.
def gen_paula_etree(paula_id):
    """Create an element tree representation of an empty PAULA XML file.

    :param paula_id: value for the header's ``paula_id`` attribute
    :returns: (ElementMaker, root element) tuple
    """
    E = ElementMaker(nsmap=NSMAP)
    tree = E('paula', version='1.1')
    tree.append(E('header', paula_id=paula_id))
    return E, tree
creates an element tree representation of an empty PAULA XML file.
def linear_interpolate_rank(tensor1, tensor2, coeffs, rank=1):
    """Linearly interpolate the channel at "rank" between two tensors.

    Channels are ranked by squared L2 distance between tensor1 and
    tensor2 (summed over N, H, W); only the rank-th most-different
    channel is interpolated, all others keep tensor1's values.

    Args:
      tensor1: 4-D Tensor, NHWC.
      tensor2: 4-D Tensor, NHWC.
      coeffs: list of floats, interpolation coefficients.
      rank: integer rank (1-based) of the channel to interpolate.

    Returns:
      Concatenation along axis 0 of one interpolated tensor per coeff.
    """
    _, _, _, num_channels = common_layers.shape_list(tensor1)
    # Per-channel squared distance, reduced over batch and spatial axes.
    diff_sq_sum = tf.reduce_sum((tensor1 - tensor2)**2, axis=(0, 1, 2))
    _, feature_ranks = tf.math.top_k(diff_sq_sum, k=rank)
    feature_rank = feature_ranks[-1]  # channel index at the requested rank
    channel_inds = tf.range(num_channels, dtype=tf.int32)
    channel_mask = tf.equal(channel_inds, feature_rank)
    ones_t = tf.ones(num_channels, dtype=tf.float32)
    zeros_t = tf.zeros(num_channels, dtype=tf.float32)
    interp_tensors = []
    for coeff in coeffs:
        # Coefficient is non-zero only on the selected channel.
        curr_coeff = tf.where(channel_mask, coeff * ones_t, zeros_t)
        interp_tensor = tensor1 + curr_coeff * (tensor2 - tensor1)
        interp_tensors.append(interp_tensor)
    return tf.concat(interp_tensors, axis=0)
Linearly interpolate channel at "rank" between two tensors. The channels are ranked according to their L2 norm between tensor1[channel] and tensor2[channel]. Args: tensor1: 4-D Tensor, NHWC tensor2: 4-D Tensor, NHWC coeffs: list of floats. rank: integer. Returns: interp_latents: list of interpolated 4-D Tensors, shape=(NHWC)
def validate(self, value):
    """Return True if ``value`` is a valid IP netmask, False otherwise.

    On failure, ``self.error_message`` is set to a description of the
    problem (not a valid address vs. not a valid netmask).
    """
    try:
        self._choice = IPAddress(value)
    except (ValueError, AddrFormatError):
        self.error_message = '%s is not a valid IP address.' % value
        return False
    if self._choice.is_netmask():
        return True
    else:
        self.error_message = '%s is not a valid IP netmask.' % value
        return False
Return True if the value is a valid IP netmask, False otherwise; sets ``error_message`` on failure.
def start_processing_handler(self, event):
    """Asynchronous handler starting the Volatility process(es).

    Processes every configured snapshot, then signals completion via
    the ``processing_done`` event.
    """
    self.logger.debug("Event %s: starting Volatility process(es).", event)
    for snapshot in self.snapshots:
        self.process_snapshot(snapshot)
    self.processing_done.set()
Asynchronous handler starting the Volatility processes.
def history(namespace_module):
    """Echo path and BEL resource hash for every namespace version on Artifactory."""
    for path in get_namespace_history(namespace_module):
        h = get_bel_resource_hash(path.as_posix())
        click.echo('{}\t{}'.format(path, h))
Hash all versions on Artifactory.
def _feed_to_kafka(self, json_item):
    """Send a request to Kafka.

    :param json_item: the json item to send
    :returns: True if the data was sent within KAFKA_FEED_TIMEOUT,
        False otherwise (timeout or any send failure)
    """
    # The decorator aborts the send and returns False after the timeout.
    @MethodTimer.timeout(self.settings['KAFKA_FEED_TIMEOUT'], False)
    def _feed(json_item):
        try:
            self.logger.debug("Sending json to kafka at " +
                              str(self.settings['KAFKA_PRODUCER_TOPIC']))
            future = self.producer.send(self.settings['KAFKA_PRODUCER_TOPIC'],
                                        json_item)
            future.add_callback(self._kafka_success)
            future.add_errback(self._kafka_failure)
            self.producer.flush()
            return True
        except Exception as e:
            # NOTE(review): every failure is reported as a lost connection
            # and the actual exception is dropped -- consider logging it.
            self.logger.error("Lost connection to Kafka")
            self._spawn_kafka_connection_thread()
            return False
    return _feed(json_item)
Sends a request to Kafka :param json_item: The json item to send :returns: A boolean indicating whether the data was sent successfully or not
def _buf_append(self, string): if not self._buf: self._buf = string else: self._buf += string
Replace string directly without appending to an empty string, avoiding type issues.
def pending_work_items(self):
    "Iterable of all pending work items (those with no row in results)."
    pending = self._conn.execute(
        "SELECT * FROM work_items WHERE job_id NOT IN (SELECT job_id FROM results)"
    )
    # Generator: rows are converted lazily as the caller iterates.
    return (_row_to_work_item(p) for p in pending)
Iterable of all pending work items.
def add_cors(self, path, allowed_origins, allowed_headers=None,
             allowed_methods=None, max_age=None, allow_credentials=None):
    """Add CORS configuration to this path via an OPTIONS response config.

    Skipped entirely when the path already defines an OPTIONS method.

    :param string path: path to add the CORS configuration to
    :param string/dict allowed_origins: comma-separated allowed origins
        (may be an intrinsic function dict); required
    :param string/dict allowed_headers: comma-separated allowed headers
    :param string/dict allowed_methods: comma-separated allowed methods;
        derived from the path's existing methods when omitted
    :param integer/dict max_age: value for Access-Control-Max-Age
    :param bool/None allow_credentials: whether requests may carry credentials
    :raises ValueError: when allowed_origins is empty
    """
    if self.has_path(path, self._OPTIONS_METHOD):
        # Customer already configured OPTIONS themselves; leave it alone.
        return
    if not allowed_origins:
        raise ValueError("Invalid input. Value for AllowedOrigins is required")
    if not allowed_methods:
        allowed_methods = self._make_cors_allowed_methods_for_path(path)
        # Wrap in quotes so the value is emitted as a string literal.
        allowed_methods = "'{}'".format(allowed_methods)
    if allow_credentials is not True:
        # Normalize anything but an explicit True to False.
        allow_credentials = False
    self.add_path(path, self._OPTIONS_METHOD)
    self.get_path(path)[self._OPTIONS_METHOD] = self._options_method_response_for_cors(
        allowed_origins, allowed_headers, allowed_methods, max_age, allow_credentials)
Add CORS configuration to this path. Specifically, we will add a OPTIONS response config to the Swagger that will return headers required for CORS. Since SAM uses aws_proxy integration, we cannot inject the headers into the actual response returned from Lambda function. This is something customers have to implement themselves. If OPTIONS method is already present for the Path, we will skip adding CORS configuration Following this guide: https://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-cors.html#enable-cors-for-resource-using-swagger-importer-tool :param string path: Path to add the CORS configuration to. :param string/dict allowed_origins: Comma separate list of allowed origins. Value can also be an intrinsic function dict. :param string/dict allowed_headers: Comma separated list of allowed headers. Value can also be an intrinsic function dict. :param string/dict allowed_methods: Comma separated list of allowed methods. Value can also be an intrinsic function dict. :param integer/dict max_age: Maximum duration to cache the CORS Preflight request. Value is set on Access-Control-Max-Age header. Value can also be an intrinsic function dict. :param bool/None allow_credentials: Flags whether request is allowed to contain credentials. :raises ValueError: When values for one of the allowed_* variables is empty
def load(path):
    """Load an analytics report from the json file at ``path``.

    The file is deleted after a successful load.

    Args:
        path (str): path to json file with analytics report.
    """
    with open(path, "r") as fobj:
        analytics = Analytics(info=json.load(fobj))
    os.unlink(path)  # one-shot report: consume and remove
    return analytics
Loads analytics report from json file specified by path. Args: path (str): path to json file with analytics report.
def apply_scaling(self, copy=True):
    """Scale pixel values to their true DN.

    :param copy: when True, return a scaled copy and leave ``self.data``
        untouched; when False, scale ``self.data`` in place
    :returns: the scaled pixel data
    """
    factor, offset = self.multiplier, self.base
    if copy:
        return factor * self.data + offset
    # In-place path: skip the no-op factor/offset to avoid pointless work.
    if factor != 1:
        self.data *= factor
    if offset != 0:
        self.data += offset
    return self.data
Scale pixel values to their true DN. :param copy: whether to apply the scaling to a copy of the pixel data and leave the original unaffected :returns: a scaled version of the pixel data
def remove_reference(type_):
    """Strip the reference from a type definition.

    Non-reference types are returned unchanged.
    """
    unaliased = remove_alias(type_)
    return unaliased.base if is_reference(unaliased) else type_
removes reference from the type definition If type is not reference type, it will be returned as is.
def cas(self, key, value, new_value):
    """Perform an atomic compare-and-set on ``key`` via the 'cas' Lua script.

    Comparison semantics are defined by the script (documented as a
    prefix match on ``value``).
    """
    return self.run_script('cas', keys=[key], args=[value, new_value])
Perform an atomic compare-and-set on the value in "key", using a prefix match on the provided value.
def _run_event_methods(self, tag, stage=None):
    """Run every bundle method registered for the given event tag/stage."""
    import inspect
    from ambry.bundle.events import _runable_for_event
    # Collect first, then invoke, so the method table isn't mutated mid-scan.
    matching = [
        method
        for _name, method in inspect.getmembers(self, predicate=inspect.ismethod)
        if _runable_for_event(method, tag, stage)
    ]
    for method in matching:
        method()
Run code in the bundle that is marked with events.
async def execute_command(
    self, *args: bytes, timeout: NumType = None
) -> SMTPResponse:
    """Send an SMTP command (with any args) to the server and return its response."""
    wire_form = b" ".join(args) + b"\r\n"
    await self.write_and_drain(wire_form, timeout=timeout)
    return await self.read_response(timeout=timeout)
Sends an SMTP command along with any args to the server, and returns a response.
async def export_wallet(self, von_wallet: Wallet, path: str) -> None:
    """Export an existing VON anchor wallet.

    :param von_wallet: open wallet
    :param path: path to which to export wallet
    :raises WalletState: if wallet is closed
    """
    LOGGER.debug('WalletManager.export_wallet >>> von_wallet %s, path %s', von_wallet, path)
    if not von_wallet.handle:
        LOGGER.debug('WalletManager.export_wallet <!< Wallet %s is closed', von_wallet.name)
        raise WalletState('Wallet {} is closed'.format(von_wallet.name))
    # Export config: destination path plus the wallet's access credentials.
    await wallet.export_wallet(
        von_wallet.handle,
        json.dumps({
            'path': path,
            **von_wallet.access_creds
        }))
    LOGGER.debug('WalletManager.export_wallet <<<')
Export an existing VON anchor wallet. Raise WalletState if wallet is closed. :param von_wallet: open wallet :param path: path to which to export wallet
def keep_scan(cls, result_key, token):
    """Register a scan collecting the lines that contain ``token``.

    The result is stored under ``result_key``; the lookup uses the log
    file's ``get`` method.
    """
    def _scan(self):
        return self.get(token)
    cls.scan(result_key, _scan)
Define a property that is set to the list of lines that contain the given token. Uses the get method of the log file.
async def analog_write(self, command):
    """Write a value to an analog pin (PWM output or Servo angle).

    :param command: params list [PIN, WRITE_VALUE]
    :returns: No return message.
    """
    raw_pin, raw_value = command[0], command[1]
    await self.core.analog_write(int(raw_pin), int(raw_value))
This method writes a value to an analog pin. It is used to set the output of a PWM pin or the angle of a Servo. :param command: {"method": "analog_write", "params": [PIN, WRITE_VALUE]} :returns: No return message.
def get(cls, func_hint):
    """Return a registered hash function matching the given hint.

    The hint may be a `Func` member, a function name (with hyphens or
    underscores), or an integer code.

    :raises KeyError: if no matching function is registered
    """
    # Fast path: the hint already is (or converts to) a Func member.
    try:
        return Func(func_hint)
    except ValueError:
        pass
    if func_hint in cls._func_from_name:  # name lookup, e.g. 'sha2-256'
        return cls._func_from_name[func_hint]
    if func_hint in cls._func_hash:  # app-specific integer code
        return func_hint
    raise KeyError("unknown hash function", func_hint)
Return a registered hash function matching the given hint. The hint may be a `Func` member, a function name (with hyphens or underscores), or its code. A `Func` member is returned for standard multihash functions and an integer code for application-specific ones. If no matching function is registered, a `KeyError` is raised. >>> fm = FuncReg.get(Func.sha2_256) >>> fnu = FuncReg.get('sha2_256') >>> fnh = FuncReg.get('sha2-256') >>> fc = FuncReg.get(0x12) >>> fm == fnu == fnh == fc True
def relabel(self, catalogue):
    """Relabel result rows according to ``catalogue``.

    Rows whose work is labelled in the catalogue get that label; rows
    whose works are absent from the catalogue are left unchanged.

    :param catalogue: mapping of work names to labels
    """
    for work, label in catalogue.items():
        self._matches.loc[
            self._matches[constants.WORK_FIELDNAME] == work,
            constants.LABEL_FIELDNAME] = label
Relabels results rows according to `catalogue`. A row whose work is labelled in the catalogue will have its label set to the label in the catalogue. Rows whose works are not labelled in the catalogue will be unchanged. :param catalogue: mapping of work names to labels :type catalogue: `Catalogue`
def __construct_lda_model(self):
    """Create the LDA model used to procure the user's topics of interest.

    Fetches descriptions of repositories the user has shown interest in,
    cleans and tokenizes them, and trains a single-topic LDA model.
    """
    repos_of_interest = self.__get_interests()
    cleaned_tokens = self.__clean_and_tokenize(repos_of_interest)
    if not cleaned_tokens:
        # Gensim needs at least one token; feed a nonsense placeholder
        # so model construction cannot fail on empty input.
        cleaned_tokens = [["zkfgzkfgzkfgzkfgzkfgzkfg"]]
    dictionary = corpora.Dictionary(cleaned_tokens)
    corpus = [dictionary.doc2bow(text) for text in cleaned_tokens]
    self.lda_model = models.ldamodel.LdaModel(
        corpus, num_topics=1, id2word=dictionary, passes=10
    )
Method to create LDA model to procure list of topics from. We do that by first fetching the descriptions of repositories user has shown interest in. We tokenize the hence fetched descriptions to procure list of cleaned tokens by dropping all the stop words and language names from it. We use the cleaned and sanitized token list to train LDA model from which we hope to procure topics of interests to the authenticated user.
def _download_items(db, last_id):
    """Download items from the aleph and store them in `db`.

    Starts from `last_id`; stops after MAX_RETRY consecutive missing
    documents, which is taken to mean the end of the collection.

    Args:
        db (obj): Dictionary-like object used as DB.
        last_id (int): Start from this id.
    """
    MAX_RETRY = 20  # consecutive misses tolerated before assuming the end
    MAX_DOC_ID = 10000000
    not_found_cnt = 0
    for doc_id in xrange(last_id, MAX_DOC_ID):
        doc_id += 1
        print "Downloading %d.." % (doc_id)
        if not_found_cnt >= MAX_RETRY:
            print "It looks like this is an end:", doc_id - MAX_RETRY
            break
        try:
            record = _download(doc_id)
        except (DocumentNotFoundException, InvalidAlephBaseException):
            print "\tnot found, skipping"
            not_found_cnt += 1
            continue
        not_found_cnt = 0
        db["item_%d" % doc_id] = record
        # Rewind the checkpoint so a restart re-examines the trailing misses.
        db["last_id"] = doc_id - MAX_RETRY if doc_id > MAX_RETRY else 1
        if doc_id % 100 == 0:
            db.commit()  # periodic commit bounds the amount of lost work
Download items from the aleph and store them in `db`. Start from `last_id` if specified. Args: db (obj): Dictionary-like object used as DB. last_id (int): Start from this id.
def run_queue(self, options, todo):
    """Actually run the _todo_tasklet over the batched ``todo`` items."""
    utils.logging_debug('AutoBatcher(%s): %d items',
                        self._todo_tasklet.__name__, len(todo))
    batch_fut = self._todo_tasklet(todo, options)
    self._running.append(batch_fut)
    # Bookkeeping/cleanup runs once the batch future completes.
    batch_fut.add_callback(self._finished_callback, batch_fut, todo)
Actually run the _todo_tasklet.
def pillars(opts, functions, context=None):
    """Return the pillar modules, wrapped for ext_pillar lookup."""
    ret = LazyLoader(_module_dirs(opts, 'pillar'),
                     opts,
                     tag='pillar',
                     pack={'__salt__': functions,
                           '__context__': context,
                           '__utils__': utils(opts)})
    # Pillar modules can reach each other through __ext_pillar__.
    ret.pack['__ext_pillar__'] = ret
    return FilterDictWrapper(ret, '.ext_pillar')
Returns the pillars modules
def add_tag(self, tag):
    """Add ``tag`` to the tag list; existing tags keep their data."""
    self._tags.setdefault(tag, {})
add a tag to the tag list
def _calculate_sunrise_hour_angle(self, solar_dec, depression=0.833): hour_angle_arg = math.degrees(math.acos( math.cos(math.radians(90 + depression)) / (math.cos(math.radians(self.latitude)) * math.cos( math.radians(solar_dec))) - math.tan(math.radians(self.latitude)) * math.tan(math.radians(solar_dec)) )) return hour_angle_arg
Calculate hour angle for sunrise time in degrees.
def print_with_pager(output):
    """Print ``output`` to stdout, paging through ``less`` when in a tty.

    Falls back to a plain print when stdout is not a tty or when the
    pager cannot be launched.
    """
    if sys.stdout.isatty():
        try:
            pager = subprocess.Popen(
                ['less', '-F', '-r', '-S', '-X', '-K'],
                stdin=subprocess.PIPE, stdout=sys.stdout)
        except OSError:
            # Popen raises OSError (e.g. FileNotFoundError) when `less`
            # is missing -- it never raises CalledProcessError, which is
            # what was caught before and left the failure unhandled.
            print(output)
            return
        pager.stdin.write(output.encode('utf-8'))
        pager.stdin.close()
        pager.wait()
    else:
        print(output)
Print the output to `stdout` using less when in a tty.
def config_dirs():
    """Return a platform-specific list of candidate user config directories.

    Candidates are ordered from highest to lowest priority; the last
    element is the fallback used when no higher-priority config exists.
    """
    paths = []
    if platform.system() == 'Darwin':
        paths.append(MAC_DIR)
        paths.append(UNIX_DIR_FALLBACK)
        paths.extend(xdg_config_dirs())
    elif platform.system() == 'Windows':
        paths.append(WINDOWS_DIR_FALLBACK)
        if WINDOWS_DIR_VAR in os.environ:
            paths.append(os.environ[WINDOWS_DIR_VAR])
    else:
        # Assume Unix.
        paths.append(UNIX_DIR_FALLBACK)
        paths.extend(xdg_config_dirs())
    # Expand and deduplicate while preserving priority order.
    out = []
    for path in paths:
        path = os.path.abspath(os.path.expanduser(path))
        if path not in out:
            out.append(path)
    return out
Return a platform-specific list of candidates for user configuration directories on the system. The candidates are in order of priority, from highest to lowest. The last element is the "fallback" location to be used when no higher-priority config file exists.
def citation_count(papers, key='ayjid', verbose=False):
    """Generate citation counts for all of the papers cited by ``papers``.

    Parameters
    ----------
    papers : list
        A list of :class:`.Paper` instances.
    key : str
        Property to use as node key. Default is 'ayjid' (recommended).
    verbose : bool
        If True, prints status messages.

    Returns
    -------
    counts : Counter
        Citation counts for all papers cited by papers.
    """
    if verbose:
        print "Generating citation counts for "+unicode(len(papers))+" papers..."
    counts = Counter()
    for P in papers:
        # Papers without citation data are skipped entirely.
        if P['citations'] is not None:
            for p in P['citations']:
                counts[p[key]] += 1
    return counts
Generates citation counts for all of the papers cited by papers. Parameters ---------- papers : list A list of :class:`.Paper` instances. key : str Property to use as node key. Default is 'ayjid' (recommended). verbose : bool If True, prints status messages. Returns ------- counts : dict Citation counts for all papers cited by papers.
def modify_ack_deadline(self, items):
    """Modify the ack deadline for the given messages.

    Args:
        items(Sequence[ModAckRequest]): The items to modify.
    """
    ack_ids = [item.ack_id for item in items]
    seconds = [item.seconds for item in items]
    # One streaming-pull request covers all items (parallel lists).
    request = types.StreamingPullRequest(
        modify_deadline_ack_ids=ack_ids, modify_deadline_seconds=seconds
    )
    self._manager.send(request)
Modify the ack deadline for the given messages. Args: items(Sequence[ModAckRequest]): The items to modify.
def percentile_for_in(self, leaderboard_name, member):
    """Retrieve the percentile for a member in the named leaderboard.

    @param leaderboard_name [String] Name of the leaderboard.
    @param member [String] Member name.
    @return the percentile, or None when the member is not present.
    """
    if not self.check_member_in(leaderboard_name, member):
        return None
    # One pipelined round trip: [total member count, descending rank].
    responses = self.redis_connection.pipeline().zcard(
        leaderboard_name).zrevrank(leaderboard_name, member).execute()
    percentile = math.ceil(
        (float(
            (responses[0] - responses[1] - 1)) / float(
            responses[0]) * 100))
    if self.order == self.ASC:
        # Ascending leaderboards invert the (descending-rank) percentile.
        return 100 - percentile
    else:
        return percentile
Retrieve the percentile for a member in the named leaderboard. @param leaderboard_name [String] Name of the leaderboard. @param member [String] Member name. @return the percentile for a member in the named leaderboard.
def grid_search(grid_scores, change, subset=None, kind='line', cmap=None,
                ax=None):
    """Plot results from a sklearn grid search, varying at most two parameters.

    Parameters
    ----------
    grid_scores : list of named tuples
        Results from a sklearn grid search (``grid_scores_``).
    change : str or iterable with len<=2
        Parameter(s) to vary.
    subset : dict-like
        Parameter-value pairs used to subset ``grid_scores``; if None all
        combinations are used.
    kind : ['line', 'bar']
        Plot type; only applies when ``change`` is a single parameter.
    cmap : matplotlib Colormap
        Matrix colormap; only applies when ``change`` has two parameters.
    ax : matplotlib Axes
        Axes to draw onto; defaults to the current Axes.

    Returns
    -------
    ax : matplotlib Axes
        Axes containing the plot.
    """
    if change is None:
        raise ValueError(('change can\'t be None, you need to select at least'
                          ' one value to make the plot.'))
    if ax is None:
        ax = plt.gca()
    if cmap is None:
        cmap = default_heatmap()
    # A single string (or a length-1 iterable) selects the 1-D plot.
    if isinstance(change, string_types) or len(change) == 1:
        return _grid_search_single(grid_scores, change, subset, kind, ax)
    elif len(change) == 2:
        return _grid_search_double(grid_scores, change, subset, cmap, ax)
    else:
        raise ValueError('change must have length 1 or 2 or be a string')
Plot results from a sklearn grid search by changing two parameters at most. Parameters ---------- grid_scores : list of named tuples Results from a sklearn grid search (get them using the `grid_scores_` parameter) change : str or iterable with len<=2 Parameter to change subset : dictionary-like parameter-value(s) pairs to subset from grid_scores. (e.g. ``{'n_estimators': [1, 10]}``), if None all combinations will be used. kind : ['line', 'bar'] This only applies when change is a single parameter. Changes the type of plot cmap : matplotlib Colormap This only applies when change contains two parameters. Colormap used for the matrix. If None uses a modified version of matplotlib's OrRd colormap. ax: matplotlib Axes Axes object to draw the plot onto, otherwise uses current Axes Returns ------- ax: matplotlib Axes Axes containing the plot Examples -------- .. plot:: ../../examples/grid_search.py
def _apply(self, ctx: ExtensionContext) -> AugmentedDict:
    """Perform the actual loading of an external resource into the model.

    Args:
        ctx: The processing context.

    Returns:
        A dictionary that gets incorporated into the actual model.
    """
    def process(pattern: Pattern[str], _str: str) -> Any:
        # Replace a placeholder match with the referenced file's content;
        # strings that don't match the pattern pass through unchanged.
        _match = pattern.match(_str)
        if _match is None:
            return _str
        placeholder, external_path = _match.group(1), _match.group(2)
        # Resolve relative to the current document when it is a file.
        with open(self.locator(
                external_path,
                cast(str, ctx.document) if Validator.is_file(document=ctx.document) else None
        )) as fhandle:
            content = fhandle.read()
        return _str.replace(placeholder, content)

    node_key, node_value = ctx.node
    _pattern = re.compile(self.__pattern__)
    return {node_key: process(_pattern, node_value)}
Performs the actual loading of an external resource into the current model. Args: ctx: The processing context. Returns: Returns a dictionary that gets incorporated into the actual model.
def dict(cls, *args, **kwds):
    """Construct a q dictionary.

    K.dict() -> new empty q dictionary, q('()!()')
    K.dict(mapping) -> initialized from a mapping's (key, value) pairs
    K.dict(iterable) -> initialized from an iterable of (key, value) pairs
    K.dict(**kwargs) -> initialized from name=value keyword pairs,
        e.g. K.dict(one=1, two=2)

    NOTE(review): keyword arguments are silently ignored when a
    positional argument is supplied.
    """
    if args:
        if len(args) > 1:
            raise TypeError("Too many positional arguments")
        x = args[0]
        keys = []
        vals = []
        try:
            x_keys = x.keys
        except AttributeError:
            # Not a mapping: treat as an iterable of (key, value) pairs.
            for k, v in x:
                keys.append(k)
                vals.append(v)
        else:
            keys = x_keys()
            vals = [x[k] for k in keys]
        return q('!', keys, vals)
    else:
        if kwds:
            keys = []
            vals = []
            for k, v in kwds.items():
                keys.append(k)
                vals.append(v)
            return q('!', keys, vals)
        else:
            return q('()!()')
Construct a q dictionary K.dict() -> new empty q dictionary (q('()!()') K.dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs K.dict(iterable) -> new dictionary initialized from an iterable yielding (key, value) pairs K.dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list. For example: K.dict(one=1, two=2)
def create_address(customer_id, data):
    """Create a party address and return its id.

    ``data`` must provide name, street, street_bis, city, zip, plus the
    ISO country code ('country') and state code ('state').
    """
    Address = client.model('party.address')
    Country = client.model('country.country')
    Subdivision = client.model('country.subdivision')
    country, = Country.find([('code', '=', data['country'])])
    # Subdivision codes are country-prefixed (e.g. 'US-CA'): match on suffix.
    state, = Subdivision.find([
        ('code', 'ilike', '%-' + data['state']),
        ('country', '=', country['id'])
    ])
    address, = Address.create([{
        'party': customer_id,
        'name': data['name'],
        'street': data['street'],
        'street_bis': data['street_bis'],
        'city': data['city'],
        'zip': data['zip'],
        'country': country['id'],
        'subdivision': state['id'],
    }])
    return address['id']
Create an address and return the id
def training_env():
    """Create a TrainingEnv from the on-disk SageMaker configuration.

    Returns:
        TrainingEnv: an instance of TrainingEnv
    """
    from sagemaker_containers import _env
    return _env.TrainingEnv(
        resource_config=_env.read_resource_config(),
        input_data_config=_env.read_input_data_config(),
        hyperparameters=_env.read_hyperparameters())
Create a TrainingEnv. Returns: TrainingEnv: an instance of TrainingEnv
def validate_request(self, iface_name, func_name, params):
    """Validate ``params`` against the named interface function.

    :Parameters:
      iface_name
        Name of interface
      func_name
        Name of function
      params
        List of params to validate against this function

    NOTE(review): despite older docs describing a (bool, message) return,
    this delegates to validate_params and returns nothing; presumably a
    validation failure raises -- confirm against validate_params.
    """
    self.interface(iface_name).function(func_name).validate_params(params)
Validates that the given params match the expected length and types for this interface and function. Returns two element tuple: (bool, string) - `bool` - True if valid, False if not - `string` - Description of validation error, or None if valid :Parameters: iface_name Name of interface func_name Name of function params List of params to validate against this function
def unsubscribe_user_from_discussion(recID, uid):
    """Unsubscribe a user from a discussion.

    :param recID: record ID of the discussion to unsubscribe from
    :param uid: user id
    :return: 1 if successful, 0 if not
    """
    # FIXME(review): `query` is bound to the params tuple -- the SQL
    # string appears to have been lost, so run_sql receives no real query.
    query = params = (recID, uid)
    try:
        res = run_sql(query, params)
    except:  # NOTE(review): bare except hides the real failure cause
        return 0
    if res > 0:
        return 1
    return 0
Unsubscribe users from a discussion. :param recID: record ID corresponding to the discussion we want to unsubscribe the user :param uid: user id :return 1 if successful, 0 if not
def _change_kind(self, post_uid):
    """Modify the category and kind of the post, then redirect to it."""
    post_data = self.get_post_data()
    logger.info('admin post update: {0}'.format(post_data))
    MPost.update_misc(post_uid, kind=post_data['kcat'])
    update_category(post_uid, post_data)
    # Redirect under the route that matches the (possibly new) kind.
    self.redirect('/{0}/{1}'.format(router_post[post_data['kcat']], post_uid))
To modify the category of the post, and kind.
def _clean_tag(t):
    """Fix up some garbage POS tags emitted upstream."""
    t = _scored_patt.sub(string=t, repl='')
    # Exact-match normalizations (checked before the 'root:' prefix rule
    # so 'root:zygote' maps to 'nn', not 'uh').
    exact = {
        'vpb': 'vb',
        'nnd': 'nns',
        'nns_root:': 'nns',
        'root:zygote': 'nn',
        '+abbreviation': 'abbreviation',
        'abbr_united_states_marine_corps': 'abbreviation',
        'abbr_orange_juice': 'abbreviation',
    }
    if t == '_country_' or t.startswith('_country:'):
        return 'nnp_country'
    if t in exact:
        return exact[t]
    if t.startswith('root:'):
        return 'uh'
    if t.startswith('fw_misspelling:'):
        return 'fw'
    return t
Fix up some garbage errors.
def __verify_server_version(self):
    """Verify the connected server's product version is supported.

    Notes:
        Logs a warning when the server runs a newer minor release.

    Raises:
        InvalidSwimlaneProductVersion: when the server's major version
            differs from the client library's.
    """
    if compare_versions('.'.join([_lib_major_version, _lib_minor_version]),
                        self.product_version) > 0:
        logger.warning('Client version {} connecting to server with newer minor release {}.'.format(
            _lib_full_version,
            self.product_version
        ))
    if compare_versions(_lib_major_version, self.product_version) != 0:
        # Supported range is [major.0, (major+1).0).
        raise InvalidSwimlaneProductVersion(
            self,
            '{}.0'.format(_lib_major_version),
            '{}.0'.format(str(int(_lib_major_version) + 1))
        )
Verify connected to supported server product version Notes: Logs warning if connecting to a newer minor server version Raises: swimlane.exceptions.InvalidServerVersion: If server major version is higher than package major version
def _set_property(xml_root, name, value, properties=None):
    """Set property ``name`` to ``value`` under the properties element.

    Updates an existing property in place, otherwise appends a new one.
    ``properties`` defaults to the "properties" child of ``xml_root``.
    """
    if properties is None:
        properties = xml_root.find("properties")
    for prop in properties:
        if prop.get("name") == name:
            prop.set("value", utils.get_unicode_str(value))
            break
    else:
        # No existing property with that name: create it.
        etree.SubElement(
            properties, "property", {"name": name, "value": utils.get_unicode_str(value)}
        )
Sets property to specified value.
def index_map(data):
    """Index demo map function: yield (word, filename) for every word."""
    entry, text_fn = data
    text = text_fn()
    logging.debug("Got %s", entry.filename)
    for sentence in split_into_sentences(text):
        for word in split_into_words(sentence.lower()):
            yield (word, entry.filename)
Index demo map function.
def startup_script(self):
    """Return the content of the current startup script.

    :returns: script text, or None when no script file is configured
    :raises VPCSError: when the file exists but cannot be read
    """
    script_file = self.script_file
    if script_file is None:
        return None
    try:
        with open(script_file, "rb") as f:
            # Decode permissively: a stray byte should not make the
            # whole script unreadable.
            return f.read().decode("utf-8", errors="replace")
    except OSError as e:
        raise VPCSError('Cannot read the startup script file "{}": {}'.format(script_file, e))
Returns the content of the current startup script
def _session_check(self):
    """Attempt to authenticate the user through a saved session file.

    Loads cookies from SESSION_FILE and probes TEST_URL; the session is
    considered invalid when TEST_KEY (a marker only present for
    unauthenticated sessions) appears in the response.

    :returns: True when the saved session is still valid
    """
    if not os.path.exists(SESSION_FILE):
        self._log.debug("Session file does not exist")
        return False
    # SECURITY NOTE(review): unpickling executes arbitrary code if the
    # session file is tampered with -- the file must stay user-private.
    with open(SESSION_FILE, 'rb') as f:
        cookies = requests.utils.cookiejar_from_dict(pickle.load(f))
    self._session.cookies = cookies
    self._log.debug("Loaded cookies from session file")
    response = self._session.get(url=self.TEST_URL, headers=self.HEADERS)
    if self.TEST_KEY in str(response.content):
        self._log.debug("Session file appears invalid")
        return False
    self._is_authenticated = True
    self._process_state()
    return True
Attempt to authenticate the user through a session file. This process is done to avoid having to authenticate the user every single time. It uses a session file that is saved when a valid session is captured and then reused. Because sessions can expire, we need to test the session prior to calling the user authenticated. Right now that is done with a test string found in an unauthenticated session. This approach is not an ideal method, but it works.
def getCredentials(self):
    """Return UserKeyCredential or UserPassCredential (or None).

    Key material takes precedence over username/password; None is
    returned when no credential values are stored.
    """
    (username, password, public_key, private_key) = self.getCredentialValues()
    if public_key or private_key:
        return UserKeyCredential(username, public_key, private_key)
    if username or password:
        return UserPassCredential(username, password)
    return None
Return UserKeyCredential or UserPassCredential.
def bicluster_similarity(self, reference_model):
    """Calculate similarity between this model's and a reference model's biclusters.

    :param reference_model: the reference model of biclusters
    :return: the consensus score (Hochreiter et al., 2010)
    """
    similarity_score = consensus_score(self.model.biclusters_,
                                       reference_model.biclusters_)
    return similarity_score
Calculates the similarity between the current model of biclusters and the reference model of biclusters :param reference_model: The reference model of biclusters :return: Returns the consensus score(Hochreiter et. al., 2010), i.e. the similarity of two sets of biclusters.
def getNamedItem(self, name: str) -> Optional[Attr]:
    """Return the ``Attr`` stored under ``name``, or None when absent."""
    return self._dict.get(name)
Get ``Attr`` object which has ``name``. If does not have ``name`` attr, return None.
def get_one_dimensional_kernel(self, dim):
    """Build the 1-D RBF kernel; specially intended for Grid regression.

    :param dim: number of original input dimensions
    """
    oneDkernel = GridRBF(input_dim=1, variance=self.variance.copy(),
                         originalDimensions=dim)
    return oneDkernel
Specially intended for Grid regression.
def _update_alignment(self, alignment):
    """Update the vertical text alignment toggle button.

    Parameters
    ----------
    alignment: String in ["top", "middle", "bottom"]
    """
    # Map alignment names to the toolbar button's state codes.
    states = {"top": 2, "middle": 0, "bottom": 1}
    self.alignment_tb.state = states[alignment]
    self.alignment_tb.toggle(None)
    self.alignment_tb.Refresh()
Updates vertical text alignment button Parameters ---------- alignment: String in ["top", "middle", "bottom"] \tSets the toggle button state to match the given alignment
def update_aliases(self):
    """Refresh alias information from the room state.

    Returns:
        boolean: True if the aliases changed, False if not (including
        when the state request fails).
    """
    changed = False
    try:
        response = self.client.api.get_room_state(self.room_id)
    except MatrixRequestError:
        return False
    for chunk in response:
        content = chunk.get('content')
        if content:
            if 'aliases' in content:
                aliases = content['aliases']
                if aliases != self.aliases:
                    self.aliases = aliases
                    changed = True
            if chunk.get('type') == 'm.room.canonical_alias':
                canonical_alias = content['alias']
                if self.canonical_alias != canonical_alias:
                    self.canonical_alias = canonical_alias
                    changed = True
    # Fall back to the first alias when the room has no canonical one.
    if changed and self.aliases and not self.canonical_alias:
        self.canonical_alias = self.aliases[0]
    return changed
Get aliases information from room state Returns: boolean: True if the aliases changed, False if not
def connect_ec2_endpoint(url, aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
    """Connect to the EC2 API endpoint at ``url``.

    Additional keyword arguments are passed through to ``connect_ec2``.

    :type url: string
    :param url: url for the ec2 api endpoint to connect to
    :type aws_access_key_id: string
    :param aws_access_key_id: Your AWS Access Key ID
    :type aws_secret_access_key: string
    :param aws_secret_access_key: Your AWS Secret Access Key
    :rtype: :class:`boto.ec2.connection.EC2Connection`
    :return: A connection to the endpoint (e.g. a Eucalyptus server)
    """
    from boto.ec2.regioninfo import RegionInfo
    purl = urlparse.urlparse(url)
    kwargs['port'] = purl.port
    kwargs['host'] = purl.hostname
    kwargs['path'] = purl.path
    # Respect an explicit is_secure from the caller; otherwise infer it
    # from the URL scheme. ('x not in y' replaces the old 'not x in y'.)
    if 'is_secure' not in kwargs:
        kwargs['is_secure'] = (purl.scheme == "https")
    kwargs['region'] = RegionInfo(name=purl.hostname, endpoint=purl.hostname)
    kwargs['aws_access_key_id'] = aws_access_key_id
    kwargs['aws_secret_access_key'] = aws_secret_access_key
    return connect_ec2(**kwargs)
Connect to an EC2 Api endpoint. Additional arguments are passed through to connect_ec2. :type url: string :param url: A url for the ec2 api endpoint to connect to :type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`boto.ec2.connection.EC2Connection` :return: A connection to Eucalyptus server
def format_specifier(ireq):
    """Pretty-print the specifier part of an InstallRequirement.

    Specs are sorted by version text and comma-joined; "<any>" marks an
    unconstrained requirement.
    """
    if ireq.req is None:
        parts = []
    else:
        parts = sorted(ireq.specifier._specs, key=lambda spec: spec._spec[1])
    rendered = ",".join(str(spec) for spec in parts)
    return rendered or "<any>"
Generic formatter for pretty printing the specifier part of InstallRequirements to the terminal.
def connect_with_password(self, ssh, username, password, address, port, sock,
                          timeout=20):
    """Create an ssh session to a remote host with a username and password.

    :param ssh: client object providing a paramiko-style ``connect``
    :type username: str
    :param username: username used for ssh authentication
    :type password: str
    :param password: password used for ssh authentication
    :type address: str
    :param address: remote server address
    :type port: int
    :param port: remote server port
    :param sock: optional socket/channel to tunnel the connection through
    :param timeout: connection timeout in seconds (default 20)
    """
    ssh.connect(username=username,
                password=password,
                hostname=address,
                port=port,
                sock=sock,
                timeout=timeout)
Create an ssh session to a remote host with a username and password :type username: str :param username: username used for ssh authentication :type password: str :param password: password used for ssh authentication :type address: str :param address: remote server address :type port: int :param port: remote server port
def plotdata(self, key, part='re', scale='log', steps=50):
    """Return arrays (x, y) suitable for plt.plot.

    ``x`` is the scale in GeV, ``y`` the parameter of interest.

    Parameters:

    - key: dictionary key of the parameter to be plotted
    - part: 're' for the real part (default), 'im' for the imaginary part
    - scale: 'log' for logarithmically distributed x steps, 'linear'
      for linearly distributed ones
    - steps: number of x steps to take (default: 50)
    """
    if scale == 'log':
        x = np.logspace(log(self.scale_min), log(self.scale_max), steps, base=e)
    elif scale == 'linear':
        x = np.linspace(self.scale_min, self.scale_max, steps)
    # Evaluate once, then pull out the requested parameter per point.
    values = np.array([point[key] for point in self.fun(x)])
    if part == 're':
        return x, values.real
    elif part == 'im':
        return x, values.imag
Return a tuple of arrays x, y that can be fed to plt.plot, where x is the scale in GeV and y is the parameter of interest. Parameters: - key: dicionary key of the parameter to be plotted (e.g. a WCxf coefficient name or a SM parameter like 'g') - part: plot the real part 're' (default) or the imaginary part 'im' - scale: 'log'; make the x steps logarithmically distributed; for 'linear', linearly distributed - steps: steps in x to take (default: 50)
def save(self):
    """Save this entry and return its id.

    Assigns a new id (via ``self.objects.id``) when the entry has none
    and sets the ``id`` attribute accordingly. Stores the result of
    :meth:`prepare_save`, then runs :meth:`post_save`.
    """
    id = self.id or self.objects.id(self.name)
    self.objects[id] = self.prepare_save(dict(self))
    self.id = id
    self.post_save()
    return id
Save this entry. If the entry does not have an :attr:`id`, a new id will be assigned, and the :attr:`id` attribute set accordingly. Pre-save processing of the fields saved can be done by overriding the :meth:`prepare_save` method. Additional actions to be done after the save operation has been completed can be added by defining the :meth:`post_save` method.
def filter_silenced(self):
    """Bail out of handling when the event's client or check is silenced.

    Checks silence stashes for the client, the client/check pair, and
    the check across all clients.
    """
    stashes = [
        ('client', '/silence/{}'.format(self.event['client']['name'])),
        ('check', '/silence/{}/{}'.format(
            self.event['client']['name'],
            self.event['check']['name'])),
        ('check', '/silence/all/{}'.format(self.event['check']['name']))
    ]
    for scope, path in stashes:
        if self.stash_exists(path):
            self.bail(scope + ' alerts silenced')
Determine whether a check is silenced and shouldn't handle.
def _database(self, writable=False):
    """Return a xapian database handle for this index.

    Optional arguments:
        ``writable`` -- Open the database in read/write mode (default=False)

    Returns a xapian.Database or xapian.WritableDatabase (or the shared
    in-memory database when the path is MEMORY_DB_NAME).

    :raises InvalidIndexError: when a read-only open fails
    """
    if self.path == MEMORY_DB_NAME:
        # Lazily create, then reuse, a single in-memory database.
        if not self.inmemory_db:
            self.inmemory_db = xapian.inmemory_open()
        return self.inmemory_db
    if writable:
        database = xapian.WritableDatabase(self.path, xapian.DB_CREATE_OR_OPEN)
    else:
        try:
            database = xapian.Database(self.path)
        except xapian.DatabaseOpeningError:
            raise InvalidIndexError('Unable to open index at %s' % self.path)
    return database
Private method that returns a xapian.Database for use. Optional arguments: ``writable`` -- Open the database in read/write mode (default=False) Returns an instance of a xapian.Database or xapian.WritableDatabase
def _detect(self):
    """Detect uninitialized state variables across derived contracts.

    Returns:
        list: one json result per uninitialized variable, annotated
        with the variable and the functions that use it.
    """
    results = []
    for c in self.slither.contracts_derived:
        ret = self.detect_uninitialized(c)
        for variable, functions in ret:
            info = "{}.{} ({}) is never initialized. It is used in:\n"
            info = info.format(variable.contract.name,
                               variable.name,
                               variable.source_mapping_str)
            for f in functions:
                info += "\t- {} ({})\n".format(f.name, f.source_mapping_str)
            # NOTE(review): `source` is built but never used afterwards.
            source = [variable.source_mapping]
            source += [f.source_mapping for f in functions]
            json = self.generate_json_result(info)
            self.add_variable_to_json(variable, json)
            self.add_functions_to_json(functions, json)
            results.append(json)
    return results
Detect uninitialized state variables Recursively visit the calls Returns: dict: [contract name] = set(state variable uninitialized)
def capture(self, pattern, name=None):
    """Hook ``munge`` to capture a value from this field via a regex.

    :param pattern: a regex pattern string or a precompiled pattern object.
    :param name: optional capture-group name; falls back to ``self.name``
        and then to positional group 1.
    :returns: the result of attaching the generated munge function.
    """
    # NOTE(review): ``basestring`` exists only on Python 2 -- this module
    # appears to target py2; confirm before porting.
    if isinstance(pattern, basestring):
        pattern = re.compile(pattern)
    def munge(self, value):
        match = pattern.match(value)
        if not match:
            # No match: sentinel NONE signals "no value captured".
            return NONE
        # Prefer the named group (explicit ``name`` or this field's own
        # name), then fall back to the first positional group.
        for group in [name or self.name, 1]:
            try:
                return match.group(group)
            except IndexError:
                pass
        return NONE
    return self.munge.attach(self)(munge)
Hooks munge to capture a value based on a regex.
def stops(self):
    """Return the set of all stops visited by this agency's trips."""
    visited = set()
    for stop_time in self.stop_times():
        visited.update(stop_time.stops())
    return visited
Return all stops visited by trips for this agency.
def get_raw_data(self):
    """Return the raw HID report bytes as a read-only list.

    Only valid for output or feature reports; raises HIDError otherwise.
    """
    if self.__report_kind not in (HidP_Output, HidP_Feature):
        raise HIDError("Only for output or feature reports")
    # Refresh the internal byte storage before exposing it.
    self.__prepare_raw_data()
    return helpers.ReadOnlyList(self.__raw_data)
Get raw HID report based on internal report item settings, creates new c_ubytes storage
def loadFromTemplate(template, stim=None):
    """Populate a stimulus from the state stored in a template dict.

    :param template: dict with all info necessary to recreate the stimulus
    :param stim: StimulusModel to populate; a new one is created if None
    :returns: the populated StimulusModel
    """
    if stim is None:
        stim = StimulusModel()
    stim.setRepCount(template['reps'])
    stim.setUserTag(template.get('user_tag', ''))
    classes = get_stimuli_models()
    for comp_doc in template['components']:
        component = get_component(comp_doc['stim_type'], classes)
        component.loadState(comp_doc)
        stim.insertComponent(component, *comp_doc['index'])
    autoparams = template['autoparameters']
    for param in autoparams:
        # Replace stored component indices with live component objects.
        param['selection'] = [stim.component(*index)
                              for index in param['selection']]
    stim.autoParams().setParameterList(autoparams)
    stim.setReorderFunc(order_function(template['reorder']),
                        template['reorder'])
    stim.setStimType(template['testtype'])
    return stim
Loads the stimulus to the state provided by a template :param template: dict that includes all info necessary to recreate stim :type template: dict :param stim: Stimulus to apply to, creates a new model if None :type stim: StimulusModel
def parse_stage_name(stage):
    """Determine the name of a stage.

    The stage may be given as a name string, as an object with a ``name``
    attribute (e.g. a Stage), or as a callable with ``__name__``.

    :param str | pypiper.Stage | function stage: stage representation.
    :return str: name of the putative pipeline stage.
    :raise TypeError: if no name can be derived.
    """
    if isinstance(stage, str):
        return stage
    missing = object()
    for attr in ("name", "__name__"):
        value = getattr(stage, attr, missing)
        if value is not missing:
            return value
    raise TypeError("Unsupported stage type: {}".format(type(stage)))
Determine the name of a stage. The stage may be provided already as a name, as a Stage object, or as a callable with __name__ (e.g., function). :param str | pypiper.Stage | function stage: Object representing a stage, from which to obtain name. :return str: Name of putative pipeline Stage.
def build_histogram(data, colorscale=None, nbins=10):
    """Build a histogram summary of ``data``.

    Bins span the fixed range [0, 1]. Each entry carries the bar height
    relative to the tallest bin (0-100), the percentage of all samples,
    and a color mapped from the bin midpoint through ``colorscale``.
    """
    if colorscale is None:
        colorscale = colorscale_default
    colorscale = _colors_to_rgb(colorscale)
    counts, edges = np.histogram(data, range=(0, 1), bins=nbins)
    midpoints = np.mean(np.array(list(zip(edges, edges[1:]))), axis=1)
    tallest = max(counts)
    total = sum(counts)
    bars = []
    for count, mid in zip(counts, midpoints):
        bars.append({
            "height": np.floor(((count / tallest) * 100) + 0.5),
            "perc": round((count / total) * 100.0, 1),
            "color": _map_val2color(mid, 0.0, 1.0, colorscale),
        })
    return bars
Build histogram of data based on values of color_function
def clear_context(pid_file):
    """Called at exit; signals that there is no active notebook.

    NOTE(review): this function was deliberately short-circuited with an
    immediate ``return``; everything after it -- including a
    ``raise RuntimeError("Should not happen")`` and the code that renamed
    the context file and wrote a termination timestamp -- was unreachable
    dead code and has been removed. The observable no-op behavior is
    preserved. If the context file should again be moved aside on exit,
    re-implement that cleanup consciously.
    """
    return
Called at exit. Delete the context file to signal there is no active notebook. We don't delete the whole file, but leave it around for debugging purposes. Maybe later we want to pass some information back to the web site.
def save_file(client, bucket, data_file, items, dry_run=None):
    """Write ``items`` as JSON to ``data_file`` in the given S3 bucket.

    In dry-run mode only the debug log line is emitted and nothing is
    written.
    """
    logger.debug(
        'Writing {number_items} items to s3. Bucket: {bucket} Key: {key}'.format(
            number_items=len(items), bucket=bucket, key=data_file))
    if dry_run:
        return None
    return _put_to_s3(client, bucket, data_file, json.dumps(items))
Tries to write JSON data to data file in S3.
def find_unresolved_and_unreferenced_symbols(self):
    """Find unresolved symbols, and unreferenced symbols, from this scope.

    :returns: a 2-tuple ``({unresolved}, {unreferenced})``; builtins are
        excluded from the unreferenced set.
    """
    unresolved = set()
    # Start from everything defined here; the recursive walk prunes
    # entries from ``unreferenced`` as references are found and collects
    # names it cannot resolve into ``unresolved``.
    unreferenced = self._definitions.copy()
    self._collect_unresolved_and_unreferenced(set(), set(), unresolved, unreferenced, frozenset(self._definitions), start=True)
    return unresolved, unreferenced - Scope.ALL_BUILTINS
Find any unresolved symbols, and unreferenced symbols from this scope. :returns: ({unresolved}, {unreferenced})
def set_controller(self, controllers):
    """Set the OpenFlow controller address(es) for this bridge.

    Corresponds to::

        $ ovs-vsctl set-controller <bridge> <target>...
    """
    cmd = ovs_vsctl.VSCtlCommand('set-controller', [self.br_name])
    cmd.args.extend(controllers)
    self.run_command([cmd])
Sets the OpenFlow controller address. This method is corresponding to the following ovs-vsctl command:: $ ovs-vsctl set-controller <bridge> <target>...
def set_icon(self, icon):
    """Record and apply the listitem's icon image; returns the result of
    the underlying ``setIconImage`` call."""
    self._icon = icon
    listitem = self._listitem
    return listitem.setIconImage(icon)
Sets the listitem's icon image
def binaryRecordsStream(self, directory, recordLength):
    """Create an input stream that monitors a Hadoop-compatible file
    system for new files and reads them as flat binary files with
    fixed-length records.

    Files must be written to the monitored directory by "moving" them
    from another location within the same file system. File names
    starting with ``.`` are ignored.

    @param directory: Directory to load data from
    @param recordLength: Length of each record in bytes
    """
    # NoOpSerializer: records come back as raw bytes, undeserialized.
    return DStream(self._jssc.binaryRecordsStream(directory, recordLength), self, NoOpSerializer())
Create an input stream that monitors a Hadoop-compatible file system for new files and reads them as flat binary files with records of fixed length. Files must be written to the monitored directory by "moving" them from another location within the same file system. File names starting with . are ignored. @param directory: Directory to load data from @param recordLength: Length of each record in bytes
def start_monitoring(seconds_frozen=SECONDS_FROZEN,
                     test_interval=TEST_INTERVAL):
    """Start a daemon thread that watches for hanging threads.

    seconds_frozen -- how long a thread must hang before its stack trace
        is printed (default 10).
    test_interval -- sleep time of the monitoring thread, in milliseconds
        (default 100).
    Returns the started StoppableThread.
    """
    monitor_thread = StoppableThread(target=monitor,
                                     args=(seconds_frozen, test_interval))
    monitor_thread.daemon = True
    monitor_thread.start()
    return monitor_thread
Start monitoring for hanging threads. seconds_frozen - How long a thread must hang to activate printing stack trace - default(10) test_interval - Sleep time of monitoring thread (in milliseconds) - default(100)
def init(self):
    """Init the connection to the rabbitmq server.

    Returns an open channel, or None when export is disabled or the
    connection attempt fails.
    """
    if not self.export_enable:
        return None
    try:
        # URL is built inside the try so that bad credential attributes
        # (e.g. a non-string port) are caught like any other failure.
        url = ('amqp://' + self.user + ':' + self.password + '@' +
               self.host + ':' + self.port + '/')
        connection = pika.BlockingConnection(pika.URLParameters(url))
        return connection.channel()
    except Exception as e:
        logger.critical("Connection to rabbitMQ failed : %s " % e)
        return None
Init the connection to the rabbitmq server.
def get_tweepy_auth(twitter_api_key, twitter_api_secret,
                    twitter_access_token, twitter_access_token_secret):
    """Build and return a tweepy OAuth handler for the given credentials."""
    handler = tweepy.OAuthHandler(twitter_api_key, twitter_api_secret)
    handler.set_access_token(twitter_access_token,
                             twitter_access_token_secret)
    return handler
Make a tweepy auth object
def analysis(self):
    """Get the musical analysis of the song using the librosa library.

    The result is memoized on ``self._analysis`` and, when ``cache_dir``
    is set, also pickled to disk keyed by the file checksum.
    """
    if self._analysis is not None:
        # Already computed during this session.
        return self._analysis
    if self.cache_dir is not None:
        path = os.path.join(self.cache_dir, self.checksum)
        try:
            if self.refresh_cache:
                # Force recomputation by jumping to the except branch.
                raise IOError
            with open(path + '.pickle', 'rb') as pickle_file:
                self._analysis = pickle.load(pickle_file)
        except IOError:
            # Cache miss (or forced refresh): analyze, then write cache.
            self._analysis = librosa_analysis.analyze_frames(self.all_as_mono(), self.samplerate)
            with open(path + '.pickle', 'wb') as pickle_file:
                pickle.dump(self._analysis, pickle_file, pickle.HIGHEST_PROTOCOL)
    else:
        # No cache directory configured: compute in memory only.
        self._analysis = librosa_analysis.analyze_frames(self.all_as_mono(), self.samplerate)
    return self._analysis
Get musical analysis of the song using the librosa library
def params(self):
    """A combined MultiDict with POST and GET parameters, computed once
    and cached on the request."""
    if self._GETPOST is None:
        combined = MultiDict(self.GET)
        combined.update(dict(self.POST))
        self._GETPOST = combined
    return self._GETPOST
A combined MultiDict with POST and GET parameters.
def _get_parents(folds, linenum): parents = [] for fold in folds: start, end = fold.range if linenum >= start and linenum <= end: parents.append(fold) else: continue return parents
Get the parents at a given linenum. If parents is empty, then the linenum belongs to the module. Parameters ---------- folds : list of :class:`FoldScopeHelper` linenum : int The line number to get parents for. Typically this would be the cursor position. Returns ------- parents : list of :class:`FoldScopeHelper` A list of :class:`FoldScopeHelper` objects that describe the definition hierarchy for the given ``linenum``. The 1st index will be the top-level parent defined at the module level while the last index will be the class or function that contains ``linenum``.
def full(shape, fill_value, dtype=None, **kwargs):
    """Create an array of the given shape, filled with ``fill_value``.

    Args:
        shape (sequence of ints): 2D shape of the array.
        fill_value (scalar or numpy.ndarray): Fill value or array.
        dtype (data-type, optional): Desired data-type for the array.
        kwargs (optional): Other arguments of the array (coords, attrs,
            and name).

    Returns:
        array (decode.array): Decode array filled with ``fill_value``.
    """
    filled = dc.zeros(shape, **kwargs) + fill_value
    return filled.astype(dtype)
Create an array of given shape and type, filled with `fill_value`. Args: shape (sequence of ints): 2D shape of the array. fill_value (scalar or numpy.ndarray): Fill value or array. dtype (data-type, optional): Desired data-type for the array. kwargs (optional): Other arguments of the array (*coords, attrs, and name). Returns: array (decode.array): Decode array filled with `fill_value`.
def sections(self) -> list:
    """Re-read the config file from disk and return its section names."""
    parser = self.config
    parser.read(self.filepath)
    return parser.sections()
List of sections.
def exists(stream_name, region=None, key=None, keyid=None, profile=None):
    """Check if the Kinesis stream exists.

    Returns ``{'result': False, 'error': ...}`` when it does not.

    CLI example::

        salt myminion boto_kinesis.exists my_stream region=us-east-1
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    stream = _get_basic_stream(stream_name, conn)
    if 'error' in stream:
        return {'result': False, 'error': stream['error']}
    return {'result': True}
Check if the stream exists. Returns False and the error if it does not. CLI example:: salt myminion boto_kinesis.exists my_stream region=us-east-1
def find_extensions(self, tag=None, namespace=None):
    """Search extension elements for child nodes with the desired name.

    Returns the extension elements within this object whose tag and/or
    namespace match those passed in. A falsy ``tag`` or ``namespace``
    acts as a wildcard: specify only ``namespace`` to find all
    extensions in that namespace, only ``tag`` to match across
    namespaces, or neither to get every extension element.

    :param tag: str (optional) The desired tag
    :param namespace: str (optional) The desired namespace
    :return: A list of matching extension elements
    """
    # A single filter replaces the original four near-identical loops;
    # the wildcard semantics (truthiness, not ``is None``) are preserved.
    return [element for element in self.extension_elements
            if (not tag or element.tag == tag)
            and (not namespace or element.namespace == namespace)]
Searches extension elements for child nodes with the desired name. Returns a list of extension elements within this object whose tag and/or namespace match those passed in. To find all extensions in a particular namespace, specify the namespace but not the tag name. If you specify only the tag, the result list may contain extension elements in multiple namespaces. :param tag: str (optional) The desired tag :param namespace: str (optional) The desired namespace :Return: A list of elements whose tag and/or namespace match the parameters values
def floor_func(self, addr):
    """Return the function with the greatest address <= ``addr``.

    :param int addr: The address to query.
    :return: A Function instance, or None if no function starts at or
        before ``addr``.
    :rtype: Function or None
    """
    fmap = self._function_map
    try:
        # Both the floor lookup and the indexing may raise KeyError.
        return fmap[fmap.floor_addr(addr)]
    except KeyError:
        return None
Return the function who has the greatest address that is less than or equal to `addr`. :param int addr: The address to query. :return: A Function instance, or None if there is no other function before `addr`. :rtype: Function or None
async def set_record(self, *,
                     chat: typing.Union[str, int, None] = None,
                     user: typing.Union[str, int, None] = None,
                     state=None, data=None, bucket=None):
    """Write an FSM record (state, data, bucket) for a chat/user to Redis.

    :param bucket:
    :param chat:
    :param user:
    :param state:
    :param data:
    :return:
    """
    chat, user = self.check_address(chat=chat, user=user)
    record = {
        'state': state,
        'data': data if data is not None else {},
        'bucket': bucket if bucket is not None else {},
    }
    conn = await self.redis()
    await conn.execute('SET', f"fsm:{chat}:{user}", json.dumps(record))
Write record to storage :param bucket: :param chat: :param user: :param state: :param data: :return:
def map_dict(key_map, *dicts, copy=False, base=None):
    """Return a combined dict with keys renamed through ``key_map``.

    Keys absent from ``key_map`` are kept unchanged.

    :param key_map: mapping of old key -> new key
    :type key_map: dict
    :param dicts: dicts to combine (later dicts win on conflicts)
    :type dicts: dict
    :param copy: if True, return a deepcopy of input values
    :type copy: bool, optional
    :param base: base dict to combine the result into
    :type base: dict, optional
    :return: a single dict with remapped keys
    :rtype: dict

    Example::

        >>> d = map_dict({'a': 'c', 'b': 'd'}, {'a': 1, 'b': 1}, {'b': 2})
        >>> sorted(d.items())
        [('c', 1), ('d', 2)]
    """
    combined = combine_dicts(*dicts)
    remapped = {key_map.get(key, key): value
                for key, value in combined.items()}
    return combine_dicts(remapped, copy=copy, base=base)
Returns a dict with new key values. :param key_map: A dictionary that maps the dict keys ({old key: new key} :type key_map: dict :param dicts: A sequence of dicts. :type dicts: dict :param copy: If True, it returns a deepcopy of input values. :type copy: bool, optional :param base: Base dict where combine multiple dicts in one. :type base: dict, optional :return: A unique dict with new key values. :rtype: dict Example:: >>> d = map_dict({'a': 'c', 'b': 'd'}, {'a': 1, 'b': 1}, {'b': 2}) >>> sorted(d.items()) [('c', 1), ('d', 2)]
def get_fullname(snapshot):
    """Return the actor's fullname for the snapshot, falling back to the
    actor identifier when no "fullname" property is set."""
    actor = get_actor(snapshot)
    return api.get_user_properties(actor).get("fullname", actor)
Get the actor's fullname of the snapshot
def load_from_string(self, content, container, **kwargs):
    """Load config from the given string by wrapping it in a StringIO
    and delegating to :meth:`load_from_stream`.

    :param content: Config content string
    :param container: callable to make a container object later
    :param kwargs: optional keyword parameters to be sanitized :: dict
    :return: Dict-like object holding config parameters
    """
    stream = anyconfig.compat.StringIO(content)
    return self.load_from_stream(stream, container, **kwargs)
Load config from the given string ``content``. :param content: Config content string :param container: callable to make a container object later :param kwargs: optional keyword parameters to be sanitized :: dict :return: Dict-like object holding config parameters
def _domain_event_agent_lifecycle_cb(conn, domain, state, reason, opaque):
    """Handle libvirt domain guest-agent lifecycle events by forwarding
    them onto the salt event bus with human-readable state/reason names."""
    event_data = {
        'state': _get_libvirt_enum_string(
            'VIR_CONNECT_DOMAIN_EVENT_AGENT_LIFECYCLE_STATE_', state),
        'reason': _get_libvirt_enum_string(
            'VIR_CONNECT_DOMAIN_EVENT_AGENT_LIFECYCLE_REASON_', reason),
    }
    _salt_send_domain_event(opaque, conn, domain, opaque['event'], event_data)
Domain agent lifecycle events handler
def clean_pid_file(pidfile):
    """Remove the PID file if it exists; falsy paths are ignored.

    Fix: the original ``exists()``-then-``unlink()`` pair was a TOCTOU
    race -- another process removing the file between the two calls would
    raise. EAFP removal tolerates the file already being gone.

    :param pidfile: path to the PID file, or a falsy value to do nothing.
    """
    if not pidfile:
        return
    try:
        os.unlink(pidfile)
    except FileNotFoundError:
        # Already gone (or lost the race to another cleanup) -- fine.
        pass
clean pid file.
def geometric_center(coords, periodic):
    """Geometric center taking into account periodic boundaries.

    Maps each coordinate onto a circle, averages the resulting unit
    vectors, and maps the mean angle back into box coordinates (the
    standard circular-mean construction).
    """
    box = periodic
    angles = 2 * np.pi * (coords / box)
    scale = box / (2 * np.pi)
    cos_sum = (np.cos(angles) * scale).sum(axis=0)
    sin_sum = (np.sin(angles) * scale).sum(axis=0)
    # Shift arctan2's (-pi, pi] range into [0, 2*pi).
    mean_angle = np.arctan2(-sin_sum, -cos_sum) + np.pi
    return mean_angle * scale
Geometric center taking into account periodic boundaries
def nsdiffs(x, m, max_D=2, test='ocsb', **kwargs):
    """Estimate the seasonal differencing term, ``D``.

    Repeatedly applies a seasonal unit-root test and seasonally
    differences ``x`` until the series is judged seasonally stationary
    or ``max_D`` differences have been taken.

    Parameters
    ----------
    x : array-like, shape=(n_samples, [n_features])
        The array to difference.
    m : int
        The number of seasonal periods (frequency of the time series).
    max_D : int, optional (default=2)
        Maximum number of seasonal differences allowed; must be a
        positive integer.
    test : str, optional (default='ocsb')
        Seasonality unit-root test to use ("ocsb" or "ch"). The CHTest
        is very slow for large data.

    Returns
    -------
    D : int
        The estimated seasonal differencing term; 0 for a constant
        series.
    """
    if max_D <= 0:
        raise ValueError('max_D must be a positive integer')
    tester = get_callable(test, VALID_STESTS)(m, **kwargs)
    estimate = tester.estimate_seasonal_differencing_term
    x = column_or_1d(
        check_array(x, ensure_2d=False, force_all_finite=True, dtype=DTYPE))
    if is_constant(x):
        return 0
    D = 0
    # Condition order matches the original: the test runs before the
    # max_D bound is checked.
    while estimate(x) == 1 and D < max_D:
        D += 1
        x = diff(x, lag=m)
        if is_constant(x):
            return D
    return D
Estimate the seasonal differencing term, ``D``. Perform a test of seasonality for different levels of ``D`` to estimate the number of seasonal differences required to make a given time series stationary. Will select the maximum value of ``D`` for which the time series is judged seasonally stationary by the statistical test. Parameters ---------- x : array-like, shape=(n_samples, [n_features]) The array to difference. m : int The number of seasonal periods (i.e., frequency of the time series) max_D : int, optional (default=2) Maximum number of seasonal differences allowed. Must be a positive integer. The estimated value of ``D`` will not exceed ``max_D``. test : str, optional (default='ocsb') Type of unit root test of seasonality to use in order to detect seasonal periodicity. Valid tests include ("ocsb", "ch"). Note that the CHTest is very slow for large data. Returns ------- D : int The estimated seasonal differencing term. This is the maximum value of ``D`` such that ``D <= max_D`` and the time series is judged seasonally stationary. If the time series is constant, will return 0.
def python_date_format(self, long_format=None, time_only=False):
    """Convert bika domain date format msgstrs to Python strftime format
    strings, by the same rules as ulocalized_time.

    Falls back to the portal's ``site_properties`` format strings when
    the msgid is missing or comes back untranslated.

    XXX i18nl10n.py may change, and that is where this code is taken from.
    """
    # Fixes vs. the original: the ``cond and a or b`` anti-pattern is a
    # ternary, two chained startswith() calls use a tuple argument, and
    # the local no longer shadows the ``format`` builtin.
    msgid = 'date_format_long' if long_format else 'date_format_short'
    if time_only:
        msgid = 'time_format'
    formatstring = translate(msgid, domain="senaite.core",
                             context=self.request)
    if formatstring is None or formatstring.startswith(('date_', 'time_')):
        # Untranslated msgid: log it and fall back to portal defaults.
        self.logger.error("bika/%s/%s could not be translated" %
                          (self.request.get('LANGUAGE'), msgid))
        properties = getToolByName(self.context,
                                   'portal_properties').site_properties
        if long_format:
            fmt = properties.localLongTimeFormat
        elif time_only:
            fmt = properties.localTimeOnlyFormat
        else:
            fmt = properties.localTimeFormat
        return fmt
    # Translate ${directive} placeholders into strftime %directive.
    return formatstring.replace(r"${", '%').replace('}', '')
This convert bika domain date format msgstrs to Python strftime format strings, by the same rules as ulocalized_time. XXX i18nl10n.py may change, and that is where this code is taken from.
def setup_datafind_runtime_frames_multi_calls_perifo(cp, scienceSegs,
                                                     outputDir, tags=None):
    """Query the datafind server (one call per science segment) and
    return both the caches and the individual frame files they contain.

    Parameters
    -----------
    cp : ConfigParser.ConfigParser instance
        Parsed workflow configuration.
    scienceSegs : dict of ifo-keyed segmentlist instances
        Times the workflow is expected to analyse.
    outputDir : path
        Directory for all datafind output files.
    tags : list of strings, optional (default=None)
        Tags used to select per-call configuration sections and to
        uniqueify output file names.

    Returns
    --------
    datafindcaches : list of glue.lal.Cache instances
    datafindOuts : pycbc.workflow.core.FileList
    """
    caches, _ = setup_datafind_runtime_cache_multi_calls_perifo(
        cp, scienceSegs, outputDir, tags=tags)
    return caches, convert_cachelist_to_filelist(caches)
This function uses the glue.datafind library to obtain the location of all the frame files that will be needed to cover the analysis of the data given in scienceSegs. This function will not check if the returned frames cover the whole time requested, such sanity checks are done in the pycbc.workflow.setup_datafind_workflow entry function. As opposed to setup_datafind_runtime_single_call_perifo this call will one call to the datafind server for every science segment. This function will return a list of files corresponding to the individual frames returned by the datafind query. This will allow pegasus to more easily identify all the files used as input, but may cause problems for codes that need to take frame cache files as input. Parameters ----------- cp : ConfigParser.ConfigParser instance This contains a representation of the information stored within the workflow configuration files scienceSegs : Dictionary of ifo keyed glue.segment.segmentlist instances This contains the times that the workflow is expected to analyse. outputDir : path All output files written by datafind processes will be written to this directory. tags : list of strings, optional (default=None) Use this to specify tags. This can be used if this module is being called more than once to give call specific configuration (by setting options in [workflow-datafind-${TAG}] rather than [workflow-datafind]). This is also used to tag the Files returned by the class to uniqueify the Files and uniqueify the actual filename. FIXME: Filenames may not be unique with current codes! Returns -------- datafindcaches : list of glue.lal.Cache instances The glue.lal.Cache representations of the various calls to the datafind server and the returned frame files. datafindOuts : pycbc.workflow.core.FileList List of all the datafind output files for use later in the pipeline.
def _get_format_module(image_format):
    """Get the format module to use for optimizing the image.

    Returns ``(module, nag_about_gifs)`` where the module is None when
    no optimizer is selected for the format; the flag is only set for
    GIFs.
    """
    if detect_format.is_format_selected(image_format,
                                        Settings.to_png_formats,
                                        png.PROGRAMS):
        return png, False
    if detect_format.is_format_selected(image_format, jpeg.FORMATS,
                                        jpeg.PROGRAMS):
        return jpeg, False
    if detect_format.is_format_selected(image_format, gif.FORMATS,
                                        gif.PROGRAMS):
        return gif, True
    return None, False
Get the format module to use for optimizing the image.
def add_clink(self, my_clink):
    """Add a clink to the causalRelations layer, creating the layer and
    attaching its node to the root on first use.

    @type my_clink: L{Cclink}
    @param my_clink: clink object
    """
    if self.causalRelations_layer is None:
        layer = CcausalRelations()
        self.causalRelations_layer = layer
        self.root.append(layer.get_node())
    self.causalRelations_layer.add_clink(my_clink)
Adds a clink to the causalRelations layer @type my_clink: L{Cclink} @param my_clink: clink object
def quit_all(editor, force=False):
    """Quit all, by delegating to :func:`quit` with ``all_=True``."""
    quit(editor, force=force, all_=True)
Quit all.
def sample_stats_to_xarray(self):
    """Convert sample_stats samples to an xarray dataset."""
    samples = self.sample_stats
    if not isinstance(samples, dict):
        raise TypeError("DictConverter.sample_stats is not a dictionary")
    return dict_to_dataset(samples, library=None, coords=self.coords,
                           dims=self.dims)
Convert sample_stats samples to xarray.