code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def find_frequencies(data, freq=44100, bits=16):
    """Convert audio data into a frequency-amplitude table using a fast
    fourier transformation.

    Return a list of tuples (frequency, amplitude).  Data should only
    contain one channel of audio.

    :param data: single-channel audio samples
    :param freq: sample rate in Hz
    :param bits: sample width (unused here; kept for API compatibility)
    """
    n = len(data)
    p = _fft(data)
    # BUG FIX: numpy.ceil returns a float, which is not a valid slice
    # index on Python 3 -- cast to int.
    unique_pts = int(numpy.ceil((n + 1) / 2.0))
    # Only the first half of the spectrum is unique for real input;
    # normalize and double to account for the discarded mirror half.
    p = [(abs(x) / float(n)) ** 2 * 2 for x in p[0:unique_pts]]
    p[0] = p[0] / 2  # DC bin has no mirror, undo the doubling
    if n % 2 == 0:
        p[-1] = p[-1] / 2  # same for the Nyquist bin when n is even
    s = freq / float(n)  # frequency resolution (Hz per bin)
    freq_array = numpy.arange(0, unique_pts * s, s)
    # Return a real list so the result matches the documented contract
    # (zip() is a lazy iterator on Python 3).
    return list(zip(freq_array, p))
Convert audio data into a frequency-amplitude table using fast fourier transformation. Return a list of tuples (frequency, amplitude). Data should only contain one channel of audio.
def get_lib_module_dict(self):
    """Load the 'lib' directory as a python module, so it can be used to
    provide functions for rowpipe transforms.

    This only works for filesystem packages.
    """
    from importlib import import_module
    # No reference configured: nothing to load.
    if not self.ref:
        return {}
    u = parse_app_url(self.ref)
    if u.scheme == 'file':
        # Make the package directory importable; bail out if that fails.
        if not self.set_sys_path():
            return {}
        # Try each candidate module name until one imports cleanly.
        for module_name in self.lib_dir_names:
            try:
                m = import_module(module_name)
                # Export everything except dunder names.
                return {k: v for k, v in m.__dict__.items() if not k.startswith('__')}
            except ModuleNotFoundError as e:
                # Re-raise only when the failure was caused by some *other*
                # missing module imported inside the candidate.
                if not module_name in str(e):
                    raise
                continue
        else:
            # No candidate module could be imported.
            return {}
Load the 'lib' directory as a python module, so it can be used to provide functions for rowpipe transforms. This only works for filesystem packages.
def grounded_slot_range(self, slot: Optional[Union[SlotDefinition, Optional[str]]]) -> str:
    """Chase a slot's range down to its final, concrete form.

    @param slot: slot (or range name) to resolve
    @return: name of the resolved range
    """
    # A SlotDefinition carries its range by name; unwrap it first.
    if slot is not None and not isinstance(slot, str):
        slot = slot.range
    if slot is None:
        # No range declared anywhere along the chain: use the default.
        return DEFAULT_BUILTIN_TYPE_NAME
    if slot in builtin_names:
        return slot
    # Defined types may themselves be aliases -- follow `typeof` recursively.
    if slot in self.schema.types:
        return self.grounded_slot_range(self.schema.types[slot].typeof)
    return slot
Chase the slot range to its final form @param slot: slot to check @return: name of resolved range
def hydrate(self, database, recursive=True):
    """Reload every ``Document`` contained in this model from *database*.

    :param database: the ``Database`` used as the source for rehydrating.
    :param recursive: when False, only direct children are reloaded.
    :return: this instance, updated in place.
    """
    if isinstance(self, Document):
        self.reload(database)
    for field_name in self:
        value = getattr(self, field_name)
        if not isinstance(value, Document):
            continue
        value.reload(database)
        if recursive:
            value.hydrate(database)
    return self
By default, recursively reloads all instances of Document in the model. Recursion can be turned off. :param database: the `Database` object source for rehydrating. :return: an updated instance of `Document` / self.
def domain_name(self, levels=1):
    """Produce an Internet domain name with the given number of
    subdomain levels.

    >>> domain_name()
    nichols-phillips.com
    >>> domain_name(2)
    williamson-hopkins.jackson.com
    """
    if levels < 1:
        raise ValueError("levels must be greater than or equal to 1")
    word = self.domain_word()
    # Base case is the registrable domain; deeper levels recurse to
    # prepend one more word per level.
    if levels == 1:
        return word + '.' + self.tld()
    return word + '.' + self.domain_name(levels - 1)
Produce an Internet domain name with the specified number of subdomain levels. >>> domain_name() nichols-phillips.com >>> domain_name(2) williamson-hopkins.jackson.com
def closed(self, user):
    """Check whether *user* moved the bug to CLOSED and it was not later
    re-opened.

    Walks the bug history with respect to the ``since``/``until`` window:
    the decision flips to True when *user* sets status to CLOSED inside
    the window, and flips back to False if CLOSED is later removed.
    """
    decision = False
    for record in self.history:
        # Ignore history that predates the reporting window.
        if record["when"] < self.options.since.date:
            continue
        if not decision and record["when"] < self.options.until.date:
            # Inside the window and not yet closed: did this user close it?
            for change in record["changes"]:
                if (change["field_name"] == "status"
                        and change["added"] == "CLOSED"
                        and record["who"] in [user.email, user.name]):
                    decision = True
        else:
            # Already closed (or past the window): a reopen cancels it.
            for change in record["changes"]:
                if (change["field_name"] == "status"
                        and change["removed"] == "CLOSED"):
                    decision = False
    return decision
Moved to CLOSED and not later moved to ASSIGNED
def _reverse_call(self, related_method, *values): related_fields = self._to_fields(*values) for related_field in related_fields: if callable(related_method): related_method(related_field, self.instance._pk) else: getattr(related_field, related_method)(self.instance._pk)
Convert each value to a related field, then call the method on each field, passing self.instance as argument. If related_method is a string, it will be the method of the related field. If it's a callable, it's a function which accept the related field and self.instance.
def get_all_users(self):
    """Return every user known to the API as a list of ``User`` objects."""
    response = requests.get(api_url + 'users/', headers=self.headers)
    print(request_status(response))
    response.raise_for_status()
    return [User(entry) for entry in response.json()]
Returns all the users
def nearest_interval(self, interval):
    """Return the defined interval closest to *interval*.

    :raises IndexError: if *interval* lies more than 25 cents outside the
        defined interval range.
    """
    thresh_range = 25
    low = self.intervals[0] - thresh_range
    high = self.intervals[-1] + thresh_range
    if not (low <= interval <= high):
        raise IndexError("The interval given is beyond " + str(thresh_range) + " cents over the range of intervals defined.")
    return self.intervals[find_nearest_index(self.intervals, interval)]
This function returns the nearest interval to any given interval.
def is_valid(hal_id):
    """Check that a given HAL id is a valid one.

    :param hal_id: The HAL id to be checked.
    :returns: True when the whole id matches the expected pattern.
    """
    match = REGEX.match(hal_id)
    if match is None:
        return False
    return match.group(0) == hal_id
Check that a given HAL id is a valid one. :param hal_id: The HAL id to be checked. :returns: Boolean indicating whether the HAL id is valid or not. >>> is_valid("hal-01258754, version 1") True >>> is_valid("hal-01258754") True >>> is_valid("hal-01258754v2") True >>> is_valid("foobar") False
def _termIsObsolete(oboTerm): isObsolete = False if u'is_obsolete' in oboTerm: if oboTerm[u'is_obsolete'].lower() == u'true': isObsolete = True return isObsolete
Determine whether an obo 'Term' entry is marked as obsolete. :param oboTerm: a dictionary as returned by :func:`maspy.ontology._attributeLinesToDict()` :return: bool
def instance(host=None, port=None):
    """Singleton accessor for the shared ``WebServer``.

    The first call must supply *host* and *port* so the instance can be
    constructed.

    :returns: instance of Server
    """
    existing = getattr(WebServer, "_instance", None)
    if existing is None:
        assert host is not None
        assert port is not None
        WebServer._instance = WebServer(host, port)
    return WebServer._instance
Singleton to return only one instance of Server. :returns: instance of Server
def _load_github_repo():
    """Load the GitHub repository slug from the user's config file.

    :raises RuntimeError: when running under Travis (to prevent infinite
        build loops) or when the config file is missing.
    """
    if 'TRAVIS' in os.environ:
        raise RuntimeError('Detected that we are running in Travis. '
                           'Stopping to prevent infinite loops.')
    repo_path = os.path.join(config_dir, 'repo')
    try:
        with open(repo_path, 'r') as f:
            return f.read()
    except (OSError, IOError):
        raise RuntimeError('Could not find your repository. '
                           'Have you ran `trytravis --repo`?')
Loads the GitHub repository from the users config.
def _make_hlog_numeric(b, r, d):
    """Return a function that numerically computes the hlog transformation
    for the given parameter values, by inverting ``hlog_inv`` with Brent's
    method over the bracket [-2r, 2r].
    """
    def _objective(y, x, b, r, d):
        # Zero of this in y yields hlog(x): hlog_inv(hlog(x)) == x.
        return hlog_inv(y, b, r, d) - x

    def _solve(x):
        return brentq(_objective, -2 * r, 2 * r, args=(x, b, r, d))

    return vectorize(_solve)
Return a function that numerically computes the hlog transformation for given parameter values.
def eval(self, data, name, feval=None):
    """Evaluate for data.

    Parameters
    ----------
    data : Dataset
        Data for the evaluating.
    name : string
        Name of the data.
    feval : callable or None, optional (default=None)
        Customized evaluation function.

    Returns
    -------
    result : list
        List with evaluation results.
    """
    if not isinstance(data, Dataset):
        raise TypeError("Can only eval for Dataset instance")
    data_idx = -1
    # Locate the dataset: index 0 is the training set, validation
    # sets follow at index 1..N.
    if data is self.train_set:
        data_idx = 0
    else:
        for i in range_(len(self.valid_sets)):
            if data is self.valid_sets[i]:
                data_idx = i + 1
                break
    # Unknown dataset: register it as a new validation set first.
    if data_idx == -1:
        self.add_valid(data, name)
        data_idx = self.__num_dataset - 1
    return self.__inner_eval(name, data_idx, feval)
Evaluate for data. Parameters ---------- data : Dataset Data for the evaluating. name : string Name of the data. feval : callable or None, optional (default=None) Customized evaluation function. Should accept two parameters: preds, train_data, and return (eval_name, eval_result, is_higher_better) or list of such tuples. For multi-class task, the preds is group by class_id first, then group by row_id. If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i]. Returns ------- result : list List with evaluation results.
def scores(factors):
    """Compute temperament and quality scores from a list of factors.

    Each factor contributes one point to its temperament and one point to
    each of the two qualities of that temperament.
    """
    temperaments = {
        const.CHOLERIC: 0,
        const.MELANCHOLIC: 0,
        const.SANGUINE: 0,
        const.PHLEGMATIC: 0,
    }
    qualities = {
        const.HOT: 0,
        const.COLD: 0,
        const.DRY: 0,
        const.HUMID: 0,
    }
    for factor in factors:
        temperament = props.base.elementTemperament[factor['element']]
        temperaments[temperament] += 1
        pair = props.base.temperamentQuality[temperament]
        qualities[pair[0]] += 1
        qualities[pair[1]] += 1
    return {'temperaments': temperaments, 'qualities': qualities}
Computes the score of temperaments and elements.
def visit_subscript(self, node, parent):
    """Visit a Subscript node by returning a fresh instance of it."""
    newnode = nodes.Subscript(
        ctx=self._get_context(node),
        lineno=node.lineno,
        col_offset=node.col_offset,
        parent=parent,
    )
    value = self.visit(node.value, newnode)
    slice_node = self.visit(node.slice, newnode)
    newnode.postinit(value, slice_node)
    return newnode
visit a Subscript node by returning a fresh instance of it
def gracefulShutdown(self):
    """Start shutting down.

    If there is no active master connection, stop the reactor
    immediately; otherwise ask the master to shut this worker down once
    any running builds are finished.
    """
    if not self.bf.perspective:
        log.msg("No active connection, shutting down NOW")
        reactor.stop()
        return
    log.msg(
        "Telling the master we want to shutdown after any running builds are finished")
    d = self.bf.perspective.callRemote("shutdown")

    def _shutdownfailed(err):
        # Older masters do not implement worker-initiated shutdown.
        if err.check(AttributeError):
            log.msg(
                "Master does not support worker initiated shutdown. Upgrade master to 0.8.3 or later to use this feature.")
        else:
            log.msg('callRemote("shutdown") failed')
            log.err(err)

    d.addErrback(_shutdownfailed)
    return d
Start shutting down
def layer_covariance(layer1, layer2=None):
    """Compute the covariance matrix between the neurons of two layers.

    If only one layer is passed, computes the symmetric covariance matrix
    of that layer.

    :param layer1: layer whose ``activations`` is an (n, d1) array
    :param layer2: optional second layer; defaults to ``layer1``
    :return: (d1, d2) matrix normalized by the number of data points
    """
    # BUG FIX: `layer2 = layer2 or layer1` would silently replace any
    # falsy-but-valid layer object; test identity with None explicitly.
    if layer2 is None:
        layer2 = layer1
    act1, act2 = layer1.activations, layer2.activations
    num_datapoints = act1.shape[0]
    return np.matmul(act1.T, act2) / float(num_datapoints)
Computes the covariance matrix between the neurons of two layers. If only one layer is passed, computes the symmetric covariance matrix of that layer.
def by_credentials(cls, session, login, password):
    """Get a user from given credentials.

    :param session: SQLAlchemy session
    :param login: username
    :param password: user password
    :return: the matching user, or None when login or password is wrong
    """
    user = cls.by_login(session, login, local=True)
    if user and crypt.check(user.password, password):
        return user
    return None
Get a user from given credentials :param session: SQLAlchemy session :type session: :class:`sqlalchemy.Session` :param login: username :type login: unicode :param password: user password :type password: unicode :return: associated user :rtype: :class:`pyshop.models.User`
def pv_count(self):
    """Return the physical volume count of this volume group.

    The handle is opened for the query and always closed afterwards,
    even when the LVM call raises (the original leaked the handle in
    that case).
    """
    self.open()
    try:
        return lvm_vg_get_pv_count(self.handle)
    finally:
        self.close()
Returns the physical volume count.
def from_polar(degrees, magnitude):
    """Convert polar coordinates to Cartesian coordinates.

    The Y axis is negated (screen coordinate convention).
    """
    radians = math.radians(degrees)
    vec = Vector2()
    vec.X = math.cos(radians) * magnitude
    vec.Y = -math.sin(radians) * magnitude
    return vec
Convert polar coordinates to Cartesian coordinates.
def url(self):
    """The concatenation of the `base_url` and `end_url` that make up
    the resultant url.

    :return: the `base_url` and the `end_url`.
    :rtype: str
    """
    end_url = ("/accounts/{account_id}/alerts/{alert_id}/mentions/"
               "{mention_id}/children?")

    def without_keys(d, keys):
        # Shallow copy of d minus the given keys.
        return {x: d[x] for x in d if x not in keys}

    # These are path (or auth) parameters, not query-string parameters.
    keys = {"access_token", "account_id", "alert_id"}
    parameters = without_keys(self.params, keys)
    # Append each non-empty parameter as a templated query argument.
    for key, value in list(parameters.items()):
        if value != '':
            end_url += '&' + key + '={' + key + '}'
    end_url = end_url.format(**self.params)
    return self._base_url + end_url
The concatenation of the `base_url` and `end_url` that make up the resultant url. :return: the `base_url` and the `end_url`. :rtype: str
def check_version(chrome_exe):
    """Raises SystemExit if `chrome_exe` is not a supported browser version.

    Must run in the main thread to have the desired effect.
    """
    cmd = [chrome_exe, '--version']
    out = subprocess.check_output(cmd, timeout=60)
    m = re.search(br'(Chromium|Google Chrome) ([\d.]+)', out)
    if m is None:
        sys.exit(
            'unable to parse browser version from output of '
            '%r: %r' % (subprocess.list2cmdline(cmd), out))
    version_str = m.group(2).decode()
    major_version = int(version_str.split('.', 1)[0])
    if major_version < 64:
        sys.exit('brozzler requires chrome/chromium version 64 or '
                 'later but %s reports version %s' % (
                     chrome_exe, version_str))
Raises SystemExit if `chrome_exe` is not a supported browser version. Must run in the main thread to have the desired effect.
def merge(a, b):
    """Recursively merge dict ``b`` into dict ``a`` (in place).

    Values from ``b`` win, except where both sides hold dicts, which are
    merged recursively.  Returns ``a``.
    """
    for key, value in b.items():
        if isinstance(a.get(key), dict) and isinstance(value, dict):
            merge(a[key], value)
        else:
            a[key] = value
    return a
merges b into a recursively if a and b are dicts
def create_x10(plm, housecode, unitcode, feature):
    """Create an X10 device from a feature definition.

    Returns None when the feature maps to no known device class.
    """
    from insteonplm.devices.ipdb import IPDB
    deviceclass = IPDB().x10(feature).deviceclass
    if not deviceclass:
        return None
    return deviceclass(plm, housecode, unitcode)
Create an X10 device from a feature definition.
def text(el, strip=True):
    """Return the text of a ``BeautifulSoup`` element.

    Falsy elements (e.g. None) yield the empty string.  The text is
    stripped of surrounding whitespace unless *strip* is False.
    """
    if not el:
        return ""
    content = el.text
    return content.strip() if strip else content
Return the text of a ``BeautifulSoup`` element
def unused(self):
    """Calculate which AST nodes are unused.

    Special care is needed for ``x, y = f(z)`` where x is used later but
    y is not: a definition only counts as unused when its node is not
    shared with any used definition.
    """
    candidates = self.definitions - self.used
    used_nodes = {entry[1] for entry in self.used}
    return {entry for entry in candidates if entry[1] not in used_nodes}
Calculate which AST nodes are unused. Note that we have to take special care in the case of x,y = f(z) where x is used later, but y is not.
def _pick_selected_option(cls): for option in cls.select_el: if not hasattr(option, "selected"): return None if option.selected: return option.value return None
Select handler for authors.
def _validate(self, val):
    """Validate *val* against this parameter's declared class.

    val must be None (when allow_None is set), an instance of
    self.class_ if self.is_instance=True, or a subclass of self.class_
    if self.is_instance=False.  Raises ValueError otherwise.
    """
    # Build a readable class name, handling a tuple of allowed classes.
    if isinstance(self.class_, tuple):
        class_name = ('(%s)' % ', '.join(cl.__name__ for cl in self.class_))
    else:
        class_name = self.class_.__name__
    if self.is_instance:
        # Instance mode: the value itself must be of the declared class.
        if not (isinstance(val, self.class_)) and not (val is None and self.allow_None):
            raise ValueError(
                "Parameter '%s' value must be an instance of %s, not '%s'" % (self.name, class_name, val))
    else:
        # Class mode: the value is expected to be a class object.
        if not (val is None and self.allow_None) and not (issubclass(val, self.class_)):
            raise ValueError(
                "Parameter '%s' must be a subclass of %s, not '%s'" % (val.__name__, class_name, val.__class__.__name__))
val must be None, an instance of self.class_ if self.is_instance=True or a subclass of self_class if self.is_instance=False
def studio(self):
    """Access the Studio Twilio Domain (constructed lazily).

    :returns: Studio Twilio Domain
    :rtype: twilio.rest.studio.Studio
    """
    if self._studio is not None:
        return self._studio
    from twilio.rest.studio import Studio
    self._studio = Studio(self)
    return self._studio
Access the Studio Twilio Domain :returns: Studio Twilio Domain :rtype: twilio.rest.studio.Studio
def pause(env, identifier):
    """Pause an active virtual server after asking for confirmation."""
    vsi = SoftLayer.VSManager(env.client)
    vs_id = helpers.resolve_id(vsi.resolve_ids, identifier, 'VS')
    confirmed = env.skip_confirmations or formatting.confirm(
        'This will pause the VS with id %s. Continue?' % vs_id)
    if not confirmed:
        raise exceptions.CLIAbort('Aborted.')
    env.client['Virtual_Guest'].pause(id=vs_id)
Pauses an active virtual server.
def order_duplicate_volume(self, origin_volume_id, origin_snapshot_id=None,
                           duplicate_size=None, duplicate_iops=None,
                           duplicate_tier_level=None,
                           duplicate_snapshot_size=None,
                           hourly_billing_flag=False):
    """Places an order for a duplicate file volume.

    :param origin_volume_id: The ID of the origin volume to be duplicated
    :param origin_snapshot_id: Origin snapshot ID to use for duplication
    :param duplicate_size: Size/capacity for the duplicate volume
    :param duplicate_iops: The IOPS per GB for the duplicate volume
    :param duplicate_tier_level: Tier level for the duplicate volume
    :param duplicate_snapshot_size: Snapshot space size for the duplicate
    :param hourly_billing_flag: Billing type, monthly (False) or hourly
        (True), default to monthly.
    :return: Returns a SoftLayer_Container_Product_Order_Receipt
    """
    # Fields required by prepare_duplicate_order_object to price the copy.
    file_mask = 'id,billingItem[location,hourlyFlag],snapshotCapacityGb,'\
        'storageType[keyName],capacityGb,originalVolumeSize,'\
        'provisionedIops,storageTierLevel,'\
        'staasVersion,hasEncryptionAtRest'
    origin_volume = self.get_file_volume_details(origin_volume_id,
                                                 mask=file_mask)
    order = storage_utils.prepare_duplicate_order_object(
        self, origin_volume, duplicate_iops, duplicate_tier_level,
        duplicate_size, duplicate_snapshot_size, 'file',
        hourly_billing_flag
    )
    if origin_snapshot_id is not None:
        # Duplicate from a specific snapshot rather than the live volume.
        order['duplicateOriginSnapshotId'] = origin_snapshot_id
    return self.client.call('Product_Order', 'placeOrder', order)
Places an order for a duplicate file volume. :param origin_volume_id: The ID of the origin volume to be duplicated :param origin_snapshot_id: Origin snapshot ID to use for duplication :param duplicate_size: Size/capacity for the duplicate volume :param duplicate_iops: The IOPS per GB for the duplicate volume :param duplicate_tier_level: Tier level for the duplicate volume :param duplicate_snapshot_size: Snapshot space size for the duplicate :param hourly_billing_flag: Billing type, monthly (False) or hourly (True), default to monthly. :return: Returns a SoftLayer_Container_Product_Order_Receipt
def type_name(cls):
    """Return the type name, deriving and caching it from the class name
    when ``__type_name__`` has not been defined.
    """
    if cls.__type_name__:
        return cls.__type_name__.lower()
    # Derive a snake_case name from the CamelCase class name.
    camelcase = re.compile(r'([a-z])([A-Z])')
    derived = camelcase.sub(
        lambda m: '{0}_{1}'.format(m.group(1), m.group(2)), cls.__name__)
    derived = derived[-48:].lower()        # bound the length, keep the tail
    derived = re.sub(r'^_+', '', derived)  # strip any leading underscores
    cls.__type_name__ = derived
    return derived
Returns the type name if it's been defined otherwise, it creates it from the class name
def _return_wrapper(fits, return_all, start, trace):
    """Return all fitted models, or just the first one.

    ``fits`` is normalized to a list first; when *trace* is set, the
    total fit time since *start* is printed.
    """
    if not is_iterable(fits):
        fits = [fits]
    if trace:
        print('Total fit time: %.3f seconds' % (time.time() - start))
    return fits if return_all else fits[0]
If the user wants to get all of the models back, this will return a list of the ARIMA models, otherwise it will just return the model. If this is called from the end of the function, ``fits`` will already be a list. We *know* that if a function call makes it here, ``fits`` is NOT None or it would have thrown an exception in :func:`_post_ppc_arima`. Parameters ---------- fits : iterable or ARIMA The ARIMA(s) return_all : bool Whether to return all.
def indices(self, data):
    """Generate patch start indices over *data*.

    Yields offsets spaced ``self.stride`` apart, leaving room for a full
    patch of ``self.duration`` at the end.
    """
    total = self.data_duration(data)
    yield from range(0, total - self.duration, self.stride)
Generate patch start indices Parameters ---------- data : dict of np.ndarray As produced by pumpp.transform Yields ------ start : int >= 0 The start index of a sample patch
def console_get_char(con: tcod.console.Console, x: int, y: int) -> int:
    """Return the character at the x,y of this console.

    .. deprecated:: 8.4
        Array access performs significantly faster than using this
        function.  See :any:`Console.ch`.
    """
    handle = _console(con)
    return lib.TCOD_console_get_char(handle, x, y)
Return the character at the x,y of this console. .. deprecated:: 8.4 Array access performs significantly faster than using this function. See :any:`Console.ch`.
def iter_files(self, number=-1, etag=None):
    """Iterate over the files associated with this pull request.

    :param int number: (optional) number of files to return; -1 returns
        all available files.
    :param str etag: (optional) ETag from a previous request to the same
        endpoint.
    :returns: generator of PullFile instances
    """
    files_url = self._build_url('files', base_url=self._api)
    return self._iter(int(number), files_url, PullFile, etag=etag)
Iterate over the files associated with this pull request. :param int number: (optional), number of files to return. Default: -1 returns all available files. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`PullFile <PullFile>`\ s
def get_user_avatar(self, userid):
    """Get an avatar by user id.

    :return: (status_code, response) from the admin avatar endpoint.
    """
    call = self.__pod__.User.get_v1_admin_user_uid_avatar(
        sessionToken=self.__session, uid=userid
    )
    response, status_code = call.result()
    self.logger.debug('%s: %s' % (status_code, response))
    return status_code, response
get avatar by user id
def undo(self, i=-1):
    """Undo an item in the history logs.

    :parameter int i: integer for indexing (can be positive or
        negative).  Defaults to -1 (the latest recorded history item).
    :raises ValueError: if no history items have been recorded.
    """
    # Remember whether history was enabled so it can be restored after.
    _history_enabled = self.history_enabled
    param = self.get_history(i)
    # Disable history so the undo operation itself is not recorded.
    self.disable_history()
    param.undo()
    # Drop the consumed history entry.
    self.remove_parameter(uniqueid=param.uniqueid)
    if _history_enabled:
        self.enable_history()
Undo an item in the history logs :parameter int i: integer for indexing (can be positive or negative). Defaults to -1 if not provided (the latest recorded history item) :raises ValueError: if no history items have been recorded
def get_parser(self, prog_name):
    """Build the argument parser for this command.

    Override to add command options.
    """
    return argparse.ArgumentParser(
        description=self.get_description(),
        prog=prog_name,
        add_help=False,
    )
Override to add command options.
def on_step_end(self, **kwargs: Any) -> None:
    """Update the params from master to model and zero grad."""
    model = self.learn.model
    model.zero_grad()
    master2model(self.model_params, self.master_params, self.flat_master)
Update the params from master to model and zero grad.
def get_config(section, option, allow_empty_option=True, default=""):
    """Get data from configs.

    Returns the stored value; an empty value yields "" when empty values
    are allowed, otherwise *default*.  A missing section also yields
    *default*.
    """
    try:
        value = config.get(section, option)
    except ConfigParser.NoSectionError:
        return default
    if value:
        return value
    return "" if allow_empty_option else default
Get data from configs
def onEdge(self, canvas):
    """Return a list of the sides of the sprite touching the canvas edge.

    0 = Bottom, 1 = Left, 2 = Top, 3 = Right
    """
    x = int(self.position[0])
    y = int(self.position[1])
    sides = []
    if x <= 0:
        sides.append(1)  # left edge
    if x + self.image.width >= canvas.width:
        sides.append(3)  # right edge
    if y <= 0:
        sides.append(2)  # top edge
    if y + self.image.height >= canvas.height:
        sides.append(0)  # bottom edge
    return sides
Returns a list of the sides of the sprite which are touching the edge of the canvas. 0 = Bottom 1 = Left 2 = Top 3 = Right
def get_streams(self):
    """Return a snapshot of all streams in existence at time of call."""
    # Taking the write lock guarantees a consistent view of the registry.
    with self._write_lock:
        return itervalues(self._stream_by_id)
Return a snapshot of all streams in existence at time of call.
def finalize(self):
    """Clean up the object; it must not be used afterwards.

    This will be reworked when changing the logging model.
    """
    self.pause_session_logging()
    self._disable_logging()
    # Drop every message-callback reference.
    for attr in ('_msg_callback', '_error_msg_callback',
                 '_warning_msg_callback', '_info_msg_callback'):
        setattr(self, attr, None)
Clean up the object. After calling this method the object can't be used anymore. This will be reworked when changing the logging model.
def get_next_start_id(self):
    """Extract the start id from the 'next' result-set link, if present.

    Returns None when the last result carries no 'next' link.
    """
    next_link = self.rws_connection.last_result.links.get("next")
    if not next_link:
        return None
    parsed = urlparse(next_link['url'])
    return int(parse_qs(parsed.query)['startid'][0])
If link for next result set has been passed, extract it and get the next set start id
def get_current_target(module, module_parameter=None, action_parameter=None):
    """Get the currently selected target for the given module.

    Returns None when the module reports nothing or '(unset)'.
    """
    result = exec_action(module, 'show',
                         module_parameter=module_parameter,
                         action_parameter=action_parameter)[0]
    if not result or result == '(unset)':
        return None
    return result
Get the currently selected target for the given module. module name of the module to be queried for its current target module_parameter additional params passed to the defined module action_parameter additional params passed to the 'show' action CLI Example (current target of system-wide ``java-vm``): .. code-block:: bash salt '*' eselect.get_current_target java-vm action_parameter='system' CLI Example (current target of ``kernel`` symlink): .. code-block:: bash salt '*' eselect.get_current_target kernel
def get_plugins(sites=None):
    """Returns all GoScale plugins.

    All other django-cms plugins are ignored.  When *sites* is given,
    only plugins placed on pages belonging to those sites are returned.
    """
    plugins = []
    for plugin in CMSPlugin.objects.all():
        if plugin:
            cl = plugin.get_plugin_class().model
            # GoScale plugins are recognized by their 'posts' model field.
            if 'posts' in cl._meta.get_all_field_names():
                instance = plugin.get_plugin_instance()[0]
                plugins.append(instance)
    if sites and len(sites) > 0:
        # NOTE(review): `plugin` is reused as the loop variable here,
        # shadowing the one above -- harmless but worth renaming.
        onsite = []
        for plugin in plugins:
            try:
                if plugin.page.site in sites:
                    onsite.append(plugin)
            except AttributeError:
                # Plugin not attached to a page: skip it.
                continue
        return onsite
    return plugins
Returns all GoScale plugins It ignored all other django-cms plugins
def ensure_repo_exists(self):
    """Create a git repo (with a default identity) if none exists yet."""
    if not os.path.isdir(self.cwd):
        os.makedirs(self.cwd)
    git_dir = os.path.join(self.cwd, ".git")
    if os.path.isdir(git_dir):
        return
    self.git.init()
    self.git.config("user.email", "you@example.com")
    self.git.config("user.name", "Your Name")
Create git repo if one does not exist yet
def is_link_text_present(self, link_text):
    """Return True if *link_text* appears among the page's anchor texts.

    The element does not need to be visible (e.g. items hidden inside a
    dropdown still count).  Comparison uses whitespace-stripped text.
    """
    wanted = link_text.strip()
    soup = self.get_beautiful_soup()
    return any(anchor.text.strip() == wanted
               for anchor in soup.find_all('a'))
Returns True if the link text appears in the HTML of the page. The element doesn't need to be visible, such as elements hidden inside a dropdown selection.
def json_as_html(self):
    """Render ``self.json`` as a safely-escaped, pretty-printed HTML block."""
    from cspreports import utils
    pretty = utils.format_report(self.json)
    return mark_safe("<pre>\n%s</pre>" % escape(pretty))
Print out self.json in a nice way.
def find_top_level_directory(start_directory):
    """Find the top-level directory of a project.

    Walks upward from *start_directory* past every package directory
    (one containing ``__init__.py``) and returns the first non-package
    ancestor, as an absolute path.

    :raises ValueError: if the walk would step past the filesystem root.
    """
    current = start_directory
    while os.path.isfile(os.path.join(current, '__init__.py')):
        parent = os.path.dirname(current)
        if parent == os.path.dirname(parent):
            raise ValueError("Can't find top level directory")
        current = parent
    return os.path.abspath(current)
Finds the top-level directory of a project given a start directory inside the project. Parameters ---------- start_directory : str The directory in which test discovery will start.
def clone_source_dir(source_dir, dest_dir):
    """Copy the source Protobuf files into a build directory.

    Any existing *dest_dir* is removed first so the copy is clean.

    Args:
        source_dir (str): source directory of the Protobuf files
        dest_dir (str): destination directory of the Protobuf files
    """
    dest_exists = os.path.isdir(dest_dir)
    if dest_exists:
        print('removing', dest_dir)
        shutil.rmtree(dest_dir)
    shutil.copytree(source_dir, dest_dir)
Copies the source Protobuf files into a build directory. Args: source_dir (str): source directory of the Protobuf files dest_dir (str): destination directory of the Protobuf files
def split_given_spans(self, spans, sep=' '):
    """Split the text into several pieces given by *spans*.

    Resulting texts keep all layers present in this instance; layer
    elements are copied to the pieces whose spans cover them, with their
    positions translated to piece-local coordinates (which can leave a
    layer empty in a particular piece).

    Parameters
    ----------
    spans: list of spans
        Regions that become the individual pieces (spans may themselves
        be lists of spans, denoting multilayer-style regions).
    sep: str
        Separator used to join text pieces of multilayer spans.

    Returns
    -------
    list of Text
        One instance of Text per span.
    """
    N = len(spans)
    # Seed each result with just its covered raw text.
    results = [{TEXT: text} for text in self.texts_from_spans(spans, sep=sep)]
    for elem in self:
        if isinstance(self[elem], list):
            # Distribute this layer's elements over the output spans,
            # translating positions into piece-local coordinates.
            splits = divide_by_spans(self[elem], spans, translate=True, sep=sep)
            for idx in range(N):
                results[idx][elem] = splits[idx]
    return [Text(res) for res in results]
Split the text into several pieces. Resulting texts have all the layers that are present in the text instance that is splitted. The elements are copied to resulting pieces that are covered by their spans. However, this can result in empty layers if no element of a splitted layer fits into a span of a particular output piece. The positions of layer elements that are copied are translated according to the container span, so they are consistent with returned text lengths. Parameters ---------- spans: list of spans. The positions determining the regions that will end up as individual pieces. Spans themselves can be lists of spans, which denote multilayer-style text regions. sep: str The separator that is used to join together text pieces of multilayer spans. Returns ------- list of Text One instance of text per span.
def set_window_iconify_callback(window, cbfun):
    """Sets the iconify callback for the specified window.

    Wrapper for:
        GLFWwindowiconifyfun glfwSetWindowIconifyCallback(GLFWwindow* window, GLFWwindowiconifyfun cbfun);

    Returns the previously registered python callback, if any.
    """
    # Use the window's address as a stable key into the callback registry.
    window_addr = ctypes.cast(ctypes.pointer(window),
                              ctypes.POINTER(ctypes.c_long)).contents.value
    if window_addr in _window_iconify_callback_repository:
        previous_callback = _window_iconify_callback_repository[window_addr]
    else:
        previous_callback = None
    if cbfun is None:
        # A NULL function pointer removes the callback on the GLFW side.
        cbfun = 0
    # Keep a reference to the ctypes wrapper so it is not garbage-collected
    # while GLFW still holds the raw function pointer.
    c_cbfun = _GLFWwindowiconifyfun(cbfun)
    _window_iconify_callback_repository[window_addr] = (cbfun, c_cbfun)
    cbfun = c_cbfun
    _glfw.glfwSetWindowIconifyCallback(window, cbfun)
    if previous_callback is not None and previous_callback[0] != 0:
        return previous_callback[0]
Sets the iconify callback for the specified window. Wrapper for: GLFWwindowiconifyfun glfwSetWindowIconifyCallback(GLFWwindow* window, GLFWwindowiconifyfun cbfun);
def init(lib_name=None, bin_path=None, sdk_path=None):
    """Initialize the Myo SDK by loading the libmyo shared library.

    With no arguments, libmyo must be on your `PATH` or
    `LD_LIBRARY_PATH`.  Alternatively specify the exact library path
    with *lib_name*, the binaries directory with *bin_path*, or the Myo
    SDK root with *sdk_path*.  At most one of the three may be given.
    """
    # The three location arguments are mutually exclusive.
    if sum(bool(x) for x in [lib_name, bin_path, sdk_path]) > 1:
        raise ValueError('expected zero or one arguments')
    if sdk_path:
        # Derive the binaries directory from the SDK layout per platform.
        if sys.platform.startswith('win32'):
            bin_path = os.path.join(sdk_path, 'bin')
        elif sys.platform.startswith('darwin'):
            bin_path = os.path.join(sdk_path, 'myo.framework')
        else:
            raise RuntimeError('unsupported platform: {!r}'.format(sys.platform))
    if bin_path:
        lib_name = os.path.join(bin_path, _getdlname())
    if not lib_name:
        # Fall back to the bare library name and the system search path.
        lib_name = _getdlname()
    # The loaded library handle is exposed module-wide.
    global libmyo
    libmyo = ffi.dlopen(lib_name)
Initialize the Myo SDK by loading the libmyo shared library. With no arguments, libmyo must be on your `PATH` or `LD_LIBRARY_PATH`. You can specify the exact path to libmyo with *lib_name*. Alternatively, you can specify the binaries directory that contains libmyo with *bin_path*. Finally, you can also pass the path to the Myo SDK root directory and it will figure out the path to libmyo by itself.
def main(arguments):
    """Main function of smatch score calculation."""
    global verbose
    global veryVerbose
    global iteration_num
    global single_score
    global pr_flag
    global match_triple_dict
    # Total number of restarts: the requested count plus the initial pass.
    iteration_num = arguments.r + 1
    if arguments.ms:
        single_score = False
    if arguments.v:
        verbose = True
    if arguments.vv:
        veryVerbose = True
    if arguments.pr:
        pr_flag = True
    # Format string controlling the number of significant digits printed.
    floatdisplay = "%%.%df" % arguments.significant
    # NOTE(review): the module-level `args` is used below instead of the
    # `arguments` parameter -- presumably the same object; confirm.
    for (precision, recall, best_f_score) in score_amr_pairs(
            args.f[0], args.f[1],
            justinstance=arguments.justinstance,
            justattribute=arguments.justattribute,
            justrelation=arguments.justrelation):
        if pr_flag:
            print("Precision: " + floatdisplay % precision)
            print("Recall: " + floatdisplay % recall)
        print("F-score: " + floatdisplay % best_f_score)
    args.f[0].close()
    args.f[1].close()
Main function of smatch score calculation
def get_template_path(filename):
    """Find a raw template in the working directory or on sys.path.

    Emulates python module loading to locate templates colocated with
    the Stacker config or in remote package_sources.

    Args:
        filename (str): Template filename.

    Returns:
        Optional[str]: Absolute path to the file, or None if not found.
    """
    if os.path.isfile(filename):
        return os.path.abspath(filename)
    for search_dir in sys.path:
        candidate = os.path.join(search_dir, filename)
        if os.path.isfile(candidate):
            return os.path.abspath(candidate)
    return None
Find raw template in working directory or in sys.path. template_path from config may refer to templates colocated with the Stacker config, or files in remote package_sources. Here, we emulate python module loading to find the path to the template. Args: filename (str): Template filename. Returns: Optional[str]: Path to file, or None if no file found
def _close(self, name, suppress_logging): try: pool_names = list(self.pools) if name in pool_names: self.pools[name].close() del self.pools[name] except Exception as e: self.logger.error('Exception on closing Flopsy Pool for {0}: {1}'.format(name, e), exc_info=not suppress_logging)
Closes one particular pool and all of its AMQP connections.
def from_urdf_file(cls, urdf_file, base_elements=None, last_link_vector=None,
                   base_element_type="link", active_links_mask=None,
                   name="chain"):
    """Creates a chain from an URDF file.

    Parameters
    ----------
    urdf_file: str
        The path of the URDF file
    base_elements: list of strings
        List of the links beginning the chain
    last_link_vector: numpy.array
        Optional : The translation vector of the tip.
    base_element_type: str
    active_links_mask: list[bool]
    name: str
        The name of the Chain
    """
    elements = ["base_link"] if base_elements is None else base_elements
    links = URDF_utils.get_urdf_parameters(
        urdf_file,
        base_elements=elements,
        last_link_vector=last_link_vector,
        base_element_type=base_element_type)
    # Prepend the origin link so the chain starts at the world frame.
    return cls([link_lib.OriginLink()] + links,
               active_links_mask=active_links_mask,
               name=name)
Creates a chain from an URDF file Parameters ---------- urdf_file: str The path of the URDF file base_elements: list of strings List of the links beginning the chain last_link_vector: numpy.array Optional : The translation vector of the tip. name: str The name of the Chain base_element_type: str active_links_mask: list[bool]
def resolve_randconfig(self, args):
    """Resolve the rand-config file name from the job arguments.

    Writes an error to stderr and returns None when no target type is
    given; an explicit 'rand_config' argument overrides the derived name.
    """
    ttype = args.get('ttype')
    if is_null(ttype):
        sys.stderr.write('Target type must be specified')
        return None
    derived = self.randconfig(target_type=ttype, fullpath=True)
    override = args.get('rand_config')
    return override if is_not_null(override) else derived
Get the name of the spectrum file based on the job arguments.
def _openapi_json(self):
    """Serve the OpenAPI spec document as a JSON response."""
    # BUG FIX: removed a leftover debug pprint() that dumped the whole
    # spec to stdout on every request, and build the dict once instead
    # of twice.
    spec = self.to_dict()
    return current_app.response_class(
        json.dumps(spec, indent=4),
        mimetype='application/json')
Serve JSON spec file
def get_morph_files(directory):
    """Get a list of all morphology files in *directory*.

    Returns:
        list of files with extensions '.swc', '.h5' or '.asc'
        (case insensitive)
    """
    candidates = (os.path.join(directory, entry)
                  for entry in os.listdir(directory))
    return [path for path in candidates if _is_morphology_file(path)]
Get a list of all morphology files in a directory Returns: list with all files with extensions '.swc' , 'h5' or '.asc' (case insensitive)
def add_known_host(host, application_name, user=None):
    """Add the given host key to the known hosts file.

    :param host: host name
    :type host: str
    :param application_name: Name of application eg nova-compute-something
    :type application_name: str
    :param user: The user that the ssh asserts are for.
    :type user: str
    """
    # Ask the remote host for its (hashed) RSA host key.
    cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host]
    try:
        remote_key = subprocess.check_output(cmd).strip()
    except Exception as e:
        log('Could not obtain SSH host key from %s' % host, level=ERROR)
        raise e
    current_key = ssh_known_host_key(host, application_name, user)
    if current_key and remote_key:
        if is_same_key(remote_key, current_key):
            # Stored key already matches: nothing to do.
            log('Known host key for compute host %s up to date.' % host)
            return
        else:
            # Stale entry: drop it before appending the fresh key.
            remove_known_host(host, application_name, user)
    log('Adding SSH host key to known hosts for compute node at %s.' % host)
    with open(known_hosts(application_name, user), 'a') as out:
        out.write("{}\n".format(remote_key))
Add the given host key to the known hosts file. :param host: host name :type host: str :param application_name: Name of application eg nova-compute-something :type application_name: str :param user: The user that the ssh asserts are for. :type user: str
def green(cls):
    """Make the text foreground color green."""
    attributes = cls._get_text_attributes()
    # Clear the current foreground bits, then set green.
    attributes = (attributes & ~win32.FOREGROUND_MASK) | win32.FOREGROUND_GREEN
    cls._set_text_attributes(attributes)
Make the text foreground color green.
def guest_resize_cpus(self, userid, cpu_cnt):
    """Resize virtual cpus of guests.

    :param userid: (str) the userid of the guest to be resized
    :param cpu_cnt: (int) The number of virtual cpus that the guest
        should have defined in user directory after resize.  The value
        should be an integer between 1 and 64.
    """
    action = "resize guest '%s' to have '%i' virtual cpus" % (userid, cpu_cnt)
    LOG.info("Begin to %s" % action)
    # Any SDK base error raised inside is logged against `action` and
    # re-raised by the context manager.
    with zvmutils.log_and_reraise_sdkbase_error(action):
        self._vmops.resize_cpus(userid, cpu_cnt)
    LOG.info("%s successfully." % action)
Resize virtual cpus of guests. :param userid: (str) the userid of the guest to be resized :param cpu_cnt: (int) The number of virtual cpus that the guest should have defined in user directory after resize. The value should be an integer between 1 and 64.
def get_error_info(self) -> Tuple[Optional[str], Optional[str]]:
    """Return receiver's error tag and error message if present."""
    tag_node = self.find1("error-app-tag")
    msg_node = self.find1("error-message")
    tag = tag_node.argument if tag_node else None
    msg = msg_node.argument if msg_node else None
    return (tag, msg)
Return receiver's error tag and error message if present.
def auto_param_specs(self):
    """Yield parameter specs of the sub-study class that are not
    explicitly provided in the name map.
    """
    for spec in self.study_class.parameter_specs():
        if spec.name in self._name_map:
            continue
        yield spec
Parameter specs in the sub-study class that are not explicitly provided in the name map.
def handle_request(self):
    """Handle one request, possibly blocking.

    Respects self.timeout: waits on the listening socket for at most
    the smaller of the socket timeout and self.timeout, invoking
    handle_timeout() when nothing arrives in time.
    """
    # Effective wait: socket timeout capped by the server timeout.
    timeout = self.socket.gettimeout()
    if timeout is None:
        timeout = self.timeout
    elif self.timeout is not None:
        timeout = min(timeout, self.timeout)
    # Wait (up to `timeout`) for the socket to become readable.
    fd_sets = select.select([self], [], [], timeout)
    if not fd_sets[0]:
        # Nothing readable within the window: report the timeout.
        self.handle_timeout()
        return
    self._handle_request_noblock()
Handle one request, possibly blocking. Respects self.timeout.
def create_prototype(sample_dimension, parameter_kind_base='user',
                     parameter_kind_options=None,
                     state_stay_probabilities=None):
    """Create a prototype HTK model file using a feature file.

    :param sample_dimension: size of the feature vector
    :param parameter_kind_base: base name of the HTK parameter kind
    :param parameter_kind_options: qualifiers for the parameter kind
        (defaults to no options)
    :param state_stay_probabilities: per-state self-transition
        probabilities; their count sets the number of emitting states
        (defaults to [0.6, 0.6, 0.7])
    """
    # BUG FIX: the defaults were mutable list literals shared across
    # calls; use None sentinels instead.
    if parameter_kind_options is None:
        parameter_kind_options = []
    if state_stay_probabilities is None:
        state_stay_probabilities = [0.6, 0.6, 0.7]
    parameter_kind = create_parameter_kind(base=parameter_kind_base,
                                           options=parameter_kind_options)
    transition = create_transition(state_stay_probabilities)
    # One zero-mean, unit-variance GMM state per transition entry.
    states = [create_gmm(np.zeros(sample_dimension),
                         np.ones(sample_dimension),
                         weights=None, gconsts=None)
              for _ in range(len(state_stay_probabilities))]
    hmms = [create_hmm(states, transition)]
    macros = [create_options(vector_size=sample_dimension,
                             parameter_kind=parameter_kind)]
    return create_model(macros, hmms)
Create a prototype HTK model file using a feature file.
def has_value(key):
    """Determine whether a (possibly nested) key exists in the grains
    dictionary, e.g. ``pkg:apache`` for ``{'pkg': {'apache': 'httpd'}}``.

    CLI Example:

    .. code-block:: bash

        salt '*' grains.has_value pkg:apache
    """
    # KeyError (the class itself) is used as a can't-collide sentinel.
    missing = KeyError
    found = salt.utils.data.traverse_dict_and_list(__grains__, key, missing)
    return found is not missing
Determine whether a key exists in the grains dictionary. Given a grains dictionary that contains the following structure:: {'pkg': {'apache': 'httpd'}} One would determine if the apache key in the pkg dict exists by:: pkg:apache CLI Example: .. code-block:: bash salt '*' grains.has_value pkg:apache
def merkleroot(merkletree: 'MerkleTreeState') -> Locksroot:
    """Return the root element of the merkle tree."""
    layers = merkletree.layers
    assert layers, 'the merkle tree layers are empty'
    root_layer = layers[MERKLEROOT]
    assert root_layer, 'the root layer is empty'
    return Locksroot(root_layer[0])
Return the root element of the merkle tree.
def set_child_value(
        self, sensor_id, child_id, value_type, value, **kwargs):
    """Add a command to set a sensor value, to the queue.

    A queued command will be sent to the sensor when the gateway thread
    has sent all previously queued commands.

    If the sensor attribute ``new_state`` is truthy (smartsleep mode),
    the command is buffered on the sensor itself and only the internal
    sensor state is updated; it is pushed to the sensor when a
    smartsleep message is received.
    """
    # Silently ignore unknown sensor/child combinations.
    if not self.is_sensor(sensor_id, child_id):
        return
    if self.sensors[sensor_id].new_state:
        # Smartsleep: update the buffered state only.
        # NOTE(review): **kwargs is not forwarded on this branch, unlike
        # the queued branch below -- confirm this is intentional.
        self.sensors[sensor_id].set_child_value(
            child_id, value_type, value,
            children=self.sensors[sensor_id].new_state)
    else:
        # Defer the actual send to the gateway job queue.
        self.add_job(partial(
            self.sensors[sensor_id].set_child_value, child_id,
            value_type, value, **kwargs))
Add a command to set a sensor value, to the queue. A queued command will be sent to the sensor when the gateway thread has sent all previously queued commands. If the sensor attribute new_state returns True, the command will be buffered in a queue on the sensor, and only the internal sensor state will be updated. When a smartsleep message is received, the internal state will be pushed to the sensor, via _handle_smartsleep method.
def peid_features(self, pefile_handle):
    """Return PEiD signature matches for the PE file, or ``[]`` when
    there are none."""
    matches = self.peid_sigs.match(pefile_handle)
    return matches or []
Get features from PEid signature database
def kick_chat_member(self, user_id):
    """Kick a user from a group or a supergroup.

    The bot must be an administrator in the group for this to work.

    :param int user_id: Unique identifier of the target user
    """
    payload = {"chat_id": self.id, "user_id": user_id}
    return self.bot.api_call("kickChatMember", **payload)
Use this method to kick a user from a group or a supergroup. The bot must be an administrator in the group for this to work. :param int user_id: Unique identifier of the target user
def match_country_name_to_its_code(country_name, city=''):
    """Try to match a country name with its two-letter ISO code.

    The city name helps disambiguate when ``country_name`` is "Korea".

    :param country_name: country name (any case, dots tolerated)
    :param city: city name, consulted only for "KOREA"
    :return: ISO code string, or None when no match is found
    """
    if not country_name:
        return None
    country_name = country_name.upper().replace('.', '').strip()
    # Single lookup instead of the original get()-then-get-again.
    code = country_to_iso_code.get(country_name)
    if code:
        return code
    if country_name == 'KOREA':
        # Only the city can tell which Korea is meant; no further
        # fallback is attempted (matches the original control flow).
        return 'KR' if city.upper() in south_korean_cities else None
    # Alternative spellings: membership test replaces the original
    # O(n*m) nested equality loop.
    for c_code, spellings in countries_alternative_spellings.items():
        if country_name in spellings:
            return c_code
    return None
Try to match country name with its code. Name of the city helps when country_name is "Korea".
def list_items(self, url, container=None, last_obj=None, spr=False):
    """Build a (possibly very long) list of objects found in a container.

    NOTE: This could be millions of Objects.

    :param url: storage endpoint URL passed to the request helpers
    :param container: name of the container to list; when falsy the
        listing runs against the base URI returned by
        ``_return_base_data`` -- presumably the account level; confirm.
    :param last_obj: marker to resume the listing from
    :param spr: "single page return" -- limit the returned data to one page
    :type spr: ``bool``
    :return: list of objects, or ``[resp]`` when the container is missing
    """
    headers, container_uri = self._return_base_data(
        url=url,
        container=container
    )

    # Probe the container first so a missing container is reported
    # explicitly instead of yielding an empty listing.
    if container:
        resp = self._header_getter(uri=container_uri, headers=headers)
        if resp.status_code == 404:
            LOG.info('Container [ %s ] not found.', container)
            # The 404 response itself is handed back, wrapped in a list.
            return [resp]

    return self._list_getter(
        uri=container_uri,
        headers=headers,
        last_obj=last_obj,
        spr=spr
    )
Builds a long list of objects found in a container. NOTE: This could be millions of Objects. :param url: :param container: :param last_obj: :param spr: "single page return" Limit the returned data to one page :type spr: ``bool`` :return None | list:
def set_as_object(self, value):
    """Replace this array's contents with a single map element converted
    from ``value``.

    :param value: a new element or map value.
    """
    self.clear()
    as_map = MapConverter.to_map(value)
    self.append(as_map)
Sets a new value to map element :param value: a new element or map value.
def set(self, key, value):
    """Store ``value`` under ``key`` in this uWSGI cache.

    :param str|unicode key:
    :param int|str|unicode value:
    :rtype: bool
    """
    stored = uwsgi.cache_set(key, value, self.timeout, self.name)
    return stored
Sets the specified key value. :param str|unicode key: :param int|str|unicode value: :rtype: bool
def format_configdictfield_nodes(field_name, field, field_id, state, lineno):
    """Create documentation nodes for a ConfigDictField config field.

    Parameters
    ----------
    field_name : `str`
        Attribute name of the field on the config class.
    field : ``lsst.pex.config.ConfigDictField``
        The configuration field being documented.
    field_id : `str`
        Unique identifier for this field.
    state : ``docutils.statemachine.State``
        Usually the directive's ``state`` attribute.
    lineno : `int`
        Usually the directive's ``lineno`` attribute.

    Returns
    -------
    `list`
        Docutils nodes: the title, a definition list of field
        properties, and the description paragraph.
    """
    # "Value type" definition-list entry: a cross-reference to the
    # dict's value (item) type, resolved later by the config xref
    # machinery.
    value_item = nodes.definition_list_item()
    value_item += nodes.term(text="Value type")
    value_item_def = nodes.definition()
    value_item_def_para = nodes.paragraph()
    name = '.'.join((field.itemtype.__module__, field.itemtype.__name__))
    value_item_def_para += pending_config_xref(rawsource=name)
    value_item_def += value_item_def_para
    value_item += value_item_def

    # Property list order: default, field type, key type, value type.
    dl = nodes.definition_list()
    dl += create_default_item_node(field, state)
    dl += create_field_type_item_node(field, state)
    dl += create_keytype_item_node(field, state)
    dl += value_item

    desc_node = create_description_node(field, state)
    title = create_title_node(field_name, field, field_id, state, lineno)
    return [title, dl, desc_node]
Create documentation nodes for a ConfigDictField config field. Parameters ---------- field_name : `str` Name of the configuration field (the attribute name of on the config class). field : ``lsst.pex.config.ConfigDictField`` A configuration field. field_id : `str` Unique identifier for this field. This is used as the id and name of the section node. with a -section suffix state : ``docutils.statemachine.State`` Usually the directive's ``state`` attribute. lineno (`int`) Usually the directive's ``lineno`` attribute. Returns ------- `list` Docutils nodes documenting the ConfigDictField: the title node, a definition list of the field's properties, and the description node.
def get_filenames(dirname, full_path=False, match_regex=None, extension=None):
    """Yield filenames under ``dirname`` matching ``match_regex`` and/or
    having file extension ``extension``, optionally with the path prepended.

    Args:
        dirname (str): directory on disk where the files live
        full_path (bool): if truthy, yield ``os.path.join(dirname, fname)``;
            otherwise yield bare filenames
        match_regex (str): only yield files whose names match this pattern
        extension (str): only yield files with this extension (e.g. ".txt")

    Yields:
        str: next matching filename (sorted order)

    Raises:
        OSError: if ``dirname`` does not exist
    """
    if not os.path.exists(dirname):
        raise OSError('directory "{}" does not exist'.format(dirname))
    pattern = re.compile(match_regex) if match_regex else None
    for filename in sorted(os.listdir(dirname)):
        if extension and os.path.splitext(filename)[-1] != extension:
            continue
        if pattern and not pattern.search(filename):
            continue
        # BUG FIX: the original tested ``full_path is True`` (identity),
        # so truthy non-True values like 1 silently behaved as False.
        if full_path:
            yield os.path.join(dirname, filename)
        else:
            yield filename
Get all filenames under ``dirname`` that match ``match_regex`` or have file extension equal to ``extension``, optionally prepending the full path. Args: dirname (str): /path/to/dir on disk where files to read are saved full_path (bool): if False, return filenames without path; if True, return filenames with path, as ``os.path.join(dirname, fname)`` match_regex (str): include files whose names match this regex pattern extension (str): if files only of a certain type are wanted, specify the file extension (e.g. ".txt") Yields: str: next matching filename
def solution_to_array(solution, events, slots):
    """Convert a schedule from solution form to array form.

    Parameters
    ----------
    solution : list or tuple
        (event index, slot index) pairs for each scheduled item
    events : list or tuple
        event instances (only the count is used)
    slots : list or tuple
        slot instances (only the count is used)

    Returns
    -------
    np.array
        E-by-S int8 array with a 1 where event i is scheduled in slot j.
    """
    schedule = np.zeros((len(events), len(slots)), dtype=np.int8)
    for event_idx, slot_idx in solution:
        schedule[event_idx, slot_idx] = 1
    return schedule
Convert a schedule from solution to array form Parameters ---------- solution : list or tuple of tuples of event index and slot index for each scheduled item events : list or tuple of :py:class:`resources.Event` instances slots : list or tuple of :py:class:`resources.Slot` instances Returns ------- np.array An E by S array (X) where E is the number of events and S the number of slots. Xij is 1 if event i is scheduled in slot j and zero otherwise Example ------- For For 3 events, 7 slots and the solution:: [(0, 1), (1, 4), (2, 5)] The resulting array would be:: [[0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1, 0]]
def is_group(self):
    """True if the message was sent in a group or megagroup (broadcast
    channels are excluded)."""
    # Lazily cache the chat's broadcast flag the first time it is needed.
    if self._broadcast is None and self.chat:
        self._broadcast = getattr(self.chat, 'broadcast', None)
    if self._broadcast:
        return False
    return isinstance(self._chat_peer, (types.PeerChat, types.PeerChannel))
True if the message was sent on a group or megagroup.
def omit(keys, from_, strict=False):
    """Return a subset of the given dictionary, omitting specified keys.

    :param keys: Iterable of keys to exclude
    :param strict: Whether ``keys`` are required to exist in the dictionary

    :return: Dictionary filtered by omitting ``keys``
    :raise KeyError: If ``strict`` is True and one of ``keys`` is missing
    """
    ensure_iterable(keys)
    ensure_mapping(from_)

    retained = set(iterkeys(from_))
    if strict:
        # Raises KeyError when one of ``keys`` is absent.
        remove_subset(retained, keys)
    else:
        retained -= set(keys)
    return from_.__class__((k, from_[k]) for k in retained)
Returns a subset of given dictionary, omitting specified keys. :param keys: Iterable of keys to exclude :param strict: Whether ``keys`` are required to exist in the dictionary :return: Dictionary filtered by omitting ``keys`` :raise KeyError: If ``strict`` is True and one of ``keys`` is not found in the dictionary .. versionadded:: 0.0.2
def _debug_line(linenum: int, line: str, extramsg: str = "") -> None:
    """Write a debugging report for a single line.

    :param linenum: line number being reported
    :param line: raw line content (logged with ``repr``)
    :param extramsg: optional prefix prepended to the message
    """
    # NOTE(review): the positional {}-placeholders suggest ``log`` is a
    # brace-style adapter, not a stdlib %-formatting logger -- confirm.
    log.critical("{}Line {}: {!r}", extramsg, linenum, line)
Writes a debugging report on a line.
def adjust_ether (self, ip=None, ether=None):
    """Explicitly fix up an associated Ethernet header.

    Rewrites the Ethernet destination MAC as the IPv4-multicast-mapped
    address (01:00:5e + low 23 bits of the destination IP).

    :param ip: packet with an IP layer (its ``dst`` is read)
    :param ether: packet with an Ether layer (its ``dst`` is written)
    :return: True when the header was adjusted, False otherwise
    """
    # IDIOM FIX: compare against None with ``is not``, not ``!=``.
    if ip is not None and ip.haslayer(IP) and \
            ether is not None and ether.haslayer(Ether):
        iplong = atol(ip.dst)
        # Map the low 23 bits of the IP into the multicast MAC range.
        ether.dst = "01:00:5e:%02x:%02x:%02x" % (
            (iplong >> 16) & 0x7F, (iplong >> 8) & 0xFF, iplong & 0xFF)
        return True
    else:
        return False
Called to explicitely fixup an associated Ethernet header The function adjusts the ethernet header destination MAC address based on the destination IP address.
def set_itunes_author_name(self):
    """Parse the author name from the itunes tags and store it on the
    instance; falls back to None when the tag (or the soup) is missing."""
    try:
        author_tag = self.soup.find('itunes:author')
        self.itunes_author_name = author_tag.string
    except AttributeError:
        # Either soup is None or the tag was not found.
        self.itunes_author_name = None
Parses author name from itunes tags and sets value
def sample_zip(items_list, num_samples, allow_overflow=False, per_bin=1):
    """Sample one item from each sublist per round and bin the rounds
    into ``num_samples`` bins.

    If all sublists are of equal size this is equivalent to a zip;
    otherwise consecutive bins hold monotonically fewer elements.

    Args:
        items_list (list): list of lists to sample from
        num_samples (int): number of bins to fill
        allow_overflow (bool): when True, leftover samples are returned
            instead of raising
        per_bin (int): number of sampling rounds placed into each bin

    Returns:
        list, or (list, list) when ``allow_overflow`` is True --
        (samples_list, overflow_samples)

    Raises:
        AssertionError: when data remains and ``allow_overflow`` is False
    """
    # Prealloc a list of lists
    samples_list = [[] for _ in range(num_samples)]
    # Sample the ix-th value from every list
    samples_iter = zip_longest(*items_list)
    sx = 0
    for ix, samples_ in zip(range(num_samples), samples_iter):
        samples = filter_Nones(samples_)
        samples_list[sx].extend(samples)
        # Put per_bin from each sublist into a sample
        if (ix + 1) % per_bin == 0:
            sx += 1
    # Check for overflow
    if allow_overflow:
        overflow_samples = flatten([filter_Nones(samples_)
                                    for samples_ in samples_iter])
        return samples_list, overflow_samples
    else:
        try:
            # BUG FIX: Python 3 iterators have no .next() method;
            # use the next() builtin instead.
            next(samples_iter)
        except StopIteration:
            pass
        else:
            raise AssertionError('Overflow occured')
        return samples_list
Helper for sampling Given a list of lists, samples one item for each list and bins them into num_samples bins. If all sublists are of equal size this is equivalent to a zip, but otherwise consecutive bins will have monotonically fewer elements # Doctest doesn't work with assertionerror #util_list.sample_zip(items_list, 2) #... #AssertionError: Overflow occured Args: items_list (list): num_samples (?): allow_overflow (bool): per_bin (int): Returns: tuple : (samples_list, overflow_samples) Examples: >>> # DISABLE_DOCTEST >>> from utool import util_list >>> items_list = [[1, 2, 3, 4, 0], [5, 6, 7], [], [8, 9], [10]] >>> util_list.sample_zip(items_list, 5) ... [[1, 5, 8, 10], [2, 6, 9], [3, 7], [4], [0]] >>> util_list.sample_zip(items_list, 2, allow_overflow=True) ... ([[1, 5, 8, 10], [2, 6, 9]], [3, 7, 4]) >>> util_list.sample_zip(items_list, 4, allow_overflow=True, per_bin=2) ... ([[1, 5, 8, 10, 2, 6, 9], [3, 7, 4], [], []], [0])
def cleanSystem(cls):
    """Remove all downloaded, localized resources.

    Deletes the resource root directory from disk and drops the root-dir
    environment variable plus every per-resource environment variable.
    """
    # pop() both reads and removes the env entry in one step.
    resource_root = os.environ.pop(cls.rootDirPathEnvName)
    shutil.rmtree(resource_root)
    for env_name in list(os.environ):
        if env_name.startswith(cls.resourceEnvNamePrefix):
            os.environ.pop(env_name)
Removes all downloaded, localized resources
def _init_file_logger(logger, level, log_path, log_size, log_count):
    """Attach a RotatingFileHandler at ``level`` to ``logger``.

    A logger gets at most one rotating file handler per level; unknown
    levels fall back to DEBUG.
    """
    valid_levels = (logging.NOTSET, logging.DEBUG, logging.INFO,
                    logging.WARNING, logging.ERROR, logging.CRITICAL)
    if level not in valid_levels:
        level = logging.DEBUG
    # Nothing to do when a rotating handler at this level already exists.
    if any(isinstance(h, logging.handlers.RotatingFileHandler)
           and h.level == level for h in logger.handlers):
        return
    handler = logging.handlers.RotatingFileHandler(
        log_path, maxBytes=log_size, backupCount=log_count)
    handler.setLevel(level)
    handler.setFormatter(_formatter)
    logger.addHandler(handler)
one logger only have one level RotatingFileHandler
def from_flag(cls, flag):
    """Find the relation implementation in the current charm, based on
    the name of an active flag.

    You should not use this method directly; use
    :func:`endpoint_from_flag` instead.
    """
    flag_value = _get_flag_value(flag)
    if flag_value is None:
        return None
    conversations = Conversation.load(flag_value['conversations'])
    return cls.from_name(flag_value['relation'], conversations)
Find relation implementation in the current charm, based on the name of an active flag. You should not use this method directly. Use :func:`endpoint_from_flag` instead.
def load_limits(self, config):
    """Load limits from the configuration file, if it exists.

    :param config: parsed configuration object exposing
        ``has_section`` / ``items`` / ``get_float_value`` / ``get_value``
    :return: False when ``config`` is not a usable configuration object,
        True otherwise
    """
    # Hard-coded default history size (28800 samples -- presumably one
    # day at a 3-second refresh; confirm against the history code).
    self._limits['history_size'] = 28800
    # Duck-typing check: anything without has_section is not a parsed
    # configuration object.
    if not hasattr(config, 'has_section'):
        return False
    if config.has_section('global'):
        self._limits['history_size'] = config.get_float_value('global', 'history_size', default=28800)
        logger.debug("Load configuration key: {} = {}".format('history_size', self._limits['history_size']))
    if config.has_section(self.plugin_name):
        for level, _ in config.items(self.plugin_name):
            # Limit keys are namespaced by plugin: "<plugin>_<level>".
            limit = '_'.join([self.plugin_name, level])
            try:
                # Numeric limit (e.g. a threshold) ...
                self._limits[limit] = config.get_float_value(self.plugin_name, level)
            except ValueError:
                # ... or a comma-separated list of strings.
                self._limits[limit] = config.get_value(self.plugin_name, level).split(",")
            logger.debug("Load limit: {} = {}".format(limit, self._limits[limit]))
    return True
Load limits from the configuration file, if it exists.
def export_db(filename=None, remote=False):
    """Export the database with ``pg_dump``.

    Make sure that you have this in your ``~/.pgpass`` file::

        localhost:5433:*:<db_role>:<password>

    Also make sure that the file has ``chmod 0600 .pgpass``.

    Usage::

        fab export_db
        fab export_db:filename=foobar.dump

    :param filename: dump file name; defaults to
        ``settings.DB_DUMP_FILENAME``
    :param remote: when True, prefix the dump path with the server's
        DB backup directory
    """
    local_machine()
    if not filename:
        filename = settings.DB_DUMP_FILENAME
    if remote:
        backup_dir = settings.FAB_SETTING('SERVER_DB_BACKUP_DIR')
    else:
        backup_dir = ''
    # -c: clean (drop) before recreate; -Fc: custom format; -O: omit
    # ownership commands.
    local('pg_dump -c -Fc -O -U {0}{1} {2} -f {3}{4}'.format(
        env.db_role, HOST, env.db_name, backup_dir, filename))
Exports the database. Make sure that you have this in your ``~/.pgpass`` file: localhost:5433:*:<db_role>:<password> Also make sure that the file has ``chmod 0600 .pgpass``. Usage:: fab export_db fab export_db:filename=foobar.dump
def append_sample(self, **kwargs) -> 'Seeding':
    """Add a sample-seed induction step.

    Kwargs can have ``number_edges`` or ``number_seed_nodes``; any key
    supplied here overrides the randomly generated one.

    :returns: self for fluid API
    """
    payload = {'seed': random.randint(0, 1000000)}
    payload.update(kwargs)
    return self._append_seed(SEED_TYPE_SAMPLE, payload)
Add seed induction methods. Kwargs can have ``number_edges`` or ``number_seed_nodes``. :returns: self for fluid API
def create(self, name):
    """Create a new empty migration file and log its name.

    :param name: human-readable migration name, passed to
        ``get_new_filename`` to build the file name
    """
    # FIX: exists()/makedirs() was a TOCTOU race; exist_ok avoids it.
    os.makedirs(self.directory, exist_ok=True)
    filename = self.get_new_filename(name)
    with open(os.path.join(self.directory, filename), 'w') as fp:
        # Empty up/down hooks for the new migration.
        fp.write("def up(db): pass\n\n\n")
        fp.write("def down(db): pass\n")
    logger.info(filename)
Create a new empty migration.
def checksum(data, start=0, skip_word=None):
    """Calculate the standard internet checksum over ``data``.

    :param data: bytes to checksum; an odd trailing byte is zero-padded
    :param start: initial accumulator value
    :param skip_word: optional word offset to treat as zero -- useful
        when verifying received data that already embeds its checksum
    :return: 16-bit checksum
    """
    odd_length = len(data) % 2 != 0
    words = array.array('H', data[:-1] if odd_length else data)
    for index, word in enumerate(words):
        # When skip_word is None this comparison never matches, so the
        # two original loops collapse into one.
        if index == skip_word:
            continue
        start += word
    if odd_length:
        # Fold in the final odd byte, padded with a zero byte.
        start += struct.unpack('H', data[-1:] + b'\x00')[0]
    # Fold the carries back into 16 bits.
    start = (start >> 16) + (start & 0xffff)
    start += (start >> 16)
    return ntohs(~start & 0xffff)
Calculate standard internet checksum over data starting at start'th byte skip_word: If specified, it's the word offset of a word in data to "skip" (as if it were zero). The purpose is when data is received data which contains a computed checksum that you are trying to verify -- you want to skip that word since it was zero when the checksum was initially calculated.
def __get_formulas(self):
    """Return all formulas in this cell range, flattened row-major into
    a tuple. Cells holding actual formulas start with an equal sign, but
    every cell value is returned."""
    rows = self._get_target().getFormulaArray()
    return tuple(cell for row in rows for cell in row)
Gets formulas in this cell range as a tuple. If cells contain actual formulas then the returned values start with an equal sign but all values are returned.
def make_template_name(self, model_type, sourcekey):
    """Make the name of a template file for a particular component.

    Parameters
    ----------
    model_type : str
        Type of model to use for this component
    sourcekey : str
        Key identifying this component

    Returns the filename, or None if the component does not require a
    template file. Raises ValueError for an unknown model type.
    """
    fmt_args = dict(self.__dict__)
    fmt_args['sourcekey'] = sourcekey
    if model_type == 'IsoSource':
        return self._name_factory.spectral_template(**fmt_args)
    if model_type in ('MapCubeSource', 'SpatialMap'):
        return self._name_factory.diffuse_template(**fmt_args)
    raise ValueError("Unexpected model_type %s" % model_type)
Make the name of a template file for particular component Parameters ---------- model_type : str Type of model to use for this component sourcekey : str Key to identify this component Returns filename or None if component does not require a template file
def _make_ntgrid(grid):
    """Build a namedtuple-of-namedtuples from a 2-D grid.

    The first row holds column headers, the first column holds row
    headers (both de-spaced); the result is indexed as
    ``result.<row_name>.<col_name>``.
    """
    col_names = [_nospace(header) for header in grid[0][1:]]
    row_names = [_nospace(row[0]) for row in grid[1:]]
    RowTuple = collections.namedtuple('ntrow', ' '.join(col_names))
    ColTuple = collections.namedtuple('ntcol', ' '.join(row_names))
    rows = [RowTuple(**dict(zip(col_names, row[1:]))) for row in grid[1:]]
    return ColTuple(**dict(zip(row_names, rows)))
make a named tuple grid [["", "a b", "b c", "c d"], ["x y", 1, 2, 3 ], ["y z", 4, 5, 6 ], ["z z", 7, 8, 9 ],] will return ntcol(x_y=ntrow(a_b=1, b_c=2, c_d=3), y_z=ntrow(a_b=4, b_c=5, c_d=6), z_z=ntrow(a_b=7, b_c=8, c_d=9))