code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def is_zero(self):
    """Return True when every element of ``self.matrix`` is zero."""
    for entry in self.matrix.ravel():
        try:
            # Symbolic elements expose ``is_zero``; a falsy value
            # (False or None/"unknown") means we cannot call it zero.
            if not entry.is_zero:
                return False
        except AttributeError:
            # Plain numeric element: compare against 0 directly.
            if not entry == 0:
                return False
    return True
Are all elements of the matrix zero?
def load_template(self, path):
    """Resolve *path* against the configured template folders and compile it."""
    if not path.startswith('/'):
        # Relative path: probe each configured folder in order.
        for folder in self.options['template_folders']:
            candidate = op.join(folder, path)
            if op.exists(candidate):
                path = candidate
                break
        else:
            raise JadeException('Template doesnt exist: %s' % path)
    with open(path, 'rb') as f:
        source = f.read().decode(self.options['encoding'])
    return ExtendCompiler(
        pyjade.parser.Parser(source).parse(),
        pretty=self.options['pretty'],
        env=self,
        compileDebug=True,
    )
Load and compile a template.
def make_scrape_request(session, url, mode='get', data=None):
    """Fetch *url* via *session* and return the parsed BeautifulSoup document.

    Raises VooblyError on connection failure, missing login, or 404-ish pages.
    """
    try:
        resp = session.request(mode, url, data=data)
    except RequestException:
        raise VooblyError('failed to connect')
    if SCRAPE_FETCH_ERROR in resp.text:
        raise VooblyError('not logged in')
    if resp.status_code != 200 or SCRAPE_PAGE_NOT_FOUND in resp.text:
        raise VooblyError('page not found')
    return bs4.BeautifulSoup(resp.text, features='lxml')
Make a request to URL.
def getName(self, innerFlag=False):
    """Return the name of the currently chosen component.

    With ``innerFlag`` set, recurse through nested Choice values.
    Raises PyAsn1Error when no component has been chosen.
    """
    if self._currentIdx is None:
        raise error.PyAsn1Error('Component not chosen')
    if innerFlag:
        inner = self._componentValues[self._currentIdx]
        if isinstance(inner, Choice):
            return inner.getName(innerFlag)
    return self.componentType.getNameByPosition(self._currentIdx)
Return the name of currently assigned component of the |ASN.1| object. Returns ------- : :py:class:`str` |ASN.1| component name
def filter_belief(stmts_in, belief_cutoff, **kwargs):
    """Filter statements to those with belief above *belief_cutoff*.

    Surviving statements have their supports/supported_by links pruned to
    statements that themselves meet the cutoff.  ``save`` kwarg dumps the
    result to a pickle file.
    """
    dump_pkl = kwargs.get('save')
    logger.info('Filtering %d statements to above %f belief' %
                (len(stmts_in), belief_cutoff))
    stmts_out = []
    for stmt in stmts_in:
        if stmt.belief < belief_cutoff:
            continue
        stmts_out.append(stmt)
        stmt.supports = [s for s in stmt.supports
                         if s.belief >= belief_cutoff]
        stmt.supported_by = [s for s in stmt.supported_by
                             if s.belief >= belief_cutoff]
    logger.info('%d statements after filter...' % len(stmts_out))
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
Filter to statements with belief above a given cutoff. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to filter. belief_cutoff : float Only statements with belief above the belief_cutoff will be returned. Here 0 < belief_cutoff < 1. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of filtered statements.
def get(search="unsigned"):
    """Return the file names of all Nagios plugins found on disk.

    NOTE(review): the *search* argument is currently unused — confirm
    whether filtering by it was intended.
    """
    plugins = []
    for _root, _dirs, files in os.walk('/usr/lib/nagios/plugins'):
        plugins.extend(files)
    return plugins
List all available plugins
def _draw_binary(self): self._draw_pr_curve(self.recall_, self.precision_, label="binary PR curve") self._draw_ap_score(self.score_)
Draw the precision-recall curves in the binary case
def range(cls, collection, attribute, left, right, closed, index_id, skip=None, limit=None):
    """Run a skip-list range query over *attribute* in [left, right].

    ``closed`` controls whether the right bound is included; ``skip`` is
    applied before ``limit``.  Returns the constructed document list query.
    """
    return cls._construct_query(
        name='range',
        collection=collection,
        multiple=True,
        index=index_id,
        attribute=attribute,
        left=left,
        right=right,
        closed=closed,
        skip=skip,
        limit=limit,
    )
This will find all documents within a given range. In order to execute a range query, a skip-list index on the queried attribute must be present. :param collection Collection instance :param attribute The attribute path to check :param left The lower bound :param right The upper bound :param closed If true, use interval including left and right, otherwise exclude right, but include left :param index_id ID of the index which should be used for the query :param skip The number of documents to skip in the query :param limit The maximal amount of documents to return. The skip is applied before the limit restriction. :returns Document list
def calc_dist(lamost_point, training_points, coeffs):
    """Mean coefficient-weighted distance from *lamost_point* to its
    10 nearest training points."""
    sq_diff = (training_points - lamost_point) ** 2
    dists = np.sqrt(np.sum(sq_diff * coeffs, axis=1))
    nearest_ten = np.sort(dists)[:10]
    return np.mean(nearest_ten)
avg dist from one lamost point to nearest 10 training points
def p_debug(self, message):
    """Format and print an indented debug message plus the next-input preview."""
    indent = self._debug_indent * " "
    preview = repr(self.p_suffix(10))
    print("{}{} `{}`".format(indent, message, preview))
Format and print debug messages
def attention_lm_moe_24b_diet():
    """Hyperparameters for an unnecessarily large (24B params) diet model."""
    hp = attention_lm_moe_large_diet()
    hp.moe_hidden_sizes = "12288"
    hp.moe_num_experts = 1024
    hp.batch_size = 4096
    return hp
Unnecessarily large model with 24B params - because we can.
def convex_conj(self):
    """Convex conjugate of the linearly (and constant) perturbed functional.

    Only the non-quadratic case has a closed form here: the conjugate of
    ``f + <y, .>`` is ``f*`` translated by ``y``, minus any constant term.
    Otherwise defer to the generic implementation.
    """
    if self.quadratic_coeff != 0:
        return super(FunctionalQuadraticPerturb, self).convex_conj
    conj = self.functional.convex_conj.translated(self.linear_term)
    if self.constant != 0:
        conj = conj - self.constant
    return conj
r"""Convex conjugate functional of the functional. Notes ----- Given a functional :math:`f`, the convex conjugate of a linearly perturbed version :math:`f(x) + <y, x>` is given by a translation of the convex conjugate of :math:`f`: .. math:: (f + \langle y, \cdot \rangle)^* (x^*) = f^*(x^* - y). For reference on the identity used, see `[KP2015]`_. Moreover, the convex conjugate of :math:`f + c` is by definition .. math:: (f + c)^* (x^*) = f^*(x^*) - c. References ---------- [KP2015] Komodakis, N, and Pesquet, J-C. *Playing with Duality: An overview of recent primal-dual approaches for solving large-scale optimization problems*. IEEE Signal Processing Magazine, 32.6 (2015), pp 31--54. .. _[KP2015]: https://arxiv.org/abs/1406.5429
def remove_custom_css(destdir, resource=PKGNAME):
    """Strip this kernel's CSS frame from custom.css; return True if found."""
    if not os.path.isdir(destdir):
        return False
    custom = os.path.join(destdir, 'custom.css')
    prefix = css_frame_prefix(resource)
    copying = True
    found = False
    # Rewrite custom.css line-by-line, dropping the START..END frame
    # (the marker lines themselves are never copied).
    with io.open(custom + '-new', 'wt') as fout:
        with io.open(custom) as fin:
            for line in fin:
                if line.startswith(prefix + 'START'):
                    copying = False
                    found = True
                elif line.startswith(prefix + 'END'):
                    copying = True
                elif copying:
                    fout.write(line)
    if found:
        os.rename(custom + '-new', custom)
    else:
        os.unlink(custom + '-new')
    return found
Remove the kernel CSS from custom.css
def restart(self, all=False):
    """Restart this process, or every process of this type when ``all``."""
    payload = {'type': self.type} if all else {'ps': self.process}
    resp = self._h._http_resource(
        method='POST',
        resource=('apps', self.app.name, 'ps', 'restart'),
        data=payload,
    )
    resp.raise_for_status()
Restarts the given process.
def make_tarball(src_dir):
    """Create ``<src_dir>.tar.gz`` from a source directory and return its path.

    The archive is skipped (but the path still returned) in dry-run mode.

    :param src_dir: source directory
    :raises TypeError: if src_dir is not str
    """
    # isinstance is the idiomatic check (was ``type(src_dir) != str``),
    # and also accepts str subclasses.
    if not isinstance(src_dir, str):
        raise TypeError('src_dir must be str')
    output_file = src_dir + ".tar.gz"
    log.msg("Wrapping tarball '{out}' ...".format(out=output_file))
    if not _dry_run:
        with tarfile.open(output_file, "w:gz") as tar:
            tar.add(src_dir, arcname=os.path.basename(src_dir))
    return output_file
Make gzipped tarball from a source directory :param src_dir: source directory :raises TypeError: if src_dir is not str
def get_sentence_break_property(value, is_bytes=False):
    """Look up the `SENTENCE BREAK` property table entry for *value*."""
    table = unidata.ascii_sentence_break if is_bytes else unidata.unicode_sentence_break
    alias = unidata.unicode_alias['sentencebreak']
    if value.startswith('^'):
        # Negated form: resolve the alias of the name after '^'.
        name = value[1:]
        key = '^' + alias.get(name, name)
    else:
        key = alias.get(value, value)
    return table[key]
Get `SENTENCE BREAK` property.
def show_function_centric_wizard(self):
    """Show the function-centric wizard, creating it on first use."""
    from safe.gui.tools.wizard.wizard_dialog import WizardDialog
    # Already open: don't create or raise a second dialog.
    if self.wizard and self.wizard.isVisible():
        return
    if not self.wizard:
        self.wizard = WizardDialog(
            self.iface.mainWindow(), self.iface, self.dock_widget)
    self.wizard.set_function_centric_mode()
    self.wizard.show()
Show the function centric wizard.
def get_system(self, identity):
    """Return the HPESystem resource object for *identity*."""
    return system.HPESystem(
        self._conn, identity, redfish_version=self.redfish_version)
Given the identity return a HPESystem object :param identity: The identity of the System resource :returns: The System object
def get_android_id(self) -> str:
    """Return the device's secure ``android_id`` setting via adb."""
    output, _err = self._execute(
        '-s', self.device_sn, 'shell',
        'settings', 'get', 'secure', 'android_id')
    return output.strip()
Show Android ID.
def parse(self, text, layers=None):
    """Parse *text* via the NLU endpoint and return the JSON response.

    :param text: text to parse.
    :param layers: optional — a single layer name or an iterable of layer
        names; only these fields are requested.
    :returns: the parsed response as a JSON object.
    """
    # ``collections.Iterable`` was removed in Python 3.10; the abstract
    # base classes live in ``collections.abc``.
    from collections.abc import Iterable
    params = {
        "text": text,
        "key": self.key,
    }
    if layers is not None:
        if isinstance(layers, six.string_types):
            params["layers"] = layers
        elif isinstance(layers, Iterable):
            params["layers"] = ",".join(layers)
    req = requests.get(self.NLU_URL, params=params)
    return req.json()
Parsing passed text to json. Args: text: Text to parse. layers (optional): Special fields. Only one string or iterable object (e.g "Data", ("Data", "Fio")). Only these fields will be returned. Returns: The parsed text into a json object.
def gen_file_lines(path, mode='rUb', strip_eol=True, ascii=True, eol='\n'):
    """Generate a sequence of "documents" from the lines in a file.

    Arguments:
        path (file or str): path to a file or an open file obj ready to read
        mode (str): file mode to open a file in; legacy 'U'/'b' flags are
            normalized away (``'rUb'`` was never a valid Python 3 mode —
            'U' and 'b' conflict, and 'U' itself was removed in 3.11)
        strip_eol (bool): strip the trailing newline from each yielded line
        ascii (bool): decode bytes lines to ASCII text (the old ``str(line)``
            produced ``"b'...'"`` reprs on Python 3)
        eol (str): UNUSED, kept for backward compatibility
    """
    if isinstance(path, str):
        # Normalize the legacy mode string to a plain text-mode read.
        mode = mode.replace('U', '').replace('b', '') or 'r'
        path = open(path, mode)
    with path:
        for line in path:
            if ascii and isinstance(line, bytes):
                line = line.decode('ascii', 'replace')
            if strip_eol:
                line = line.rstrip('\n')
            yield line
Generate a sequence of "documents" from the lines in a file Arguments: path (file or str): path to a file or an open file_obj ready to be read mode (str): file mode to open a file in strip_eol (bool): whether to strip the EOL char from lines as they are read/generated/yielded ascii (bool): whether to use the stringify and to_ascii functions on each line eol (str): UNUSED character delimiting lines in the file TODO: Use `eol` to split lines (currently ignored because `file.readline` doesn't accept an EOL argument)
def raw_query(self, query, query_parameters=None):
    """Install a raw RQL query on this (fresh) query object and return self.

    :param str query: The rql query
    :param dict query_parameters: optional {key: value} query parameters
    """
    self.assert_no_raw_query()
    has_operations = (len(self._where_tokens) != 0
                      or len(self._select_tokens) != 0
                      or len(self._order_by_tokens) != 0
                      or len(self._group_by_tokens) != 0)
    if has_operations:
        raise InvalidOperationException(
            "You can only use raw_query on a new query, without applying any operations "
            "(such as where, select, order_by, group_by, etc)")
    if query_parameters:
        self.query_parameters = query_parameters
    self._query = query
    return self
Get all the documents that match the query @param str query: The rql query @param dict query_parameters: Add query parameters to the query {key : value}
def _message_received(self, message): with self.lock: self._state.receive_message(message) for callable in chain(self._on_message_received, self._on_message): callable(message)
Notify the observers about the received message.
def update(self, egg, permute=False, nperms=1000, parallel=False):
    """Fold a new egg's fingerprint into the running average (in place)."""
    self.n += 1
    next_weights = np.nanmean(
        _analyze_chunk(egg,
                       analysis=fingerprint_helper,
                       analysis_type='fingerprint',
                       pass_features=True,
                       permute=permute,
                       n_perms=nperms,
                       parallel=parallel).values, 0)
    if self.state is not None:
        # Running mean: re-expand the previous mean before folding in the
        # new weights.  NOTE(review): divides by n+1 after n was already
        # incremented above — confirm the intended weighting.
        prior_total = self.state * self.n
        self.state = np.nansum(np.array([prior_total, next_weights]),
                               axis=0) / (self.n + 1)
    else:
        self.state = next_weights
    self.history.append(next_weights)
In-place method that updates fingerprint with new data Parameters ---------- egg : quail.Egg Data to update fingerprint Returns ---------- None
def origin(self):
    """Return the notification's origin attribute (read in chrome context)."""
    with self.selenium.context(self.selenium.CONTEXT_CHROME):
        return self.root.get_attribute("origin")
Provide access to the notification origin. Returns: str: The notification origin.
def plot_loss(self, n_skip=10, n_skip_end=5):
    """Plot loss vs. iteration; outside a notebook, save both plot and losses."""
    if not in_ipynb():
        # Headless: render to the non-interactive agg backend.
        plt.switch_backend('agg')
    plt.plot(self.iterations[n_skip:-n_skip_end],
             self.losses[n_skip:-n_skip_end])
    if not in_ipynb():
        plt.savefig(os.path.join(self.save_path, 'loss_plot.png'))
        np.save(os.path.join(self.save_path, 'losses.npy'), self.losses[10:])
plots loss function as function of iterations. When used in Jupyternotebook, plot will be displayed in notebook. Else, plot will be displayed in console and both plot and loss are saved in save_path.
def get_key():
    """Read one key press and return it as a string.

    A 'key' is a single printable char, a control-key name, or the name of
    an extended key (multi-code sequence).
    """
    codes = _get_keycodes()
    if len(codes) != 1:
        return get_extended_key_name(codes)
    code = codes[0]
    # Printable characters start at 32; below that are control keys.
    return chr(code) if code >= 32 else control_key_name(code)
Get a key from the keyboard as a string A 'key' will be a single char, or the name of an extended key
def get_quoted_local_columns(self, platform):
    """Quoted names of the referencing (local) columns of this constraint.

    :param platform: the platform to use for quotation.
    :rtype: list
    """
    return [column.get_quoted_name(platform)
            for column in self._local_column_names.values()]
Returns the quoted representation of the referencing table column names the foreign key constraint is associated with. But only if they were defined with one or the referencing table column name is a keyword reserved by the platform. Otherwise the plain unquoted value as inserted is returned. :param platform: The platform to use for quotation. :type platform: Platform :rtype: list
def render_html(self, *args, **kwargs):
    """Render the template with absolute STATIC/MEDIA URLs and return the HTML."""
    base = '%s://%s' % (self.request.scheme, self.request.get_host())
    static_url = base + settings.STATIC_URL
    media_url = base + settings.MEDIA_URL
    # Absolute URLs so the rendered HTML works outside the request host.
    with override_settings(STATIC_URL=static_url, MEDIA_URL=media_url):
        template = loader.get_template(self.template_name)
        context = self.get_context_data(*args, **kwargs)
        return template.render(context)
Renders the template. :rtype: str
def serialize(self, data):
    """Write a sequence of hazard curves to ``self.dest`` as NRML XML."""
    with open(self.dest, 'wb') as fh:
        root = et.Element('nrml')
        self.add_hazard_curves(root, self.metadata, data)
        nrml.write(list(root), fh)
Write a sequence of hazard curves to the specified file. :param data: Iterable of hazard curve data. Each datum must be an object with the following attributes: * poes: A list of probability of exceedence values (floats). * location: An object representing the location of the curve; must have `x` and `y` to represent lon and lat, respectively.
def render(self, template_name: str, **ctx):
    """Render *template_name* with *ctx*.

    A bare name gets the configured template extension appended and, when
    no path separator is present, is resolved inside the controller's
    template folder.
    """
    if '.' not in template_name:
        ext = (self.Meta.template_file_extension
               or app.config.TEMPLATE_FILE_EXTENSION)
        template_name = f'{template_name}{ext}'
    if self.Meta.template_folder_name and os.sep not in template_name:
        template_name = os.path.join(self.Meta.template_folder_name,
                                     template_name)
    return render_template(template_name, **ctx)
Convenience method for rendering a template. :param template_name: The template's name. Can either be a full path, or a filename in the controller's template folder. :param ctx: Context variables to pass into the template.
def get_admins_from_django(homedir):
    """Get admin emails from Django settings.

    The original settings-file parsing relied on the Python 2 ``compiler``
    module (removed in Python 3) and was already short-circuited by an
    unconditional early return, so the unreachable legacy code has been
    dropped: the function always returns the default admin address.

    :param homedir: unused, kept for interface compatibility.
    """
    return ["root@localhost"]
Get admin's emails from django settings
def pos(self):
    """Current walker positions keyed by sampling parameter.

    Returns ``p0`` when the sampler hasn't been run yet.
    """
    raw = self._pos
    if raw is None:
        return self.p0
    return {param: raw[..., k]
            for k, param in enumerate(self.sampling_params)}
A dictionary of the current walker positions. If the sampler hasn't been run yet, returns p0.
def get_writable_metadata(self, file_format):
    """Return the metadata serialized as *file_format* ('json' or 'xml').

    :raises TypeError: for any other format.
    """
    if file_format == 'json':
        return self.json
    if file_format == 'xml':
        return self.xml
    raise TypeError(
        'The requested file type (%s) is not yet supported' % file_format)
Convert the metadata to a writable form. :param file_format: the needed format can be json or xml :type file_format: str :return: the dumped metadata :rtype: str
def cli(self):
    """Lazily-created command line interface (built on first access)."""
    interface = self._cli
    if interface is None:
        interface = self.create_interface()
        self._cli = interface
    return interface
Makes the interface or refreshes it
def get_rand_string(length=12, allowed_chars='0123456789abcdef'):
    """Return a securely generated random string (Django-derived helper).

    The default length of 12 hex chars yields a ~48-bit value.
    """
    if not using_sysrandom:
        # No OS-level entropy source: reseed the PRNG from volatile state
        # so repeated calls don't correlate.
        seed_material = "%s%s" % (random.getstate(), time.time())
        random.seed(hashlib.sha256(seed_material.encode('utf-8')).digest())
    return ''.join(random.choice(allowed_chars) for _ in range(length))
Returns a securely generated random string. Taken from the Django project The default length of 12 with the a-z, A-Z, 0-9 character set returns a 71-bit value. log_2((26+26+10)^12) =~ 71 bits
def _request(self, url, params=None):
    """GET *url* on the open session with the default origin headers.

    :param url: a url fragment to use in the creation of the master url
    :param params: optional query parameters (default: none).  The original
        default was a shared mutable dict (``params={}``) — a classic
        mutable-default-argument bug; ``None`` avoids cross-call mutation.
    """
    if params is None:
        params = {}
    return self._session.get(url=url, params=params, headers=DEFAULT_ORIGIN)
Makes a request using the currently open session. :param url: A url fragment to use in the creation of the master url
def focus_next(self, count=1):
    """Focus the pane *count* positions after the active one (wrapping)."""
    panes = self.panes
    if not panes:
        self.active_pane = None
        return
    idx = panes.index(self.active_pane)
    self.active_pane = panes[(idx + count) % len(panes)]
Focus the next pane.
def get(self, *args, **kwargs):
    """Get relationship details, including links and optional includes."""
    self.before_get(args, kwargs)
    (relationship_field, model_relationship_field,
     related_type_, related_id_field) = self._get_relationship_data()
    obj, data = self._data_layer.get_relationship(
        model_relationship_field, related_type_, related_id_field, kwargs)
    related_url = self.schema._declared_fields[relationship_field] \
        .get_related_url(obj)
    result = {'links': {'self': request.path, 'related': related_url},
              'data': data}
    qs = QSManager(request.args, self.schema)
    if qs.include:
        # Client asked for compound documents: serialize the includes.
        schema = compute_schema(self.schema, dict(), qs, qs.include)
        serialized_obj = schema.dump(obj)
        result['included'] = serialized_obj.data.get('included', dict())
    return self.after_get(result)
Get a relationship details
def SCISetStyling(self, line: int, col: int, numChar: int, style: bytearray):
    """Apply a Scintilla style to ``numChar`` characters starting at (line, col).

    Silently does nothing when the position is invalid.
    """
    if not self.isPositionValid(line, col):
        return
    pos = self.positionFromLineIndex(line, col)
    # 0xFF: the style mask covers all bits of the style byte.
    self.SendScintilla(self.SCI_STARTSTYLING, pos, 0xFF)
    self.SendScintilla(self.SCI_SETSTYLING, numChar, style)
Pythonic wrapper for the SCI_SETSTYLING command. For example, the following code applies style #3 to the first five characters in the second line of the widget: SCISetStyling((0, 1), 5, 3) |Args| * ``line`` (**int**): line number where to start styling. * ``col`` (**int**): column number where to start styling. * ``numChar`` (**int**): number of characters to style. * ``style`` (**int**): Scintilla style number. |Returns| **None** |Raises| * **QtmacsArgumentError** if at least one argument has an invalid type.
def browserify_libs(lib_dirs, output_file, babelify=False):
    """Build a compiler spec that browserifies *lib_dirs* into one JS file."""
    from .modules import browserify
    if not isinstance(lib_dirs, (list, tuple)):
        raise RuntimeError(
            'Browserify Libs compiler takes a list of library directories as input.')
    return {
        'dependencies_fn': browserify.browserify_deps_libs,
        'compiler_fn': browserify.browserify_compile_libs,
        'input': lib_dirs,
        'output': output_file,
        'kwargs': {'babelify': babelify},
    }
Browserify one or more library directories into a single javascript file. Generates source maps in debug mode. Minifies the output in release mode. The final directory name in each of lib_dirs is the library name for importing. Eg.:: lib_dirs = ['cordova_libs/jskit'] var MyClass = require('jskit/MyClass');
def stringc(text, color):
    """Wrap *text* in terminal color escape codes when colors are enabled."""
    if not has_colors:
        return text
    return "\033[" + codeCodes[color] + "m" + str(text) + "\033[0m"
Return a string with terminal colors.
def _get_jmx_data(self, instance, jmx_address, tags): response = self._rest_request_to_json( instance, jmx_address, self.JMX_PATH, {'qry': self.HDFS_DATANODE_BEAN_NAME}, tags=tags ) beans = response.get('beans', []) return beans
Get namenode beans data from JMX endpoint
def is_circumpolar(self, var):
    """True when *var*'s x-coordinate is 2-D (i.e. a circumpolar grid)."""
    xcoord = self.get_x(var)
    if xcoord is None:
        return False
    return xcoord.ndim == 2
Test if a variable is on a circumpolar grid Parameters ---------- %(CFDecoder.is_triangular.parameters)s Returns ------- %(CFDecoder.is_triangular.returns)s
def on_pumprequest(self, event):
    """(De)activate the connected pump according to the event's control value."""
    self.log("Updating pump status: ", event.controlvalue)
    self._set_digital_pin(self._pump_channel, event.controlvalue)
Activates or deactivates a connected pump. :param event:
def all_tamil(word_in):
    """True when every letter of the input word (str or letter list) is Tamil."""
    letters = word_in if isinstance(word_in, list) else get_letters(word_in)
    return all(letter in tamil_letters for letter in letters)
predicate checks if all letters of the input word are Tamil letters
def _itertuples(df): cols = [df.iloc[:, k] for k in range(len(df.columns))] return zip(df.index, *cols)
Custom implementation of ``DataFrame.itertuples`` that returns plain tuples instead of namedtuples. About 50% faster.
def _get_protocol_tuple(data: Dict[str, Any]) -> Tuple[str, List, Dict]: return data['function'], data.get('args', []), data.get('kwargs', {})
Convert a dictionary to a tuple.
def html_encode(path):
    """Return the URL-quoted form of *path* (Python 2/3 compatible).

    :param path: ``str``
    :return: ``str``
    """
    text = utils.ensure_string(path)
    if sys.version_info > (3, 2, 0):
        return urllib.parse.quote(text)
    # Python 2 fallback: quote lives at the urllib top level.
    return urllib.quote(text)
Return an HTML encoded Path. :param path: ``str`` :return: ``str``
def timer(fun, *a, **k):
    """Decorator that prints start/end/elapsed time of *fun* around each call."""
    @wraps(fun)
    def timed(*args, **kwargs):
        start = arrow.now()
        result = fun(*args, **kwargs)
        end = arrow.now()
        print('timer:fun: %s\n start:%s,end:%s, took [%s]' % (
            str(fun), str(start), str(end), str(end - start)))
        return result
    return timed
define a timer for a rule function for log and statistic purposes
def to_datetimeindex(self, unsafe=False):
    """Convert this CFTimeIndex to a ``pandas.DatetimeIndex``.

    Warns (RuntimeWarning) for non-standard calendars unless *unsafe*.
    """
    nptimes = cftime_to_nptime(self)
    calendar = infer_calendar_name(self)
    if not unsafe and calendar not in _STANDARD_CALENDARS:
        warnings.warn(
            'Converting a CFTimeIndex with dates from a non-standard '
            'calendar, {!r}, to a pandas.DatetimeIndex, which uses dates '
            'from the standard calendar. This may lead to subtle errors '
            'in operations that depend on the length of time between '
            'dates.'.format(calendar), RuntimeWarning, stacklevel=2)
    return pd.DatetimeIndex(nptimes)
If possible, convert this index to a pandas.DatetimeIndex. Parameters ---------- unsafe : bool Flag to turn off warning when converting from a CFTimeIndex with a non-standard calendar to a DatetimeIndex (default ``False``). Returns ------- pandas.DatetimeIndex Raises ------ ValueError If the CFTimeIndex contains dates that are not possible in the standard calendar or outside the pandas.Timestamp-valid range. Warns ----- RuntimeWarning If converting from a non-standard calendar to a DatetimeIndex. Warnings -------- Note that for non-standard calendars, this will change the calendar type of the index. In that case the result of this method should be used with caution. Examples -------- >>> import xarray as xr >>> times = xr.cftime_range('2000', periods=2, calendar='gregorian') >>> times CFTimeIndex([2000-01-01 00:00:00, 2000-01-02 00:00:00], dtype='object') >>> times.to_datetimeindex() DatetimeIndex(['2000-01-01', '2000-01-02'], dtype='datetime64[ns]', freq=None)
def pull(args):
    # %prog pull version partID unitigID — dump a unitig layout from tigStore.
    # NOTE: OptionParser consumes pull.__doc__ for its help text, so no
    # docstring is added here to keep runtime behavior unchanged.
    p = OptionParser(pull.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 3:
        sys.exit(not p.print_help())
    prefix = get_prefix()
    version, partID, unitigID = args
    s = ".".join(args)
    cmd = ("tigStore"
           + " -g ../{0}.gkpStore -t ../{0}.tigStore".format(prefix)
           + " {0} -up {1} -d layout -u {2}".format(version, partID, unitigID))
    unitigfile = "unitig" + s
    sh(cmd, outfile=unitigfile)
    return unitigfile
%prog pull version partID unitigID For example, `%prog pull 5 530` will pull the utg530 from partition 5 The layout is written to `unitig530`
def div(self, y):
    """Compute the divergence ``D @ y`` of an edge signal *y* (length Ne)."""
    signal = np.asanyarray(y)
    if signal.shape[0] != self.Ne:
        raise ValueError('First dimension must be the number of edges '
                         'G.Ne = {}, got {}.'.format(self.Ne, signal.shape))
    return self.D.dot(signal)
r"""Compute the divergence of a signal defined on the edges. The divergence :math:`z` of a signal :math:`y` is defined as .. math:: z = \operatorname{div}_\mathcal{G} y = D y, where :math:`D` is the differential operator :attr:`D`. The value of the divergence on the vertex :math:`v_i` is .. math:: z[i] = \sum_k D[i, k] y[k] = \sum_{\{k,j | e_k=(v_j, v_i) \in \mathcal{E}\}} \sqrt{\frac{W[j, i]}{2}} y[k] - \sum_{\{k,j | e_k=(v_i, v_j) \in \mathcal{E}\}} \sqrt{\frac{W[i, j]}{2}} y[k] for the combinatorial Laplacian, and .. math:: z[i] = \sum_k D[i, k] y[k] = \sum_{\{k,j | e_k=(v_j, v_i) \in \mathcal{E}\}} \sqrt{\frac{W[j, i]}{2 d[i]}} y[k] - \sum_{\{k,j | e_k=(v_i, v_j) \in \mathcal{E}\}} \sqrt{\frac{W[i, j]}{2 d[i]}} y[k] for the normalized Laplacian. For undirected graphs, only half the edges are kept and the :math:`1/\sqrt{2}` factor disappears from the above equations. See :meth:`compute_differential_operator` for details. Parameters ---------- y : array_like Signal of length :attr:`n_edges` living on the edges. Returns ------- z : ndarray Divergence signal of length :attr:`n_vertices` living on the vertices. See Also -------- compute_differential_operator grad : compute the gradient of a vertex signal Examples -------- Non-directed graph and combinatorial Laplacian: >>> graph = graphs.Path(4, directed=False, lap_type='combinatorial') >>> graph.compute_differential_operator() >>> graph.div([2, -2, 0]) array([-2., 4., -2., 0.]) Directed graph and combinatorial Laplacian: >>> graph = graphs.Path(4, directed=True, lap_type='combinatorial') >>> graph.compute_differential_operator() >>> graph.div([2, -2, 0]) array([-1.41421356, 2.82842712, -1.41421356, 0. ]) Non-directed graph and normalized Laplacian: >>> graph = graphs.Path(4, directed=False, lap_type='normalized') >>> graph.compute_differential_operator() >>> graph.div([2, -2, 0]) array([-2. , 2.82842712, -1.41421356, 0. 
]) Directed graph and normalized Laplacian: >>> graph = graphs.Path(4, directed=True, lap_type='normalized') >>> graph.compute_differential_operator() >>> graph.div([2, -2, 0]) array([-2. , 2.82842712, -1.41421356, 0. ])
def load_scheduler_plugins(self):
    """Populate the scheduler plugin registry from entry points (idempotent)."""
    if self.scheduler_plugins:
        # Already loaded; nothing to refresh.
        return
    for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.schedulers']['plugins']:
        cls = entry_point.load()
        self.scheduler_plugins[cls.__name__] = cls
        if cls.__name__ == self.active_scheduler:
            self.log.debug('Scheduler loaded: {} in module {}'.format(
                cls.__name__, cls.__module__))
        else:
            self.log.debug('Scheduler disabled: {} in module {}'.format(
                cls.__name__, cls.__module__))
Refresh the list of available schedulers Returns: `list` of :obj:`BaseScheduler`
def coherence_spectrogram(self, other, stride, fftlength=None, overlap=None,
                          window='hann', nproc=1):
    """Coherence spectrogram between this `TimeSeries` and *other*.

    Delegates to ``gwpy.spectrogram.coherence.from_timeseries``.
    """
    from ..spectrogram.coherence import from_timeseries
    return from_timeseries(self, other, stride, fftlength=fftlength,
                           overlap=overlap, window=window, nproc=nproc)
Calculate the coherence spectrogram between this `TimeSeries` and other. Parameters ---------- other : `TimeSeries` the second `TimeSeries` in this CSD calculation stride : `float` number of seconds in single PSD (column of spectrogram) fftlength : `float` number of seconds in single FFT overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats nproc : `int` number of parallel processes to use when calculating individual coherence spectra. Returns ------- spectrogram : `~gwpy.spectrogram.Spectrogram` time-frequency coherence spectrogram as generated from the input time-series.
def _state_connecting(self):
    """Start a connection attempt and wire up the state transitions."""

    def on_response(protocol):
        # First response received: either an abort was requested meanwhile,
        # or the stream is now live.
        self.makeConnection(protocol)
        if self._state == 'aborting':
            self._toState('disconnecting')
        else:
            self._toState('connected')

    def on_failure(failure):
        self._toState('error', failure)

    def on_entry(entry):
        if self.delegate:
            try:
                self.delegate(entry)
            except:
                # Delegate errors must not kill the stream; just log them.
                log.err()
        else:
            pass

    d = self.api(on_entry, self.args)
    d.addCallback(on_response)
    d.addErrback(on_failure)
A connection is being started. A succesful attempt results in the state C{'connected'} when the first response from Twitter has been received. Transitioning to the state C{'aborting'} will cause an immediate disconnect instead, by transitioning to C{'disconnecting'}. Errors will cause a transition to the C{'error'} state.
def explain_prediction_sklearn(estimator, doc, vec=None, top=None,
                               top_targets=None, target_names=None,
                               targets=None, feature_names=None,
                               feature_re=None, feature_filter=None,
                               vectorized=False):
    """Fallback explanation entry point for scikit-learn estimators."""
    return explain_prediction_sklearn_not_supported(estimator, doc)
Return an explanation of a scikit-learn estimator
def _conn(commit=False):
    """Yield a psycopg2 cursor; COMMIT on success when *commit*, else ROLLBACK.

    Connection settings come from ``queue.<virtualname>.<key>`` options,
    falling back to the salt defaults below.
    """
    defaults = {'host': 'localhost', 'user': 'salt', 'password': 'salt',
                'dbname': 'salt', 'port': 5432}
    conn_kwargs = {
        key: __opts__.get('queue.{0}.{1}'.format(__virtualname__, key), value)
        for key, value in defaults.items()
    }
    try:
        conn = psycopg2.connect(**conn_kwargs)
    except psycopg2.OperationalError as exc:
        raise SaltMasterError(
            'pgjsonb returner could not connect to database: {exc}'.format(exc=exc))
    cursor = conn.cursor()
    try:
        yield cursor
    except psycopg2.DatabaseError as err:
        sys.stderr.write(six.text_type(err.args))
        cursor.execute("ROLLBACK")
        raise err
    else:
        cursor.execute("COMMIT" if commit else "ROLLBACK")
    finally:
        conn.close()
Return an postgres cursor
def cli(env, identifier):
    """List virtual server credentials."""
    vsi = SoftLayer.VSManager(env.client)
    vs_id = helpers.resolve_id(vsi.resolve_ids, identifier, 'VS')
    instance = vsi.get_instance(vs_id)
    table = formatting.Table(['username', 'password'])
    for cred in instance['operatingSystem']['passwords']:
        table.add_row([cred['username'], cred['password']])
    env.fout(table)
List virtual server credentials.
def get_model_class(self):
    """Resolve the model class from attrs, URL kwargs, or the queryset.

    Returns None when no source yields a model.
    """
    model = getattr(self, 'model', None)
    if model:
        return model
    obj = getattr(self, 'object', None)
    if obj:
        return obj.__class__
    if 'app' in self.kwargs and 'model' in self.kwargs:
        return apps.get_model(self.kwargs.get('app'), self.kwargs.get('model'))
    if hasattr(self, 'get_queryset'):
        return self.get_queryset().model
    return None
Get model class
def _validate_json_for_regular_workflow(json_spec, args):
    """Validate destination/name fields of a regular (project-based) workflow."""
    validated = {}
    override_project_id, override_folder, override_workflow_name = \
        dxpy.executable_builder.get_parsed_destination(args.destination)
    validated['project'] = _get_destination_project(
        json_spec, args, override_project_id)
    validated['folder'] = _get_destination_folder(json_spec, override_folder)
    workflow_name = _get_workflow_name(json_spec, override_workflow_name)
    if workflow_name:
        validated['name'] = workflow_name
    else:
        print('Warning: workflow name is not specified')
    return validated
Validates fields used only for building a regular, project-based workflow.
def extend_dict_key_value(
        in_dict, keys, value, delimiter=DEFAULT_TARGET_DELIM,
        ordered_dict=False):
    """Ensure the nested key path *keys* exists in *in_dict* and extend the
    list found there with *value*; returns the in-place updated dict.

    :param dict in_dict: the dictionary to work with
    :param str keys: delimited string with one or more keys
    :param value: iterable to extend the nested list with
    :param str delimiter: delimiter used in *keys*
    :param bool ordered_dict: create OrderedDicts for missing keys
    :raises SaltInvocationError: if the target cannot be extended
    """
    parent, last_key = _dict_rpartition(
        in_dict, keys, delimiter=delimiter, ordered_dict=ordered_dict)
    if last_key not in parent or parent[last_key] is None:
        parent[last_key] = []
    try:
        parent[last_key].extend(value)
    except AttributeError:
        raise SaltInvocationError(
            'The last key contains a {}, which cannot extend.'
            ''.format(type(parent[last_key])))
    except TypeError:
        raise SaltInvocationError(
            'Cannot extend {} with a {}.'
            ''.format(type(parent[last_key]), type(value)))
    return in_dict
Ensures that in_dict contains the series of recursive keys defined in keys. Also extends the list, that is at the end of `in_dict` traversed with `keys`, with `value`. :param dict in_dict: The dictionary to work with :param str keys: The delimited string with one or more keys. :param any value: The value to extend the nested dict-key with. :param str delimiter: The delimiter to use in `keys`. Defaults to ':'. :param bool ordered_dict: Create OrderedDicts if keys are missing. Default: create regular dicts. :return dict: Though it updates in_dict in-place.
def two_way_portal(self, other, **stats):
    """Create and return a symmetrical (two-way) portal to *other*."""
    return self.character.new_portal(self, other, symmetrical=True, **stats)
Connect these nodes with a two-way portal and return it.
def readCovarianceMatrixFile(cfile, readCov=True, readEig=True):
    """Read a covariance matrix and/or its eigen-decomposition from disk.

    *cfile* is the basename; expects ``<cfile>.cov``, ``<cfile>.cov.eval``
    and ``<cfile>.cov.evec`` files as produced by the writer.
    """
    covFile = cfile + '.cov'
    evalFile = cfile + '.cov.eval'
    evecFile = cfile + '.cov.evec'
    result = {}
    if readCov:
        assert os.path.exists(covFile), '%s is missing.' % covFile
        result['K'] = SP.loadtxt(covFile)
    if readEig:
        assert os.path.exists(evalFile), '%s is missing.' % evalFile
        assert os.path.exists(evecFile), '%s is missing.' % evecFile
        result['eval'] = SP.loadtxt(evalFile)
        result['evec'] = SP.loadtxt(evecFile)
    return result
reading in similarity matrix cfile File containing the covariance matrix. The corresponding ID file must be specified in cfile.id)
def parse_args(args=None):
    """Parse command line arguments.

    Args:
        args: optional list of argument strings. Defaults to
            ``sys.argv[1:]`` when None. An explicit empty list now parses
            as "no arguments" instead of falling back to ``sys.argv``.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-w', '--window',
        default="pyqt5",
        choices=find_window_classes(),
        help='Name for the window type to use',
    )
    parser.add_argument(
        '-fs', '--fullscreen',
        action="store_true",
        help='Open the window in fullscreen mode',
    )
    parser.add_argument(
        '-vs', '--vsync',
        type=str2bool,
        default="1",
        help="Enable or disable vsync",
    )
    parser.add_argument(
        '-s', '--samples',
        type=int,
        default=4,
        help="Specify the desired number of samples to use for multisampling",
    )
    parser.add_argument(
        '-c', '--cursor',
        type=str2bool,
        default="true",
        help="Enable or disable displaying the mouse cursor",
    )
    # argparse already substitutes sys.argv[1:] when args is None.  The old
    # ``args or sys.argv[1:]`` expression wrongly treated an empty list as
    # "use sys.argv".
    return parser.parse_args(args)
Parse arguments from sys.argv
def copy(self):
    """Make a deep copy of self.

    Delegates to the parent (DataFrame-like) copy, then rebuilds an
    instance of the concrete subclass from the copied frame.

    Returns
    -------
    Ensemble : Ensemble
    """
    df = super(Ensemble, self).copy()
    return type(self).from_dataframe(df=df)
make a deep copy of self Returns ------- Ensemble : Ensemble
def _expected_condition_value_in_element_attribute(self, element_attribute_value):
    """Try to find the element and check that its attribute has the expected
    value, without raising if the element is not found.

    :param element_attribute_value: tuple of
        (element-or-locator, attribute name, expected value)
    :returns: the web element if its attribute equals the expected value,
        False otherwise
    """
    element, attribute, value = element_attribute_value
    web_element = self._expected_condition_find_element(element)
    try:
        # False when the element was not found or the attribute differs.
        return web_element if web_element and web_element.get_attribute(attribute) == value else False
    except StaleElementReferenceException:
        # Element went stale between lookup and attribute read: report
        # "not yet" so the caller's wait can retry.
        return False
Tries to find the element and checks that it contains the requested attribute with the expected value, but does not throw an exception if the element is not found :param element_attribute_value: Tuple with 3 items where: [0] element: PageElement or element locator as a tuple (locator_type, locator_value) to be found [1] attribute: element's attribute where to check its value [2] value: expected value for the element's attribute :returns: the web element if it contains the expected value for the requested attribute or False :rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
def horiz_rows(labels, data, normal_dat, args, colors):
    """Prepare the horizontal graph.

    Yields one ``(value, block_count, min_value, color)`` tuple per data
    point; the caller prints each row through the print_row function.
    Labels and value tails are printed here as a side effect (unless the
    graph is vertical).
    """
    val_min = find_min(data)
    for i in range(len(labels)):
        if args['no_labels']:
            label = ''
        else:
            # Pad every label to the width of the longest one.
            label = "{:<{x}}: ".format(labels[i], x=find_max_label_length(labels))
        values = data[i]
        num_blocks = normal_dat[i]
        for j in range(len(values)):
            if j > 0:
                # Only the first value of a multi-value row carries the
                # label; subsequent values get matching blank padding.
                len_label = len(label)
                label = ' ' * len_label
            tail = ' {}{}'.format(args['format'].format(values[j]), args['suffix'])
            if colors:
                color = colors[j]
            else:
                color = None
            if not args['vertical']:
                print(label, end="")
            yield (values[j], int(num_blocks[j]), val_min, color)
            if not args['vertical']:
                print(tail)
Prepare the horizontal graph. Each row is printed through the print_row function.
def formfield_for_foreignkey_helper(inline, *args, **kwargs):
    """Implementation for ``RelatedContentInline.formfield_for_foreignkey``.

    Takes the ``args``/``kwargs`` of a ``formfield_for_foreignkey`` call
    and returns them updated for the ``super`` call: when the field is
    ``related_type`` and the ``RELATED_TYPE_INITIAL_FILTER`` setting is
    configured, an ``initial`` value is injected.
    """
    db_field = args[0]
    if db_field.name != "related_type":
        # Only the related_type field gets special handling.
        return args, kwargs
    initial_filter = getattr(settings, RELATED_TYPE_INITIAL_FILTER, False)
    if "initial" not in kwargs and initial_filter:
        # Respect an explicitly supplied initial value; otherwise default
        # to the pk of the RelatedType matched by the configured filter.
        initial = RelatedType.objects.get(**initial_filter).pk
        kwargs["initial"] = initial
    return args, kwargs
The implementation for ``RelatedContentInline.formfield_for_foreignkey`` This takes the takes all of the ``args`` and ``kwargs`` from the call to ``formfield_for_foreignkey`` and operates on this. It returns the updated ``args`` and ``kwargs`` to be passed on to ``super``. This is solely an implementation detail as it's easier to test a function than to provide all of the expectations that the ``GenericTabularInline`` has.
def update_entitlement(owner, repo, identifier, name, token, show_tokens):
    """Update an entitlement in a repository.

    Only the fields that are not None (``name``, ``token``) are included
    in the partial-update request.

    :returns: the updated entitlement as a plain dict
    """
    client = get_entitlements_api()
    data = {}
    if name is not None:
        data["name"] = name
    if token is not None:
        data["token"] = token
    with catch_raise_api_exception():
        data, _, headers = client.entitlements_partial_update_with_http_info(
            owner=owner,
            repo=repo,
            identifier=identifier,
            data=data,
            show_tokens=show_tokens,
        )
    # Honour any server-side rate-limiting hints before returning.
    ratelimits.maybe_rate_limit(client, headers)
    return data.to_dict()
Update an entitlement in a repository.
def has_edge(self, edge):
    """Return whether an edge exists.

    @type edge: tuple
    @param edge: Edge.
    @rtype: boolean
    @return: Truth-value for edge existence.
    """
    endpoint_a, endpoint_b = edge
    return (endpoint_a, endpoint_b) in self.edge_properties
Return whether an edge exists. @type edge: tuple @param edge: Edge. @rtype: boolean @return: Truth-value for edge existence.
def decode(decoder, data, length, frame_size, decode_fec, channels=2):
    """Decode an Opus frame.

    Unlike the C ``opus_decode`` function, this takes an additional
    ``channels`` parameter indicating the number of channels in the frame.

    :returns: raw little-endian 16-bit PCM bytes for the decoded samples
    :raises OpusError: if the underlying decoder reports an error
    """
    # Allocate one int16 slot per sample.  The original sized the array in
    # *bytes* (frame_size * channels * sizeof(int16)) but then allocated
    # that many int16 elements, over-allocating by 2x.
    pcm_size = frame_size * channels
    pcm = (ctypes.c_int16 * pcm_size)()
    pcm_pointer = ctypes.cast(pcm, c_int16_pointer)
    decode_fec = int(bool(decode_fec))
    result = _decode(decoder, data, length, pcm_pointer, frame_size, decode_fec)
    if result < 0:
        raise OpusError(result)
    # ``result`` is the number of decoded samples per channel; trim the
    # buffer so no zero padding is returned.  array.tostring() was removed
    # in Python 3.9; tobytes() is the supported spelling.
    return array.array('h', pcm[:result * channels]).tobytes()
Decode an Opus frame. Unlike the `opus_decode` function, this function takes an additional parameter `channels`, which indicates the number of channels in the frame.
def get_stats_display_width(self, curse_msg, without_option=False):
    """Return the width (in characters) of the formatted curses message.

    :param curse_msg: dict with a 'msgdict' list of message fragments
    :param without_option: if True, ignore fragments flagged as optional
    :returns: length of the longest line after joining all fragments,
        or 0 if the width cannot be computed
    """
    try:
        if without_option:
            # Join only the non-optional fragments, then measure the
            # longest line of the result.
            c = len(max(''.join([(u(u(nativestr(i['msg'])).encode('ascii', 'replace')) if not i['optional'] else "") for i in curse_msg['msgdict']]).split('\n'), key=len))
        else:
            c = len(max(''.join([u(u(nativestr(i['msg'])).encode('ascii', 'replace')) for i in curse_msg['msgdict']]).split('\n'), key=len))
    except Exception as e:
        # Best effort: a malformed message simply reports zero width.
        logger.debug('ERROR: Can not compute plugin width ({})'.format(e))
        return 0
    else:
        return c
Return the width of the formatted curses message.
def generate_additional_context(self, matching_datasets):
    """Return additional information about the matching datasets.

    Includes the five most used tags and the most used hub (name + slug),
    or None for the hub fields when there are no matches.
    """
    dataset_ids = [upload.id for upload in matching_datasets]
    # Five most frequently used tag words across the matching datasets.
    tags = Tag.objects.filter(
        dataset__in=dataset_ids
    ).distinct().annotate(
        Count('word')
    ).order_by('-word__count')[:5]
    # Hubs ranked by how many matching datasets they contain.
    hubs = matching_datasets.values("hub_slug").annotate(
        Count('hub_slug')
    ).order_by('-hub_slug__count')
    if hubs:
        most_used_hub = get_hub_name_from_slug(hubs[0]['hub_slug'])
        hub_slug = hubs[0]['hub_slug']
    else:
        most_used_hub = None
        hub_slug = None
    return {
        'tags': tags,
        'hub': most_used_hub,
        'hub_slug': hub_slug,
    }
Return additional information about matching datasets. Includes upload counts, related hubs, related tags.
def dirname(self, path):
    """Return the full path to the parent directory of the given file path,
    preserving the ssh/tramp prefix for remote paths."""
    if not self.is_ssh(path):
        return os.path.dirname(path)
    remote = self._get_remote(path)
    return self._get_tramp_path(os.path.dirname(remote))
Returns the full path to the parent directory of the specified file path.
def save_region(region, filename):
    """Save the given region to a file.

    Parameters
    ----------
    region : :class:`AegeanTools.regions.Region`
        A region.
    filename : str
        Output file name.
    """
    region.save(filename)
    logging.info("Wrote {0}".format(filename))
Save the given region to a file Parameters ---------- region : :class:`AegeanTools.regions.Region` A region. filename : str Output file name.
def _writeOptionalXsecCards(self, fileObject, xSec, replaceParamFile): if xSec.erode: fileObject.write('ERODE\n') if xSec.maxErosion != None: fileObject.write('MAX_EROSION %.6f\n' % xSec.maxErosion) if xSec.subsurface: fileObject.write('SUBSURFACE\n') if xSec.mRiver != None: mRiver = vwp(xSec.mRiver, replaceParamFile) try: fileObject.write('M_RIVER %.6f\n' % mRiver) except: fileObject.write('M_RIVER %s\n' % mRiver) if xSec.kRiver != None: kRiver = vwp(xSec.kRiver, replaceParamFile) try: fileObject.write('K_RIVER %.6f\n' % kRiver) except: fileObject.write('K_RIVER %s\n' % kRiver)
Write Optional Cross Section Cards to File Method
def serialize_cert_to_der(cert_obj):
    """Serialize certificate to DER.

    Args:
        cert_obj: cryptography.Certificate

    Returns:
        bytes: DER encoded certificate
    """
    der_encoding = cryptography.hazmat.primitives.serialization.Encoding.DER
    return cert_obj.public_bytes(der_encoding)
Serialize certificate to DER. Args: cert_obj: cryptography.Certificate Returns: bytes: DER encoded certificate
def update(self, new_keys: Index):
    """Add the new keys to the mapping, salting on hash collisions.

    Parameters
    ----------
    new_keys :
        The new index to hash.

    Raises
    ------
    KeyError
        If any of the new keys are already present in the mapping.
    """
    import pandas as pd  # local import: concat replaces the removed Series.append

    if not self._map.index.intersection(new_keys).empty:
        raise KeyError("Non-unique keys in index.")
    mapping_update = self.hash_(new_keys)
    if self._map.empty:
        self._map = mapping_update.drop_duplicates()
    else:
        # Series.append was removed in pandas 2.0; concat is the
        # supported spelling and behaves identically here.
        self._map = pd.concat([self._map, mapping_update]).drop_duplicates()
    # Keys whose hashed value collided with an existing one were dropped
    # by drop_duplicates; re-hash them with an increasing salt until every
    # key has a unique slot.
    collisions = mapping_update.index.difference(self._map.index)
    salt = 1
    while not collisions.empty:
        mapping_update = self.hash_(collisions, salt)
        self._map = pd.concat([self._map, mapping_update]).drop_duplicates()
        collisions = mapping_update.index.difference(self._map.index)
        salt += 1
Adds the new keys to the mapping. Parameters ---------- new_keys : The new index to hash.
def generate(self, overwrite=False):
    """Generate a config file for an upstart service.

    Renders the ``<template_prefix>.conf`` template into
    ``<generate_into_prefix>.conf`` (after running the base-class
    generation) and returns the list of generated files.
    """
    super(Upstart, self).generate(overwrite=overwrite)
    svc_file_template = self.template_prefix + '.conf'
    self.svc_file_path = self.generate_into_prefix + '.conf'
    self.generate_file_from_template(svc_file_template, self.svc_file_path)
    return self.files
Generate a config file for an upstart service.
def _example_short_number(region_code):
    """Get a valid short number for the specified region.

    Arguments:
    region_code -- the region for which an example short number is needed.

    Returns a valid short number for the specified region, or an empty
    string when the metadata does not contain such information.
    """
    metadata = PhoneMetadata.short_metadata_for_region(region_code)
    if metadata is None:
        return U_EMPTY_STRING
    example = metadata.short_code.example_number
    if example is not None:
        return example
    return U_EMPTY_STRING
Gets a valid short number for the specified region. Arguments: region_code -- the region for which an example short number is needed. Returns a valid short number for the specified region. Returns an empty string when the metadata does not contain such information.
def get_final_path(path):
    r"""For a given path, determine the ultimate location of that path.

    Useful for resolving symlink targets. This function wraps
    GetFinalPathNameByHandle from the Windows SDK.

    Note, this function fails if a handle cannot be obtained (such as
    for C:\Pagefile.sys on a stock windows system). Consider using
    trace_symlink_target instead.
    """
    desired_access = api.NULL
    share_mode = (
        api.FILE_SHARE_READ | api.FILE_SHARE_WRITE | api.FILE_SHARE_DELETE
    )
    security_attributes = api.LPSECURITY_ATTRIBUTES()
    # FILE_FLAG_BACKUP_SEMANTICS allows opening directory handles too.
    hFile = api.CreateFile(
        path,
        desired_access,
        share_mode,
        security_attributes,
        api.OPEN_EXISTING,
        api.FILE_FLAG_BACKUP_SEMANTICS,
        api.NULL,
    )
    if hFile == api.INVALID_HANDLE_VALUE:
        raise WindowsError()
    # First call with an empty buffer returns the required buffer size.
    buf_size = api.GetFinalPathNameByHandle(
        hFile, LPWSTR(), 0, api.VOLUME_NAME_DOS)
    handle_nonzero_success(buf_size)
    buf = create_unicode_buffer(buf_size)
    result_length = api.GetFinalPathNameByHandle(
        hFile, buf, len(buf), api.VOLUME_NAME_DOS)
    assert result_length < len(buf)
    handle_nonzero_success(result_length)
    handle_nonzero_success(api.CloseHandle(hFile))
    return buf[:result_length]
r""" For a given path, determine the ultimate location of that path. Useful for resolving symlink targets. This functions wraps the GetFinalPathNameByHandle from the Windows SDK. Note, this function fails if a handle cannot be obtained (such as for C:\Pagefile.sys on a stock windows system). Consider using trace_symlink_target instead.
def dump(self):
    """Item as a JSON representation (compact, key-sorted, non-ASCII kept)."""
    encode_options = dict(
        sort_keys=True,
        ensure_ascii=False,
        separators=(',', ':'),
    )
    return json.dumps(self.primitive, **encode_options)
Item as a JSON representation.
def delete(self, template_name):
    """Delete a template by name.

    Returns 404 if the template does not exist; otherwise removes it,
    records an audit-log event and returns a confirmation payload.
    """
    template = db.Template.find_one(template_name=template_name)
    if not template:
        return self.make_response('No such template found', HTTP.NOT_FOUND)
    db.session.delete(template)
    db.session.commit()
    # Record who deleted which template for auditing purposes.
    auditlog(event='template.delete', actor=session['user'].username, data={'template_name': template_name})
    return self.make_response({
        'message': 'Template has been deleted',
        'templateName': template_name
    })
Delete a template
def pi(self, data):
    """Print an HTML processing instruction to the output file.

    @param data: the tag data
    @type data: string
    @return: None
    """
    # NOTE(review): encode() returns bytes; under Python 3, "%s" % bytes
    # would embed the b'...' repr in the output.  This looks Python-2-only
    # — confirm before porting.
    data = data.encode(self.encoding, "ignore")
    self.fd.write("<?%s?>" % data)
Print HTML pi. @param data: the tag data @type data: string @return: None
def _get_animation_frames(self, all_datasets, shape, fill_value=None, ignore_missing=False):
    """Create enhanced image frames to save to a file.

    Yields one dask array per dataset.  Missing datasets (None) are
    either skipped (``ignore_missing``) or replaced by a zero-filled
    frame so the animation keeps its timing.
    """
    for idx, ds in enumerate(all_datasets):
        if ds is None and ignore_missing:
            continue
        elif ds is None:
            log.debug("Missing frame: %d", idx)
            # Substitute an all-zero frame of the requested shape.
            data = da.zeros(shape, dtype=np.uint8, chunks=shape)
            data = xr.DataArray(data)
        else:
            img = get_enhanced_image(ds)
            data, mode = img.finalize(fill_value=fill_value)
            if data.ndim == 3:
                # Put the bands dimension last, as expected by the writer.
                data = data.transpose('y', 'x', 'bands')
        yield data.data
Create enhanced image frames to save to a file.
def _emiss_ep(self, Eph):
    """Electron-proton bremsstrahlung emissivity per unit photon energy.

    :param Eph: photon energies (astropy energy Quantity)
    :returns: emissivity with units of cm**2 / Eph.unit (zeros when the
        e-p channel weight is zero)
    """
    if self.weight_ep == 0.0:
        return np.zeros_like(Eph)
    gam = np.vstack(self._gam)
    # Photon energy in units of the electron rest-mass energy.
    eps = (Eph / mec2).decompose().value
    # Integrate electron distribution times the e-p cross section over
    # the electron Lorentz-factor grid (log-log trapezoid rule).
    emiss = c.cgs * trapz_loglog(
        np.vstack(self._nelec) * self._sigma_ep(gam, eps),
        self._gam,
        axis=0,
    ).to(u.cm ** 2 / Eph.unit)
    return emiss
Electron-proton bremsstrahlung emissivity per unit photon energy
def get_from_ident(self, ident):
    """Take a string as returned by get_ident and return a job, based on
    the class representation and the job's pk from the ident."""
    model_repr, job_pk = ident.split(':', 1)
    job_class = import_class(model_repr)
    return job_class.get(job_pk)
Take a string as returned by get_ident and return a job, based on the class representation and the job's pk from the ident
def _idx_table_by_name(tables, crumbs):
    """Import summary, ensemble, or distribution data.

    :param list tables: Metadata
    :return dict _tables: Metadata keyed by generated table name
    """
    indexed = OrderedDict()
    try:
        for position, table in enumerate(tables):
            name = "{}{}".format(crumbs, position)
            columns = _idx_col_by_name(table)
            if name in indexed:
                # Disambiguate duplicate names by appending the index.
                name = "{}_{}".format(name, position)
            columns["tableName"] = name
            indexed[name] = columns
    except Exception as e:
        logger_jsons.error("idx_table_by_name: {}".format(e))
        print("Error: idx_table_by_name: {}".format(e))
    return indexed
Import summary, ensemble, or distribution data. :param list tables: Metadata :return dict _tables: Metadata
def avail_images(call=None):
    """Return a mapping of the template images available to you.

    :param call: unused; present for salt-cloud's calling convention.
    :returns: dict keyed by template name.  (The previous implementation
        stored every template under the single key "Template Name", so
        only the last template survived.)
    """
    templates = {}
    for server in list_nodes_full():
        if server["IsTemplate"]:
            # Key by the template's own name so all templates are kept.
            templates[server["Name"]] = {"Template Name": server["Name"]}
    return templates
returns a list of images available to you
def urls(self):
    """Generator over every URL attached to this API (base URL + route)."""
    for base_url, mapping in self.routes.items():
        # Iterating the mapping directly yields its keys (the sub-paths).
        for sub_path in mapping:
            yield base_url + sub_path
Returns a generator of all URLs attached to this API
def _valid_headerline(l): if not l: return False headers = l.split('\t') first_col = headers[0] tsplit = first_col.split(':') if len(tsplit) != 2: return False if tsplit[0] in ('entity', 'update'): return tsplit[1] in ('participant_id', 'participant_set_id', 'sample_id', 'sample_set_id', 'pair_id', 'pair_set_id') elif tsplit[0] == 'membership': if len(headers) < 2: return False return tsplit[1].replace('set_', '') == headers[1] else: return False
return true if the given string is a valid loadfile header
def remove_interface_router(self, router, body=None):
    """Remove an internal network interface from the specified router."""
    url = (self.router_path % router) + "/remove_router_interface"
    return self.put(url, body=body)
Removes an internal network interface from the specified router.
def scan_on_disk(node, env, path=()):
    """Scan a directory for on-disk files and directories therein.

    Looking up the entries adds them to the in-memory Node tree
    representation of the file system, so all we have to do is just that
    and then call the in-memory scanning function.
    """
    try:
        flist = node.fs.listdir(node.get_abspath())
    except (IOError, OSError):
        # Directory missing or unreadable: nothing to scan.
        return []
    e = node.Entry
    for f in filter(do_not_scan, flist):
        # Entry() is called for its side effect of registering the node
        # in the in-memory tree; the return value is not needed.
        e('./' + f)
    return scan_in_memory(node, env, path)
Scans a directory for on-disk files and directories therein. Looking up the entries will add these to the in-memory Node tree representation of the file system, so all we have to do is just that and then call the in-memory scanning function.
def add_sidebar(self, component: Component) -> None:
    """Add a widget to the sidebar.

    Parameters
    ----------
    component : bowtie._Component
        Add this component to the sidebar, it will be appended to the end.
    """
    if not self.sidebar:
        raise NoSidebarError('Set `sidebar=True` if you want to use the sidebar.')
    if isinstance(component, Component):
        self._controllers.append(component)
    else:
        raise ValueError('component must be Component type, found {}'.format(component))
Add a widget to the sidebar. Parameters ---------- component : bowtie._Component Add this component to the sidebar, it will be appended to the end.
def network_get(auth=None, **kwargs):
    """Get a single network.

    CLI Example:

    .. code-block:: bash

        salt '*' neutronng.network_get name=XLB4
    """
    cloud = get_operator_cloud(auth)
    cleaned = _clean_kwargs(**kwargs)
    return cloud.get_network(**cleaned)
Get a single network. :param filters: A Python dictionary of filter conditions to push down. CLI Example: .. code-block:: bash salt '*' neutronng.network_get name=XLB4
def notes(self):
    """Tuple of all <note> elements, converted via _get_note_data."""
    note_elements = self.root.iter('note')
    return tuple(map(self._get_note_data, note_elements))
Tuple of notes.
def store_job(self):
    """Helper that allows us to turn off storing jobs for different
    classes that may incorporate this mixin."""
    try:
        class_name = self.__class__.__name__.lower()
    except AttributeError:
        log.warning(
            'Unable to determine class name',
            exc_info_on_loglevel=logging.DEBUG
        )
        return True
    # Default to storing jobs unless the class explicitly opts out via
    # the '<classname>_returns' option.
    return self.opts.get('{0}_returns'.format(class_name), True)
Helper that allows us to turn off storing jobs for different classes that may incorporate this mixin.
def size(self, key, resource_type):
    """Return the size of the queue for a given key and resource type.

    Raises KeyError if the key is not in the cache.
    """
    with self._objects_queue_lock:
        resource_lists = self._objects_queue[key]
        return len(resource_lists.get(resource_type, []))
Return the size of the queue for a given key and resource type. If the key is not in the cache, this will raise a KeyError.
def get_consumer_info(self, instance, cursor):
    """Collect metrics for all consumers on the connected database.

    Yields (queue_name, consumer_name, metrics-dict) tuples.
    """
    cursor.execute(self.CONSUMER_INFO_STATEMENT)
    for row in cursor:
        queue_name, consumer_name, lag, pending_events, last_seen = row
        metrics = {
            'lag': lag,
            'pending_events': pending_events,
            'last_seen': last_seen,
        }
        yield queue_name, consumer_name, metrics
Collects metrics for all consumers on the connected database.