code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def empty_over_span(self, time, duration):
    """Return True if no segment of the composition overlaps the span.

    :param time: start of the span, in seconds
    :param duration: length of the span, in seconds
    :returns: True when no segment overlaps [time, time + duration)
    """
    span_end = time + duration
    for segment in self.segments:
        seg_start = segment.comp_location_in_seconds
        seg_end = seg_start + segment.duration_in_seconds
        # Segment starts inside the span.
        if time <= seg_start < span_end:
            return False
        # Segment ends inside the span (touching the start counts).
        if time <= seg_end < span_end:
            return False
        # Segment fully covers the span.
        if seg_start < time and seg_end >= span_end:
            return False
    return True
Helper method that tests whether composition contains any segments at a given time for a given duration. :param time: Time (in seconds) to start span :param duration: Duration (in seconds) of span :returns: `True` if there are no segments in the composition that overlap the span starting at `time` and lasting for `duration` seconds. `False` otherwise.
def write(self, auth, resource, value, options=None, defer=False):
    """Write a single value to the specified resource.

    Args:
        auth: cik for authentication.
        resource: resource to write to.
        value: value to write.
        options: optional dict of write options (defaults to empty dict).
        defer: passed through to _call; presumably defers the RPC — confirm.
    """
    # Fix: a mutable default argument ({}) is shared across all calls;
    # use None as the sentinel and build a fresh dict per call.
    if options is None:
        options = {}
    return self._call('write', auth, [resource, value, options], defer)
Writes a single value to the resource specified. Args: auth: cik for authentication. resource: resource to write to. value: value to write options: options.
def now(tzinfo=True):
    """Return an aware or naive datetime, depending on settings.USE_TZ.

    Prefers django.utils.timezone.now (``dj_now``) when available;
    otherwise builds a UTC-aware or naive local datetime directly.
    """
    if dj_now:
        return dj_now()
    if not tzinfo:
        return datetime.now()
    return datetime.utcnow().replace(tzinfo=utc)
Return an aware or naive datetime.datetime, depending on settings.USE_TZ.
def detect_encoding(value):
    """Return the character encoding for a JSON byte string.

    Inspects the NUL-byte pattern of the first four bytes, per the
    RFC 4627 encoding-detection trick; falls back to 'utf-8'.
    """
    sample = value[:4]
    # Fix: replaced the runtime dependency on six.PY2 with direct type
    # inspection — on Python 3 iterating bytes yields ints, while str
    # (and Python 2 str) yields one-character strings needing ord().
    if sample and isinstance(sample[0], int):
        null_pattern = tuple(bool(byte) for byte in sample)
    else:
        null_pattern = tuple(bool(ord(char)) for char in sample)
    encodings = {
        # (0, 0, 0, X) == utf-32-be
        (0, 0, 0, 1): 'utf-32-be',
        # (0, X, 0, X) == utf-16-be
        (0, 1, 0, 1): 'utf-16-be',
        # (X, 0, 0, 0) == utf-32-le
        (1, 0, 0, 0): 'utf-32-le',
        # (X, 0, X, 0) == utf-16-le
        (1, 0, 1, 0): 'utf-16-le',
    }
    return encodings.get(null_pattern, 'utf-8')
Returns the character encoding for a JSON string.
def _create_training_directories():
    """Create the directory tree and config files needed for training.

    Builds the model/input-config/output-data directories under the
    module-level base path and seeds the hyperparameter, input-data and
    resource config JSON files (the latter with this host's name).
    """
    logger.info('Creating a new training folder under %s .' % base_dir)
    for directory in (model_dir, input_config_dir, output_data_dir):
        os.makedirs(directory)
    for empty_config in (hyperparameters_file_dir, input_data_config_file_dir):
        _write_json({}, empty_config)
    current_host = socket.gethostname()
    _write_json({"current_host": current_host, "hosts": [current_host]},
                resource_config_file_dir)
Creates the directory structure and files necessary for training under the base path
def get_institution(self, **kwargs):
    """Return the dissertation-granting institution, or None.

    :param qualifier: only 'grantor' entries name the institution
    :param content: the institution name
    """
    if kwargs.get('qualifier', '') == 'grantor':
        return kwargs.get('content', '')
    return None
Get the dissertation institution.
def list_proxy(root_package = 'vlcp'):
    """Walk all submodules of *root_package* and collect proxy modules.

    Finds every subclass of ``_ProxyModule`` (excluding the base class
    itself) that is defined in the module being inspected and carries a
    ``_default`` attribute, and records its default implementation.

    :param root_package: dotted package name to walk (default 'vlcp')
    :returns: OrderedDict mapping lowercased proxy class name to a dict
        with 'defaultmodule' and 'class' entries
    """
    proxy_dict = OrderedDict()
    pkg = __import__(root_package, fromlist=['_'])
    for imp, module, _ in walk_packages(pkg.__path__, root_package + '.'):
        # Importing each submodule so its classes become visible.
        m = __import__(module, fromlist = ['_'])
        for _, v in vars(m).items():
            # Only classes *defined* in this module (v.__module__ check)
            # count, so re-exports are not listed twice.
            if v is not None and isinstance(v, type) and issubclass(v, _ProxyModule) \
                    and v is not _ProxyModule \
                    and v.__module__ == module \
                    and hasattr(v, '_default'):
                name = v.__name__.lower()
                # First definition wins if the same name appears twice.
                if name not in proxy_dict:
                    proxy_dict[name] = {'defaultmodule': v._default.__name__.lower(),
                                        'class': repr(v._default.__module__ + '.' + v._default.__name__)}
    return proxy_dict
Walk through all the sub modules, find subclasses of vlcp.server.module._ProxyModule, list their default values
def copy_figure_to_clipboard(figure='gcf'):
    """Copy the given matplotlib figure to the system clipboard.

    :param figure: a figure instance, or the string 'gcf' to use the
        current figure. Requires pyqtgraph.
    """
    try:
        import pyqtgraph as _p
        # Fix: 'figure is "gcf"' compared object identity with a string
        # literal (unreliable, SyntaxWarning on modern CPython); use ==.
        if figure == 'gcf':
            figure = _s.pylab.gcf()
        path = _os.path.join(_s.settings.path_home, "clipboard.png")
        figure.savefig(path)
        _p.QtGui.QApplication.instance().clipboard().setImage(_p.QtGui.QImage(path))
    # Fix: a bare 'except:' also swallowed KeyboardInterrupt/SystemExit;
    # kept deliberately broad (best-effort behavior) but bounded.
    except Exception:
        print("This function currently requires pyqtgraph to be installed.")
Copies the specified figure to the system clipboard. Specifying 'gcf' will use the current figure.
def add(self, recipients):
    """Add the supplied recipients to the existing list.

    :param recipients: an address string, a Recipient instance, a
        (name, address) tuple, a dict with 'name'/'address' keys, or a
        list of any of these
    :raises ValueError: for any other type
    """
    if recipients:
        if isinstance(recipients, str):
            self._recipients.append(
                Recipient(address=recipients, parent=self._parent,
                          field=self._field))
        elif isinstance(recipients, Recipient):
            self._recipients.append(recipients)
        elif isinstance(recipients, tuple):
            name, address = recipients
            if address:
                self._recipients.append(
                    Recipient(address=address, name=name,
                              parent=self._parent, field=self._field))
        elif isinstance(recipients, dict):
            # Fix: the documented list[dict] input previously fell through
            # to the ValueError branch. NOTE(review): assumes the dict
            # uses 'name'/'address' keys — confirm against callers.
            address = recipients.get('address')
            if address:
                self._recipients.append(
                    Recipient(address=address, name=recipients.get('name'),
                              parent=self._parent, field=self._field))
        elif isinstance(recipients, list):
            for recipient in recipients:
                self.add(recipient)
        else:
            raise ValueError('Recipients must be an address string, a '
                             'Recipient instance, a (name, address) '
                             'tuple or a list')
        self._track_changes()
Add the supplied recipients to the exiting list :param recipients: list of either address strings or tuples (name, address) or dictionary elements :type recipients: list[str] or list[tuple] or list[dict]
def create_highlight(self, artist):
    """Create and return a highlight copy of the given artist.

    The copy is restyled with this object's highlight color/width and
    added to the artist's own axes.
    """
    duplicate = copy.copy(artist)
    highlight_style = dict(color=self.highlight_color,
                           mec=self.highlight_color,
                           lw=self.highlight_width,
                           mew=self.highlight_width)
    duplicate.set(**highlight_style)
    artist.axes.add_artist(duplicate)
    return duplicate
Create a new highlight for the given artist.
def showEvent(self, event):
    """Override of QPlainTextEdit.showEvent: refresh panel layout.

    Refreshes the panels (and hence the viewport margins) whenever the
    editor widget becomes visible.
    """
    super(CodeEdit, self).showEvent(event)
    self.panels.refresh()
Overrides showEvent to update the viewport margins
def browsers(self, browser=None, browser_version=None, device=None, os=None, os_version=None):
    """Return the list of available browsers & OS combinations.

    Each keyword argument, when truthy, filters the result via
    ``match_item``; filters are intersected.
    """
    response = self.execute('GET', '/screenshots/browsers.json')
    filters = {
        'browser': browser,
        'browser_version': browser_version,
        'device': device,
        'os': os,
        'os_version': os_version,
    }
    for key, value in filters.items():
        if not value:
            continue
        response = [item for item in response if match_item(key, value, item)]
    return response
Returns list of available browsers & OS.
def get_bounce(bounce_id, api_key=None, secure=None, test=None, **request_args):
    """Get a single bounce from the Postmark API.

    Thin module-level convenience wrapper around the shared
    ``_default_bounce`` handler.

    :param bounce_id: the bounce's id (see get_bounces)
    :param api_key: Postmark API key (required unless test is True)
    :param secure: use https for the Postmark API (defaults upstream)
    :param test: use the Postmark Test API
    :param request_args: extra keyword arguments for requests.request
    """
    return _default_bounce.get(bounce_id, api_key=api_key, secure=secure,
                               test=test, **request_args)
Get a single bounce. :param bounce_id: The bounce's id. Get the id with :func:`get_bounces`. :param api_key: Your Postmark API key. Required, if `test` is not `True`. :param secure: Use the https scheme for the Postmark API. Defaults to `True` :param test: Use the Postmark Test API. Defaults to `False`. :param \*\*request_args: Keyword arguments to pass to :func:`requests.request`. :rtype: :class:`BounceResponse`
def selectisnot(table, field, value, complement=False):
    """Select rows where the given field ``is not`` the given value.

    Delegates to ``selectop`` with ``operator.is_not`` as the predicate;
    ``complement=True`` inverts the selection.
    """
    return selectop(table, field, value, operator.is_not,
                    complement=complement)
Select rows where the given field `is not` the given value.
def match(self, row):
    """Return True if the row matches at least one child condition.

    :param dict row: the row to test
    :rtype: bool
    """
    return any(condition.match(row) for condition in self._conditions)
Returns True if the row matches one or more child conditions. Returns False otherwise. :param dict row: The row. :rtype: bool
def as_set(self, include_weak=False):
    """Convert the ETags object into a plain Python set.

    Weak etags are only included when ``include_weak`` is True.
    """
    result = set(self._strong)
    if include_weak:
        result |= set(self._weak)
    return result
Convert the `ETags` object into a python set. Per default all the weak etags are not part of this set.
def copy(self):
    """Return a copy of the model.

    The copy shares the database but gets independent copies of the
    limit dicts and the reaction/compound sets.
    """
    duplicate = self.__class__(self._database)
    duplicate._limits_lower = dict(self._limits_lower)
    duplicate._limits_upper = dict(self._limits_upper)
    duplicate._reaction_set = set(self._reaction_set)
    duplicate._compound_set = set(self._compound_set)
    return duplicate
Return copy of model
def spit(path, txt, encoding='UTF-8', append=False):
    """Write the unicode string ``txt`` to the file at ``path``.

    Truncates the file by default; pass ``append=True`` to append.

    :param path: file path on disk
    :param txt: text content to write
    :param encoding: file encoding (default UTF-8)
    :param append: append instead of truncating
    :returns: the text that was written
    """
    with io.open(path, 'a' if append else 'w', encoding=encoding) as handle:
        handle.write(txt)
    return txt
Write a unicode string `txt` to file `path`. By default encoded as UTF-8 and truncates the file prior to writing Parameters ---------- path : str File path to file on disk txt : unicode Text content to write to file encoding : str, default `UTF-8`, optional Encoding of the file append : Boolean, default False Append to file instead of truncating before writing Returns ------- The txt written to the file as a unicode string
def get_name(self, tag):
    """Override: extract a representative name for a function tag.

    When ``include_function_signatures`` was set on this processor, the
    argument list and (when present) the return type are appended to the
    base name, e.g. ``foo(int x) -> bool``.

    Args:
        tag: a BeautifulSoup Tag for a function.
    Returns:
        A display name suitable for a docset entry.
    """
    name = super(functionTagProcessor, self).get_name(tag)
    if self.include_function_signatures:
        # 'arglist' / 'type' children come from the Doxygen tag file
        # structure; .contents[0] is their text payload.
        func_args = tag.findChild('arglist')
        if func_args and len(func_args.contents):
            name += func_args.contents[0]
        ret_type = tag.findChild('type')
        if ret_type and len(ret_type.contents):
            name += ' -> ' + ret_type.contents[0]
    return name
Override. Extract a representative "name" from a function tag. get_name's output can be controlled through keyword arguments that are provided when initializing a functionTagProcessor. For instance, function arguments and return types can be included by passing include_function_signatures=True to __init__(). Args: tag: A BeautifulSoup Tag for a function. Returns: A string that would be appropriate to use as an entry name for a function in a Zeal database.
def create_element(self, ns, name):
    """Create a namespaced child element of this SLDNode.

    @param ns: namespace prefix key into SLDNode._nsmap
    @param name: local name of the new element
    @return: the wrapped child, resolved through this node's property
        machinery (``getattr(self, name)``), not the raw lxml element.
    """
    elem = self._node.makeelement('{%s}%s' % (SLDNode._nsmap[ns], name),
                                  nsmap=SLDNode._nsmap)
    self._node.append(elem)
    return getattr(self, name)
Create an element as a child of this SLDNode. @type ns: string @param ns: The namespace of the new element. @type name: string @param name: The name of the new element. @rtype: L{SLDNode} @return: The wrapped node, in the parent's property class. This will always be a descendent of SLDNode.
def write_yaml(filename, content):
    """Serialize ``content`` as YAML and write it to ``filename``.

    :param filename: full path of the YAML file
    :param content: object to dump
    :returns: the size written, or None when the dump produced nothing
    """
    serialized = _yaml.dump(content, indent=4, default_flow_style=False)
    if not serialized:
        return None
    return write_file(filename, serialized)
Writes YAML files :param filename: The full path to the YAML file :param content: The content to dump :returns: The size written
def subscribe(self, topic=b''):
    """Subscribe the SUB socket to ``topic`` and return its poller.

    :param topic: byte prefix to subscribe to (empty = everything)
    :returns: the poller/stream associated with the SUB socket
    """
    sub_socket = self.sockets[zmq.SUB]
    sub_socket.setsockopt(zmq.SUBSCRIBE, topic)
    return self.pollers[zmq.SUB]
Subscribe to the SUB socket to listen for incoming variables; return a stream (poller) that can be listened to.
def get_context_data(self, **kwargs):
    """Extend the template context with a 'render_description' flag.

    The flag is True only when the query string carries
    ``render_description=1``.
    """
    context = super(ScheduleXmlView, self).get_context_data(**kwargs)
    context['render_description'] = (
        self.request.GET.get('render_description', None) == '1')
    return context
Allow adding a 'render_description' parameter
def get_article(self, url=None, article_id=None, max_pages=25):
    """GET the parser endpoint for an article representation.

    Identify the article by either ``url`` or ``article_id`` (one of the
    two should be passed).

    :param url: URL of the article whose content is wanted
    :param article_id: Readability id of the article
    :param max_pages: maximum number of pages to parse and combine
    """
    params = {}
    if url is not None:
        params['url'] = url
    if article_id is not None:
        params['article_id'] = article_id
    params['max_pages'] = max_pages
    endpoint = self._generate_url('parser', query_params=params)
    return self.get(endpoint)
Send a GET request to the `parser` endpoint of the parser API to get back the representation of an article. The article can be identified by either a URL or an id that exists in Readability. Note that either the `url` or `article_id` param should be passed. :param url (optional): The url of an article whose content is wanted. :param article_id (optional): The id of an article in the Readability system whose content is wanted. :param max_pages: The maximum number of pages to parse and combine. The default is 25.
def _init_training(self):
    """Instantiate the helper objects used during training.

    Chooses a checked or batch backpropagation implementation based on
    ``self.check``, then wires up momentum, gradient descent, weight
    decay and weight tying (applied to the weights immediately).
    """
    cost = self.problem.cost
    if self.check:
        self.backprop = CheckedBackprop(self.network, cost)
    else:
        self.backprop = BatchBackprop(self.network, cost)
    self.momentum = Momentum()
    self.decent = GradientDecent()
    self.decay = WeightDecay()
    self.tying = WeightTying(*self.problem.weight_tying)
    self.weights = self.tying(self.weights)
Classes needed during training.
def setup_request_sessions(self):
    """Create a requests.Session that shares headers across API calls."""
    session = requests.Session()
    session.headers.update(self.headers)
    self.req_session = session
Sets up a requests.Session object for sharing headers across API requests.
def peek(self, length):
    """Return ``length`` characters without advancing the scan pointer.

    >>> s = Scanner("test string")
    >>> s.peek(7)
    'test st'
    >>> s.peek(7)
    'test st'
    """
    start = self.pos
    return self.string[start:start + length]
Get a number of characters without advancing the scan pointer. >>> s = Scanner("test string") >>> s.peek(7) 'test st' >>> s.peek(7) 'test st'
def read_csv(self, dtype=False, parse_dates=True, *args, **kwargs):
    """Fetch the target resource and pass it through pandas.read_csv.

    The file path argument of read_csv is supplied internally from the
    resolved URL; do not pass it yourself.
    """
    import pandas
    target = self.resolved_url.get_resource().get_target()
    pandas_kwargs = self._update_pandas_kwargs(dtype, parse_dates, kwargs)
    return pandas.read_csv(target.fspath, *args, **pandas_kwargs)
Fetch the target and pass through to pandas.read_csv Don't provide the first argument of read_csv(); it is supplied internally.
def AddHeadwayPeriodObject(self, headway_period, problem_reporter):
    """Deprecated. Please use AddFrequencyObject instead.

    Emits a DeprecationWarning and forwards to AddFrequencyObject.
    """
    warnings.warn("No longer supported. The HeadwayPeriod class was renamed to "
                  "Frequency, and all related functions were renamed "
                  "accordingly.", DeprecationWarning)
    # Bug fix: previously forwarded the undefined name 'frequency',
    # raising NameError whenever this deprecated shim was called.
    self.AddFrequencyObject(headway_period, problem_reporter)
Deprecated. Please use AddFrequencyObject instead.
def make_fake_data(g, fac=1.0):
    """Make fake data based on ``g``.

    Replaces the GVars in ``g`` with new GVars that have similar means
    and a covariance matrix scaled by ``fac**2``, with random shifts —
    intended for generating test inputs for fitting routines.

    :param g: GVar, array of GVars, or dict of (arrays of) GVars
    :param fac: rescaling factor applied to the uncertainties
    :returns: collection with the same layout as ``g``
    """
    if hasattr(g, 'keys'):
        # Dictionary-like input: normalize to a BufferDict and recurse
        # on its flat buffer so shapes are preserved.
        if not isinstance(g, BufferDict):
            g = BufferDict(g)
        return BufferDict(g, buf=make_fake_data(g.buf, fac))
    else:
        g_shape = numpy.shape(g)
        g_flat = numpy.array(g).flat
        zero = numpy.zeros(len(g_flat), float)
        # dg carries the (scaled) correlated uncertainty of g.
        dg = (2. ** -0.5) * gvar(zero, evalcov(g_flat))
        dg *= fac
        # Independent noise of matching size, plus one random draw to
        # shift the means (next(raniter(...))).
        noise = gvar(zero, sdev(dg))
        g_flat = mean(g_flat) + dg + noise + next(raniter(dg + noise))
        # Scalars come back as a bare element, arrays keep their shape.
        return g_flat[0] if g_shape == () else g_flat.reshape(g_shape)
Make fake data based on ``g``. This function replaces the |GVar|\s in ``g`` by new |GVar|\s with similar means and a similar covariance matrix, but multiplied by ``fac**2`` (so standard deviations are ``fac`` times smaller). The changes are random. The function was designed to create fake data for testing fitting routines, where ``g`` is set equal to ``fitfcn(x, prior)`` and ``fac<1`` (e.g., set ``fac=0.1`` to get fit parameters whose standard deviations are 10x smaller than those of the corresponding priors). Args: g (dict, array or gvar.GVar): The |GVar| or array of |GVar|\s, or dictionary whose values are |GVar|\s or arrays of |GVar|\s that from which the fake data is generated. fac (float): Uncertainties are rescaled by ``fac`` in the fake data. Returns: A collection of |GVar|\s with the same layout as ``g`` but with somewhat different means, and standard deviations rescaled by ``fac``.
def _must_be_deleted(local_path, r_st):
    """Return True if the remote counterpart of local_path must go.

    That is the case when the path does not exist locally, or when the
    local and remote file types (S_IFMT bits) differ.

    :param local_path: path on the local filesystem
    :param r_st: stat result of the remote object
    """
    if not os.path.lexists(local_path):
        return True
    local_stat = os.lstat(local_path)
    return S_IFMT(r_st.st_mode) != S_IFMT(local_stat.st_mode)
Return True if the remote correspondent of local_path has to be deleted. i.e. if it doesn't exists locally or if it has a different type from the remote one.
def parse_broken_json(json_text: str) -> dict:
    """Parse "broken" JSON such as ``{success:true}`` via YAML.

    YAML is (nearly) a superset of JSON and tolerates unquoted keys, so
    it can decode input the standard json module rejects.
    """
    # YAML needs a space after ':' in flow mappings. NOTE(review): this
    # also mangles colons inside values (e.g. URLs become "http: //...")
    # — pre-existing limitation, kept for compatibility.
    json_text = json_text.replace(":", ": ")
    # Fix: yaml.load without an explicit Loader is deprecated and can
    # construct arbitrary Python objects from untrusted input; safe_load
    # restricts parsing to plain data types.
    return yaml.safe_load(json_text)
Parses broken JSON that the standard Python JSON module cannot parse. Ex: {success:true} Keys do not contain quotes and the JSON cannot be parsed using the regular json encoder. YAML happens to be a superset of JSON and can parse json without quotes.
def methods(self) -> 'PrettyDir':
    """Return all methods (functions) of the inspected object.

    Note that "methods" means "functions" when inspecting a module.
    """
    function_attrs = [
        attr for attr in self.pattrs
        if category_match(attr.category, AttrCategory.FUNCTION)
    ]
    return PrettyDir(self.obj, function_attrs)
Returns all methods of the inspected object. Note that "methods" can mean "functions" when inspecting a module.
def unique(lst):
    """Return unique items of ``lst``, preserving order and types.

    Unlike pandas.unique / numpy.unique, mixed-type elements are kept
    as-is (no casting to a common type).

    :param lst: list-like of hashable items
    :returns: list of first occurrences, in input order
    """
    seen = set()
    result = []
    for item in lst:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
Return unique elements :class:`pandas.unique` and :class:`numpy.unique` cast mixed type lists to the same type. They are faster, but some times we want to maintain the type. Parameters ---------- lst : list-like List of items Returns ------- out : list Unique items in the order that they appear in the input. Examples -------- >>> import pandas as pd >>> import numpy as np >>> lst = ['one', 'two', 123, 'three'] >>> pd.unique(lst) array(['one', 'two', '123', 'three'], dtype=object) >>> np.unique(lst) array(['123', 'one', 'three', 'two'], dtype='<U5') >>> unique(lst) ['one', 'two', 123, 'three'] pandas and numpy cast 123 to a string!, and numpy does not even maintain the order.
def draw_identity_line(ax=None, dynamic=True, **kwargs):
    """Draw a y=x identity line across the axes' visible range.

    :param ax: matplotlib Axes (defaults to the current axes)
    :param dynamic: when True, register limit-change callbacks so the
        line tracks axis rescaling
    :param kwargs: line style options passed to ``ax.plot``
    :returns: the axes the line was drawn on
    """
    ax = ax or plt.gca()
    if 'c' not in kwargs and 'color' not in kwargs:
        kwargs['color'] = LINE_COLOR
    kwargs.setdefault('alpha', 0.5)
    identity, = ax.plot([], [], **kwargs)
    def callback(axes):
        # Clamp to the overlapping portion of the x and y ranges so the
        # segment stays inside the view.
        low = max(axes.get_xlim()[0], axes.get_ylim()[0])
        high = min(axes.get_xlim()[1], axes.get_ylim()[1])
        identity.set_data((low, high), (low, high))
    callback(ax)
    if dynamic:
        ax.callbacks.connect('xlim_changed', callback)
        ax.callbacks.connect('ylim_changed', callback)
    return ax
Draws a 45 degree identity line such that y=x for all points within the given axes x and y limits. This function also registeres a callback so that as the figure is modified, the axes are updated and the line remains drawn correctly. Parameters ---------- ax : matplotlib Axes, default: None The axes to plot the figure on. If None is passed in the current axes will be used (or generated if required). dynamic : bool, default : True If the plot is dynamic, callbacks will be registered to update the identiy line as axes are changed. kwargs : dict Keyword arguments to pass to the matplotlib plot function to style the identity line. Returns ------- ax : matplotlib Axes The axes with the line drawn on it. Notes ----- .. seealso:: `StackOverflow discussion: Does matplotlib have a function for drawing diagonal lines in axis coordinates? <https://stackoverflow.com/questions/22104256/does-matplotlib-have-a-function-for-drawing-diagonal-lines-in-axis-coordinates>`_
def _proc_dihedral(self):
    """Assign the Schoenflies symbol for a dihedral-group molecule.

    Uses the highest-order rotation axis for the base ``D{n}`` symbol,
    then appends 'h' for a horizontal mirror plane or 'd' for any other
    (non-empty) mirror type.
    """
    main_axis, order = max(self.rot_sym, key=lambda entry: entry[1])
    self.sch_symbol = "D{}".format(order)
    mirror_type = self._find_mirror(main_axis)
    if mirror_type == "h":
        self.sch_symbol += "h"
    elif mirror_type != "":
        self.sch_symbol += "d"
Handles dihedral group molecules, i.e those with intersecting R2 axes and a main axis.
def peak_generation_per_technology_and_weather_cell(self):
    """Peak generation per (technology, weather cell) in the grid.

    Returns
    -------
    :pandas:`pandas.Series<series>`
        Peak generation indexed by a (type, weather_cell_id) MultiIndex.

    Raises
    ------
    KeyError
        If a generator has no ``weather_cell_id`` attribute.
    """
    peak_generation = defaultdict(float)
    for gen in self.generators:
        if not hasattr(gen, 'weather_cell_id'):
            raise KeyError('No weather cell ID found for '
                           'generator {}.'.format(repr(gen)))
        # defaultdict(float) makes the missing-key case start at 0.0.
        peak_generation[(gen.type, gen.weather_cell_id)] += gen.nominal_capacity
    series_index = pd.MultiIndex.from_tuples(
        list(peak_generation.keys()), names=['type', 'weather_cell_id'])
    return pd.Series(peak_generation, index=series_index)
Peak generation of each technology and the corresponding weather cell in the grid Returns ------- :pandas:`pandas.Series<series>` Peak generation index by technology
def parse_substitution_from_list(list_rep):
    """Parse a Substitution from its config-file list representation.

    Expected shape: [pattern, replacement] or
    [pattern, replacement, is_multiline(bool)].

    :raises SyntaxError: on a malformed representation
    """
    # Exact type checks (not isinstance) are intentional here: list and
    # bool subclasses are rejected, matching the original validation.
    if type(list_rep) is not list:
        raise SyntaxError('Substitution must be a list')
    if len(list_rep) < 2:
        raise SyntaxError('Substitution must be a list of size 2')
    pattern, replacement = list_rep[0], list_rep[1]
    is_multiline = list_rep[2] if len(list_rep) > 2 else False
    if type(is_multiline) is not bool:
        raise SyntaxError('is_multiline must be a boolean')
    return substitute.Substitution(pattern, replacement, is_multiline)
Parse a substitution from the list representation in the config file.
def symmetry_reduce(tensors, structure, tol=1e-8, **kwargs):
    """Reduce a list of tensors to symmetrically-unique representatives.

    Args:
        tensors (list of tensors): Tensor objects to deduplicate
        structure (Structure): structure the tensors belong to; its
            space group supplies the symmetry operations
        tol (float): tolerance for tensor equivalence
        kwargs: forwarded to SpacegroupAnalyzer

    Returns:
        TensorMapping of unique tensors to the list of symmetry
        operations that reconstruct the discarded duplicates.
    """
    sga = SpacegroupAnalyzer(structure, **kwargs)
    symmops = sga.get_symmetry_operations(cartesian=True)
    # Seed with the first tensor; its symmop list starts empty.
    unique_mapping = TensorMapping([tensors[0]], [[]], tol=tol)
    for tensor in tensors[1:]:
        is_unique = True
        # Compare against every (unique tensor, symmop) combination;
        # the first match records the generating symmop and stops.
        for unique_tensor, symmop in itertools.product(unique_mapping, symmops):
            if np.allclose(unique_tensor.transform(symmop), tensor, atol=tol):
                unique_mapping[unique_tensor].append(symmop)
                is_unique = False
                break
        if is_unique:
            unique_mapping[tensor] = []
    return unique_mapping
Function that converts a list of tensors corresponding to a structure and returns a dictionary consisting of unique tensor keys with symmop values corresponding to transformations that will result in derivative tensors from the original list Args: tensors (list of tensors): list of Tensor objects to test for symmetrically-equivalent duplicates structure (Structure): structure from which to get symmetry tol (float): tolerance for tensor equivalence kwargs: keyword arguments for the SpacegroupAnalyzer returns: dictionary consisting of unique tensors with symmetry operations corresponding to those which will reconstruct the remaining tensors as values
def create_comment(self, text):
    """Create (or reuse) a dashboard comment — mimics the issue API.

    :param text: body of the comment
    :returns: the DashboardComment instance
    """
    return DashboardComment.get_or_create(self._issue_or_pr, self._header, text)
Mimic issue API, so we can use it everywhere. Return dashboard comment.
def extract_cpio(archive, compression, cmd, verbosity, interactive, outdir):
    """Build the shell command list for extracting a CPIO archive.

    :returns: (cmdlist, kwargs) — the command tokens plus the run
        options ({'cwd': outdir, 'shell': True}); input is fed via '<'.
    """
    cmdlist = [
        util.shell_quote(cmd),
        '--extract',
        '--make-directories',
        '--preserve-modification-time',
    ]
    # GNU cpio options that bsdcpio does not understand.
    if sys.platform.startswith('linux') and not cmd.endswith('bsdcpio'):
        cmdlist += ['--no-absolute-filenames', '--force-local',
                    '--nonmatching', r'"*\.\.*"']
    if verbosity > 1:
        cmdlist.append('-v')
    cmdlist += ['<', util.shell_quote(os.path.abspath(archive))]
    return (cmdlist, {'cwd': outdir, 'shell': True})
Extract a CPIO archive.
def enumerate_spans(sentence: List[T],
                    offset: int = 0,
                    max_span_width: int = None,
                    min_span_width: int = 1,
                    filter_function: Callable[[List[T]], bool] = None) -> List[Tuple[int, int]]:
    """Return all (inclusive) token spans within ``sentence``.

    Spans can be bounded by ``min_span_width``/``max_span_width`` and
    filtered by a predicate over the token sub-list; ``offset`` is added
    to every start/end index (useful when the sentence is part of a
    larger document).
    """
    max_span_width = max_span_width or len(sentence)
    filter_function = filter_function or (lambda span: True)
    spans: List[Tuple[int, int]] = []
    for begin in range(len(sentence)):
        # end indices are inclusive, hence the -1 / +1 bookkeeping.
        earliest_end = min(begin + min_span_width - 1, len(sentence))
        latest_end = min(begin + max_span_width, len(sentence))
        for end in range(earliest_end, latest_end):
            if filter_function(sentence[begin:end + 1]):
                spans.append((offset + begin, offset + end))
    return spans
Given a sentence, return all token spans within the sentence. Spans are `inclusive`. Additionally, you can provide a maximum and minimum span width, which will be used to exclude spans outside of this range. Finally, you can provide a function mapping ``List[T] -> bool``, which will be applied to every span to decide whether that span should be included. This allows filtering by length, regex matches, pos tags or any Spacy ``Token`` attributes, for example. Parameters ---------- sentence : ``List[T]``, required. The sentence to generate spans for. The type is generic, as this function can be used with strings, or Spacy ``Tokens`` or other sequences. offset : ``int``, optional (default = 0) A numeric offset to add to all span start and end indices. This is helpful if the sentence is part of a larger structure, such as a document, which the indices need to respect. max_span_width : ``int``, optional (default = None) The maximum length of spans which should be included. Defaults to len(sentence). min_span_width : ``int``, optional (default = 1) The minimum length of spans which should be included. Defaults to 1. filter_function : ``Callable[[List[T]], bool]``, optional (default = None) A function mapping sequences of the passed type T to a boolean value. If ``True``, the span is included in the returned spans from the sentence, otherwise it is excluded.
def __decode_timehex(self, timehex):
    """Decode a six-byte packed timestamp into a datetime.

    The bytes hold (year-2000, month, day, hour, minute, second).
    """
    fields = unpack("6B", timehex)
    year = 2000 + fields[0]
    return datetime(year, *fields[1:])
timehex string of six bytes
def paths(self):
    """Sequence of closed paths, encoded by entity index.

    Returns
    -------
    paths: (n,) sequence of (*,) int referencing self.entities
    """
    paths = traversal.closed_paths(self.entities, self.vertices)
    return paths
Sequence of closed paths, encoded by entity index. Returns --------- paths: (n,) sequence of (*,) int referencing self.entities
def fork(executable, args=(), env=None, path=None, timeout=3600):
    """Spawn a child process and return a Deferred with a timeout.

    :param executable: path of the executable to run
    :param args: tuple of arguments (argv[0] is prepended automatically)
    :param env: environment dictionary (defaults to empty)
    :param path: working directory for the child
    :param timeout: kill the child if this many seconds elapse
    :returns: twisted Deferred firing with the process result
    """
    # Fix: a mutable default argument ({}) is shared across calls;
    # use None and create a fresh dict per invocation.
    if env is None:
        env = {}
    d = defer.Deferred()
    p = ProcessProtocol(d, timeout)
    reactor.spawnProcess(p, executable, (executable,) + tuple(args), env, path)
    return d
fork Provides a deferred wrapper function with a timeout function :param executable: Executable :type executable: str. :param args: Tuple of arguments :type args: tuple. :param env: Environment dictionary :type env: dict. :param timeout: Kill the child process if timeout is exceeded :type timeout: int.
def add_to_linestring(position_data, kml_linestring):
    """Append one [lon, lat, alt] coordinate to the KML linestring.

    Applies the global altitude offset ``args.aoff`` (a command-line
    option) to the altitude component before adding the point.
    """
    # Fix: removed the unused 'global kml' declaration — this function
    # never reads or assigns the module-level 'kml' name.
    position_data[2] += float(args.aoff)
    kml_linestring.coords.addcoordinates([position_data])
add a point to the kml file
def reset(self):
    """Reset the Quantum Abstract Machine to its initial state.

    Useful when the QAM has gotten into an unwanted state, e.g. after
    being interrupted in the middle of a run.
    """
    self._variables_shim = {}
    self._executable = None
    self._bitstrings = None
    self.status = 'connected'
Reset the Quantum Abstract Machine to its initial state, which is particularly useful when it has gotten into an unwanted state. This can happen, for example, if the QAM is interrupted in the middle of a run.
def human_repr(self):
    """Return a decoded, human-readable string form of the URL.

    Rebuilds the URL from its components with ``encode=False`` so the
    netloc is emitted without percent-encoding.
    """
    return urlunsplit(
        SplitResult(
            self.scheme,
            self._make_netloc(
                self.user, self.password, self.host, self._val.port,
                encode=False
            ),
            self.path,
            self.query_string,
            self.fragment,
        )
    )
Return decoded human readable string for URL representation.
def CheckHeader(context, header, include_quotes = '<>', language = None):
    """A configure-time test for a C or C++ header file.

    :param context: SCons configure context
    :param header: header name (or list with prerequisite headers)
    :param include_quotes: two-character include delimiters, '<>' or '""'
    :param language: 'C' or 'C++' (None = autodetect upstream)
    :returns: True if the header is usable (Conftest returns an error
        flag, hence the negation)
    """
    prog_prefix, hdr_to_check = \
        createIncludesFromHeaders(header, 1, include_quotes)
    res = SCons.Conftest.CheckHeader(context, hdr_to_check, prog_prefix,
                                     language = language,
                                     include_quotes = include_quotes)
    context.did_show_result = 1
    return not res
A test for a C or C++ header file.
def on_btn_add_fit(self, event):
    """Add a new interpretation (fit) to the current specimen.

    Parameters
    ----------
    event : the wx.ButtonEvent that triggered this handler

    Alters
    ------
    pmag_results_data
    """
    # The auto-save checkbox decides whether the fit starts saved.
    self.current_fit = self.add_fit(
        self.s, None, None, None, saved=bool(self.auto_save.GetValue()))
    self.generate_warning_text()
    self.update_warning_box()
    if self.ie_open:
        self.ie.update_editor()
    self.update_fit_boxes(True)
    self.get_new_PCA_parameters(event)
add a new interpretation to the current specimen Parameters ---------- event : the wx.ButtonEvent that triggered this function Alters ------ pmag_results_data
def mark_module_skipped(self, module_name):
    """Skip reloading the named module in the future.

    Removes it from the active module table (if present) and records it
    in the skip set.
    """
    self.modules.pop(module_name, None)
    self.skip_modules[module_name] = True
Skip reloading the named module in the future
def DeleteInstanceTags(r, instance, tags, dry_run=False):
    """Delete tags from an instance.

    @type instance: str
    @param instance: instance to delete tags from
    @type tags: list of str
    @param tags: tags to delete
    @type dry_run: bool
    @param dry_run: whether to perform a dry run
    """
    query = {"tag": tags, "dry-run": dry_run}
    return r.request("delete", "/2/instances/%s/tags" % instance, query=query)
Deletes tags from an instance. @type instance: str @param instance: instance to delete tags from @type tags: list of str @param tags: tags to delete @type dry_run: bool @param dry_run: whether to perform a dry run
def price_oscillator(data, short_period, long_period):
    """Price Oscillator.

    Formula: ((short EMA - long EMA) / long EMA) * 100
    """
    catch_errors.check_for_period_error(data, short_period)
    catch_errors.check_for_period_error(data, long_period)
    short_ema = ema(data, short_period)
    long_ema = ema(data, long_period)
    return ((short_ema - long_ema) / long_ema) * 100
Price Oscillator. Formula: (short EMA - long EMA / long EMA) * 100
def get_remote_port(self, tlv_data):
    """Return the remote port parsed from the TLV, or None on failure."""
    ok, parsed = self._check_common_tlv_format(
        tlv_data, "\n", "Port Description TLV")
    if not ok:
        return None
    return parsed[1].strip()
Returns Remote Port from the TLV.
def symlink_exists(self, symlink):
    """Check whether a symbolic link exists in the guest.

    :param symlink: guest-style path of the alleged symbolic link
        (Python 2 ``basestring`` — this module still supports py2)
    :returns: True only if the path exists and is a symbolic link
    :raises TypeError: when symlink is not a string
    """
    if not isinstance(symlink, basestring):
        raise TypeError("symlink can only be an instance of type basestring")
    # Delegates the actual check to the VirtualBox RPC layer.
    exists = self._call("symlinkExists", in_p=[symlink])
    return exists
Checks whether a symbolic link exists in the guest. in symlink of type str Path to the alleged symbolic link. Guest path style. return exists of type bool Returns @c true if the symbolic link exists. Returns @c false if it does not exist, if the file system object identified by the path is not a symbolic link, or if the object type is inaccessible to the user, or if the @a symlink argument is empty. raises :class:`OleErrorNotimpl` The method is not implemented yet.
def end_subsegment(self, end_time=None):
    """End the current active subsegment.

    :param int end_time: epoch in seconds; defaults to the current time
        inside close() when not specified
    :returns: False when there is no subsegment to end, True otherwise
    """
    entity = self.get_trace_entity()
    if not self._is_subsegment(entity):
        log.warning("No subsegment to end.")
        return False
    entity.close(end_time)
    self._local.entities.pop()
    return True
End the current active segment. Return False if there is no subsegment to end. :param int end_time: epoch in seconds. If not specified the current system time will be used.
def respond(self):
    """Process the current request through the WSGI application.

    Streams each non-empty chunk the app yields to the client,
    guaranteeing (per PEP 333) that headers are only transmitted once a
    non-empty chunk is produced or write() is called, and that the
    response iterable's close() hook runs afterwards.
    """
    response = self.req.server.wsgi_app(self.env, self.start_response)
    try:
        # filter(None, ...) drops empty chunks so headers are not sent
        # until there is actual body data.
        for chunk in filter(None, response):
            if not isinstance(chunk, six.binary_type):
                raise ValueError('WSGI Applications must yield bytes')
            self.write(chunk)
    finally:
        # Headers must go out even when the app yielded nothing or the
        # loop raised; close() releases app-held resources.
        self.req.ensure_headers_sent()
        if hasattr(response, 'close'):
            response.close()
Process the current request. From :pep:`333`: The start_response callable must not actually transmit the response headers. Instead, it must store them for the server or gateway to transmit only after the first iteration of the application return value that yields a NON-EMPTY string, or upon the application's first invocation of the write() callable.
def find_matching_bracket_position(self, start_pos=None, end_pos=None):
    """Return the relative cursor position of the matching bracket.

    Works for (), [], {} and <>. When ``start_pos``/``end_pos`` are
    given, the search does not look past those positions. Returns 0
    when the current character is not a bracket or no match is found.
    """
    for opener, closer in ('()', '[]', '{}', '<>'):
        if self.current_char == opener:
            return self.find_enclosing_bracket_right(
                opener, closer, end_pos=end_pos) or 0
        if self.current_char == closer:
            return self.find_enclosing_bracket_left(
                opener, closer, start_pos=start_pos) or 0
    return 0
Return relative cursor position of matching [, (, { or < bracket. When `start_pos` or `end_pos` are given. Don't look past the positions.
def _lengstr(obj):
    """Return the object's length as a display string.

    Empty when no length is available; a '!' suffix flags a length
    larger than the plain len() of the object.
    """
    n = leng(obj)
    if n is None:
        return ''
    template = ' leng %d!' if n > _len(obj) else ' leng %d'
    return template % n
Object length as a string.
def modify_parameters(self, modifier_function):
    """Apply legislation-parameter modifications for this reform.

    Call this from `apply()` when the reform changes legislation
    parameters.

    :param modifier_function: function taking a ParameterNode (a deep
        copy of the baseline parameters) and returning a ParameterNode
    :raises ValueError: when modifier_function returns the wrong type
    """
    baseline_parameters = self.baseline.parameters
    baseline_parameters_copy = copy.deepcopy(baseline_parameters)
    reform_parameters = modifier_function(baseline_parameters_copy)
    if not isinstance(reform_parameters, ParameterNode):
        # Bug fix: the ValueError was previously *returned* instead of
        # raised, so a misbehaving modifier silently left
        # self.parameters unset.
        raise ValueError(
            'modifier_function {} in module {} must return a ParameterNode'
            .format(modifier_function.__name__, modifier_function.__module__,)
        )
    self.parameters = reform_parameters
    self._parameters_at_instant_cache = {}
Make modifications on the parameters of the legislation Call this function in `apply()` if the reform asks for legislation parameter modifications. :param modifier_function: A function that takes an object of type :any:`ParameterNode` and should return an object of the same type.
def update_task_redundancy(config, task_id, redundancy):
    """Update task redundancy for a project.

    When no task_id is given the update applies to ALL tasks, so the
    user is asked to confirm first.
    """
    if task_id is None:
        msg = ("Are you sure you want to update all the tasks redundancy?")
        if not click.confirm(msg):
            click.echo("Aborting.")
            return
    res = _update_tasks_redundancy(config, task_id, redundancy)
    click.echo(res)
Update task redudancy for a project.
def update_orders(self, market_id, instructions, customer_ref=None):
    """Update non-exposure-changing fields of existing orders.

    :param str market_id: market the orders were placed on
    :param list instructions: list of UpdateInstruction objects
    :param str customer_ref: optional order identifier string
    :returns: models.UpdateExecutionReport
    """
    # NOTE: utils.get_kwargs(locals()) serializes this function's
    # parameters — do not introduce extra local variables here.
    return self.make_api_request(
        'Sports', 'updateOrders', utils.get_kwargs(locals()),
        model=models.UpdateExecutionReport,
    )
Update non-exposure changing fields. :param str market_id: The market id these orders are to be placed on :param list instructions: List of `UpdateInstruction` objects :param str customer_ref: Optional order identifier string
def invalidate(self, key):
    """Remove a data item and, recursively, everything depending on it.

    Walks the component graph: any component that depends on ``key``
    has all of its provided items invalidated in turn.
    """
    if key not in self.data:
        return
    del self.data[key]
    for component in self.components:
        if key not in self.depends[component]:
            continue
        for downstream in self.provides[component]:
            self.invalidate(downstream)
Remove the given data item along with all items that depend on it in the graph.
def get_job_asset_url(self, job_id, filename):
    """Return the REST URL for a static asset of a specific job."""
    template = 'https://saucelabs.com/rest/v1/{}/jobs/{}/assets/{}'
    return template.format(self.client.sauce_username, job_id, filename)
Get details about the static assets collected for a specific job.
def _wrap_stream_errors(callable_):
    """Wrap errors for Unary-Stream and Stream-Stream gRPC callables.

    Iterator-returning callables need the re-mapping applied both to
    the initial invocation and to iteration over the returned value,
    hence the _StreamingResponseIterator wrapper.
    """
    _patch_callable_name(callable_)

    @general_helpers.wraps(callable_)
    def error_remapped_callable(*args, **kwargs):
        try:
            result = callable_(*args, **kwargs)
            # The iterator wrapper re-maps RpcErrors raised while
            # consuming the stream, not just at call time.
            return _StreamingResponseIterator(result)
        except grpc.RpcError as exc:
            # Preserve the original grpc error as the cause.
            six.raise_from(exceptions.from_grpc_error(exc), exc)

    return error_remapped_callable
Wrap errors for Unary-Stream and Stream-Stream gRPC callables. The callables that return iterators require a bit more logic to re-map errors when iterating. This wraps both the initial invocation and the iterator of the return value to re-map errors.
def from_json(cls, json_obj):
    """Build a metric response from JSON (a string or parsed dict).

    :param json_obj: JSON data representing a Cube Metric
    :raises InvalidMetricError: when the time field is missing
    """
    if isinstance(json_obj, str):
        json_obj = json.loads(json_obj)
    if cls.TIME_FIELD_NAME not in json_obj:
        raise InvalidMetricError("{field} must be present!".format(
            field=cls.TIME_FIELD_NAME))
    time = json_obj[cls.TIME_FIELD_NAME]
    # A missing value field is tolerated and maps to None.
    value = json_obj.get(cls.VALUE_FIELD_NAME)
    return cls(time, value)
Build a MetricResponse from JSON. :param json_obj: JSON data representing a Cube Metric. :type json_obj: `String` or `json` :throws: `InvalidMetricError` when any of {type,time,data} fields are not present in json_obj.
def make_image(location, size, fmt):
    """Create a blank VM image file of ``size`` megabytes via qemu-img.

    CLI Example:

    .. code-block:: bash

        salt '*' qemu_img.make_image /tmp/image.qcow 2048 qcow2
        salt '*' qemu_img.make_image /tmp/image.raw 10240 raw

    :returns: the image path on success, '' on any failure
    """
    # Refuse relative paths and missing parent directories up front.
    if not os.path.isabs(location):
        return ''
    if not os.path.isdir(os.path.dirname(location)):
        return ''
    # cmd.retcode returns 0 on success, so 'not retcode' means the
    # qemu-img invocation succeeded.
    if not __salt__['cmd.retcode'](
            'qemu-img create -f {0} {1} {2}M'.format(
                fmt, location, size),
            python_shell=False):
        return location
    return ''
Create a blank virtual machine image file of the specified size in megabytes. The image can be created in any format supported by qemu CLI Example: .. code-block:: bash salt '*' qemu_img.make_image /tmp/image.qcow 2048 qcow2 salt '*' qemu_img.make_image /tmp/image.raw 10240 raw
def blur(dset, fwhm, prefix=None):
    """Blur ``dset`` to the given ``fwhm`` (runs 3dmerge).

    :param dset: input dataset filename
    :param fwhm: target smoothness in mm (full width at half maximum)
    :param prefix: output filename; defaults to ``dset`` suffixed with
        ``_blur%.1fmm``
    """
    # Fix: compare with ``is None`` rather than ``== None`` (PEP 8;
    # ``==`` can be hijacked by a custom __eq__).
    if prefix is None:
        prefix = nl.suffix(dset, '_blur%.1fmm' % fwhm)
    return available_method('blur')(dset, fwhm, prefix)
blurs ``dset`` with given ``fwhm`` runs 3dmerge to blur dataset to given ``fwhm`` default ``prefix`` is to suffix ``dset`` with ``_blur%.1fmm``
def artifact2destination(self, artifact, descriptor):
    """Translate a SAML artifact into a receiver (resolution service) location.

    :param artifact: The Base64 encoded SAML artifact
    :param descriptor: role name used to select the "<descriptor>_descriptor"
        entries of the entity's metadata
    :return: the matching artifact resolution service URL, or None if no
        endpoint with the encoded index is found
    """
    _art = base64.b64decode(artifact)
    # The first two octets of the decoded artifact carry the type code.
    assert _art[:2] == ARTIFACT_TYPECODE
    try:
        # Octets 2-3 hold the endpoint index; int() on bytes only works
        # when they are ASCII digits.
        endpoint_index = str(int(_art[2:4]))
    except ValueError:
        # Non-digit bytes raise ValueError; interpret them as hex instead.
        endpoint_index = str(int(hexlify(_art[2:4])))
    # Octets 4-23 are the 20-byte SourceID identifying the issuing entity.
    entity = self.sourceid[_art[4:24]]
    destination = None
    for desc in entity["%s_descriptor" % descriptor]:
        for srv in desc["artifact_resolution_service"]:
            if srv["index"] == endpoint_index:
                destination = srv["location"]
                break
    return destination
Translate an artifact into a receiver location :param artifact: The Base64 encoded SAML artifact :return:
def extract_geometry(dataset):
    """Extract the outer surface of a dataset as PolyData.

    Produces the boundary faces (all 0D, 1D and 2D cells) of a volume or
    structured grid via ``vtkGeometryFilter``.
    """
    geom_filter = vtk.vtkGeometryFilter()
    geom_filter.SetInputDataObject(dataset)
    geom_filter.Update()
    return _get_output(geom_filter)
Extract the outer surface of a volume or structured grid dataset as PolyData. This will extract all 0D, 1D, and 2D cells producing the boundary faces of the dataset.
def lpop(self, key, *, encoding=_NOTSET):
    """Remove and return the first element of the list stored at ``key``.

    :param key: list key
    :param encoding: optional reply decoding; the module-level ``_NOTSET``
        sentinel means "use the connection default"
    """
    return self.execute(b'LPOP', key, encoding=encoding)
Removes and returns the first element of the list stored at key.
def copy_current_websocket_context(func: Callable) -> Callable:
    """Share the current websocket context with the decorated function.

    The websocket context is local per task; this captures a copy at
    decoration time so the wrapped coroutine can run in another task with
    the context available, e.g.

    .. code-block:: python

        @copy_current_websocket_context
        async def within_context() -> None:
            method = websocket.method
            ...
    """
    if not has_websocket_context():
        raise RuntimeError('Attempt to copy websocket context outside of a websocket context')
    ctx = _websocket_ctx_stack.top.copy()

    @wraps(func)
    async def wrapper(*args: Any, **kwargs: Any) -> Any:
        async with ctx:
            return await func(*args, **kwargs)

    return wrapper
Share the current websocket context with the function decorated. The websocket context is local per task and hence will not be available in any other task. This decorator can be used to make the context available, .. code-block:: python @copy_current_websocket_context async def within_context() -> None: method = websocket.method ...
def _memory_usage(func, gallery_conf): if gallery_conf['show_memory']: from memory_profiler import memory_usage assert callable(func) mem, out = memory_usage(func, max_usage=True, retval=True, multiprocess=True) mem = mem[0] else: out = func() mem = 0 return out, mem
Get memory usage of a function call.
def end_compress(codec, stream):
    """End compressing the current image (wraps opj_end_compress).

    Parameters
    ----------
    codec : CODEC_TYPE
        Compressor handle.
    stream : STREAM_TYPE_P
        Output stream buffer.

    Raises
    ------
    RuntimeError
        If the OpenJPEG library routine opj_end_compress fails
        (via the ``check_error`` restype handler).
    """
    fn = OPENJP2.opj_end_compress
    fn.argtypes = [CODEC_TYPE, STREAM_TYPE_P]
    fn.restype = check_error
    fn(codec, stream)
End of compressing the current image. Wraps the openjp2 library function opj_end_compress. Parameters ---------- codec : CODEC_TYPE Compressor handle. stream : STREAM_TYPE_P Output stream buffer. Raises ------ RuntimeError If the OpenJPEG library routine opj_end_compress fails.
def read_moc_fits(moc, filename, include_meta=False, **kwargs):
    """Read data from a FITS file into a MOC.

    Any additional keyword arguments are passed to the
    astropy.io.fits.open method.

    :param moc: MOC object to populate
    :param filename: path of the FITS file to read
    :param include_meta: whether to also read metadata from the HDU header
    """
    # Fix: close the HDU list when done -- the original left the file
    # handle open.  HDUList supports the context-manager protocol.
    with fits.open(filename, mode='readonly', **kwargs) as hl:
        read_moc_fits_hdu(moc, hl[1], include_meta)
Read data from a FITS file into a MOC. Any additional keyword arguments are passed to the astropy.io.fits.open method.
def _configobj_factory(self, infile,
                       raise_errors=True,
                       list_values=True,
                       file_error=True,
                       interpolation=False,
                       configspec=None,
                       stringify=True,
                       _inspec=False):
    """Factory method: build and return a ``configobj.ConfigObj``.

    Also used to create configspec instances, since a configspec is
    itself a ConfigObj.
    """
    options = dict(raise_errors=raise_errors,
                   list_values=list_values,
                   file_error=file_error,
                   interpolation=interpolation,
                   configspec=configspec,
                   stringify=stringify,
                   _inspec=_inspec)
    return configobj.ConfigObj(infile=infile, **options)
Factory Method. Create Configobj instance and register it to self.config. This method also is used to create configspec instance. Because configspec instance also is ConfigObj instance.
def knt2mlt(t):
    """Count multiplicities of elements in a sorted list or rank-1 array.

    Minimal emulation of MATLAB's ``knt2mlt``:
    ``out[k] = #{ t[i] == t[k] for i < k }``.

    Example: ``t = [1, 1, 2, 3, 3, 3]`` gives ``out = [0, 1, 0, 0, 1, 2]``.

    Caveat: the input must already be sorted (this is not checked).
    """
    t = np.atleast_1d(t)
    if t.ndim > 1:
        raise ValueError("t must be a list or a rank-1 array")
    counts = []
    previous = None
    run = 0
    for value in t:
        if value != previous:
            previous = value
            run = 0
        else:
            run += 1
        counts.append(run)
    return np.array(counts)
Count multiplicities of elements in a sorted list or rank-1 array. Minimal emulation of MATLAB's ``knt2mlt``. Parameters: t: Python list or rank-1 array. Must be sorted! Returns: out rank-1 array such that out[k] = #{ t[i] == t[k] for i < k } Example: If ``t = [1, 1, 2, 3, 3, 3]``, then ``out = [0, 1, 0, 0, 1, 2]``. Caveat: Requires input to be already sorted (this is not checked).
def list_themes(dark=True):
    """List all installed theme files for the requested variant."""
    variant = "dark" if dark else "light"
    theme_dir = os.path.join(MODULE_DIR, "colorschemes", variant)
    return [entry for entry in os.scandir(theme_dir)
            if os.path.isfile(entry.path)]
List all installed theme files.
def _match_real(filename, include, exclude, follow, symlinks):
    """Match real filename against includes and excludes.

    A file matches when some ``include`` pattern matches it and no
    ``exclude`` pattern does.
    """
    sep = '\\' if util.platform() == "windows" else '/'
    if isinstance(filename, bytes):
        sep = os.fsencode(sep)

    # Normalize directories to end with a separator so directory-style
    # patterns can match them.
    if not filename.endswith(sep) and os.path.isdir(filename):
        filename += sep

    matched = any(
        _fs_match(pattern, filename, sep, follow, symlinks)
        for pattern in include
    )

    # Fix: dropped the original's redundant ``matched = True`` reassignment
    # inside ``if matched:``.  An exclude hit vetoes the include match.
    if matched and exclude:
        for pattern in exclude:
            if _fs_match(pattern, filename, sep, follow, symlinks):
                matched = False
                break

    return matched
Match real filename includes and excludes.
def get_converter(self, parameter):
    """Generate (and cache) a unit conversion function for ``parameter``.

    The returned callable multiplies its argument by the parameter's
    ``Scale`` value (1 when no scale is defined).
    """
    if parameter not in self._converters:
        param = self.get_parameter(parameter)
        try:
            scale = float(param['Scale'])
        except KeyError:
            # No scale defined: identity conversion.
            scale = 1

        def convert(value):
            return value * scale

        # Fix: the original never stored the converter, so a repeat call
        # (cache hit) raised NameError because ``convert`` was unbound.
        self._converters[parameter] = convert
    return self._converters[parameter]
Generate unit conversion function for given parameter
def shuffle_step(entries, step):
    """Shuffle ``entries`` within consecutive chunks of ``step`` items.

    Each chunk of ``step`` elements is shuffled independently, so an item
    never leaves its original chunk.
    """
    result = []
    for start in range(0, len(entries), step):
        chunk = entries[start:start + step]
        shuffle(chunk)
        result.extend(chunk)
    return result
Shuffle ``entries`` within consecutive chunks of ``step`` items, preserving chunk boundaries.
def getIndices(self):
    """Yield an Index object for every existing index.

    @returns A generator over the Index objects: node (vertex) indexes
    first, then relationship (edge) indexes.
    """
    node_indexes = self.neograph.nodes.indexes
    for name in node_indexes.keys():
        yield Index(name, "vertex", "manual", node_indexes.get(name))
    rel_indexes = self.neograph.relationships.indexes
    for name in rel_indexes.keys():
        yield Index(name, "edge", "manual", rel_indexes.get(name))
Returns a generator over all the existing indexes @returns A generator yielding the Index objects
def delete_object_in_seconds(self, obj, seconds, extra_info=None):
    """Schedule ``obj`` in this container for deletion after ``seconds``.

    Delegates to the manager.  ``extra_info`` is accepted only for
    backwards compatibility; it is ignored and never forwarded.
    """
    return self.manager.delete_object_in_seconds(self, obj, seconds)
Sets the object in this container to be deleted after the specified number of seconds. The 'extra_info' parameter is included for backwards compatibility. It is no longer used at all, and will not be modified with swiftclient info, since swiftclient is not used any more.
def CallDhclient(interfaces, logger, dhclient_script=None):
    """Configure the network interfaces using dhclient.

    Args:
        interfaces: list of string, the output device names to enable.
        logger: logger object, used to write to SysLog and serial port.
        dhclient_script: string, the path to a dhclient script used by dhclient.
    """
    logger.info('Enabling the Ethernet interfaces %s.', interfaces)
    command = ['dhclient']
    if dhclient_script and os.path.exists(dhclient_script):
        command.extend(['-sf', dhclient_script])
    try:
        # Release any stale leases first (-x), then request new ones.
        subprocess.check_call(command + ['-x'] + interfaces)
        subprocess.check_call(command + interfaces)
    except subprocess.CalledProcessError:
        logger.warning('Could not enable interfaces %s.', interfaces)
Configure the network interfaces using dhclient. Args: interfaces: list of string, the output device names to enable. logger: logger object, used to write to SysLog and serial port. dhclient_script: string, the path to a dhclient script used by dhclient.
def scale_down(self, workers, pods=None):
    """Remove the pods backing the requested list of workers.

    Workers are matched to pods by IP address.  When ``pods`` is not
    supplied, terminated ('Succeeded') pods are cleaned up first and the
    live pod list is fetched.

    Parameters
    ----------
    workers: List[str]
        List of addresses of workers to close.
    """
    pods = pods or self._cleanup_terminated_pods(self.pods())
    worker_ips = {urlparse(addr).hostname for addr in workers}
    doomed = [pod for pod in pods if pod.status.pod_ip in worker_ips]
    if doomed:
        self._delete_pods(doomed)
Remove the pods for the requested list of workers When scale_down is called by the _adapt async loop, the workers are assumed to have been cleanly closed first and in-memory data has been migrated to the remaining workers. Note that when the worker process exits, Kubernetes leaves the pods in a 'Succeeded' state that we collect here. If some workers have not been closed, we just delete the pods with matching ip addresses. Parameters ---------- workers: List[str] List of addresses of workers to close
def compute_mask_offsets(shard_id2num_examples):
    """Return the list of offsets associated with each shard.

    Args:
        shard_id2num_examples: `list[int]`, mapping shard_id=>num_examples

    Returns:
        mask_offsets: `list[int]`, offset (mod 100) to skip for each shard

    Fix: removed the dead ``total_num_examples = sum(...)`` statement that
    was immediately overwritten with 0 in the original.
    """
    mask_offsets = []
    total_num_examples = 0
    for num_examples_in_shard in shard_id2num_examples:
        # Offset of this shard is the running example count mod 100.
        mask_offsets.append(total_num_examples % 100)
        total_num_examples += num_examples_in_shard
    return mask_offsets
Return the list of offsets associated with each shards. Args: shard_id2num_examples: `list[int]`, mapping shard_id=>num_examples Returns: mask_offsets: `list[int]`, offset to skip for each of the shard
def dijkstra(graph, start, end=None):
    """Dijkstra's algorithm for shortest paths.

    Find shortest paths from ``start`` to all nodes nearer than or equal
    to ``end``.  Only guaranteed correct for non-negative edge lengths,
    but raises GraphError if it detects that a negative edge made it
    finalize a vertex too early.

    :param graph: graph object exposing out_nbrs/edge_by_node/edge_data
    :param start: start node
    :param end: optional node at which to stop early
    :return: tuple ``(D, P)`` of final distances and predecessors
    """
    D = {}  # final distances
    P = {}  # predecessors
    Q = _priorityDictionary()  # estimated distances of non-final vertices
    Q[start] = 0
    # Iterating Q yields vertices in order of increasing estimated distance.
    for v in Q:
        D[v] = Q[v]
        if v == end:
            break
        for w in graph.out_nbrs(v):
            edge_id = graph.edge_by_node(v,w)
            vwLength = D[v] + graph.edge_data(edge_id)
            if w in D:
                # w already final: a shorter path now means a negative edge
                # corrupted an earlier decision.
                if vwLength < D[w]:
                    raise GraphError("Dijkstra: found better path to already-final vertex")
            elif w not in Q or vwLength < Q[w]:
                Q[w] = vwLength
                P[w] = v
    return (D,P)
Dijkstra's algorithm for shortest paths `David Eppstein, UC Irvine, 4 April 2002 <http://www.ics.uci.edu/~eppstein/161/python/>`_ `Python Cookbook Recipe <http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/119466>`_ Find shortest paths from the start node to all nodes nearer than or equal to the end node. Dijkstra's algorithm is only guaranteed to work correctly when all edge lengths are positive. This code does not verify this property for all edges (only the edges examined until the end vertex is reached), but will correctly compute shortest paths even for some graphs with negative edges, and will raise an exception if it discovers that a negative edge has caused it to make a mistake. *Adapted to altgraph by Istvan Albert, Pennsylvania State University - June, 9 2004*
def user(session, uid, ladder_ids=None):
    """Get all possible user info by user id.

    :param session: authenticated session
    :param uid: user id to look up
    :param ladder_ids: optional ladder ids (or ladder names) whose
        rankings should be included under ``resp['ladders']``
    """
    resp = dict(get_user(session, uid))
    if not ladder_ids:
        return resp
    resp['ladders'] = {}
    for ladder in ladder_ids:
        ladder_id = lookup_ladder_id(ladder) if isinstance(ladder, str) else ladder
        try:
            ladder_data = dict(get_ladder(session, ladder_id, user_id=uid))
            resp['ladders'][ladder_id] = ladder_data
        except VooblyError:
            # User not ranked on this ladder: skip silently.
            pass
    return resp
Get all possible user info by user id, optionally including rankings for the given ladders.
def get_range(self, i):
    """Check and retrieve range if value is a valid range.

    Here we are looking to see if the value is a series or range:
    `{1..2[..inc]}` or `{a..z[..inc]}` (negative numbers are fine).
    Returns the expanded range, or None when ``i`` is not a valid range.
    """
    try:
        # NOTE(review): ``i.match(pattern)`` implies ``i`` is a wrapper
        # object exposing a ``match`` method that takes the pattern --
        # confirm against the callers.
        m = i.match(RE_INT_ITER)
        if m:
            return self.get_int_range(*m.groups())
        m = i.match(RE_CHR_ITER)
        if m:
            return self.get_char_range(*m.groups())
    except Exception:
        # Best effort: any failure while parsing means "not a range".
        pass
    return None
Check and retrieve range if value is a valid range. Here we are looking to see if the value is series or range. We look for `{1..2[..inc]}` or `{a..z[..inc]}` (negative numbers are fine).
def get_image_command(image):
    """Get the default command for an image.

    Raises DockerError when the inspected JSON lacks the expected keys.
    """
    metadata = docker_inspect_or_pull(image)
    try:
        return metadata['Config']['Cmd']
    except KeyError as ke:
        raise DockerError('Failed to inspect image: JSON result missing key {}'.format(ke))
Gets the default command for an image
def __getTemplate(template_file_name):
    """Get the template used to save the ranking.

    :param template_file_name: path to the template.
    :type template_file_name: str.
    :return: parsed template for the file.
    :rtype: pystache's template.
    """
    with open(template_file_name) as handle:
        raw_text = handle.read()
    return parse(raw_text)
Get template to save the ranking. :param template_file_name: path to the template. :type template_file_name: str. :return: template for the file. :rtype: pystache's template.
def getContainerName(job):
    """Create a random container name that embeds the job name.

    Nine random bytes are base64-encoded with the URL-safe ``-_``
    alphabet, then the characters ' " _ are stripped from the result.
    """
    suffix = base64.b64encode(os.urandom(9), b'-_').decode('utf-8')
    name = '--'.join([str(job), suffix])
    for ch in ("'", '"', '_'):
        name = name.replace(ch, '')
    return name
Create a random string including the job name, and return it.
def _right(self): left, _, width, _ = self._extents return left + width
Index of column following the last column in range
def put_settings(self, using=None, **kwargs):
    """Change specific index-level settings in real time.

    Any additional keyword arguments are passed unchanged to
    ``Elasticsearch.indices.put_settings``.
    """
    connection = self._get_connection(using)
    return connection.indices.put_settings(index=self._name, **kwargs)
Change specific index level settings in real time. Any additional keyword arguments will be passed to ``Elasticsearch.indices.put_settings`` unchanged.
def add_instance(self, name, properties):
    """Store the description of a component instance.

    The given properties are stored as is.

    :param name: Instance name
    :param properties: Instance properties
    :raise NameError: Already known instance name
    """
    registry = self.__instances
    if name in registry:
        raise NameError(name)
    registry[name] = properties
Stores the description of a component instance. The given properties are stored as is. :param name: Instance name :param properties: Instance properties :raise NameError: Already known instance name
def bytes_to_int(b, order='big'):
    """Convert a byte sequence to an integer.

    :param b: bytes or bytearray to convert
    :param order: byte order, 'big' (default) or 'little'
    :return: the integer value
    """
    # Fast path: int.from_bytes exists on Python 3.  Feature-detecting it
    # (instead of consulting six.PY2 first) removes the runtime dependency
    # on six for modern interpreters; behavior is unchanged.
    if hasattr(int, 'from_bytes'):
        return int.from_bytes(b, order)
    # Python 2 fallback: accumulate bytes least-significant first.
    _b = b.__class__()
    if order != 'little':
        b = reversed(b)
    if not isinstance(_b, bytearray):
        b = six.iterbytes(b)
    return sum(c << (i * 8) for i, c in enumerate(b))
convert bytes to integer
def open_fastq(in_file):
    """Open a fastq file, using gzip-safe opening if it is local
    and the objectstore for remote paths."""
    if objectstore.is_remote(in_file):
        return objectstore.open_file(in_file)
    return utils.open_gzipsafe(in_file)
open a fastq file, using gzip if it is gzipped
def as_string(self):
    """Return the command as a single string for the docker file."""
    # A raw string instruction is emitted verbatim.
    # Note: exact ``type(...) is str`` test preserved from the original
    # (str subclasses intentionally fall through).
    if type(self.instruction) is str:
        return self.instruction
    named_from = (self.action == "FROM"
                  and not isinstance(self.command, six.string_types))
    if named_from:
        extra = "" if self.extra is NotSpecified else " {0}".format(self.extra)
        return "{0} {1}{2}".format(self.action, self.command.from_name, extra)
    return "{0} {1}".format(self.action, self.command)
Return the command as a single string for the docker file
def update_meta_data_for_state_view(graphical_editor_view, state_v, affects_children=False, publish=True):
    """This method updates the meta data of a state view

    :param graphical_editor_view: Graphical Editor view the change occurred in
    :param state_v: The state view which has been changed/moved
    :param affects_children: Whether the children of the state view have been resized or not
    :param publish: Whether to publish the changes of the meta data
    """
    from gaphas.item import NW

    # Always refresh the port meta data of this state.
    update_meta_data_for_port(graphical_editor_view, state_v, None)

    if affects_children:
        # A resize affects the name view, all transitions and all child
        # states; recurse without publishing so only one change event is
        # emitted at the end.
        update_meta_data_for_name_view(graphical_editor_view, state_v.name_view, publish=False)
        for transition_v in state_v.get_transitions():
            update_meta_data_for_transition_waypoints(graphical_editor_view, transition_v, None, publish=False)
        for child_state_v in state_v.child_state_views():
            update_meta_data_for_state_view(graphical_editor_view, child_state_v, True, publish=False)

    # Position relative to the parent, anchored at the north-west handle.
    rel_pos = calc_rel_pos_to_parent(graphical_editor_view.editor.canvas, state_v, state_v.handles()[NW])

    state_v.model.set_meta_data_editor('size', (state_v.width, state_v.height))
    state_v.model.set_meta_data_editor('rel_pos', rel_pos)

    if publish:
        graphical_editor_view.emit('meta_data_changed', state_v.model, "size", affects_children)
This method updates the meta data of a state view :param graphical_editor_view: Graphical Editor view the change occurred in :param state_v: The state view which has been changed/moved :param affects_children: Whether the children of the state view have been resized or not :param publish: Whether to publish the changes of the meta data
def lotus_root_geometry():
    """Tomographic geometry for the lotus root dataset.

    Notes
    -----
    See the article `Tomographic X-ray data of a lotus root filled with
    attenuating objects`_ for further information.

    References
    ----------
    .. _Tomographic X-ray data of a lotus root filled with attenuating objects:
       https://arxiv.org/abs/1609.07299
    """
    # Angles: 366 views over slightly more than a full turn, offset 90 deg.
    angle_offset = np.pi / 2
    apart = uniform_partition(angle_offset,
                              angle_offset + 2 * np.pi * 366. / 360.,
                              366)
    # Detector: 2240 cells over a 120-unit span, slightly off-center.
    det_offset = 0.35
    dpart = uniform_partition(det_offset - 60, det_offset + 60, 2240)
    return FanBeamGeometry(apart, dpart, src_radius=540, det_radius=90)
Tomographic geometry for the lotus root dataset. Notes ----- See the article `Tomographic X-ray data of a lotus root filled with attenuating objects`_ for further information. See Also -------- lotus_root References ---------- .. _Tomographic X-ray data of a lotus root filled with attenuating objects: https://arxiv.org/abs/1609.07299