code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def getDebt(self):
    """Return the total outstanding debt for the account.

    Sums the principal, interest, fees and penalty balances.
    """
    balance_keys = ('principalBalance', 'interestBalance',
                    'feesBalance', 'penaltyBalance')
    return sum(float(self[key]) for key in balance_keys)
Sums up all the balances of the account and returns them.
def overlap(ival0, ival1):
    """Determine if two interval tuples have overlap.

    Args:
        ival0 ((int, int)): An interval tuple (min, max)
        ival1 ((int, int)): An interval tuple (min, max)

    Returns:
        (bool): True if the intervals overlap, otherwise False
    """
    lo0, hi0 = ival0
    lo1, hi1 = ival1
    # Intervals overlap iff the smaller upper bound exceeds the
    # larger lower bound.
    return min(hi0, hi1) > max(lo0, lo1)
Determine if two interval tuples have overlap. Args: ival0 ((int,int)): An interval tuple ival1 ((int,int)): An interval tuple Returns: (bool): True if the intervals overlap, otherwise False
async def publish(self, message):
    """Push data to a listener as a server-sent event.

    Marks this handler finished if the client stream has closed.
    """
    try:
        # SSE wire format: "data: <payload>" terminated by a blank line.
        self.write('data: {}\n\n'.format(message))
        await self.flush()
    except StreamClosedError:
        # Client disconnected; stop publishing to this listener.
        self.finished = True
Pushes data to a listener.
def removeClassBreak(self, label):
    """Remove the classification break with the given label from the
    renderer.

    Args:
        label: label of the class break to remove.

    Returns:
        bool: True if a break was found and removed, otherwise False.
    """
    # Bug fix: the original contained a no-op `del v` statement (deleting
    # the loop variable has no effect on the list); removed.
    for v in self._classBreakInfos:
        if v['label'] == label:
            self._classBreakInfos.remove(v)
            return True
    return False
Removes a classification break value from the renderer
def load_z_meso(self, z_meso_path):
    """Open the z_meso.txt file and load its heights into self.z_meso.

    Args:
        z_meso_path: directory containing the z_meso.txt file
            (self.Z_MESO_FILE_NAME).

    Raises:
        Exception: if the z_meso.txt file does not exist.
    """
    self.z_meso = []
    z_meso_file_path = os.path.join(z_meso_path, self.Z_MESO_FILE_NAME)
    if not os.path.exists(z_meso_file_path):
        # Bug fix: the original formatted the message with the undefined
        # name `uwg_param_file`, raising NameError instead of the
        # intended error.
        raise Exception(
            "z_meso.txt file: '{}' does not exist.".format(z_meso_file_path))
    # Context manager guarantees the file is closed even on parse errors.
    with open(z_meso_file_path, 'r') as f:
        for txtline in f:
            # Strip all whitespace, then parse the height as a float.
            self.z_meso.append(float("".join(txtline.split())))
Open the z_meso.txt file and return heights as list
def log_url(self, url_data):
    """Send a new url to all configured loggers.

    Each logger receives the url data plus a flag saying whether it
    should be printed.
    """
    # NOTE(review): presumably validates that loggers are still
    # active/usable — confirm against check_active_loggers().
    self.check_active_loggers()
    do_print = self.do_print(url_data)
    for log in self.loggers:
        log.log_filter_url(url_data, do_print)
Send new url to all configured loggers.
def phase_by(val: Any,
             phase_turns: float,
             qubit_index: int,
             default: TDefault = RaiseTypeErrorIfNotProvided):
    """Returns a phased version of the effect via `val`'s _phase_by_ method.

    For example, an X gate phased by 90 degrees would be a Y gate.

    Args:
        val: The value to phase.
        phase_turns: The amount to phase the gate, in fractions of a whole
            turn. Divide by 2pi to get radians.
        qubit_index: The index of the target qubit the phasing applies to.
        default: The default value to return if `val` can't be phased. If
            not specified, an error is raised when `val` can't be phased.

    Returns:
        The result of `val._phase_by_` if implemented and not
        NotImplemented; otherwise the provided default.

    Raises:
        TypeError: `val` doesn't have a _phase_by_ method (or that method
            returned NotImplemented) and no `default` was specified.
    """
    getter = getattr(val, '_phase_by_', None)
    result = NotImplemented if getter is None else getter(
        phase_turns, qubit_index)
    if result is not NotImplemented:
        return result
    if default is not RaiseTypeErrorIfNotProvided:
        return default
    # Distinguish "no method at all" from "method declined" in the error.
    if getter is None:
        raise TypeError("object of type '{}' "
                        "has no _phase_by_ method.".format(type(val)))
    raise TypeError("object of type '{}' does have a _phase_by_ method, "
                    "but it returned NotImplemented.".format(type(val)))
Returns a phased version of the effect. For example, an X gate phased by 90 degrees would be a Y gate. This works by calling `val`'s _phase_by_ method and returning the result. Args: val: The value to describe with a unitary matrix. phase_turns: The amount to phase the gate, in fractions of a whole turn. Divide by 2pi to get radians. qubit_index: The index of the target qubit the phasing applies to. For operations this is the index of the qubit within the operation's qubit list. For gates it's the index of the qubit within the tuple of qubits taken by the gate's `on` method. default: The default value to return if `val` can't be phased. If not specified, an error is raised when `val` can't be phased. Returns: If `val` has a _phase_by_ method and its result is not NotImplemented, that result is returned. Otherwise, the function will return the default value provided or raise a TypeError if none was provided. Raises: TypeError: `val` doesn't have a _phase_by_ method (or that method returned NotImplemented) and no `default` was specified.
def contains_is_html(cls, data):
    """Detect if the problem has at least one "xyzIsHTML" key.

    Recursively inspects nested mappings.

    Returns:
        bool: True if any (possibly nested) key ends with "IsHTML".
    """
    return any(
        (isinstance(key, str) and key.endswith("IsHTML"))
        or (isinstance(val, (OrderedDict, dict)) and cls.contains_is_html(val))
        for key, val in data.items()
    )
Detect if the problem has at least one "xyzIsHTML" key
def pause(self, queue_name, kw_in=None, kw_out=None, kw_all=None,
          kw_none=None, kw_state=None, kw_bcast=None):
    """Pause a queue.

    The PAUSE keywords are mostly reserved words in Python, hence the
    kw_* parameter names.

    :param queue_name: The job queue we are modifying.
    :param kw_in: pause the queue in input.
    :param kw_out: pause the queue in output.
    :param kw_all: pause the queue in input and output.
    :param kw_none: clear the paused state in input and output.
    :param kw_state: just report the current queue state.
    :param kw_bcast: broadcast the PAUSE to all reachable cluster nodes.
    """
    command = ["PAUSE", queue_name]
    # Map each truthy flag to its PAUSE subcommand, in protocol order.
    flag_words = ((kw_in, "in"), (kw_out, "out"), (kw_all, "all"),
                  (kw_none, "none"), (kw_state, "state"), (kw_bcast, "bcast"))
    command.extend(word for flag, word in flag_words if flag)
    return self.execute_command(*command)
Pause a queue. Unfortunately, the PAUSE keywords are mostly reserved words in Python, so I've been a little creative in the function variable names. Open to suggestions to change it (canardleteer) :param queue_name: The job queue we are modifying. :param kw_in: pause the queue in input. :param kw_out: pause the queue in output. :param kw_all: pause the queue in input and output (same as specifying both the in and out options). :param kw_none: clear the paused state in input and output. :param kw_state: just report the current queue state. :param kw_bcast: send a PAUSE command to all the reachable nodes of the cluster to set the same queue in the other nodes to the same state.
def bogoliubov_trans(p, q, theta):
    r"""The 2-mode Bogoliubov transformation mapped to two-qubit operations.

    We use the identity X S^\dag X S X = Y, X S^\dag Y S X = X to
    transform the Hamiltonian XY+YX to XX+YY type. The time evolution of
    the XX + YY Hamiltonian can be expressed as a power of the iSWAP gate.

    Bug fix: the original had a dangling bare `r` token (the remnant of a
    lost raw docstring), which is a syntax error; the raw docstring is
    restored here.

    Args:
        p: the first qubit
        q: the second qubit
        theta: the rotational angle of the Bogoliubov transformation.
    """
    # Rotation exponent for the iSWAP power gate.
    expo = -4 * theta / np.pi
    yield cirq.X(p)
    yield cirq.S(p)
    yield cirq.ISWAP(p, q)**expo
    yield cirq.S(p) ** 1.5
    yield cirq.X(p)
r"""The 2-mode Bogoliubov transformation is mapped to two-qubit operations. We use the identity X S^\dag X S X = Y X S^\dag Y S X = X to transform the Hamiltonian XY+YX to XX+YY type. The time evolution of the XX + YY Hamiltonian can be expressed as a power of the iSWAP gate. Args: p: the first qubit q: the second qubit theta: The rotational angle that specifies the Bogoliubov transformation, which is a function of the kinetic energy and the superconducting gap.
def decodeRPCErrorMsg(e):
    """Extract the human-readable message from a raised RPC exception.

    Falls back to str(e) when the message does not match the known
    assert/missing-posting-auth patterns.
    """
    pattern = (
        "(10 assert_exception: Assert Exception\n|"
        "3030000 tx_missing_posting_auth)"
        ".*: (.*)\n"
    )
    found = re.search(pattern, str(e), flags=re.M)
    if found is None:
        return str(e)
    return found.group(2).strip()
Helper function to decode the raised Exception and give it a python Exception class
def start(self):
    """Indicate that we are performing work in a separate process.

    Only starts the indicator when self.run is True.

    :returns: multiprocessing job object
    """
    if self.run is True:
        self.job = multiprocessing.Process(target=self.indicator)
        self.job.start()
        return self.job
Indicate that we are performing work in a thread. :returns: multiprocessing job object
def simulate(radius=5e-6, sphere_index=1.339, medium_index=1.333,
             wavelength=550e-9, grid_size=(80, 80), model="projection",
             pixel_size=None, center=None):
    """Simulate scattering at a sphere.

    Parameters
    ----------
    radius: float
        Radius of the sphere [m]
    sphere_index: float
        Refractive index of the object
    medium_index: float
        Refractive index of the surrounding medium
    wavelength: float
        Vacuum wavelength of the imaging light [m]
    grid_size: tuple of ints or int
        Resulting image size in x and y [px]
    model: str
        Sphere model to use (key into `model_dict`)
    pixel_size: float or None
        Pixel size [m]; if None, chosen such that the radius fits at
        least three to four times into the grid.
    center: tuple of floats or None
        Center position in image coordinates [px]; if None, the image
        center (grid_size - 1)/2 is used.

    Returns
    -------
    qpi: qpimage.QPImage
        Quantitative phase data set
    """
    if isinstance(grid_size, numbers.Integral):
        # Allow a scalar grid size as shorthand for a square grid.
        grid_size = (grid_size, grid_size)
    if pixel_size is None:
        # Heuristic: scale the pixel size with the radius-to-wavelength
        # ratio so the sphere occupies a reasonable fraction of the grid.
        rl = radius / wavelength
        if rl < 5:
            fact = 4
        elif rl >= 5 and rl <= 10:
            fact = 4 - (rl - 5) / 5
        else:
            fact = 3
        pixel_size = fact * radius / np.min(grid_size)
    if center is None:
        center = (np.array(grid_size) - 1) / 2
    model = model_dict[model]
    qpi = model(radius=radius, sphere_index=sphere_index,
                medium_index=medium_index, wavelength=wavelength,
                pixel_size=pixel_size, grid_size=grid_size, center=center)
    return qpi
Simulate scattering at a sphere Parameters ---------- radius: float Radius of the sphere [m] sphere_index: float Refractive index of the object medium_index: float Refractive index of the surrounding medium wavelength: float Vacuum wavelength of the imaging light [m] grid_size: tuple of ints or int Resulting image size in x and y [px] model: str Sphere model to use (see :const:`available`) pixel_size: float or None Pixel size [m]; if set to `None` the pixel size is chosen such that the radius fits at least three to four times into the grid. center: tuple of floats or None Center position in image coordinates [px]; if set to None, the center of the image (grid_size - 1)/2 is used. Returns ------- qpi: qpimage.QPImage Quantitative phase data set
def assert_estimator_equal(left, right, exclude=None, **kwargs):
    """Check that two fitted Estimators are equal.

    Compares all learned attributes (public names ending in "_"),
    delegating value comparison to `_assert_eq`.

    Parameters
    ----------
    left, right : Estimators
    exclude : str or sequence of str
        attributes to skip in the check
    kwargs : dict
        Passed through to the dask `assert_eq` method.
    """
    # sklearn convention: fitted attributes end with a trailing "_"
    # and are not private.
    left_attrs = [x for x in dir(left)
                  if x.endswith("_") and not x.startswith("_")]
    right_attrs = [x for x in dir(right)
                   if x.endswith("_") and not x.startswith("_")]
    # Normalize `exclude` to a set.
    if exclude is None:
        exclude = set()
    elif isinstance(exclude, str):
        exclude = {exclude}
    else:
        exclude = set(exclude)
    # Both estimators must expose the same fitted attributes.
    assert (set(left_attrs) - exclude) == set(right_attrs) - exclude
    for attr in set(left_attrs) - exclude:
        l = getattr(left, attr)
        r = getattr(right, attr)
        _assert_eq(l, r, **kwargs)
Check that two Estimators are equal Parameters ---------- left, right : Estimators exclude : str or sequence of str attributes to skip in the check kwargs : dict Passed through to the dask `assert_eq` method.
def state_cpfs(self) -> List[CPF]:
    """Returns the list of state-fluent CPFs, sorted by name."""
    _, cpfs = self.cpfs
    state_cpfs = []
    for cpf in cpfs:
        # A CPF belongs to the state if its next-state fluent name maps
        # back to a declared state fluent.
        name = utils.rename_next_state_fluent(cpf.name)
        if name in self.state_fluents:
            state_cpfs.append(cpf)
    state_cpfs = sorted(state_cpfs, key=lambda cpf: cpf.name)
    return state_cpfs
Returns list of state-fluent CPFs.
def get_rng(obj=None):
    """Get a good RNG seeded with time, pid and the object.

    Args:
        obj: some object to use to generate the random seed.

    Returns:
        np.random.RandomState: the RNG.
    """
    # Mix object identity, process id and current time; keep the seed in
    # the 32-bit range accepted by RandomState.
    seed = (id(obj) + os.getpid() +
            int(datetime.now().strftime("%Y%m%d%H%M%S%f"))) % 4294967295
    if _RNG_SEED is not None:
        # A globally fixed seed (for reproducibility) overrides everything.
        seed = _RNG_SEED
    return np.random.RandomState(seed)
Get a good RNG seeded with time, pid and the object. Args: obj: some object to use to generate random seed. Returns: np.random.RandomState: the RNG.
def get_cas_client(self, request, provider, renew=False):
    """Return a CAS client object matching `provider`.

    :param django.http.HttpRequest request: The current request object
    :param provider: the user identity provider
    :param bool renew: whether to require re-authentication
    :return: The user CAS client object (CASFederateValidateUser)
    """
    # Current URL without the ticket/provider GET parameters, used as
    # the CAS service url.
    service_url = utils.get_current_url(request, {"ticket", "provider"})
    self.service_url = service_url
    return CASFederateValidateUser(provider, service_url, renew=renew)
return a CAS client object matching provider :param django.http.HttpRequest request: The current request object :param cas_server.models.FederatedIendityProvider provider: the user identity provider :return: The user CAS client object :rtype: :class:`federate.CASFederateValidateUser <cas_server.federate.CASFederateValidateUser>`
def offset(self, offset):
    """Move all the intervals in the list by the given ``offset``.

    :param offset: the shift to be applied
    :type offset: :class:`~aeneas.exacttiming.TimeValue`
    :raises TypeError: if ``offset`` is not an instance of ``TimeValue``
    """
    self.log(u"Applying offset to all fragments...")
    self.log([u" Offset %.3f", offset])
    for fragment in self.fragments:
        # Clamp each interval to this list's [begin, end] range and do
        # not allow negative times.
        fragment.interval.offset(
            offset=offset,
            allow_negative=False,
            min_begin_value=self.begin,
            max_end_value=self.end
        )
    self.log(u"Applying offset to all fragments... done")
Move all the intervals in the list by the given ``offset``. :param offset: the shift to be applied :type offset: :class:`~aeneas.exacttiming.TimeValue` :raises TypeError: if ``offset`` is not an instance of ``TimeValue``
def query_source(self, source):
    """Query layers by source.

    Returns Layer objects, restricted to this repository, whose url
    matches `source`.
    """
    return self._get_repo_filter(Layer.objects).filter(url=source)
Query by source
def on_before_transform_template(self, template_dict):
    """Hook method that gets called before the SAM template is processed.

    The template has passed validation and is guaranteed to contain a
    non-empty "Resources" section. Replaces the implicit API resource
    with a fresh one, wires every function's API events into it, then
    propagates conditions and removes the implicit API if unused.

    :param dict template_dict: Dictionary of the SAM template
    :return: Nothing
    :raises InvalidDocumentException: if any function's API events are
        invalid
    """
    template = SamTemplate(template_dict)
    # Keep a copy of the pre-existing implicit API so its conditions can
    # be merged back later.
    self.existing_implicit_api_resource = copy.deepcopy(
        template.get(self.implicit_api_logical_id))
    template.set(self.implicit_api_logical_id, ImplicitApiResource().to_dict())
    errors = []
    for logicalId, function in template.iterate(SamResourceType.Function.value):
        api_events = self._get_api_events(function)
        condition = function.condition
        if len(api_events) == 0:
            continue
        try:
            self._process_api_events(function, api_events, template, condition)
        except InvalidEventException as ex:
            # Collect errors so all invalid resources are reported at once.
            errors.append(InvalidResourceException(logicalId, ex.message))
    self._maybe_add_condition_to_implicit_api(template_dict)
    self._maybe_add_conditions_to_implicit_api_paths(template)
    self._maybe_remove_implicit_api(template)
    if len(errors) > 0:
        raise InvalidDocumentException(errors)
Hook method that gets called before the SAM template is processed. The template has pass the validation and is guaranteed to contain a non-empty "Resources" section. :param dict template_dict: Dictionary of the SAM template :return: Nothing
def app_start(name, profile, **kwargs):
    """Start an application with the specified profile.

    Does nothing if the application is already running.
    """
    ctx = Context(**kwargs)
    ctx.execute_action('app:start', **{
        'node': ctx.repo.create_secure_service('node'),
        'name': name,
        'profile': profile
    })
Start an application with specified profile. Does nothing if application is already running.
def list_supported_categories():
    """Print a list of supported external account category names.

    For example, "AWS" is a supported external account category name.
    """
    categories = get_supported_categories(api)
    category_names = [category.name for category in categories]
    print("Supported account categories by name: {0}".format(
        COMMA_WITH_SPACE.join(map(str, category_names))))
Prints a list of supported external account category names. For example, "AWS" is a supported external account category name.
def remove_suffix(text, suffix):
    """Remove `suffix` from the end of `text` if present.

    Bug fix: the original used str.partition, which strips everything
    from the FIRST occurrence of `suffix` anywhere in the string
    (remove_suffix('a.git.b', '.git') returned 'a'), and raised
    ValueError for an empty suffix. Only a true trailing suffix is
    removed now.

    >>> remove_suffix('name.git', '.git')
    'name'
    >>> remove_suffix('something special', 'sample')
    'something special'
    """
    if suffix and text.endswith(suffix):
        return text[:-len(suffix)]
    return text
Remove the suffix from the text if it exists. >>> remove_suffix('name.git', '.git') 'name' >>> remove_suffix('something special', 'sample') 'something special'
def update(self, resource, timeout=-1):
    """Updates a User.

    Args:
        resource (dict): Object to update.
        timeout: Timeout in seconds. Wait for task completion by default.
            The timeout does not abort the operation in OneView, just
            stops waiting for its completion.

    Returns:
        dict: Updated resource.
    """
    return self._client.update(resource, timeout=timeout,
                               default_values=self.DEFAULT_VALUES,
                               uri=self.URI)
Updates a User. Args: resource (dict): Object to update. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, just stop waiting for its completion. Returns: dict: Updated resource.
def search(self, **kwargs):
    """Search ASNs based on extended search.

    :param search: Dict containing QuerySets to find asns.
    :param include: Array containing fields to include on response.
    :param exclude: Array containing fields to exclude on response.
    :param fields: Array containing fields to override default fields.
    :param kind: 'detail' for detailed or 'basic' for basic results.
    :return: Dict containing asns
    """
    return super(ApiV4As, self).get(self.prepare_url(
        'api/v4/as/', kwargs))
Method to search asns based on extends search. :param search: Dict containing QuerySets to find asns. :param include: Array containing fields to include on response. :param exclude: Array containing fields to exclude on response. :param fields: Array containing fields to override default fields. :param kind: Determine if result will be detailed ('detail') or basic ('basic'). :return: Dict containing asns
def download_log(currentfile=None):
    """Download the .err.log of the given .py program file from the EV3
    brick and open it on success.

    Args:
        currentfile: program file name; '.err.log' is appended when the
            name does not already end with it. Does nothing when None.
    """
    # Idiom fixes: `is None` instead of `== None`; do not shadow the
    # builtin `list` with the command-line variable.
    if currentfile is None:
        return
    if not currentfile.endswith(".err.log"):
        currentfile = currentfile + ".err.log"
    cmd = get_base_ev3dev_cmd() + ['download', '--force', currentfile]
    env = os.environ.copy()
    env["PYTHONUSERBASE"] = THONNY_USER_BASE
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                            universal_newlines=True, env=env)
    dlg = MySubprocessDialog(get_workbench(), proc,
                             "Downloading log of program from EV3",
                             autoclose=True)
    dlg.wait_window()
    if dlg.returncode == 0:
        from pathlib import Path
        home = str(Path.home())
        open_file(currentfile, home, True)
downloads log of given .py file from EV3.
def save_graph(cn_topo, filename, showintfs=False, showaddrs=False):
    """Save the topology to an image file.

    :param cn_topo: the topology to draw
    :param filename: output image file path
    :param showintfs: also draw interfaces
    :param showaddrs: also draw addresses
    """
    __do_draw(cn_topo, showintfs=showintfs, showaddrs=showaddrs)
    # `pyp` is presumably matplotlib.pyplot — confirm at module level.
    pyp.savefig(filename)
Save the topology to an image file
def format_content_type_object(repo, content_type, uuid):
    """Return a content object from a repository for a given content_type
    and uuid.

    :param Repo repo: The git repository.
    :param str content_type: The content type to list
    :param uuid: id of the object to fetch
    :returns: dict
    :raises NotFound: if the object does not exist in the repository.
    """
    try:
        storage_manager = StorageManager(repo)
        model_class = load_model_class(repo, content_type)
        return dict(storage_manager.get(model_class, uuid))
    except GitCommandError:
        raise NotFound('Object does not exist.')
Return a content object from a repository for a given content_type and uuid :param Repo repo: The git repository. :param str content_type: The content type to list :returns: dict
def enable(self, key_id, **kwargs):
    """Enable a deploy key for a project.

    Args:
        key_id (int): The ID of the key to enable
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabProjectDeployKeyError: If the key could not be enabled
    """
    endpoint = '%s/%s/enable' % (self.path, key_id)
    self.gitlab.http_post(endpoint, **kwargs)
Enable a deploy key for a project. Args: key_id (int): The ID of the key to enable **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabProjectDeployKeyError: If the key could not be enabled
def change_name(self, username):
    """Change this client's registered name to `username`.

    Re-registers the old name and re-raises if `username` is already in
    use.

    :param username: the new name to register
    :raises UsernameInUseException: if the username is already taken
    """
    self.release_name()
    try:
        self.server.register_name(username)
    except UsernameInUseException:
        # Bug fix: logging.log() requires a level as its first argument;
        # the original passed only the message string, which raises
        # TypeError instead of logging.
        logging.error(', '.join(self.server.registered_names))
        # Restore the old name before propagating the error.
        self.server.register_name(self.name)
        raise
    self.name = username
changes the username to given username, throws exception if username used
def task2ics():
    """Command line tool to convert from Taskwarrior to iCalendar."""
    from argparse import ArgumentParser, FileType
    from sys import stdout
    parser = ArgumentParser(
        description='Converter from Taskwarrior to iCalendar syntax.')
    parser.add_argument('indir', nargs='?',
                        help='Input Taskwarrior directory (default to ~/.task)',
                        default=expanduser('~/.task'))
    parser.add_argument('outfile', nargs='?', type=FileType('w'),
                        default=stdout,
                        help='Output iCalendar file (default: stdout)')
    args = parser.parse_args()
    task = IcsTask(args.indir)
    args.outfile.write(task.to_vobject().serialize())
Command line tool to convert from Taskwarrior to iCalendar
def precompile_python_code(context: Context):
    """Pre-compile the app's python modules to bytecode."""
    from compileall import compile_dir
    kwargs = {}
    if context.verbosity < 2:
        # Suppress per-file output unless running verbosely.
        kwargs['quiet'] = True
    compile_dir(context.app.django_app_name, **kwargs)
Pre-compiles python modules
def layerize(begin_update=None, predict=None, *args, **kwargs):
    """Wrap a function into a layer (FunctionLayer).

    Usable directly (``layerize(fn)``) or as a decorator factory
    (``@layerize(...)``) when called without ``begin_update``.
    """
    if begin_update is not None:
        return FunctionLayer(begin_update, predict=predict, *args, **kwargs)

    def wrapper(begin_update):
        # Deferred construction: used as a decorator.
        return FunctionLayer(begin_update, *args, **kwargs)
    return wrapper
Wrap a function into a layer
def set_as_error(self, color=Qt.red):
    """Highlight text as a syntax error (wavy underline).

    :param color: Underline color
    :type color: QtGui.QColor
    """
    self.format.setUnderlineStyle(
        QTextCharFormat.WaveUnderline)
    self.format.setUnderlineColor(color)
Highlights text as a syntax error. :param color: Underline color :type color: QtGui.QColor
def _parse(cls, data, key=None):
    """Parse a set of data to extract entity-only data.

    Uses classmethod `parse` if available, otherwise the `endpoint`
    class variable, to extract data from a data blob.
    """
    parse = cls.parse if cls.parse is not None else cls.get_endpoint()
    if callable(parse):
        data = parse(data)
    elif isinstance(parse, str):
        # NOTE(review): indexes with `key`, not `parse` — if `parse` is a
        # string endpoint, `data[parse]` may have been intended; confirm
        # against callers before changing.
        data = data[key]
    else:
        raise Exception('"parse" should be a callable or string got, {0}'
                        .format(parse))
    return data
Parse a set of data to extract entity-only data. Use classmethod `parse` if available, otherwise use the `endpoint` class variable to extract data from a data blob.
def set_category(self, category):
    """Set package category.

    Args:
        category: String of an existing category's name, or a Category
            object.
    """
    if isinstance(category, Category):
        name = category.name
    else:
        name = category
    # Update the <category> element's text in the backing XML tree.
    self.find("category").text = name
Set package category Args: category: String of an existing category's name, or a Category object.
def capture(returns, factor_returns, period=DAILY):
    """Compute capture ratio.

    Annualized strategy return divided by annualized factor (benchmark)
    return.

    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Returns of the strategy, noncumulative.
    factor_returns : pd.Series or np.ndarray
        Noncumulative returns of the factor to which beta is computed.
        Usually a benchmark such as the market.
    period : str, optional
        Periodicity of the 'returns' data for annualizing.

    Returns
    -------
    capture_ratio : float
    """
    return (annual_return(returns, period=period) /
            annual_return(factor_returns, period=period))
Compute capture ratio. Parameters ---------- returns : pd.Series or np.ndarray Returns of the strategy, noncumulative. - See full explanation in :func:`~empyrical.stats.cum_returns`. factor_returns : pd.Series or np.ndarray Noncumulative returns of the factor to which beta is computed. Usually a benchmark such as the market. - This is in the same style as returns. period : str, optional Defines the periodicity of the 'returns' data for purposes of annualizing. Value ignored if `annualization` parameter is specified. Defaults are:: 'monthly':12 'weekly': 52 'daily': 252 Returns ------- capture_ratio : float Note ---- See http://www.investopedia.com/terms/u/up-market-capture-ratio.asp for details.
def load_extra_data(cls, data):
    """Load extra JSON configuration parameters from a data buffer.

    The data buffer must represent a JSON object; its keys are merged
    into cls._extra_config. Exits the process on invalid JSON.

    Args:
        data: str, the buffer to load the JSON data from.
    """
    try:
        cls._extra_config.update(json.loads(data))
    except ValueError as exception:
        # Bug fixes: the message said "Could convert" instead of
        # "Could not convert", and '{0:s}'.format(exception) raises
        # TypeError for a non-str argument — use !s to stringify.
        sys.stderr.write('Could not convert to JSON. {0!s}'.format(exception))
        # sys.exit instead of the site builtin `exit` (same SystemExit,
        # but always available).
        sys.exit(-1)
Loads extra JSON configuration parameters from a data buffer. The data buffer must represent a JSON object. Args: data: str, the buffer to load the JSON data from.
def _ensure_slack(self, connector: Any, retries: int,
                  backoff: Callable[[int], float]) -> None:
    """Ensure we have a SlackClient.

    Builds a retrying wrapper around a SlackClient created from
    `connector`, falling back to the environment-variable connector when
    none is given, and stores it on self._slack.
    """
    connector = self._env_var if connector is None else connector
    slack: SlackClient = _create_slack(connector)
    self._slack = _SlackClientWrapper(
        slack=slack,
        retries=retries,
        backoff=backoff
    )
Ensure we have a SlackClient.
def twofilter_smoothing(self, t, info, phi, loggamma, linear_cost=False,
                        return_ess=False, modif_forward=None,
                        modif_info=None):
    """Two-filter smoothing.

    Parameters
    ----------
    t: time, in range 0 <= t < T-1
    info: SMC object
        the information filter
    phi: function
        test function, a function of (X_t, X_{t+1})
    loggamma: function
        a function of (X_{t+1})
    linear_cost: bool
        if True, use the O(N) variant (basic version is O(N^2))

    Returns
    -------
    Two-filter estimate of the smoothing expectation of phi(X_t, X_{t+1})
    """
    # The information filter runs backwards; ti indexes it from the end.
    ti = self.T - 2 - t
    if t < 0 or t >= self.T - 1:
        raise ValueError(
            'two-filter smoothing: t must be in range 0,...,T-2')
    # Information-filter log-weights corrected by the artificial
    # distribution loggamma.
    lwinfo = info.hist.wgt[ti].lw - loggamma(info.hist.X[ti])
    if linear_cost:
        return self._twofilter_smoothing_ON(t, ti, info, phi, lwinfo,
                                            return_ess, modif_forward,
                                            modif_info)
    else:
        return self._twofilter_smoothing_ON2(t, ti, info, phi, lwinfo)
Two-filter smoothing. Parameters ---------- t: time, in range 0 <= t < T-1 info: SMC object the information filter phi: function test function, a function of (X_t,X_{t+1}) loggamma: function a function of (X_{t+1}) linear_cost: bool if True, use the O(N) variant (basic version is O(N^2)) Returns ------- Two-filter estimate of the smoothing expectation of phi(X_t,x_{t+1})
def _send(self, email_message):
    """Send an individual message via the Amazon SES HTTP API.

    Args:
        email_message: A single Django EmailMessage object.

    Returns:
        True if the EmailMessage was sent successfully, otherwise False.

    Raises:
        ClientError: if the SES call failed and fail_silently is False.
    """
    pre_send.send(self.__class__, message=email_message)
    if not email_message.recipients():
        # Nothing to send to.
        return False
    from_email = sanitize_address(email_message.from_email,
                                  email_message.encoding)
    recipients = [sanitize_address(addr, email_message.encoding)
                  for addr in email_message.recipients()]
    # SES expects CRLF line endings in the raw message.
    message = email_message.message().as_bytes(linesep='\r\n')
    try:
        result = self.conn.send_raw_email(
            Source=from_email,
            Destinations=recipients,
            RawMessage={
                'Data': message
            }
        )
        message_id = result['MessageId']
        post_send.send(
            self.__class__,
            message=email_message,
            message_id=message_id
        )
    except ClientError:
        if not self.fail_silently:
            raise
        return False
    return True
Sends an individual message via the Amazon SES HTTP API. Args: email_message: A single Django EmailMessage object. Returns: True if the EmailMessage was sent successfully, otherwise False. Raises: ClientError: An interaction with the Amazon SES HTTP API failed.
def post_build_time_coverage(self):
    """Collect all of the time coverage (years) for the bundle.

    NOTE(review): `years` is accumulated but never stored or returned —
    confirm whether the result is meant to be persisted somewhere.
    """
    from ambry.util.datestimes import expand_to_years
    years = set()
    # From the bundle metadata's about.time entry.
    if self.metadata.about.time:
        for year in expand_to_years(self.metadata.about.time):
            years.add(year)
    # From the bundle identity's btime.
    if self.identity.btime:
        for year in expand_to_years(self.identity.btime):
            years.add(year)
    # From each partition's own time coverage.
    for p in self.partitions:
        years |= set(p.time_coverage)
Collect all of the time coverage for the bundle.
def _get_gradient_log_pdf(self):
    """Return (gradient, log_pdf) of the Gaussian model at the current
    variable assignments.

    log_pdf is the log-density up to an additive constant.
    """
    # Deviation from the mean.
    sub_vec = self.variable_assignments - self.model.mean.flatten()
    # grad log p(x) = -P (x - mu) for precision matrix P.
    grad = - np.dot(self.model.precision_matrix, sub_vec)
    # -0.5 (x-mu)' P (x-mu), written using the already-computed gradient.
    log_pdf = 0.5 * np.dot(sub_vec, grad)
    return grad, log_pdf
Method that finds gradient and its log at position
def value(dtype, arg):
    """Validate that the given argument is a Value with a particular
    datatype.

    Parameters
    ----------
    dtype : DataType subclass or DataType instance
    arg : python literal or an ibis expression
        Python literals are coerced to ibis literals.

    Returns
    -------
    arg : AnyValue
        An ibis value expression with the specified datatype

    Raises
    ------
    com.IbisTypeError
        If `arg` is not a value expression or cannot be implicitly cast
        to `dtype`.
    """
    if not isinstance(arg, ir.Expr):
        # Coerce python literals to ibis literal expressions.
        arg = ir.literal(arg)
    if not isinstance(arg, ir.AnyValue):
        raise com.IbisTypeError(
            'Given argument with type {} is not a value '
            'expression'.format(type(arg))
        )
    # Literal values allow more precise castability checks.
    value = getattr(arg.op(), 'value', None)
    if isinstance(dtype, type) and isinstance(arg.type(), dtype):
        return arg
    elif dt.castable(arg.type(), dt.dtype(dtype), value=value):
        return arg
    else:
        raise com.IbisTypeError(
            'Given argument with datatype {} is not '
            'subtype of {} nor implicitly castable to '
            'it'.format(arg.type(), dtype)
        )
Validates that the given argument is a Value with a particular datatype Parameters ---------- dtype : DataType subclass or DataType instance arg : python literal or an ibis expression If a python literal is given the validator tries to coerce it to an ibis literal. Returns ------- arg : AnyValue An ibis value expression with the specified datatype
def login(session, user, password, database=None, server=None):
    """Log into a MyGeotab server and store the returned credentials.

    Prompts for a missing username/password. Exits the process on
    authentication failure.

    :param session: The current Session object.
    :param user: The username used for MyGeotab servers. Usually an
        email address.
    :param password: The password associated with the username.
    :param database: The database or company name. Optional.
    :param server: The server ie. my23.geotab.com. Optional.
    """
    if not user:
        user = click.prompt("Username", type=str)
    if not password:
        password = click.prompt("Password", hide_input=True, type=str)
    try:
        with click.progressbar(length=1, label="Logging in...") as progressbar:
            session.login(user, password, database, server)
            progressbar.update(1)
        if session.credentials:
            click.echo('Logged in as: %s' % session.credentials)
        session.load(database)
        return session.get_api()
    except mygeotab.AuthenticationException:
        click.echo('Incorrect credentials. Please try again.')
        sys.exit(0)
Logs into a MyGeotab server and stores the returned credentials. :param session: The current Session object. :param user: The username used for MyGeotab servers. Usually an email address. :param password: The password associated with the username. Optional if `session_id` is provided. :param database: The database or company name. Optional as this usually gets resolved upon authentication. :param server: The server ie. my23.geotab.com. Optional as this usually gets resolved upon authentication.
def internal_writer(self, outputs, stdout):
    """Write each output tuple to `stdout` as one tab-separated line,
    serializing every field with self.internal_serialize."""
    for output in outputs:
        fields = [self.internal_serialize(field) for field in output]
        print("\t".join(fields), file=stdout)
Writer which outputs the python repr for each item.
def start_event(self):
    """Called by the event loop when it is started.

    Creates the output frame pools (if used) then calls on_start().
    Creating the output frame pools now allows their size to be
    configured before starting the component.
    """
    if self.with_outframe_pool:
        self.update_config()
        for name in self.outputs:
            self.outframe_pool[name] = ObjectPool(
                Frame, self.new_frame, self.config['outframe_pool_len'])
    try:
        self.on_start()
    except Exception as ex:
        self.logger.exception(ex)
        # NOTE(review): StopIteration appears to signal the event loop to
        # stop this component — confirm the loop expects it.
        raise StopIteration()
Called by the event loop when it is started. Creates the output frame pools (if used) then calls :py:meth:`on_start`. Creating the output frame pools now allows their size to be configured before starting the component.
def filter(configs, settings):
    """Main entry function to filtering configuration types.

    Parameters
    ----------
    configs: Nx4 array (or DataFrame with a/b/m/n columns)
        array containing A-B-M-N configurations
    settings: dict
        'only_types': ['dd', ...] — restrict filtering to these types

    Returns
    -------
    dict
        filter results (indices) for each registered filter function;
        configurations not claimed by any filter are stored under
        'not_sorted'.
    """
    if isinstance(configs, pd.DataFrame):
        configs = configs[['a', 'b', 'm', 'n']].values
    filter_funcs = {
        'dd': _filter_dipole_dipole,
        'schlumberger': _filter_schlumberger,
    }
    keys = ['dd', 'schlumberger', ]
    allowed_keys = settings.get('only_types', filter_funcs.keys())
    results = {}
    # Filters mark consumed rows as NaN; work on a float copy.
    configs_filtered = configs.copy().astype(float)
    for key in keys:
        if key in allowed_keys:
            configs_filtered, indices_filtered = filter_funcs[key](
                configs_filtered,
            )
            if len(indices_filtered) > 0:
                results[key] = indices_filtered
    # Rows that are not fully NaN were not claimed by any filter.
    results['not_sorted'] = np.where(
        ~np.all(np.isnan(configs_filtered), axis=1)
    )[0]
    return results
Main entry function to filtering configuration types Parameters ---------- configs: Nx4 array array containing A-B-M-N configurations settings: dict 'only_types': ['dd', 'other'], # filter only for those types Returns ------- dict results dict containing filter results (indices) for all registered filter functions. All remaining configs are stored under the keywords 'remaining'
def _get_json(self, path, params=None, base=JIRA_BASE_URL, ):
    """Get the json for a given path and params.

    :param path: The subpath required
    :type path: str
    :param params: Parameters to filter the json query.
    :param base: The Base JIRA URL, defaults to the instance base.
    :rtype: Union[Dict[str, Any], List[Dict[str, str]]]
    """
    url = self._get_url(path, base)
    r = self._session.get(url, params=params)
    try:
        r_json = json_loads(r)
    except ValueError as e:
        # Log the body that failed to parse before re-raising.
        logging.error("%s\n%s" % (e, r.text))
        raise e
    return r_json
Get the json for a given path and params. :param path: The subpath required :type path: str :param params: Parameters to filter the json query. :type params: Optional[Dict[str, Any]] :param base: The Base JIRA URL, defaults to the instance base. :type base: Optional[str] :rtype: Union[Dict[str, Any], List[Dict[str, str]]]
def add_pagination_meta(self, params, meta):
    """Extend the meta dictionary value with pagination hints.

    Note: values are attached to ``meta`` in place — the dictionary
    reference must not be replaced, only modified.

    Args:
        params (dict): dictionary of decoded parameter values
        meta (dict): dictionary of meta values attached to response
    """
    page = params['page']
    page_size = params['page_size']
    meta['page_size'] = page_size
    meta['page'] = page
    # No "prev" link on the first page.
    if page > 0:
        meta['prev'] = "page={0}&page_size={1}".format(page - 1, page_size)
    else:
        meta['prev'] = None
    # "next" link unless the handler already said there is no more data.
    if meta.get('has_more', True):
        meta['next'] = "page={0}&page_size={1}".format(page + 1, page_size)
    else:
        meta['next'] = None
Extend default meta dictionary value with pagination hints. Note: This method handler attaches values to ``meta`` dictionary without changing it's reference. This means that you should never replace ``meta`` dictionary with any other dict instance but simply modify its content. Args: params (dict): dictionary of decoded parameter values meta (dict): dictionary of meta values attached to response
def __undo_filter_average(self, scanline):
    """Undo the 'average' scanline filter (as used in PNG) in place.

    Each byte becomes x + floor((a + b) / 2) mod 256, where `a` is the
    reconstructed byte `self.fu` positions to the left and `b` is the
    byte directly above (previous scanline).
    """
    # Index of the corresponding byte one pixel to the left; negative
    # until we are `fu` bytes into the scanline.
    ai = -self.fu
    previous = self.prev
    for i in range(len(scanline)):
        x = scanline[i]
        if ai < 0:
            # No left neighbour yet at the start of the scanline.
            a = 0
        else:
            a = scanline[ai]
        b = previous[i]
        scanline[i] = (x + ((a + b) >> 1)) & 0xff
        ai += 1
Undo average filter.
def _compute_mean(self, C, g, mag, hypo_depth, rrup, vs30, pga_rock, imt):
    """Compute mean according to equation 1, page 1706."""
    # Cap hypocentral depth at 100 km.
    if hypo_depth > 100:
        hypo_depth = 100
    # Magnitude-dependent delta term entering the distance metric.
    delta = 0.00724 * 10 ** (0.507 * mag)
    R = np.sqrt(rrup ** 2 + delta ** 2)
    s_amp = self._compute_soil_amplification(C, vs30, pga_rock, imt)
    mean = (
        C['c1'] + C['c2'] * mag + C['c3'] * hypo_depth + C['c4'] * R -
        g * np.log10(R) + s_amp
    )
    return mean
Compute mean according to equation 1, page 1706.
def connect(self):
    """Connect to the Redis server if necessary.

    :rtype: :class:`~tornado.concurrent.Future`
    :raises: :class:`~tredis.exceptions.ConnectError` if already
        connected
    """
    future = concurrent.Future()
    if self.connected:
        raise exceptions.ConnectError('already connected')
    LOGGER.debug('%s connecting', self.name)
    # Resolve the returned future once the underlying client connects.
    self.io_loop.add_future(
        self._client.connect(self.host, self.port),
        lambda f: self._on_connected(f, future))
    return future
Connect to the Redis server if necessary. :rtype: :class:`~tornado.concurrent.Future` :raises: :class:`~tredis.exceptions.ConnectError` :class:`~tredis.exceptinos.RedisError`
def post(self, request, format=None):
    """Validate a password change operation and return the result."""
    serializer_class = self.get_serializer_class()
    serializer = serializer_class(data=request.data, instance=request.user)
    if serializer.is_valid():
        serializer.save()
        return Response({'detail': _(u'Password successfully changed')})
    # Invalid input: echo field errors with HTTP 400.
    return Response(serializer.errors, status=400)
validate password change operation and return result
def hildatree2dgparentedtree(self):
    """Convert the tree from HILDA's format into a conventional binary
    tree, which can be easily converted into output formats like RS3.

    NOTE(review): uses `basestring`, so this code targets Python 2.
    """
    def transform(hilda_tree):
        # Transform a HILDA tree node in place: strip the nuclearity
        # markers out of the relation label and push them down onto
        # freshly inserted child nodes.
        # Leaves (tokens) are returned untouched.
        if isinstance(hilda_tree, basestring) or not hasattr(hilda_tree, 'label'):
            return hilda_tree
        assert len(hilda_tree) == 2, "We can only handle binary trees."
        match = HILDA_REL_RE.match(hilda_tree.label())
        assert match, "Relation '{}' does not match regex '{}'".format(
            hilda_tree.label(), HILDA_REL_RE)
        relname, left_child_nuc, right_child_nuc = match.groups()
        hilda_tree._label = relname
        for i, child_nuclearity in enumerate([left_child_nuc, right_child_nuc]):
            child = hilda_tree[i]
            # Wrap each child in a node labelled with its nuclearity.
            hilda_tree[i] = Tree(child_nuclearity, [transform(child)])
        return hilda_tree
    tree = transform(self.hildafile_tree)
    return DGParentedTree.convert(tree)
Convert the tree from HILDA's format into a conventional binary tree, which can be easily converted into output formats like RS3.
def config_as_dict(cfg):
    """Convert raw configuration to a unified, flat dictionary.

    Named-tuple sub-configs (crop samplers, padding, color jitter) are
    flattened into the top-level dict.
    """
    ret = cfg.__dict__.copy()
    # Flatten the list of crop-sampler namedtuples.
    del ret['rand_crop_samplers']
    assert isinstance(cfg.rand_crop_samplers, list)
    ret = merge_dict(ret, zip_namedtuple(cfg.rand_crop_samplers))
    num_crop_sampler = len(cfg.rand_crop_samplers)
    ret['num_crop_sampler'] = num_crop_sampler
    # Equal probability for each sampler plus the no-crop case.
    ret['rand_crop_prob'] = 1.0 / (num_crop_sampler + 1) * num_crop_sampler
    # Flatten the padding and color-jitter namedtuples.
    del ret['rand_pad']
    ret = merge_dict(ret, cfg.rand_pad._asdict())
    del ret['color_jitter']
    ret = merge_dict(ret, cfg.color_jitter._asdict())
    return ret
convert raw configuration to unified dictionary
def speed_difference(points):
    """Compute the speed difference between each pair of adjacent points.

    Args:
        points: sequence of Point objects with a `vel` attribute.

    Returns:
        list: velocity deltas (before.vel - after.vel), one per point;
        the first element is 0.
        NOTE(review): the original docstring said "Indexes of
        changepoints", which does not match what is returned.
    """
    data = [0]
    for before, after in pairwise(points):
        data.append(before.vel - after.vel)
    return data
Computes the speed difference between each pair of adjacent points Args: points (:obj:`Point`) Returns: :obj:`list` of float: Speed difference for each point (first element is 0)
def factorize_groupby_cols(self, groupby_cols):
    """Factorize all columns that are used in the groupby.

    Uses cached factor/value carrays when available; if auto_cache is on
    and no valid cache exists yet, the cache carrays are created first.

    Returns:
        (factor_list, values_list): per-column factor carrays and their
        corresponding unique-value carrays.
    """
    factor_list = []
    values_list = []
    for col in groupby_cols:
        if self.auto_cache or self.cache_valid(col):
            # Create the on-disk cache on first use if needed.
            if not self.cache_valid(col):
                self.cache_factor([col])
            col_rootdir = self[col].rootdir
            col_factor_rootdir = col_rootdir + '.factor'
            col_values_rootdir = col_rootdir + '.values'
            col_carray_factor = \
                bcolz.carray(rootdir=col_factor_rootdir, mode='r')
            col_carray_values = \
                bcolz.carray(rootdir=col_values_rootdir, mode='r')
        else:
            # No caching available: factorize in memory.
            col_carray_factor, values = ctable_ext.factorize(self[col])
            col_carray_values = \
                bcolz.carray(np.fromiter(values.values(),
                                         dtype=self[col].dtype))
        factor_list.append(col_carray_factor)
        values_list.append(col_carray_values)
    return factor_list, values_list
factorizes all columns that are used in the groupby it will use cache carrays if available if not yet auto_cache is valid, it will create cache carrays
def ls(path, pattern='*'):
    """Like unix ls - lists all files and dirs in path, sorted.

    NOTE(review): `glob` here takes (path, pattern, recursive) — not the
    stdlib glob.glob signature; presumably a project helper.
    """
    path_iter = glob(path, pattern, recursive=False)
    return sorted(list(path_iter))
like unix ls - lists all files and dirs in path
def _assert_has_data_for_time(da, start_date, end_date):
    """Check to make sure data is in Dataset for the given time range.

    Parameters
    ----------
    da : DataArray
        DataArray with a time variable
    start_date : datetime-like object or str
        start date
    end_date : datetime-like object or str
        end date

    Raises
    ------
    AssertionError
        If the time range is not within the time range of the DataArray
    """
    if isinstance(start_date, str) and isinstance(end_date, str):
        # String bounds cannot be compared to the data's time values;
        # warn and skip the check entirely.
        logging.warning(
            'When using strings to specify start and end dates, the check '
            'to determine if data exists for the full extent of the desired '
            'interval is not implemented. Therefore it is possible that '
            'you are doing a calculation for a lesser interval than you '
            'specified. If you would like this check to occur, use explicit '
            'datetime-like objects for bounds instead.')
        return
    if RAW_START_DATE_STR in da.coords:
        # Use the raw (pre-processing) date bounds when present.
        with warnings.catch_warnings(record=True):
            da_start = da[RAW_START_DATE_STR].values
            da_end = da[RAW_END_DATE_STR].values
    else:
        times = da.time.isel(**{TIME_STR: [0, -1]})
        da_start, da_end = times.values
    message = ('Data does not exist for requested time range: {0} to {1};'
               ' found data from time range: {2} to {3}.')
    # One-second tolerance guards against rounding at the bounds.
    tol = datetime.timedelta(seconds=1)
    if isinstance(da_start, np.datetime64):
        tol = np.timedelta64(tol, 'ns')
    range_exists = ((da_start - tol) <= start_date and
                    (da_end + tol) >= end_date)
    assert (range_exists), message.format(start_date, end_date,
                                          da_start, da_end)
Check to make sure data is in Dataset for the given time range. Parameters ---------- da : DataArray DataArray with a time variable start_date : datetime-like object or str start date end_date : datetime-like object or str end date Raises ------ AssertionError If the time range is not within the time range of the DataArray
def write(self, output):
    """Write the data to be output to the device buffer; the output will
    be looped when the data runs out.

    :param output: data to output
    :type output: numpy.ndarray
    """
    # Receives the number of samples actually written.
    w = c_int32()
    # NI-DAQmx analog write, grouped by channel. NOTE(review): the
    # positional args presumably mean autostart=0 and a 10 s timeout —
    # confirm against the DAQmx WriteAnalogF64 documentation.
    self.WriteAnalogF64(self.bufsize, 0, 10.0, DAQmx_Val_GroupByChannel,
                        output, w, None);
Writes the data to be output to the device buffer, output will be looped when the data runs out :param output: data to output :type output: numpy.ndarray
def long2ip(l):
    """Convert a network byte order 32-bit integer to a dotted quad ip
    address.

    >>> long2ip(2130706433)
    '127.0.0.1'

    :param l: Network byte order 32-bit integer.
    :type l: int
    :returns: Dotted-quad ip address (eg. '127.0.0.1').
    :raises: TypeError if `l` is outside [MIN_IP, MAX_IP].
    """
    if not MIN_IP <= l <= MAX_IP:
        raise TypeError(
            "expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
    # Extract the four octets from most to least significant.
    octets = tuple((l >> shift) & 255 for shift in (24, 16, 8, 0))
    return '%d.%d.%d.%d' % octets
Convert a network byte order 32-bit integer to a dotted quad ip address. >>> long2ip(2130706433) '127.0.0.1' >>> long2ip(MIN_IP) '0.0.0.0' >>> long2ip(MAX_IP) '255.255.255.255' >>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int' >>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TypeError: expected int between 0 and 4294967295 inclusive >>> long2ip(374297346592387463875) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TypeError: expected int between 0 and 4294967295 inclusive >>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TypeError: expected int between 0 and 4294967295 inclusive :param l: Network byte order 32-bit integer. :type l: int :returns: Dotted-quad ip address (eg. '127.0.0.1'). :raises: TypeError
def query(self, *args, **kwargs):
    """Reimplemented from the base class.

    This method does not add functionality beyond the base class'
    :meth:`~couchbase.bucket.Bucket.query` method, but it requires one
    additional keyword argument:

    :param class itercls: Class used for instantiating the view object;
        must be a subclass of
        :class:`~couchbase.asynchronous.view.AsyncViewBase`.
    :raises ArgumentError: when ``itercls`` is missing or not such a class.
    """
    itercls = kwargs.get('itercls')
    # Guard with isinstance(..., type): issubclass() raises TypeError on
    # None or non-class values, which would mask the intended
    # ArgumentError when 'itercls' is omitted.
    if not (isinstance(itercls, type) and issubclass(itercls, AsyncViewBase)):
        raise ArgumentError.pyexc("itercls must be defined "
                                  "and must be derived from AsyncViewBase")
    return super(AsyncBucket, self).query(*args, **kwargs)
def get_code(node, coder=Coder()):
    """Return a node's code as an HTML-escaped string.

    :param node: the node whose code to render.
    :param coder: coder used to produce the source text; NOTE the default
        instance is created once at definition time and shared by all calls.
    """
    # cgi.escape() was deprecated in Python 3.2 and removed in 3.13;
    # html.escape(quote=True) is the supported replacement.
    # NOTE(review): html.escape additionally escapes single quotes, which
    # cgi.escape did not — strictly safer for HTML-attribute contexts.
    import html
    return html.escape(str(coder.code(node)), quote=True)
def spin_z(particles, index):
    """Generate the spin_z projection operator for a system of N=particles
    for the selected spin index, where index = 0..N-1.

    Returns a (2**N, 2**N) diagonal matrix with entries +1/2 or -1/2.
    """
    dim = 2 ** particles
    diagonal = np.empty(dim)
    for state in range(dim):
        # +1 on the diagonal when the selected spin bit is set, -1 otherwise.
        diagonal[state] = 1 if btest(state, index) == 1 else -1
    return 0.5 * np.diag(diagonal)
def auth_list(**kwargs):
    """Show the available authorization groups."""
    ctx = Context(**kwargs)
    storage = ctx.repo.create_secure_service('storage')
    ctx.execute_action('auth:group:list', storage=storage)
def get(self):
    """Parked endpoint: all logins are done over POST.

    Responds with ``401 Unauthorized``, a ``WWW-Authenticate: Session``
    header, and a serialized hint asking the client to log in.
    """
    payload = {'status': '401 Unauthorized', 'return': 'Please log in'}
    self.set_status(401)
    self.set_header('WWW-Authenticate', 'Session')
    self.write(self.serialize(payload))
def launch_shell(username, hostname, password, port=22):
    """Launch an ssh shell via the module-level ``sshCmdLine`` template.

    :returns: False when any of username/hostname/password is falsy,
        True otherwise (regardless of whether ssh itself succeeded).
    """
    if not username or not hostname or not password:
        return False
    # NOTE(review): the password is interpolated into a shell command line,
    # exposing it to other local users (ps, shell history) and allowing
    # shell injection through any of these arguments — consider
    # paramiko/pexpect instead of os.system.
    with tempfile.NamedTemporaryFile() as tmpFile:
        os.system(sshCmdLine.format(password, tmpFile.name, username,
                                    hostname, port))
    return True
def get_data(self, datatype, data):
    """Look for an IP address or an email address in the spammer database.

    :param datatype: Which type of data is to be looked up ('ip' or 'mail').
    :param data: The value to be looked up through the API.
    :type datatype: str
    :type data: str
    :return: Data relative to the looked up artifact (empty dict when the
        API reports failure).
    :rtype: dict
    """
    params = StopforumspamClient._set_payload(datatype, data)
    response = self.client.get(
        'https://api.stopforumspam.org/api',
        params=params,
        proxies=self.proxies)
    response.raise_for_status()
    report = response.json()
    if not report['success']:
        # API signalled failure: return an empty result (best-effort).
        return {}
    payload = report[StopforumspamClient._type_conversion[datatype]]
    return self._data_conversion(payload)
def delete(self):
    """Delete this property.

    :return: None
    :raises APIError: if the delete was not successful
    """
    url = self._client._build_url('property', property_id=self.id)
    response = self._client._request('DELETE', url)
    if response.status_code != requests.codes.no_content:
        raise APIError("Could not delete property: {} with id {}".format(self.name, self.id))
def get_version(self, dependency):
    """Return the installed version of *dependency* by parsing 'pip show'.

    Returns an empty string when the version cannot be determined.
    """
    logger.debug("getting installed version for %s", dependency)
    stdout = helpers.logged_exec([self.pip_exe, "show", str(dependency)])
    matches = [line for line in stdout if line.startswith('Version:')]
    if len(matches) != 1:
        # Either no 'Version:' line or an ambiguous result.
        logger.error('Fades is having problems getting the installed version. '
                     'Run with -v or check the logs for details')
        return ''
    version = matches[0].strip().split()[1]
    logger.debug("Installed version of %s is: %s", dependency, version)
    return version
def format_to_json(data):
    """Convert `data` into JSON.

    If stdout is a tty the output is pretty-printed, otherwise compact.
    """
    if not sys.stdout.isatty():
        return json.dumps(data)
    return json.dumps(data, indent=4, separators=(',', ': '))
def _format_options_usage(options): options_usage = "" for op in options: short, long = op.get_flags() if op.arg: flag = "{short} {arg} {long}={arg}".format( short=short, long=long, arg=op.arg) else: flag = "{short} {long}".format(short=short, long=long) wrapped_description = textwrap.wrap(inspect.cleandoc(op.__doc__), width=79, initial_indent=' ' * 32, subsequent_indent=' ' * 32) wrapped_description = "\n".join(wrapped_description).strip() options_usage += " {0:28} {1}\n".format(flag, wrapped_description) return options_usage
Format the Options-part of the usage text. Parameters ---------- options : list[sacred.commandline_options.CommandLineOption] A list of all supported commandline options. Returns ------- str Text formatted as a description for the commandline options
def unique_prefixes(context):
    """Return a dictionary with unique prefixes for modules in `context`.

    Keys are 'module' statements and values are prefixes, disambiguated
    with an incrementing hex suffix where necessary.
    """
    assigned = {}
    for module in context.modules.values():
        if module.keyword == "submodule":
            continue
        base = module.i_prefix
        candidate = base
        counter = 0
        # Append an incrementing hex suffix until the prefix is unused.
        while candidate in assigned.values():
            counter += 1
            candidate = "%s%x" % (base, counter)
        assigned[module] = candidate
    return assigned
def prepare_image_data(extracted_image_data, output_directory, image_mapping):
    """Prepare and clean image data, dropping duplicates and garbage.

    :param extracted_image_data: iterable of (image, caption, label)
        tuples, ordered.
    :param output_directory: directory holding the converted images.
    :param image_mapping: mapping whose keys are candidate image locations
        and whose values are the original URLs.
    :return: the image data de-duplicated for output (dict values view).
    """
    img_list = {}
    for image, caption, label in extracted_image_data:
        # Skip missing images and extraction failures.
        if not image or image == 'ERROR':
            continue
        image_location = get_image_location(
            image,
            output_directory,
            image_mapping.keys()
        )
        # Discard unresolvable, nonexistent, or implausibly short paths.
        if not image_location or not os.path.exists(image_location) or \
                len(image_location) < 3:
            continue
        image_location = os.path.normpath(image_location)
        if image_location in img_list:
            # Duplicate image: just collect any new caption.
            if caption not in img_list[image_location]['captions']:
                img_list[image_location]['captions'].append(caption)
        else:
            # NOTE(review): indexing image_mapping by the *normalized* path
            # assumes normalization preserves the mapping's keys — if
            # get_image_location returns a non-key path this raises
            # KeyError; confirm against get_image_location's contract.
            img_list[image_location] = dict(
                url=image_location,
                original_url=image_mapping[image_location],
                captions=[caption],
                label=label,
                name=get_name_from_path(image_location, output_directory)
            )
    return img_list.values()
def _total_counts(seqs, seqL, aligned=False): total = Counter() if isinstance(seqs, list): if not aligned: l = len([total.update(seqL[s].freq) for s in seqs]) else: l = len([total.update(seqL[s].freq) for s in seqs if seqL[s].align > 0]) elif isinstance(seqs, dict): [total.update(seqs[s].get_freq(seqL)) for s in seqs] l = sum(len(seqs[s].idmembers) for s in seqs) return total, l
Counts total seqs after each step
def _next_sample_index(self): idx = self.active_index_ self.active_index_ += 1 if self.active_index_ >= len(self.streams_): self.active_index_ = 0 while self.streams_[idx] is None: idx = self.active_index_ self.active_index_ += 1 if self.active_index_ >= len(self.streams_): self.active_index_ = 0 return idx
Rotates through each active sampler by incrementing the index
def array(self):
    """Return the last stored values, oldest first.

    While the ring buffer has not wrapped yet this is simply the filled
    prefix of ``_values``; afterwards the two halves around the write
    cursor are stitched together into ``_cache`` (rebuilt lazily).
    """
    if self._ind < self.shape:
        # Buffer not full yet: only the first _ind slots hold data.
        return self._values[:self._ind]
    if not self._cached:
        split = int(self._ind % self.shape)
        self._cache[:self.shape - split] = self._values[split:]
        self._cache[self.shape - split:] = self._values[:split]
        self._cached = True
    return self._cache
def socket(self, blocking=True):
    """Yield the shared socket while holding its lock.

    Generator-based context helper: the lock is released when the caller
    finishes with the yielded socket.

    :param blocking: when False and the lock is already held, acquire()
        fails and the generator yields nothing at all — callers get no
        socket rather than blocking.
    """
    if self._socket_lock.acquire(blocking):
        try:
            yield self._socket
        finally:
            self._socket_lock.release()
def run(self, format=None, reduce=False, *args, **kwargs):
    """Generate the underlying graph and print it to stdout.

    :param format: key into the module-level FORMATTERS table selecting
        the output formatter.  (Shadows the builtin, but the parameter
        name is part of the public interface.)
    :param reduce: when True, apply a transitive reduction to the graph
        before formatting.
    """
    plan = self._generate_plan()
    if reduce:
        plan.graph.transitive_reduction()
    fn = FORMATTERS[format]
    fn(sys.stdout, plan.graph)
    sys.stdout.flush()
def __telnet_event_listener(self, ip, callback):
    """Create a telnet connection to the lightpad and dispatch events.

    Listens on port 2708 until ``self._telnet_running`` is cleared,
    invoking *callback* with each newly seen JSON event; consecutive
    duplicate events are suppressed.
    """
    tn = telnetlib.Telnet(ip, 2708)
    self._last_event = ""
    self._telnet_running = True
    try:
        while self._telnet_running:
            try:
                raw_string = tn.read_until(b'.\n', 5)
                # Complete events are terminated by ".\n".
                if len(raw_string) >= 2 and raw_string[-2:] == b'.\n':
                    json_string = raw_string.decode('ascii')[0:-2]
                    if json_string != self._last_event:
                        callback(json.loads(json_string))
                        self._last_event = json_string
            except Exception:
                # Best-effort: keep listening through transient read/parse
                # errors.  Was a bare 'except:', which also swallowed
                # KeyboardInterrupt/SystemExit and prevented shutdown.
                pass
    finally:
        # Always close the connection, even if the loop exits abnormally.
        tn.close()
def mkdir(dir, enter):
    """Create directory *dir* (and missing parents) if it does not exist.

    :param dir: directory path to create.
    :param enter: unused; kept for backward compatibility with callers.
    """
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists() guard.
    os.makedirs(dir, exist_ok=True)
def alg2keytype(alg):
    """Go from algorithm name to key type.

    :param alg: The algorithm name
    :return: The key type, or None for an unrecognized algorithm
    """
    if not alg or alg.lower() == "none":
        return "none"
    if alg.startswith(("RS", "PS")):
        return "RSA"
    if alg.startswith(("HS", "A")):
        return "oct"
    if alg.startswith(("ES", "ECDH-ES")):
        return "EC"
    return None
def get(self):
    """Return the sbo arch string.

    Maps i?86 architectures to ``self.x86`` and ARM to ``self.arm``;
    anything else is returned unchanged.
    """
    if self.arch.startswith("i") and self.arch.endswith("86"):
        self.arch = self.x86
    # NOTE(review): this branch inspects self.meta.arch while the first
    # inspects self.arch — possibly intentional, but looks inconsistent;
    # confirm against how self.arch is initialised.
    elif self.meta.arch.startswith("arm"):
        self.arch = self.arm
    return self.arch
def source_sum(self):
    """The sum of the unmasked ``data`` values within the source segment.

    .. math:: F = \\sum_{i \\in S} (I_i - B_i)

    where :math:`F` is ``source_sum``, :math:`(I_i - B_i)` is the
    ``data``, and :math:`S` are the unmasked pixels in the source segment.

    Returns NaN (carrying ``self._data_unit``) when every pixel in the
    segment is masked.
    """
    if self._is_completely_masked:
        return np.nan * self._data_unit
    else:
        return np.sum(self.values)
def compose(*funcs):
    """Compose any number of unary functions into a single unary function.

    The innermost (last) function may take arbitrary arguments; each
    remaining function receives the previous result:
    ``compose(f, g)(x) == f(g(x))``.
    """
    def _pair(outer, inner):
        def composed(*args, **kwargs):
            return outer(inner(*args, **kwargs))
        return composed
    return functools.reduce(_pair, funcs)
def get_crimes_area(self, points, date=None, category=None):
    """Get crimes within a custom area (the crime-street API call).

    :param list points: A list of ``(lat, lng)`` tuples.
    :param date: Month as ``YYYY-MM`` (latest month when None).
    :type date: str or None
    :param category: Crime category to filter by (ID or CrimeCategory).
    :type category: str or CrimeCategory
    :rtype: list
    :return: Crimes reported within the boundary in the given month.
    """
    if isinstance(category, CrimeCategory):
        category = category.id
    method = 'crimes-street/%s' % (category or 'all-crime')
    kwargs = {'poly': encode_polygon(points)}
    if date is not None:
        kwargs['date'] = date
    return [Crime(self, data=c)
            for c in self.service.request('POST', method, **kwargs)]
def _table_set_column(table, name, expr):
    """Replace the existing column *name* in *table* with *expr*.

    Parameters
    ----------
    name : string
        Column name to replace
    expr : value expression
        New data for column

    Returns
    -------
    set_table : TableExpr
        New table expression

    Raises
    ------
    KeyError
        If *name* is not a column of *table*.
    """
    expr = table._ensure_expr(expr)
    if expr._name != name:
        expr = expr.name(name)
    if name not in table:
        raise KeyError('{0} is not in the table'.format(name))
    proj_exprs = [expr if key == name else table[key]
                  for key in table.columns]
    return table.projection(proj_exprs)
def set_location(self, uri, size, checksum, storage_class=None):
    """Set only the URI location for this object.

    Useful to link files on externally controlled storage.

    :param uri: Full URI to the object (interpretable by the storage
        interface).
    :param size: Size of the file.
    :param checksum: Checksum of the file.
    :param storage_class: Storage class where the file is stored.
    :returns: self, allowing call chaining.
    """
    # NOTE(review): upstream docs say a FileInstanceAlreadySetError is
    # raised when a file instance is already set, but this body
    # unconditionally overwrites self.file — confirm intended behavior.
    self.file = FileInstance()
    self.file.set_uri(
        uri, size, checksum, storage_class=storage_class
    )
    db.session.add(self.file)
    return self
def get_smart_storage_config(self, smart_storage_config_url):
    """Return an HPESmartStorageConfig instance for the given controller URL.

    :param smart_storage_config_url: Redfish URL of the smart storage
        config resource.
    """
    return (smart_storage_config.
            HPESmartStorageConfig(self._conn, smart_storage_config_url,
                                  redfish_version=self.redfish_version))
def get_file(fn):
    """Return the contents of data file *fn* as a list of stripped
    unicode lines.

    :param fn: file name, resolved relative to this module's ``data``
        directory.
    """
    path = os.path.join(os.path.dirname(__file__), 'data', fn)
    # 'with' guarantees the handle is closed; the original leaked it.
    with open(path, 'rb') as f:
        return [line.decode('utf-8').strip() for line in f]
def keytype_path_to(args, keytype):
    """Get the local filename for a keyring type.

    'admin' and 'mon' have dedicated names; everything else maps to a
    bootstrap keyring for that type.
    """
    cluster = args.cluster
    if keytype == "admin":
        return '{}.client.admin.keyring'.format(cluster)
    if keytype == "mon":
        return '{}.mon.keyring'.format(cluster)
    return '{}.bootstrap-{}.keyring'.format(cluster, keytype)
def findlast(*args, **kwargs):
    """Find the last matching element in a list and return it.

    Usage::

        findlast(element, list_)
        findlast(of=element, in_=list_)
        findlast(where=predicate, in_=list_)

    :param element, of: Element to search for (by equality comparison)
    :param where: Predicate defining an element to search for
    :param list_, in_: List to search in
    :return: Last matching element
    :raise IndexError: If no matching elements were found
    """
    seq, pos = _index(*args, start=sys.maxsize, step=-1, **kwargs)
    if pos < 0:
        raise IndexError("element not found")
    return seq[pos]
def _validate_namespaces(self, input_namespaces):
    """Convert a list of db namespace strings to namespace tuples,
    supporting a basic '*' commandline wildcard.

    Invalid and duplicate namespaces are skipped with a warning written
    to stderr.  NOTE(review): uses the Python 2-only name ``unicode``;
    this function raises NameError on Python 3 — confirm the project's
    supported Python version.
    """
    output_namespaces = []
    if input_namespaces == []:
        return output_namespaces
    elif '*' in input_namespaces:
        # A '*' means "all namespaces", represented by an empty list;
        # any explicit namespaces alongside it are ignored.
        if len(input_namespaces) > 1:
            warning = 'Warning: Multiple namespaces are '
            warning += 'ignored when one namespace is "*"\n'
            sys.stderr.write(warning)
        return output_namespaces
    else:
        for namespace in input_namespaces:
            if not isinstance(namespace, unicode):
                namespace = unicode(namespace)
            namespace_tuple = self._tuplefy_namespace(namespace)
            if namespace_tuple is None:
                warning = 'Warning: Invalid namespace ' + namespace
                warning += ' will be ignored\n'
                sys.stderr.write(warning)
            else:
                if namespace_tuple not in output_namespaces:
                    output_namespaces.append(namespace_tuple)
                else:
                    warning = 'Warning: Duplicate namespace ' + namespace
                    warning += ' will be ignored\n'
                    sys.stderr.write(warning)
        return output_namespaces
def string(self, units: typing.Optional[str] = None) -> str:
    """Return a string representation of the pressure in the given units
    (or in this object's own units when *units* is None).

    :raises UnitsError: for an unrecognized unit name.
    :raises ValueError: if the resolved unit has no formatter.
    """
    if units:
        _units = units.upper()
        if _units not in CustomPressure.legal_units:
            raise UnitsError("unrecognized pressure unit: '" + units + "'")
    else:
        _units = self._units
    val = self.value(units)
    formats = {
        "MB": "%.0f mb",
        "HPA": "%.0f hPa",
        "IN": "%.2f inches",
        "MM": "%.0f mmHg",
    }
    fmt = formats.get(_units)
    if fmt is None:
        raise ValueError(_units)
    return fmt % val
def set(self, field, value):
    """Set the value of an app field.

    :param str field: The name of the app field.  The immutable fields
        ``uuid`` and ``key`` cannot be set and raise ValueError.
    :param value: The new value of the app field.
    :raises: ValueError
    """
    if field == 'uuid':
        raise ValueError('uuid cannot be set')
    if field == 'key':
        raise ValueError("key cannot be set. Use 'reset_key' method")
    self.data[field] = value
def set(self, section, option, value=''):
    """Set *option* in *section* to *value*.

    Overridden from RawConfigParser merely to change the default for
    *value* to '' and to validate it via ``_string_check`` before
    delegating.
    """
    self._string_check(value)
    super(GitConfigParser, self).set(section, option, value)
def render_source(self, source, variables=None):
    """Render a template *source* with the passed variables."""
    context = {} if variables is None else variables
    return self._engine.from_string(source).render(**context)
def translate(self, exc):
    """Translate a boto 404 StorageResponseError into ``self.error_cls``.

    Returns None (no translation) for every other exception.
    """
    from boto.exception import StorageResponseError
    if isinstance(exc, StorageResponseError) and exc.status == 404:
        return self.error_cls(str(exc))
    return None
def ekf1_pos(EKF1):
    """Calculate the EKF position (lat, lon) when the EKF is disabled.

    Lazily derives the module-global ``ekf_home`` from the first GPS
    message with a 3D fix by backing out the EKF's north/east offsets,
    then offsets it by the current PN/PE.  Returns None until a 3D GPS
    fix has been seen.
    """
    global ekf_home
    from . import mavutil
    self = mavutil.mavfile_global
    if ekf_home is None:
        # Need a 3D fix (Status == 3) before home can be established.
        if not 'GPS' in self.messages or self.messages['GPS'].Status != 3:
            return None
        ekf_home = self.messages['GPS']
        # Shift the current GPS position back by the EKF offsets so that
        # ekf_home represents the EKF origin, not the current location.
        (ekf_home.Lat, ekf_home.Lng) = gps_offset(ekf_home.Lat, ekf_home.Lng,
                                                  -EKF1.PE, -EKF1.PN)
    (lat, lon) = gps_offset(ekf_home.Lat, ekf_home.Lng, EKF1.PE, EKF1.PN)
    return (lat, lon)