code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def SaveData(self, raw_data):
    """Store the raw data as our configuration.

    Writes ``raw_data`` to ``self.filename`` (creating the parent
    directory if needed).  Write failures are logged, not raised.

    Raises:
        IOError: if no filename has been configured.
    """
    if self.filename is None:
        raise IOError("Unknown filename")
    logging.info("Writing back configuration to file %s", self.filename)
    # Best-effort creation of the parent directory.
    try:
        os.makedirs(os.path.dirname(self.filename))
    except (IOError, OSError):
        pass
    try:
        # Create/truncate with owner-only permissions.
        handle = os.open(self.filename,
                         os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
        with os.fdopen(handle, "wb") as config_file:
            self.SaveDataToFD(raw_data, config_file)
    except OSError as e:
        logging.warning("Unable to write config file %s: %s.",
                        self.filename, e)
def float(self, **kwargs):
    """Remove power from the motor.

    Any keyword arguments are first copied onto the instance, then the
    command is switched to ``COMMAND_FLOAT``.
    """
    for key, value in kwargs.items():
        setattr(self, key, value)
    self.command = self.COMMAND_FLOAT
def _sexagesimalize_to_int(value, places=0): sign = int(np.sign(value)) value = abs(value) power = 10 ** places n = int(7200 * power * value + 1) // 2 n, fraction = divmod(n, power) n, seconds = divmod(n, 60) n, minutes = divmod(n, 60) return sign, n, minutes, seconds, fraction
Decompose `value` into units, minutes, seconds, and second fractions. This routine prepares a value for sexagesimal display, with its seconds fraction expressed as an integer with `places` digits. The result is a tuple of five integers: ``(sign [either +1 or -1], units, minutes, seconds, second_fractions)`` The integers are properly rounded per astronomical convention so that, for example, given ``places=3`` the result tuple ``(1, 11, 22, 33, 444)`` means that the input was closer to 11u 22' 33.444" than to either 33.443" or 33.445" in its value.
def open_ioc(fn):
    """Open an IOC file (or XML string) and return its key elements.

    :param fn: path to an IOC file, or a string containing IOC XML.
    :return: tuple ``(root, metadata_node, top_level_indicator,
        parameters_node)``.
    :raises IOCParseError: if the XML cannot be parsed.
    """
    tree = xmlutils.read_xml_no_ns(fn)
    if not tree:
        raise IOCParseError('Error occured parsing XML')
    root = tree.getroot()
    metadata_node = root.find('metadata')
    top_level_indicator = get_top_level_indicator_node(root)
    parameters_node = root.find('parameters')
    if parameters_node is None:
        # Guarantee a parameters node exists so callers can append to it.
        parameters_node = ioc_et.make_parameters_node()
        root.append(parameters_node)
    return root, metadata_node, top_level_indicator, parameters_node
async def _load(self, data, check=True):
    """Look for proxies in the passed data.

    Accepts a raw string, a file-like object or an iterable and reduces
    it to a set of ``(host, port)`` pairs before handling each proxy.
    """
    log.debug('Load proxies from the raw data')
    if isinstance(data, io.TextIOWrapper):
        data = data.read()
    if isinstance(data, str):
        data = IPPortPatternLine.findall(data)
    for proxy in set(data):
        await self._handle(proxy, check=check)
    await self._on_check.join()
    self._done()
def list_arguments(self):
    """List the names of all arguments required to compute this symbol.

    Returns
    -------
    args : list of str
        Names of all the arguments of the symbol.
    """
    size = ctypes.c_uint()
    names = ctypes.POINTER(ctypes.c_char_p)()
    check_call(_LIB.MXSymbolListArguments(
        self.handle, ctypes.byref(size), ctypes.byref(names)))
    return [py_str(names[i]) for i in range(size.value)]
def visit_Name(self, node):
    """Record any name bound in a Store or Param context as a new identifier."""
    if not isinstance(node.ctx, (ast.Store, ast.Param)):
        return
    self.result.add(node.id)
def parse_rule(rule: str, raise_error=False):
    """Parse a policy string into a tree of Check objects."""
    return Parser(raise_error).parse(rule)
def create(url, filename):
    """Create a new fMRI resource for an experiment by uploading a
    local tar-archive.

    Parameters
    ----------
    url : str
        Url to POST the fMRI create request to.
    filename : str
        Path to the tar-archive on local disk.

    Returns
    -------
    str
        Url of the created functional data resource.

    Raises
    ------
    ValueError
        If the server rejects the file (status code != 201).
    """
    # BUG FIX: the original opened the file without ever closing it,
    # leaking the handle; use a context manager instead.
    with open(filename, 'rb') as fh:
        response = requests.post(url, files={'file': fh})
    if response.status_code != 201:
        raise ValueError('invalid file: ' + filename)
    return references_to_dict(response.json()['links'])[REF_SELF]
def _Notify(username, notification_type, message, object_reference):
    """Schedule a new-style REL_DB user notification.

    Notifications addressed to system users are silently dropped.  When
    the reference points at a client, the message is prefixed with the
    host name.
    """
    if username in aff4_users.GRRUser.SYSTEM_USERS:
        return
    if object_reference:
        cast = object_reference.UnionCast()
        if hasattr(cast, "client_id"):
            message = _HostPrefix(cast.client_id) + message
    notification = rdf_objects.UserNotification(
        username=username,
        notification_type=notification_type,
        state=rdf_objects.UserNotification.State.STATE_PENDING,
        message=message,
        reference=object_reference)
    data_store.REL_DB.WriteUserNotification(notification)
def create(cls, data=None, *args, **kwargs):
    """Validate and then create a Vendor entity.

    Delegates to ``Entity.create`` after validation.  The original
    discarded ``Entity.create``'s return value; it is now returned
    (backward compatible: callers previously received None).
    """
    cls.validate(data)
    return getattr(Entity, 'create').__func__(cls, data=data, *args, **kwargs)
def dump(self, blob, stream):
    """Serialize `blob` to `stream` as JSON using this instance's
    ``indent`` and ``separators`` settings (keys always sorted)."""
    json.dump(blob, stream, indent=self.indent, sort_keys=True,
              separators=self.separators)
def on_accel_cleared(self, cellrendereraccel, path):
    """Handle clearing of a keybinding (backspace in the accel cell).

    Fills the model row with an empty key and writes 'disabled' to the
    corresponding dconf path.  The 'show-hide' hotkey may not be
    disabled.
    """
    dconf_path = self.store[path][HOTKET_MODEL_INDEX_DCONF]
    if dconf_path == "show-hide":
        # BUG FIX: the original referenced an undefined name
        # `old_accel` here and crashed with NameError; simply refuse
        # to clear this binding instead (model/dconf stay unchanged).
        log.warn("Cannot disable 'show-hide' hotkey")
    else:
        self.store[path][HOTKET_MODEL_INDEX_HUMAN_ACCEL] = ""
        self.store[path][HOTKET_MODEL_INDEX_ACCEL] = "None"
        if dconf_path == "show-focus":
            self.settings.keybindingsGlobal.set_string(dconf_path, 'disabled')
        else:
            self.settings.keybindingsLocal.set_string(dconf_path, 'disabled')
def authenticate(self, username, password):
    """Obtain an oauth token for the given credentials.

    If remembering is enabled the token is cached under the username.

    :param username: username
    :param password: password
    :return: tuple ``(token, expiration unix timestamp, refresh_token)``
    :raises ServerError: on a non-200 response
    """
    r = requests.post(self.apiurl + "/token",
                      params={"grant_type": "password",
                              "username": username,
                              "password": password,
                              "client_id": self.cid,
                              "client_secret": self.csecret})
    if r.status_code != 200:
        raise ServerError
    payload = r.json()
    expiration = int(payload['created_at']) + int(payload['expires_in'])
    if self.remember:
        self.token_storage[username] = {'token': payload['access_token'],
                                        'refresh': payload['refresh_token'],
                                        'expiration': expiration}
    return payload['access_token'], expiration, payload['refresh_token']
def read_gps_ifd(fh, byteorder, dtype, count, offsetsize):
    """Read GPS tags from file and return them as a dict.

    `dtype` and `count` are accepted for signature compatibility with
    the other IFD readers but are not used here.
    """
    return read_tags(fh, byteorder, offsetsize, TIFF.GPS_TAGS, maxifds=1)
def read(self, fname, psw=None):
    """Return uncompressed data for an archive entry.

    For longer files using :meth:`RarFile.open` may be a better idea.

    Parameters:
        fname: filename or RarInfo instance
        psw: password to use for extracting
    """
    with self.open(fname, 'r', psw) as src:
        return src.read()
def gen_WS_DF(df_WS_data):
    """Generate a DataFrame of weighted sums.

    Each (statistic, variable) pair from the full-statistics frame is
    scaled by its configured constant and the scaled series are summed.

    Parameters
    ----------
    df_WS_data : type
        Raw WS data frame.

    Returns
    -------
    type
        DataFrame of weighted sums.
    """
    df_fs = gen_FS_DF(df_WS_data)
    pairs = [('mean', 'T2'), ('max', 'T2'), ('min', 'T2'),
             ('mean', 'U10'), ('max', 'U10'), ('min', 'U10'),
             ('mean', 'RH2'), ('max', 'RH2'), ('min', 'RH2'),
             ('mean', 'Kdown')]
    const_names = ['T_MEAN', 'T_MAX', 'T_MIN',
                   'WIND_MEAN', 'WIND_MAX', 'WIND_MIN',
                   'RH_MEAN', 'RH_MAX', 'RH_MIN',
                   'SOLAR_RADIATION_GLOBAL']
    weights = [getattr(const, attr) for attr in const_names]
    weighted = [df_fs.loc[idx] * w for idx, w in zip(pairs, weights)]
    return pd.concat(weighted, axis=1).sum(axis=1).unstack().dropna()
def average(self, rows: List[Row], column: NumberColumn) -> Number:
    """Return the mean of the values under `column` across `rows`.

    Returns 0.0 when `rows` is empty.
    """
    values = [row.values[column.name] for row in rows]
    if not values:
        return 0.0
    return sum(values) / len(values)
def name_resolve(self, name=None, recursive=False, nocache=False, **kwargs):
    """Get the value currently published at an IPNS name.

    IPNS is a PKI namespace where names are hashes of public keys.  The
    default value of ``name`` is the connected node's own identity key.

    Parameters
    ----------
    name : str
        The IPNS name to resolve (defaults to the connected node)
    recursive : bool
        Resolve until the result is not an IPFS name (default: false)
    nocache : bool
        Do not use cached entries (default: false)

    Returns
    -------
    dict : The IPFS path the IPNS hash points at
    """
    kwargs.setdefault("opts", {"recursive": recursive, "nocache": nocache})
    args = () if name is None else (name,)
    return self._client.request('/name/resolve', args, decoder='json', **kwargs)
def open_conn(host, db, user, password, retries=0, sleep=0.5):
    """Return an open mysql db connection using the given credentials.

    Use `retries` and `sleep` to be robust to occasional transient
    connection failures.

    retries: on exception, retry the connection at most this many times.
    sleep: seconds to pause between retries (float >= 0).
    """
    assert retries >= 0
    try:
        return MySQLdb.connect(host=host, user=user, passwd=password, db=db)
    except Exception:
        if retries <= 0:
            raise
        time.sleep(sleep)
        return open_conn(host, db, user, password, retries - 1, sleep)
def clone(self, repo, ref, deps=()):
    """Clone the given url and check out the specific ref."""
    if os.path.isdir(repo):
        repo = os.path.abspath(repo)

    def clone_strategy(directory):
        env = git.no_git_env()

        def run_git(*args):
            cmd_output('git', *args, cwd=directory, env=env)

        run_git('init', '.')
        run_git('remote', 'add', 'origin', repo)
        # Prefer a cheap shallow clone; fall back to a complete one.
        try:
            self._shallow_clone(ref, run_git)
        except CalledProcessError:
            self._complete_clone(ref, run_git)

    return self._new_repo(repo, ref, deps, clone_strategy)
def write_grindstone(self):
    """Serialize ``self.grindstone`` as JSON to ``self.grindstone_path``."""
    serialized = json.dumps(self.grindstone)
    with open(self.grindstone_path, 'w') as out:
        out.write(serialized)
def likelihood(self, outcomes, modelparams, expparams):
    r"""Calculate the probability of each given outcome, conditioned on
    each given model parameter vector and each experimental setting.

    :param np.ndarray modelparams: shape ``(n_models, n_modelparams)``
        array of model parameter vectors describing the hypotheses.
    :param np.ndarray expparams: shape ``(n_experiments,)`` array of
        experimental control settings.
    :rtype: np.ndarray
    :return: a three-index tensor ``L[i, j, k]`` of likelihoods
        :math:`\Pr(d_i | \vec{x}_j; e_k)`.
    """
    # BUG FIX: a stray bare `r` (the orphaned raw-docstring prefix) was
    # left in the body and would raise NameError; it has been removed.
    # Subclasses are expected to extend this with the actual likelihood
    # computation; this base implementation only counts calls.
    self._call_count += (
        safe_shape(outcomes) * safe_shape(modelparams) * safe_shape(expparams)
    )
def to_files(self, resource, directory):
    """Dump `resource` and all resources linked to it into a set of
    representation files in `directory`, one file per collection."""
    for mb_cls, coll in iteritems_(self.__collect(resource)):
        fn = get_write_collection_path(mb_cls, self.__content_type,
                                       directory=directory)
        with open_text(os.path.join(directory, fn)) as strm:
            dump_resource(coll, strm, content_type=self.__content_type)
def status_code(code):
    """Return a response object with the given HTTP status code.

    A handful of codes get special payloads and/or headers (redirect
    location, auth challenges, joke bodies); any unlisted code is
    returned with the default body and headers.
    """
    # Shared payload for all redirect-style codes.
    redirect = dict(headers=dict(location=REDIRECT_LOCATION))
    code_map = {
        301: redirect,
        302: redirect,
        303: redirect,
        304: dict(data=''),
        305: redirect,
        307: redirect,
        401: dict(headers={'WWW-Authenticate': 'Basic realm="Fake Realm"'}),
        402: dict(
            data='Fuck you, pay me!',
            headers={
                'x-more-info': 'http://vimeo.com/22053820'
            }
        ),
        406: dict(data=json.dumps({
            'message': 'Client did not request a supported media type.',
            'accept': ACCEPTED_MEDIA_TYPES
        }),
            headers={
                'Content-Type': 'application/json'
            }),
        407: dict(headers={'Proxy-Authenticate': 'Basic realm="Fake Realm"'}),
        418: dict(
            data=ASCII_ART,
            headers={
                'x-more-info': 'http://tools.ietf.org/html/rfc2324'
            }
        ),
    }
    r = make_response()
    r.status_code = code
    # Apply any special payload/headers registered for this code.
    if code in code_map:
        m = code_map[code]
        if 'data' in m:
            r.data = m['data']
        if 'headers' in m:
            r.headers = m['headers']
    return r
def execute(self, *args, **options):
    """Run the command, emailing ADMINS on unhandled exceptions.

    If ``--email-exception`` was passed or ``self.email_exception`` is
    true, a traceback email is sent before re-raising the exception.
    """
    try:
        super(EmailNotificationCommand, self).execute(*args, **options)
    except Exception:
        should_email = options['email_exception'] or getattr(
            self, 'email_exception', False)
        if should_email:
            self.send_email_notification(include_traceback=True)
        raise
def open(self):
    """Open the hardware manager and potentially connect to a device.

    Connects directly when a connection string is set, otherwise by
    connect id when set; on connection failure the manager is closed
    before the error propagates.
    """
    self.hwman = HardwareManager(port=self._port)
    self.opened = True
    if self._connection_string is not None:
        connector, argument = self.hwman.connect_direct, self._connection_string
    elif self._connect_id is not None:
        connector, argument = self.hwman.connect, self._connect_id
    else:
        return
    try:
        connector(argument)
    except HardwareError:
        self.hwman.close()
        raise
def query_string_attribute(self, target, display_mask, attr):
    """Return the value of a string attribute, or None when the query
    fails (reply flags unset)."""
    reply = NVCtrlQueryStringAttributeReplyRequest(
        display=self.display,
        opcode=self.display.get_extension_major(extname),
        target_id=target.id(),
        target_type=target.type(),
        display_mask=display_mask,
        attr=attr)
    if not reply._data.get('flags'):
        return None
    # Strip the trailing NUL terminator(s) from the raw string.
    return str(reply._data.get('string')).strip('\0')
def compute_knot_vector(degree, num_points, params):
    """Compute a knot vector from the parameter list using the averaging
    method (The NURBS Book 2nd ed., Eq. 9.8, p. 365).

    :param degree: degree
    :type degree: int
    :param num_points: number of data points
    :type num_points: int
    :param params: list of parameters
    :type params: list, tuple
    :return: knot vector
    :rtype: list
    """
    # (degree + 1) clamped knots at each end; interior knots are the
    # averages of consecutive parameter windows.
    head = [0.0] * (degree + 1)
    tail = [1.0] * (degree + 1)
    interior = []
    for i in range(num_points - degree - 1):
        window = params[i + 1:i + degree + 1]
        interior.append((1.0 / degree) * sum(window))
    return head + interior + tail
def delete(self, task, params=None, **options):
    """Delete a specific, existing task.

    Deleted tasks go into the user's trash and can be recovered within
    30 days; afterwards they are removed completely.  Returns an empty
    data record.

    Parameters
    ----------
    task : {Id} The task to delete.
    """
    # BUG FIX: the default was a shared mutable dict (`params={}`);
    # use None as the sentinel instead.
    if params is None:
        params = {}
    path = "/tasks/%s" % (task)
    return self.client.delete(path, params, **options)
def listen(ctx):
    """Output the contents of the WVA event stream.

    Pretty-prints each event received from the WVA event stream (based
    on the subscriptions that have been set up) until interrupted.
    Useful for debugging subscriptions or peeking at incoming data.
    """
    wva = get_wva(ctx)
    stream = wva.get_event_stream()
    stream.add_event_listener(lambda event: cli_pprint(event))
    stream.enable()
    # Keep the process alive; events arrive on the stream's own thread.
    while True:
        time.sleep(5)
def asset(self, id):
    """Return a single Asset, or None when `id` is not positive or the
    asset is not found.

    :param int id: (required), id of the asset
    :returns: :class:`Asset <github3.repos.release.Asset>`
    """
    if int(id) <= 0:
        return None
    url = self._build_url('releases', 'assets', str(id), base_url=self._api)
    data = self._json(self._get(url, headers=Release.CUSTOM_HEADERS), 200)
    return Asset(data, self) if data else None
def mean_cl_boot(series, n_samples=1000, confidence_interval=0.95,
                 random_state=None):
    """Bootstrapped mean with confidence limits."""
    return bootstrap_statistics(
        series, np.mean,
        n_samples=n_samples,
        confidence_interval=confidence_interval,
        random_state=random_state)
def assert_not_called(_mock_self):
    """Assert that the mock was never called."""
    self = _mock_self
    if self.call_count == 0:
        return
    raise AssertionError(
        "Expected '%s' to not have been called. Called %s times." % (
            self._mock_name or 'mock', self.call_count))
def rfc2822_format(val):
    """Return `val` as a string in RFC 2822 format.

    Accepts a date, a datetime, a number (unix timestamp) or a string.
    A string is returned unchanged; any unrecognised type is returned
    as-is.
    """
    # `six.string_types` replaced with `str`: removes the third-party
    # dependency; identical behavior on Python 3.
    if isinstance(val, str):
        return val
    if isinstance(val, (datetime.datetime, datetime.date)):
        val = time.mktime(val.timetuple())
    if isinstance(val, numbers.Number):
        return email.utils.formatdate(val)
    return val
def parse_url_to_dict(url):
    """Parse a url and return a dict with keys for all of its parts.

    Flattens the awkward namedtuple-with-properties that ``urlparse``
    returns into a plain dict.
    """
    parts = urlparse(url)
    keys = ('scheme', 'netloc', 'path', 'params', 'query', 'fragment',
            'username', 'password', 'hostname', 'port')
    return {key: getattr(parts, key) for key in keys}
def delete_project(self):
    """Delete the current project without deleting the files on disk.

    Asks for confirmation, closes the project and removes only the
    ``.spyproject`` metadata directory.
    """
    if not self.current_active_project:
        return
    self.switch_to_plugin()
    path = self.current_active_project.root_path
    buttons = QMessageBox.Yes | QMessageBox.No
    answer = QMessageBox.warning(
        self, _("Delete"),
        # BUG FIX: the template contained a literal "(unknown)" instead
        # of the {filename} placeholder, so the project name passed to
        # .format() never appeared in the dialog.
        _("Do you really want to delete <b>{filename}</b>?<br><br>"
          "<b>Note:</b> This action will only delete the project. "
          "Its files are going to be preserved on disk."
          ).format(filename=osp.basename(path)),
        buttons)
    if answer == QMessageBox.Yes:
        try:
            self.close_project()
            shutil.rmtree(osp.join(path, '.spyproject'))
        except EnvironmentError as error:
            QMessageBox.critical(
                self, _("Project Explorer"),
                _("<b>Unable to delete <i>{varpath}</i></b>"
                  "<br><br>The error message was:<br>{error}"
                  ).format(varpath=path, error=to_text_string(error)))
def real_dtype(self):
    """Return the dtype corresponding to this dtype's real part."""
    base = self.base_dtype
    if base == complex64:
        return float32
    if base == complex128:
        return float64
    # Already a real dtype: return self unchanged.
    return self
def is_adjacent_before(self, other):
    """Return ``True`` if this time interval ends exactly when the given
    other time interval begins.

    :param other: the other interval
    :type other: :class:`~aeneas.exacttiming.TimeInterval`
    :raises TypeError: if ``other`` is not a ``TimeInterval``
    :rtype: bool
    """
    if not isinstance(other, TimeInterval):
        raise TypeError(u"other is not an instance of TimeInterval")
    return self.end == other.begin
def csd(timeseries, other, segmentlength, noverlap=None, **kwargs):
    """Calculate the cross-spectral density of two `TimeSeries` using
    Welch's method.

    Parameters
    ----------
    timeseries, other : `~gwpy.timeseries.TimeSeries`
        the two time-series of data
    segmentlength : `int`
        number of samples in a single average
    noverlap : `int`
        samples to overlap between segments (defaults per scipy)
    **kwargs
        passed to :meth:`scipy.signal.csd`

    Returns
    -------
    `~gwpy.frequencyseries.FrequencySeries`
    """
    try:
        freqs, csd_ = scipy.signal.csd(
            timeseries.value, other.value, noverlap=noverlap,
            fs=timeseries.sample_rate.decompose().value,
            nperseg=segmentlength, **kwargs)
    except AttributeError as exc:
        # scipy.signal.csd first appeared in scipy 0.16.
        exc.args = ('{}, scipy>=0.16 is required'.format(str(exc)),)
        raise
    unit = scale_timeseries_unit(timeseries.unit,
                                 kwargs.get('scaling', 'density'))
    combined_name = str(timeseries.name) + '---' + str(other.name)
    return FrequencySeries(csd_, unit=unit, frequencies=freqs,
                           name=combined_name, epoch=timeseries.epoch,
                           channel=timeseries.channel)
def cast_to_seq_record(obj, alphabet=IUPAC.extended_protein,
                       id="<unknown id>", name="<unknown name>",
                       description="<unknown description>", dbxrefs=None,
                       features=None, annotations=None,
                       letter_annotations=None):
    """Return a SeqRecord representation of a string, Seq or SeqRecord.

    Args:
        obj (str, Seq, SeqRecord): sequence string or Biopython Seq object
        alphabet, id, name, description, dbxrefs, features, annotations,
        letter_annotations: see Biopython SeqRecord docs

    Returns:
        SeqRecord: SeqRecord representation of the sequence

    Raises:
        ValueError: for any other input type.
    """
    if isinstance(obj, SeqRecord):
        return obj
    if isinstance(obj, Seq):
        seq = obj
    elif isinstance(obj, str):
        # Strings are normalised to upper case before wrapping.
        seq = Seq(obj.upper(), alphabet)
    else:
        raise ValueError('Must provide a string, Seq, or SeqRecord object.')
    return SeqRecord(seq, id, name, description, dbxrefs, features,
                     annotations, letter_annotations)
def GetAdGroups(self, client_customer_id, campaign_id):
    """Retrieve all non-removed AdGroups for the given campaign.

    Args:
        client_customer_id: str Client Customer Id used in the API request.
        campaign_id: str id of the campaign to fetch ad groups for.

    Returns:
        list of AdGroup data objects, or None when there are none.
    """
    self.client.SetClientCustomerId(client_customer_id)
    selector = {
        'fields': ['Id', 'Name', 'Status'],
        'predicates': [
            {'field': 'CampaignId', 'operator': 'EQUALS',
             'values': [campaign_id]},
            {'field': 'Status', 'operator': 'NOT_EQUALS',
             'values': ['REMOVED']},
        ],
    }
    adgroups = self.client.GetService('AdGroupService').get(selector)
    if int(adgroups['totalNumEntries']) > 0:
        return adgroups['entries']
    return None
def flexibility(communities):
    """Return per-node flexibility: the fraction of time steps at which a
    node changes its community label.

    Parameters
    ----------
    communities : array
        Community array of shape (node, time).  Labels must be
        consistent across time points (not arbitrary per-slice).

    Returns
    -------
    flex : array
        Flexibility of each node, normalized by the number of possible
        changes.
    """
    n_nodes, n_time = communities.shape
    switches = np.zeros(n_nodes)
    for t in range(1, n_time):
        changed = communities[:, t] != communities[:, t - 1]
        switches[changed] += 1
    return switches / (n_time - 1)
def get_content_models(self):
    """Return all admin-registered Page subclasses, ordered per the
    ``ADD_PAGE_ORDER`` setting (unlisted models sort last, by verbose
    name)."""
    models = super(PageAdmin, self).get_content_models()
    order = [name.lower() for name in settings.ADD_PAGE_ORDER]

    def sort_key(page):
        name = "%s.%s" % (page._meta.app_label, page._meta.object_name)
        try:
            return (order.index(name.lower()), "")
        except ValueError:
            # Not configured: sort after all configured models.
            return (len(order), page.meta_verbose_name)

    return sorted(models, key=sort_key)
def option_from_wire(otype, wire, current, olen):
    """Build an EDNS option object from wire format.

    @param otype: The option type
    @type otype: int
    @param wire: The wire-format message
    @type wire: string
    @param current: The offset in wire of the beginning of the rdata.
    @type current: int
    @param olen: The length of the wire-format option data
    @type olen: int
    @rtype: dns.edns.Option instance
    """
    option_class = get_option_class(otype)
    return option_class.from_wire(otype, wire, current, olen)
async def execute(self, dc=None, token=None):
    """Execute the stored transaction operations.

    Parameters:
        dc (str): Datacenter to use; defaults to the agent's local
            datacenter.
        token (ObjectID): Token ID.
    Returns:
        Collection: Results of the operations.
    Raises:
        TransactionError: When the transaction fails.
    """
    token_id = extract_attr(token, keys=["ID"])
    try:
        response = await self._api.put(
            "/v1/txn",
            data=self.operations,
            params={"dc": dc, "token": token_id})
    except ConflictError as error:
        errors = {elt["OpIndex"]: elt for elt in error.value["Errors"]}
        operations = [op["KV"] for op in self.operations]
        raise TransactionError(errors, operations, error.meta) from error
    # NOTE: the original had `except Exception as error: raise error`,
    # a no-op clause that only muddied the traceback; removed.
    self.operations[:] = []
    results = []
    for item in response.body["Results"]:
        data = item["KV"]
        if data["Value"] is not None:
            data["Value"] = decode_value(data["Value"], data["Flags"])
        results.append(data)
    return results
def create_view(self, callback, method, request=None):
    """Given a callback, return an actual view instance, excluding
    disabled actions from the schema."""
    view = super(WaldurSchemaGenerator, self).create_view(
        callback, method, request)
    if is_disabled_action(view):
        view.exclude_from_schema = True
    return view
def get_initial_data(self, request, user, profile, client):
    """Return initial data for the setup form.

    Controlled with ``SOCIALREGISTRATION_INITIAL_DATA_FUNCTION``;
    returns an empty dict when no function is configured.

    :param request: The current request object
    :param user: The unsaved user object
    :param profile: The unsaved profile object
    :param client: The API client
    """
    if not INITAL_DATA_FUNCTION:
        return {}
    func = self.import_attribute(INITAL_DATA_FUNCTION)
    return func(request, user, profile, client)
def _get_credentials_from_settings(self):
    """Return the stored ``(username, remember_me, remember_token)``.

    The username is blanked when remember-me is off.
    """
    remember_me = CONF.get('main', 'report_error/remember_me')
    remember_token = CONF.get('main', 'report_error/remember_token')
    if remember_me:
        username = CONF.get('main', 'report_error/username', '')
    else:
        username = ''
    return username, remember_me, remember_token
def set_ylim(self, xlims, dx, xscale, reverse=False):
    """Set y-axis limits for this plot.

    Args:
        xlims (len-2 list of floats): The limits for the y axis.
        dx (float): Amount to increment by between the limits.
        xscale (str): Scale of the axis, either `log` or `lin`.
        reverse (bool, optional): If True, reverse the axis tick marks.
            Default is False.
    """
    # NOTE(review): the parameter names say "x" but this configures the
    # y axis — kept for backward compatibility with keyword callers.
    self._set_axis_limits('y', xlims, dx, xscale, reverse)
def markAsDelivered(self, thread_id, message_id):
    """Mark a message as delivered.

    :param thread_id: User/Group ID to which the message belongs
    :param message_id: Message ID to set as delivered
    :return: Whether the request was successful
    :raises: FBchatException if request failed
    """
    payload = {
        "message_ids[0]": message_id,
        "thread_ids[%s][0]" % thread_id: message_id,
    }
    return self._post(self.req_url.DELIVERED, payload).ok
def Reset(self):
    """Reset the pager to the top of the text."""
    # Nothing shown yet, current page empty, default scroll step,
    # and a full terminal page of lines to show.
    self._displayed = self._currentpagelines = 0
    self._lastscroll = 1
    self._lines_to_show = self._cli_lines
def reverse(self, query, lang='en', exactly_one=True, timeout=DEFAULT_SENTINEL):
    """Return a `3 words` address for a location point.  Every point on
    the surface has one, so the response is always non-empty.

    :param query: coordinates to obtain the 3-word address for
        (Point, ``(lat, lon)`` pair, or ``"lat, lon"`` string).
    :param str lang: two-character language code supported by the API.
    :param bool exactly_one: return one result or a list of results.
    :param int timeout: seconds to wait for the geocoding service
        before raising :class:`geopy.exc.GeocoderTimedOut`.
    :rtype: :class:`geopy.location.Location` or a list of them.
    """
    params = {
        'coords': self._coerce_point_to_string(query),
        'lang': lang.lower(),
        'key': self.api_key,
    }
    url = "?".join((self.reverse_api, urlencode(params)))
    logger.debug("%s.reverse: %s", self.__class__.__name__, url)
    response = self._call_geocoder(url, timeout=timeout)
    return self._parse_reverse_json(response, exactly_one=exactly_one)
def set_status(self, value):
    """Set the motor status to `value` if not already set; logs the
    transition and fires the status-changed callback on change."""
    if self._status == value:
        return
    previous = self._status
    self._status = value
    logger.info("{} changing status from {} to {}".format(
        self, previous.name, value.name))
    self._statusChanged(previous, value)
def is_missing_variable(
    value_node: ValueNode, variables: Dict[str, Any] = None
) -> bool:
    """Check if `value_node` is a variable not defined in the `variables` dict."""
    if not isinstance(value_node, VariableNode):
        return False
    if not variables:
        return True
    return is_invalid(variables.get(value_node.name.value, INVALID))
def almostequal(first, second, places=7, printit=True):
    """Test whether two values are equal to a given number of decimal
    places.

    Based on python's unittest, so may be covered by Python's license.
    """
    if first == second:
        return True
    delta = round(abs(second - first), places)
    if delta == 0:
        return True
    if printit:
        print(delta)
        print("notalmost: %s != %s to %i places" % (first, second, places))
    return False
def parse(self, extent, length, fp, log_block_size):
    """Parse an existing Inode, recording where its data lives on the
    original ISO for later use.

    Parameters:
        extent - The original extent that the data lives at.
        length - Length of the data in bytes.
        fp - File object the data can be read from.
        log_block_size - Logical block size used to locate the data.
    Returns:
        Nothing.
    """
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('Inode is already initialized')
    self.orig_extent_loc = extent
    self.data_length = length
    self.data_fp = fp
    # This fp is owned by the caller, not managed by this Inode.
    self.manage_fp = False
    self.fp_offset = extent * log_block_size
    self.original_data_location = self.DATA_ON_ORIGINAL_ISO
    self._initialized = True
def get_fragment(self, offset):
    """Return a repr of the ~10 source characters at `offset` (the part
    causing a problem), with an ellipsis when more text follows."""
    window = 10
    fragment = '%r' % (self.source[offset:offset + window])
    if offset + window < len(self.source):
        fragment += '...'
    return fragment
def handle_reboot(self):
    """Properly manage the Mobly service life cycle across a device
    reboot.

    Stops all services before yielding control; on exit (even on error)
    waits for boot completion, re-roots adb when possible, and restarts
    the services.  For sample usage, see self.reboot().
    """
    self.services.stop_all()
    try:
        yield
    finally:
        # Restore services regardless of what happened while rebooting.
        self.wait_for_boot_completion()
        if self.is_rootable:
            self.root_adb()
        self.services.start_all()
def label(self):
    """Return the first child of this column that is marked as a label,
    or None when there is none."""
    for child in self.table.columns:
        if child.parent == self.name and 'label' in child.valuetype:
            return PartitionColumn(child, self._partition)
def add_time(data):
    """Add a friendly update time to the supplied data.

    Arguments:
        data (:py:class:`dict`): The response data and its update time.

    Returns:
        :py:class:`dict`: The data with a friendly update time.
    """
    payload = data['data']
    updated = data['updated'].date()
    today = date.today()
    if updated == today:
        friendly = data['updated'].strftime('today at %H:%M:%S')
    elif updated >= today - timedelta(days=1):
        friendly = 'yesterday'
    elif updated >= today - timedelta(days=7):
        friendly = updated.strftime('on %A')
    else:
        friendly = updated.strftime('%Y-%m-%d')
    payload['last_updated'] = friendly
    return payload
Add a friendly update time to the supplied data. Arguments: data (:py:class:`dict`): The response data and its update time. Returns: :py:class:`dict`: The data with a friendly update time.
def calc_fwhm(img, region, fexpand=3, axis=0):
    """Compute the FWHM of a slit profile in the direction given by ``axis``.

    The region is expanded by ``fexpand`` pixels, collapsed along ``axis``,
    and a linear background (from the profile end points) is subtracted
    before measuring the FWHM around the peak.
    """
    wide_region = expand_region(region, fexpand, fexpand)
    profile = img[wide_region].mean(axis=axis)
    npts = len(profile)
    first, last = profile[0], profile[-1]
    # Linear background estimated from the two end points of the profile.
    slope = (last - first) / npts
    background = slope * numpy.arange(npts) + first
    corrected = profile - background
    peak_idx = numpy.argmax(corrected)
    peak, fwhm = fmod.compute_fwhm_1d_simple(corrected, peak_idx)
    return fwhm
Compute the FWHM in the direction given by axis
def inspect(self, **kwargs):
    """Plot the evolution of the structural relaxation with matplotlib.

    Args:
        what: Either "hist" (default, plot from the HIST file) or "scf"
            (plot SCF-cycle data parsed from the main output file).

    Returns:
        `matplotlib` figure, None if some error occurred.
    """
    what = kwargs.pop("what", "hist")
    if what == "hist":
        with self.open_hist() as hist:
            return hist.plot(**kwargs) if hist else None
    if what == "scf":
        relaxation = abiinspect.Relaxation.from_file(self.output_file.path)
        kwargs.setdefault("title", str(self))
        return relaxation.plot(**kwargs) if relaxation is not None else None
    raise ValueError("Wrong value for what %s" % what)
Plot the evolution of the structural relaxation with matplotlib. Args: what: Either "hist" or "scf". The first option (default) extracts data from the HIST file and plot the evolution of the structural parameters, forces, pressures and energies. The second option, extracts data from the main output file and plot the evolution of the SCF cycles (etotal, residuals, etc). Returns: `matplotlib` figure, None if some error occurred.
def freeze(self):
    """Run ``pip freeze`` on the remote venv and fetch the result into the local requirements file."""
    remote_requirements = os.path.join(self.venv, 'requirements.txt')
    run('%s freeze > %s' % (self.pip(), remote_requirements))
    get(remote_requirements, self.requirements)
Use pip to freeze the requirements and save them to the local requirements.txt file.
def dump_json(obj):
    """Dump a Python object as a JSON string (NaN encoded as null, BSON types handled)."""
    text = simplejson.dumps(obj, ignore_nan=True, default=json_util.default)
    return text
Dump Python object as JSON string.
def exists(self, file_path, check_link=False):
    """Return true if a path points to an existing fake file system object.

    Args:
        file_path: The path to examine.
        check_link: If True, a (possibly broken) symlink at the path counts
            as existing without resolving its target.

    Returns:
        (bool) True if the corresponding object exists.

    Raises:
        TypeError: if file_path is None.
    """
    # A symlink "exists" even when its target does not, if asked explicitly.
    if check_link and self.islink(file_path):
        return True
    file_path = make_string_path(file_path)
    if file_path is None:
        raise TypeError
    if not file_path:
        return False
    # The null device only exists on POSIX-style fake file systems.
    if file_path == self.dev_null.name:
        return not self.is_windows_fs
    try:
        # A path with a trailing separator that isn't a directory cannot exist.
        if self.is_filepath_ending_with_separator(file_path):
            return False
        file_path = self.resolve_path(file_path)
    except (IOError, OSError):
        # Unresolvable path (broken link, missing parent, ...) => not existing.
        return False
    if file_path == self.root.name:
        return True
    # Walk the resolved components down from the fake root directory.
    path_components = self._path_components(file_path)
    current_dir = self.root
    for component in path_components:
        current_dir = self._directory_content(current_dir, component)[1]
        if not current_dir:
            return False
    return True
Return true if a path points to an existing file system object. Args: file_path: The path to examine. Returns: (bool) True if the corresponding object exists. Raises: TypeError: if file_path is None.
def upload_profiler_report(url, filename, config):
    """Upload a profiler report to IOpipe and delete the local file afterwards.

    :param url: The signed URL
    :param filename: The profiler report file
    :param config: The IOpipe config
    """
    try:
        logger.debug("Uploading profiler report to IOpipe")
        with open(filename, "rb") as data:
            resp = requests.put(url, data=data, timeout=config["network_timeout"])
            resp.raise_for_status()
    except Exception as exc:
        logger.debug("Error while uploading profiler report: %s", exc)
        if hasattr(exc, "response"):
            logger.debug(exc.response.content)
    else:
        logger.debug("Profiler report uploaded successfully")
    finally:
        # Always clean up the local report file, success or failure.
        if os.path.isfile(filename):
            os.remove(filename)
Uploads a profiler report to IOpipe :param url: The signed URL :param filename: The profiler report file :param config: The IOpipe config
def make_project(self, executable, target):
    """Build ``target`` with make and verify ``executable`` was produced.

    Raises MakeFailed on a non-zero exit and NonexistentExecutable when the
    build succeeds but the expected binary is missing.
    """
    command = 'make -f ../Makefile -C {0} {1}'.format(SRC_PATH, target)
    proc = Popen(command, shell=True, stdout=PIPE, stderr=STDOUT, env=CHILD_ENV)
    output, _ = proc.communicate()
    if proc.returncode != 0:
        raise MakeFailed(output)
    if not os.path.isfile(os.path.join(SRC_PATH, executable)):
        raise NonexistentExecutable(output)
    return output
Build the project and verify the executable exists.
def _merge_default_values(self):
    """Merge default values into the resource data.

    NOTE(review): ``if not self.data.get(key)`` also replaces existing
    falsy values (0, '', False, None) with the default, not only missing
    keys -- confirm this is intended.
    """
    values = self._get_default_values()
    for key, value in values.items():
        if not self.data.get(key):
            self.data[key] = value
Merge default values with resource data.
def _check_fields(self, x, y): if x is None: if self.x is None: self.err( self._check_fields, "X field is not set: please specify a parameter") return x = self.x if y is None: if self.y is None: self.err( self._check_fields, "Y field is not set: please specify a parameter") return y = self.y return x, y
Check x and y fields parameters and initialize
async def setup(self):
    """Create the MongoDB collections (and index) if they do not exist.

    Returns True when at least one collection was created, False otherwise
    or on error (errors are logged, not raised).
    """
    try:
        db = await self.db
        existing = await db.list_collection_names()
        created = False
        if self.table_name not in existing:
            logger.info("Creating MongoDB collection [{}]".format(self.table_name))
            await db.create_collection(self.table_name)
            # Compound index used for post lookups per target.
            await db[self.table_name].create_index(
                [("target_id", DESCENDING), ("post_id", DESCENDING)])
            created = True
        if self.control_table_name and self.control_table_name not in existing:
            logger.info("Creating MongoDB control data collection [{}]".format(self.control_table_name))
            await db.create_collection(self.control_table_name)
            created = True
        return created
    except Exception as exc:
        logger.error("[DB] Error when setting up MongoDB collections: {}".format(exc))
        return False
Setting up MongoDB collections, if they not exist.
def priv(x):
    """Return True if ``x`` (a dotted IPv4 quad string) is on a private network.

    Recognizes 10.0.0.0/8, 192.168.0.0/16 and 172.16.0.0/12. Quick and
    dirty: assumes a correctly formatted quad, but a malformed second
    octet in the 172.x case now returns False instead of raising.
    """
    if x.startswith(u'172.'):
        try:
            second_octet = int(x.split(u'.')[1])
        except ValueError:
            # Malformed second octet (e.g. '172.abc.0.1'): not private.
            return False
        # 172.16.0.0 - 172.31.255.255 is the private /12 range.
        return 16 <= second_octet < 32
    # The original fallback tuple also listed u'172.' -- dead code, since the
    # branch above already returns for every '172.'-prefixed address.
    return x.startswith((u'192.168.', u'10.'))
Quick and dirty method to find an IP on a private network given a correctly formatted IPv4 quad.
def find_column(t):
    """Return the 1-based column of token ``t`` within its input line.

    Computed from the lexer position and the previous newline in the input.
    """
    pos = t.lexer.lexpos
    data = t.lexer.lexdata
    # str.rfind already returns -1 when no newline precedes pos, which is
    # exactly the sentinel the original re-assigned; the extra branch was dead.
    last_cr = data.rfind('\n', 0, pos)
    return pos - last_cr
Get cursor position, based on previous newline
def get_sequence_rule_enablers_by_search(self, sequence_rule_enabler_query, sequence_rule_enabler_search):
    """Pass through to provider SequenceRuleEnablerSearchSession.get_sequence_rule_enablers_by_search"""
    if not self._can('search'):
        raise PermissionDenied()
    provider = self._provider_session
    return provider.get_sequence_rule_enablers_by_search(
        sequence_rule_enabler_query, sequence_rule_enabler_search)
Pass through to provider SequenceRuleEnablerSearchSession.get_sequence_rule_enablers_by_search
def validate_tag(self, key, value):
    """Check whether a tag value is valid.

    Args:
        key: A tag key
        value: A tag value

    Returns:
        `(True or False)` whether the value is valid for the given key.
    """
    if key == 'owner':
        return validate_email(value, self.partial_owner_match)
    if key == self.gdpr_tag:
        return value in self.gdpr_tag_values
    # Any other tag key is accepted unconditionally.
    return True
Check whether a tag value is valid Args: key: A tag key value: A tag value Returns: `(True or False)` A boolean indicating whether or not the value is valid
def request(self, host, handler, request_body, verbose):
    """Send an xml-rpc request through the system proxy settings."""
    self.verbose = verbose
    req = urllib2.Request('http://' + host + handler)
    req.add_data(request_body)
    req.add_header('User-Agent', self.user_agent)
    req.add_header('Content-Type', 'text/xml')
    # ProxyHandler with no argument picks up proxies from the environment.
    opener = urllib2.build_opener(urllib2.ProxyHandler())
    fhandle = opener.open(req)
    return self.parse_response(fhandle)
Send xml-rpc request using proxy
def as_dict(self, voigt=False):
    """Serialize the tensor object.

    Args:
        voigt (bool): store entries in voigt-notation. Defaults to false,
            as information may be lost in conversion.

    Returns (Dict): serialized format tensor object
    """
    serialized = {
        "@module": self.__class__.__module__,
        "@class": self.__class__.__name__,
        "input_array": (self.voigt if voigt else self).tolist(),
    }
    if voigt:
        serialized["voigt"] = voigt
    return serialized
Serializes the tensor object Args: voigt (bool): flag for whether to store entries in voigt-notation. Defaults to false, as information may be lost in conversion. Returns (Dict): serialized format tensor object
def get_window_at_mouse(self):
    """Return the X window id currently under the mouse pointer."""
    window = ctypes.c_ulong(0)
    # libxdo fills the out-parameter with the window id.
    _libxdo.xdo_get_window_at_mouse(self._xdo, ctypes.byref(window))
    return window.value
Get the window the mouse is currently over
def ReportLength(cls, header):
    """Calculate the total size of a BroadcastReport from its header.

    Returns:
        int: The total length of the report including the header that we
        are passed (header + readings + auth block).
    """
    parsed = cls._parse_header(header)
    auth_size = cls._AUTH_BLOCK_LENGTHS.get(parsed.auth_type)
    if auth_size is None:
        raise DataError("Unknown auth block size in BroadcastReport")
    return cls._HEADER_LENGTH + parsed.reading_length + auth_size
Given a header of HeaderLength bytes, calculate the size of this report. Returns: int: The total length of the report including the header that we are passed.
def from_gpx(gpx_segment):
    """Create a Segment from a gpxpy track segment. No preprocessing is done.

    Arguments:
        gpx_segment (:obj:`gpxpy.GPXTrackSegment`)
    Return:
        :obj:`Segment`
    """
    return Segment([Point.from_gpx(p) for p in gpx_segment.points])
Creates a segment from a GPX format. No preprocessing is done. Arguments: gpx_segment (:obj:`gpxpy.GPXTrackSegment`) Return: :obj:`Segment`
def add_bgp_error_metadata(code, sub_code, def_desc='unknown'):
    """Class decorator that registers a BGPSException subclass under (code, sub_code).

    Raises ValueError if the (code, sub_code) pair is already registered.
    """
    if (code, sub_code) in _EXCEPTION_REGISTRY:
        raise ValueError('BGPSException with code %d and sub-code %d '
                         'already defined.' % (code, sub_code))

    def decorator(subclass):
        # Only BGPSException subclasses are registered and annotated;
        # anything else is returned unchanged.
        if issubclass(subclass, BGPSException):
            _EXCEPTION_REGISTRY[(code, sub_code)] = subclass
            subclass.CODE = code
            subclass.SUB_CODE = sub_code
            subclass.DEF_DESC = def_desc
        return subclass

    return decorator
Decorator for all exceptions that want to set exception class meta-data.
def map_metabolite2kegg(metabolite):
    """Return a KEGG compound identifier for the metabolite if it exists.

    Looks at the metabolite's ``kegg.compound`` annotation: a plain string
    starting with "C" is returned directly; an iterable of identifiers is
    reduced to the smallest compound id. Returns None when no annotation is
    present or no identifier matches.

    NOTE(review): a string annotation NOT starting with "C" falls through to
    the Iterable branch (strings are iterable) and would be processed
    character by character -- confirm whether that input ever occurs.

    Parameters
    ----------
    metabolite : cobra.Metabolite
        The metabolite to be mapped to its KEGG compound identifier.

    Returns
    -------
    None
        If the metabolite could not be mapped.
    str
        The smallest KEGG compound identifier that was found.
    """
    logger.debug("Looking for KEGG compound identifier for %s.", metabolite.id)
    kegg_annotation = metabolite.annotation.get("kegg.compound")
    if kegg_annotation is None:
        logger.warning("No kegg.compound annotation for metabolite %s.",
                       metabolite.id)
        return
    # Unambiguous single compound identifier.
    if isinstance(kegg_annotation, string_types) and \
            kegg_annotation.startswith("C"):
        return kegg_annotation
    elif isinstance(kegg_annotation, Iterable):
        try:
            # Multiple candidates: keep the smallest compound id.
            return get_smallest_compound_id(kegg_annotation)
        except ValueError:
            return
    logger.warning(
        "No matching kegg.compound annotation for metabolite %s.",
        metabolite.id
    )
    return
Return a KEGG compound identifier for the metabolite if it exists. First see if there is an unambiguous mapping to a single KEGG compound ID provided with the model. If not, check if there is any KEGG compound ID in a list of mappings. KEGG IDs may map to compounds, drugs and glycans. KEGG compound IDs are sorted so we keep the lowest that is there. If none of this works try mapping to KEGG via the CompoundMatcher by the name of the metabolite. If the metabolite cannot be mapped at all we simply map it back to its own ID. Parameters ---------- metabolite : cobra.Metabolite The metabolite to be mapped to its KEGG compound identifier. Returns ------- None If the metabolite could not be mapped. str The smallest KEGG compound identifier that was found.
def uncheck_all_local(self):
    """Uncheck the 'all local' boxes when a local event is unchecked."""
    for value_btn, prep_btn in self.event['local'].values():
        if not value_btn.get_value():
            self.event['global']['all_local'].setChecked(False)
        # Only consider the prep checkbox when it is actually enabled.
        if prep_btn.isEnabled() and not prep_btn.get_value():
            self.event['global']['all_local_prep'].setChecked(False)
Uncheck 'all local' box when a local event is unchecked.
def make_psf_kernel(psf, npix, cdelt, xpix, ypix, psf_scale_fn=None,
                    normalize=False):
    """Generate a kernel for a point-source.

    Parameters
    ----------
    psf : `~fermipy.irfs.PSFModel`

    npix : int
        Number of pixels in X and Y dimensions.

    cdelt : float
        Pixel size in degrees.

    xpix, ypix : float
        Peak pixel coordinates used to build the distance map.

    psf_scale_fn : callable, optional
        Scale function forwarded to ``psf.eval``.

    normalize : bool, optional
        If True, divide by the per-pixel sum over energies times the pixel
        solid angle.
        NOTE(review): the sum is taken over axis 0 (the energy axis), not
        over the spatial axes -- confirm this is the intended normalization.
    """
    egy = psf.energies
    # Angular distance of every pixel from (xpix, ypix), in degrees.
    x = make_pixel_distance(npix, xpix, ypix)
    x *= cdelt
    # One kernel plane per PSF energy.
    k = np.zeros((len(egy), npix, npix))
    for i in range(len(egy)):
        k[i] = psf.eval(i, x, scale_fn=psf_scale_fn)
    if normalize:
        k /= (np.sum(k, axis=0)[np.newaxis, ...] * np.radians(cdelt) ** 2)
    return k
Generate a kernel for a point-source. Parameters ---------- psf : `~fermipy.irfs.PSFModel` npix : int Number of pixels in X and Y dimensions. cdelt : float Pixel size in degrees.
def get(self, blc=(), trc=(), inc=()):
    """Get image data and mask as a numpy masked array.

    Combines :func:`getdata` and :func:`getmask` for the requested region.
    """
    data = self.getdata(blc, trc, inc)
    mask = self.getmask(blc, trc, inc)
    return nma.masked_array(data, mask)
Get image data and mask. Get the image data and mask (see ::func:`getdata` and :func:`getmask`) as a numpy masked array.
def get_credentials(self, **kwargs):
    """Set credentials resolved from kwargs, environment variables or config.

    Raises Dump2PolarionException when either login or password is missing.
    """
    login = (kwargs.get("user")
             or os.environ.get("POLARION_USERNAME")
             or self.config.get("username"))
    pwd = (kwargs.get("password")
           or os.environ.get("POLARION_PASSWORD")
           or self.config.get("password"))
    if not (login and pwd):
        raise Dump2PolarionException("Failed to submit to Polarion - missing credentials")
    self.credentials = (login, pwd)
Sets credentials resolved from keyword arguments, environment variables or configuration.
def get_child_vaults(self, vault_id):
    """Gets the children of the given vault.

    arg:    vault_id (osid.id.Id): the ``Id`` to query
    return: (osid.authorization.VaultList) - the children of the vault
    raise:  NotFound - ``vault_id`` is not found
    raise:  NullArgument - ``vault_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Prefer the underlying catalog session when one is available.
    if self._catalog_session is not None:
        return self._catalog_session.get_child_catalogs(catalog_id=vault_id)
    lookup = VaultLookupSession(self._proxy, self._runtime)
    child_ids = list(self.get_child_vault_ids(vault_id))
    return lookup.get_vaults_by_ids(child_ids)
Gets the children of the given vault. arg: vault_id (osid.id.Id): the ``Id`` to query return: (osid.authorization.VaultList) - the children of the vault raise: NotFound - ``vault_id`` is not found raise: NullArgument - ``vault_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def _create_keywords_wizard_action(self):
    """Create and register the keywords-creation-wizard action."""
    icon_path = resources_path('img', 'icons', 'show-keyword-wizard.svg')
    action = QAction(
        QIcon(icon_path),
        self.tr('Keywords Creation Wizard'),
        self.iface.mainWindow())
    action.setStatusTip(self.tr(
        'Open InaSAFE keywords creation wizard'))
    action.setWhatsThis(self.tr(
        'Open InaSAFE keywords creation wizard'))
    # Disabled until a suitable layer is selected.
    action.setEnabled(False)
    action.triggered.connect(self.show_keywords_wizard)
    self.action_keywords_wizard = action
    self.add_action(action, add_to_legend=True)
Create action for keywords creation wizard.
def _init_view(self):
    """Initialize the View from project settings."""
    engine = get_config('rails.views.engine', 'jinja')
    templates = os.path.join(self._project_dir, "views", "templates")
    self._view = View(engine, templates)
Initialize View with project settings.
def __get_all_lowpoints(dfs_data):
    """Calculate lowpoint-1 and lowpoint-2 lookup tables for every node.

    Returns a pair of dicts keyed by node in DFS ordering.
    """
    low1, low2 = {}, {}
    for node in dfs_data['ordering']:
        low1[node], low2[node] = __get_lowpoints(node, dfs_data)
    return low1, low2
Calculates the lowpoints for each node in a graph.
def contains(self, location):
    """Return True if ``location`` lies on the box surface.

    A location is on the box when one of its coordinates is saturated,
    i.e. its absolute value equals half the corresponding dimension.
    """
    for axis, coord in enumerate(location):
        half_size = self.dimensions[axis] / 2.
        if self.almostEqual(abs(coord), half_size):
            return True
    return False
A location is on the box surface if one of its coordinates is saturated (its absolute value equals half the corresponding box dimension).
def create_result(self, env_name, other_val, meta, val, dividers):
    """Build an Environment, choosing default_val/set_val from the divider.

    ':' places ``other_val`` as the default value, '=' as the set value;
    no divider yields explicit Nones, and an unrecognized divider passes
    only the name through.
    """
    if other_val is NotSpecified:
        other_val = None
    if not dividers:
        extra = [None, None]
    elif dividers[0] == ':':
        extra = [other_val, None]
    elif dividers[0] == '=':
        extra = [None, other_val]
    else:
        # Unknown divider: keep the original single-argument behavior.
        extra = []
    return Environment(env_name, *extra)
Set default_val and set_val depending on the seperator
def write(self, data, size=-1):
    """Write data to the ring buffer.

    This advances the write index after writing; calling
    :meth:`advance_write_index` is *not* necessary.

    :param data: Data to write to the buffer.
    :type data: CData pointer or buffer or bytes
    :param size: The number of elements to be written.
    :type size: int, optional
    :returns: The number of elements written.
    :rtype: int
    """
    try:
        # Zero-copy view for buffer-protocol objects; CData pointers raise
        # TypeError here and are passed through unchanged.
        data = self._ffi.from_buffer(data)
    except TypeError:
        pass
    if size < 0:
        # Infer the element count from the byte size of the buffer.
        size, rest = divmod(self._ffi.sizeof(data), self.elementsize)
        if rest:
            raise ValueError('data size must be multiple of elementsize')
    return self._lib.PaUtil_WriteRingBuffer(self._ptr, data, size)
Write data to the ring buffer. This advances the write index after writing; calling :meth:`advance_write_index` is *not* necessary. :param data: Data to write to the buffer. :type data: CData pointer or buffer or bytes :param size: The number of elements to be written. :type size: int, optional :returns: The number of elements written. :rtype: int
def stop(self):
    """Stop the reader and flush all remaining messages to the database.

    This might take a while and block. The order is significant: stop the
    buffered reader first, signal the writer loop to finish, wait for it to
    drain, then tear down the base I/O handler.
    """
    BufferedReader.stop(self)
    self._stop_running_event.set()
    self._writer_thread.join()
    BaseIOHandler.stop(self)
Stops the reader an writes all remaining messages to the database. Thus, this might take a while and block.
def calculate_matrices(states, Omega=1):
    r"""Calculate the matrices omega_ij, gamma_ij, r_pij.

    Given a list of atomic states (which may be specified in fine,
    hyperfine or magnetic detail), all states are validated to belong to
    the same element and isotope, expanded to the full magnetic basis, and
    the transition-frequency, decay and position matrices are returned.

    Note: the original body contained a stray ``r`` expression (residue of
    a raw-docstring marker) that would raise NameError at runtime; it has
    been removed, along with redundant reassignments of the results.
    """
    iso = states[0].isotope
    element = states[0].element
    for state in states[1:]:
        if state.element != element:
            raise ValueError('All states must belong to the same element.')
        if state.isotope != iso:
            raise ValueError('All states must belong to the same isotope.')
    fine_states = find_fine_states(states)
    full_magnetic_states = make_list_of_states(fine_states, 'magnetic',
                                               verbose=0)
    omega = calculate_omega_matrix(full_magnetic_states, Omega)
    gamma = calculate_gamma_matrix(full_magnetic_states, Omega)
    reduced_matrix_elements = calculate_reduced_matrix_elements(fine_states)
    r = calculate_r_matrices(fine_states, reduced_matrix_elements)
    return omega, gamma, r
r"""Calculate the matrices omega_ij, gamma_ij, r_pij. This function calculates the matrices omega_ij, gamma_ij and r_pij given a list of atomic states. The states can be arbitrarily in their fine, hyperfine or magnetic detail.
def unshare(flags):
    """Disassociate parts of the process execution context.

    :param flags int: A bitmask that specifies which parts of the execution
        context should be unshared.
    """
    if lib.unshare(flags) != 0:
        # Non-zero return: translate errno into the appropriate exception.
        _check_error(ffi.errno)
Disassociate parts of the process execution context. :param flags int: A bitmask that specifies which parts of the execution context should be unshared.
def number_of_trajectories(self, stride=None):
    r"""Return the number of trajectories.

    Parameters
    ----------
    stride: None (default) or np.ndarray
        With a non-uniform stride array, the count is the number of
        distinct trajectory indices in its first column.

    Returns
    -------
    int : number of trajectories

    Note: the original body contained a stray ``r`` expression (residue of
    a raw-docstring marker) that would raise NameError at runtime; removed.
    """
    if IteratorState.is_uniform_stride(stride):
        return self.ntraj
    return len(np.unique(stride[:, 0]))
r""" Returns the number of trajectories. Parameters ---------- stride: None (default) or np.ndarray Returns ------- int : number of trajectories
def get_stop_words(self, language, fail_safe=False):
    """Return a StopWord object for the requested ``language``.

    If the language is unavailable a StopWordError is raised, unless
    ``fail_safe`` is True, in which case an empty StopWord is returned.
    Loaded collections are cached per language.
    """
    # Map a friendly language name to its code when known.
    language = self.language_codes.get(language, language)
    collection = self.LOADED_LANGUAGES_CACHE.get(language)
    if collection is None:
        try:
            collection = self._get_stop_words(language)
            self.LOADED_LANGUAGES_CACHE[language] = collection
        except StopWordError:
            if not fail_safe:
                raise
            collection = []
    return StopWord(language, collection)
Returns a StopWord object initialized with the stop words collection requested by ``language``. If the requested language is not available a StopWordError is raised. If ``fail_safe`` is set to True, an empty StopWord object is returned.
def construct(generator, subtopic):
    """Instantiate the Item subclass registered for the subtopic's type.

    :param generator: Reference to the owning ReportGenerator instance
    :param subtopic: Tuple whose 4th element is the item type key.
    :returns: An instantiated Item-derived class.
    :raises LookupError: when no constructor is registered for the type.
    """
    item_type = subtopic[3]
    if item_type not in Item.constructors:
        raise LookupError(item_type)
    factory = Item.constructors[item_type]
    return factory(generator, subtopic)
Method constructor of Item-derived classes. Given a subtopic tuple, this method attempts to construct an Item-derived class, currently either ItemText or ItemImage, from the subtopic's type, found in its 4th element. :param generator: Reference to the owning ReportGenerator instance :param subtopic: Tuple containing content_id, meta_url, subtopic_id, type and type-specific data. :returns An instantiated Item-derived class.
def get_perms(self, username):
    """Return the user's permissions, or the falsy account lookup result when absent."""
    account = self.get_account(username)
    if not account:
        return account
    return account.get_perms()
return user permissions