code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def setDashboardOverlaySceneProcess(self, ulOverlayHandle, unProcessId):
    """Set the dashboard overlay to only appear when the specified
    process ID has scene focus."""
    fn = self.function_table.setDashboardOverlaySceneProcess
    return fn(ulOverlayHandle, unProcessId)
def delete_host_template(self, name):
    """Delete a host template.

    @param name: Name of the host template to delete.
    @return: An ApiHostTemplate object.
    """
    root = self._get_resource_root()
    return host_templates.delete_host_template(root, name, self.name)
def freeze(self):
    """Freeze all settings so that they can't be altered."""
    self.target.disable()
    # All remaining widgets share the same disable call.
    for widget in (self.filter, self.prog_ob, self.pi,
                   self.observers, self.comment):
        widget.configure(state='disable')
def dump(self):
    """Write coincidence counts and livetime into a Python pickle.

    The payload is a dict with keys ``data`` (the counts) and
    ``livetime``; it is written to ``self.dump_filename``.
    """
    self.print("Dumping data to {}".format(self.dump_filename))
    payload = {
        'data': self.counts,
        'livetime': self.get_livetime(),
    }
    # Context manager closes the handle even if pickling fails; the
    # original passed a bare open() to pickle.dump and leaked it.
    with open(self.dump_filename, "wb") as fobj:
        pickle.dump(payload, fobj)
def create_statement(
    self, session_id: int, code: str, kind: StatementKind = None
) -> Statement:
    """Run a statement in a session.

    :param session_id: The ID of the session.
    :param code: The code to execute.
    :param kind: The kind of code to execute.
    """
    payload = {"code": code}
    if kind is not None:
        if self.legacy_server():
            LOGGER.warning("statement kind ignored on Livy<0.5.0")
        payload["kind"] = kind.value
    response = self._client.post(
        f"/sessions/{session_id}/statements", data=payload
    )
    return Statement.from_json(session_id, response)
def resolve_class(classref):
    """Attempt to return a Python class for the input class reference.

    If `classref` is a class or None, return it.  If `classref` is a
    python classpath (e.g., "foo.bar.MyClass"), import the class and
    return it.

    Args:
        classref: A fully-qualified Python path to class, or a class.

    Returns:
        A class.
    """
    if classref is None or isinstance(classref, six.class_types):
        return classref
    if isinstance(classref, six.string_types):
        return import_class(classref)
    raise ValueError("Unable to resolve class for '%s'" % classref)
def _start_nodes_parallel(self, nodes, max_thread_pool_size):
    """Start the nodes using a pool of multiprocessing threads for speed-up.

    Return the set of nodes that were actually started.
    """
    # Never spawn more workers than there are nodes to start.
    thread_pool_size = min(len(nodes), max_thread_pool_size)
    thread_pool = Pool(processes=thread_pool_size)
    log.debug("Created pool of %d threads", thread_pool_size)

    # BUG FIX: the abort flag must live in a mutable container.  A bare
    # ``keep_running = False`` inside ``sigint_handler`` only binds a
    # local name there (Python 2 has no ``nonlocal``), so the outer flag
    # was never flipped and Ctrl-C was effectively ignored.
    keep_running = [True]

    def sigint_handler(signum, frame):
        log.error(
            "Interrupted: will save cluster state and exit"
            " after all nodes have started.")
        keep_running[0] = False

    with sighandler(signal.SIGINT, sigint_handler):
        result = thread_pool.map_async(self._start_node, nodes)
        while not result.ready():
            result.wait(1)
            if not keep_running[0]:
                # Save what we have, then bail out hard.
                log.error("Aborting upon user interruption ...")
                thread_pool.close()
                thread_pool.join()
                self.repository.save_or_update(self)
                sys.exit(1)
    # Keep only nodes whose start callable reported success.
    return set(node for node, ok
               in itertools.izip(nodes, result.get()) if ok)
def _is_broken_ref(key1, value1, key2, value2):
    """Return True if this is a broken reference; False otherwise."""
    if key1 != 'Link' or key2 != 'Str':
        return False
    # Pandoc changed the Link AST layout in 1.16; pick the right slot.
    n = 1 if _PANDOCVERSION >= '1.16' else 0
    if isinstance(value1[n][0]['c'], list):
        return False
    s = value1[n][0]['c'] + value2
    return True if _REF.match(s) else False
def spelling(self):
    """Flag incorrectly spelled words.

    Returns a list of booleans, one per word; each element denotes
    whether the word at that position is spelled correctly.
    """
    if not self.is_tagged(WORDS):
        self.tokenize_words()
    checked = vabamorf.spellcheck(self.word_texts, suggestions=False)
    return [entry[SPELLING] for entry in checked]
def query(self, q, format="", convert=True):
    """Generic SELECT query structure.

    'q' is the main body of the query; declared prefixes are prepended
    automatically.  The results passed out are not converted yet (see
    the 'format' method) and can be iterated line by line.  If convert
    is False, the collection of rdflib instances is returned.
    """
    prefix_lines = ["PREFIX %s: <%s>" % (k, r)
                    for k, r in self.prefixes.iteritems()]
    full_query = "\n".join(prefix_lines + q.split("\n"))
    if self.verbose:
        print(full_query, "\n\n")
    return self.__doQuery(full_query, format, convert)
def do(self, func, *args, **kwargs):
    """Apply ``func`` to myself and return myself.

    If ``func`` is not callable it is looked up by name in the engine's
    function store.  Extra arguments, keyword or positional, are passed
    through.  Useful chiefly when chaining.
    """
    target = func if callable(func) else getattr(self.engine.function, func)
    target(self, *args, **kwargs)
    return self
def rect(self):
    """A dictionary with the size and location of the element."""
    if self._w3c:
        return self._execute(Command.GET_ELEMENT_RECT)['value']
    # Legacy protocol: merge the separate size and location dicts.
    combined = self.size.copy()
    combined.update(self.location)
    return combined
def classes(self):
    """Return all documented module-level classes in the module,
    sorted alphabetically, as a list of `pydoc.Class`."""
    def keep(obj):
        return isinstance(obj, Class) and self._docfilter(obj)
    return sorted(doc for doc in self.doc.values() if keep(doc))
def publish(self, exchange, routing_key, body, properties=None):
    """Publish a message to RabbitMQ.

    If the RabbitMQ connection is not established or is blocked,
    attempt to wait until sending is possible.

    :param str exchange: The exchange to publish the message to.
    :param str routing_key: The routing key to publish the message with.
    :param bytes body: The message body to send.
    :param dict properties: An optional dict of additional properties
        to append.
    :rtype: tornado.concurrent.Future
    :raises: :exc:`sprockets.mixins.amqp.NotReadyError`
    :raises: :exc:`sprockets.mixins.amqp.PublishingError`
    """
    future = concurrent.Future()
    properties = properties or {}
    # Fill in defaults without clobbering caller-supplied values.
    properties.setdefault('app_id', self.default_app_id)
    properties.setdefault('message_id', str(uuid.uuid4()))
    properties.setdefault('timestamp', int(time.time()))
    if self.ready:
        if self.publisher_confirmations:
            # Track the message so the broker's ack/nack resolves
            # the future later.
            self.message_number += 1
            self.messages[self.message_number] = future
        else:
            # No confirmations: resolve immediately on send.
            future.set_result(None)
        try:
            self.channel.basic_publish(
                exchange, routing_key, body,
                pika.BasicProperties(**properties), True)
        except exceptions.AMQPError as error:
            future.set_exception(
                PublishingFailure(
                    properties['message_id'], exchange, routing_key,
                    error.__class__.__name__))
    else:
        future.set_exception(NotReadyError(
            self.state_description, properties['message_id']))
    return future
def _latex_format(obj: Any) -> str: if isinstance(obj, float): try: return sympy.latex(symbolize(obj)) except ValueError: return "{0:.4g}".format(obj) return str(obj)
Format an object as a latex string.
def get_fields(model_class, field_name='', path=''):
    """Get fields and meta data from a model.

    :param model_class: A django model class
    :param field_name: The field name to get sub fields from
    :param path: path of our field in format
        ``field_name__second_field_name__etc__``
    :returns: dict with ``fields`` (django model fields), ``path``
        (our new path) and ``app_label``
    :rtype: dict
    """
    fields = get_direct_fields_from_model(model_class)
    app_label = model_class._meta.app_label

    if field_name != '':
        field, model, direct, m2m = _get_field_by_name(model_class,
                                                       field_name)
        path = '{}{}__'.format(path, field_name)
        if direct:
            try:
                new_model = _get_remote_field(field).parent_model
            except AttributeError:
                # Newer Django exposes ``model`` instead.
                new_model = _get_remote_field(field).model
        else:
            new_model = field.related_model
        fields = get_direct_fields_from_model(new_model)
        app_label = new_model._meta.app_label

    return {'fields': fields, 'path': path, 'app_label': app_label}
def get_manifest_list(image, registry, insecure=False, dockercfg_path=None):
    """Return manifest list for image.

    :param image: ImageName, the remote image to inspect
    :param registry: str, URI for registry; https:// is used when no
        URI schema is provided
    :param insecure: bool, when True registry's cert is not verified
    :param dockercfg_path: str, dirname of .dockercfg location
    :return: response, or None, with manifest list
    """
    session = RegistrySession(registry, insecure=insecure,
                              dockercfg_path=dockercfg_path)
    response, _ = get_manifest(image, session, 'v2_list')
    return response
def __normalize_name(self, name):
    """Normalize the given action name into the root namespace.

    :param name: Action name.
    :type name: unicode
    :return: Normalized name.
    :rtype: unicode
    """
    # (Docstring fix: the original claimed ``:rtype: bool`` but the
    # method returns the name string.)
    if not name.startswith(self.__root_namespace):
        name = foundations.namespace.set_namespace(
            self.__root_namespace,
            foundations.namespace.set_namespace(self.__default_namespace,
                                                name))
        LOGGER.debug("> Normalized name: '{0}'.".format(name))
        return name
    else:
        LOGGER.debug("> Name '{0}' is already normalized!".format(name))
        return name
def get_context_data(self, **kwargs):
    """Test that cookies work, warning the user when they do not."""
    session = self.request.session
    session.set_test_cookie()
    if not session.test_cookie_worked():
        messages.add_message(
            self.request, messages.ERROR, "Please enable cookies.")
    session.delete_test_cookie()
    return super().get_context_data(**kwargs)
def do_quit(self, args):
    """Handle the quit command.

    Clears the interpreter breakpoint and returns True so the command
    loop terminates.
    """
    interp = self._interp
    interp.set_break(interp.BREAK_NONE)
    return True
def role_get(user):
    """List roles for user.

    user : string
        username

    CLI Example:

    .. code-block:: bash

        salt '*' rbac.role_get leo
    """
    user_roles = []

    with salt.utils.files.fopen('/etc/user_attr', 'r') as user_attr:
        for line in user_attr:
            line = salt.utils.stringutils.to_unicode(line)
            # user_attr(4) format: user:qualifier:res1:res2:attr
            # (original called .strip() twice; once is enough)
            fields = line.strip().split(':')
            if len(fields) != 5:
                continue
            if fields[0] != user:
                continue

            # Parse the key=value attribute list in the last field.
            attrs = {}
            for attr in fields[4].strip().split(';'):
                attr_key, attr_val = attr.strip().split('=')
                if attr_key in ('auths', 'profiles', 'roles'):
                    attrs[attr_key] = attr_val.strip().split(',')
                else:
                    attrs[attr_key] = attr_val

            if 'roles' in attrs:
                user_roles.extend(attrs['roles'])

    # Deduplicate before returning.
    return list(set(user_roles))
def encoded_query(self):
    """Return the encoded query string of the URL.

    This may differ from the raw query element, which holds the query
    parsed by urllib but unmodified.  The result takes the form
    ``key=value&key=value`` and never contains a leading question mark.
    """
    if self.query is None or self.query == '' or self.query == {}:
        return ''
    try:
        return urlencode(self.query, doseq=True, quote_via=urlquote)
    except TypeError:
        # Older urlencode without quote_via: encode pairs by hand.
        pairs = ["{0}={1}".format(urlquote(k), urlquote(self.query[k][0]))
                 for k in self.query]
        return '&'.join(pairs)
def getenv(key, value=None):
    """Like `os.getenv` but returns unicode under Windows + Python 2.

    Args:
        key (pathlike): The env var to get
        value (object): The value to return if the env var does not exist

    Returns:
        `fsnative` or `object`: The env var or the passed value if it
        doesn't exist
    """
    # On Windows + Python 2, ``environ`` is the unicode-aware wrapper.
    lookup = environ.get if (is_win and PY2) else os.getenv
    return lookup(path2fsn(key), value)
def begin(self):
    """Enter a transaction explicitly.

    No data will be written until the transaction has been committed.
    """
    try:
        stack = self.local.tx
    except AttributeError:
        # First transaction on this thread-local: create the stack.
        stack = self.local.tx = []
    stack.append(self.executable.begin())
def _subscribe_resp(self, data):
    """Handle a subscribe response.

    :param data: Payload.
    :returns: State byte (ON/OFF), or None when ``data`` is not a
        subscribe response.
    """
    if not _is_subscribe_response(data):
        return None
    state = bytes([data[23]])
    _LOGGER.debug("Successfully subscribed to %s, state: %s",
                  self.host, ord(state))
    return state
def reward_scope(self,
                 state: Sequence[tf.Tensor],
                 action: Sequence[tf.Tensor],
                 next_state: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]:
    """Return the complete reward fluent scope for the current `state`,
    `action` fluents, and `next_state` fluents.

    Args:
        state (Sequence[tf.Tensor]): The current state fluents.
        action (Sequence[tf.Tensor]): The action fluents.
        next_state (Sequence[tf.Tensor]): The next state fluents.

    Returns:
        A mapping from fluent names to
        :obj:`rddl2tf.fluent.TensorFluent`.
    """
    scope = dict(self.non_fluents_scope())
    scope.update(self.state_scope(state))
    scope.update(self.action_scope(action))
    scope.update(self.next_state_scope(next_state))
    return scope
def ecg_simulate(duration=10, sampling_rate=1000, bpm=60, noise=0.01):
    """Simulate an ECG signal.

    Parameters
    ----------
    duration : int
        Desired recording length in seconds.
    sampling_rate : int
        Desired sampling rate.
    bpm : int
        Desired simulated heart rate.
    noise : float
        Desired noise level.

    Returns
    -------
    ndarray
        The simulated ECG signal with ``sampling_rate * duration``
        samples.

    Example
    -------
    >>> ecg = ecg_simulate(duration=10, bpm=60, sampling_rate=1000,
    ...                    noise=0.01)
    """
    # One heartbeat template: a Daubechies wavelet padded with silence.
    heartbeat = np.concatenate([scipy.signal.wavelets.daub(10),
                                np.zeros(10)])
    n_beats = int(duration * bpm / 60)
    signal = np.tile(heartbeat, n_beats)
    # Superimpose gaussian noise, then resample to the target length.
    signal = signal + np.random.normal(0, noise, len(signal))
    return scipy.signal.resample(signal, sampling_rate * duration)
def _file_lines(self, filename): try: return self._file_lines_cache[filename] except KeyError: if os.path.isfile(filename): with open(filename) as python_file: self._file_lines_cache[filename] = python_file.readlines() else: self._file_lines_cache[filename] = "" return self._file_lines_cache[filename]
Get lines for filename, caching opened files.
def get_orders(self, product_id=None, status=None, **kwargs):
    """List your current open (un-settled) orders.

    Returns a generator which may make multiple HTTP requests while
    iterating through it.  As soon as an order is no longer open and
    settled, it will no longer appear in the default request.  Orders
    no longer resting on the order book are marked with the 'done'
    status; there is a small window between 'done' and 'settled'.

    For high-volume trading it is strongly recommended to maintain
    your own list of open orders via the streaming market data feeds,
    polling this endpoint once at startup for the current state.

    Args:
        product_id (Optional[str]): Only list orders for this product.
        status (Optional[list/str]): Limit orders to this status or
            statuses ('open', 'pending', 'active', 'done', 'settled',
            or 'all'; default ['open', 'pending', 'active']).

    Returns:
        list: Containing information on orders.
    """
    params = dict(kwargs)
    if product_id is not None:
        params['product_id'] = product_id
    if status is not None:
        params['status'] = status
    return self._send_paginated_message('/orders', params=params)
def makeSoftwareVersion(store, version, systemVersion):
    """Return the SoftwareVersion object from ``store`` corresponding
    to the version object, creating it if it doesn't already exist."""
    return store.findOrCreate(
        SoftwareVersion,
        systemVersion=systemVersion,
        package=unicode(version.package),
        version=unicode(version.short()),
        major=version.major,
        minor=version.minor,
        micro=version.micro)
def _run_detection(self): if self.verbose: print('Running QRS detection...') self.qrs_inds = [] self.backsearch_qrs_inds = [] for self.peak_num in range(self.n_peaks_i): if self._is_qrs(self.peak_num): self._update_qrs(self.peak_num) else: self._update_noise(self.peak_num) if self._require_backsearch(): self._backsearch() if self.qrs_inds: self.qrs_inds = np.array(self.qrs_inds) + self.sampfrom else: self.qrs_inds = np.array(self.qrs_inds) if self.verbose: print('QRS detection complete.')
Run the qrs detection after all signals and parameters have been configured and set.
def getelm(frstyr, lineln, lines):
    """Parse the "lines" of a two-line element set and return the
    elements in units suitable for use in SPICE software.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/getelm_c.html

    :param frstyr: Year of earliest representable two-line elements.
    :type frstyr: int
    :param lineln: Length of strings in lines array.
    :type lineln: int
    :param lines: A pair of "lines" containing two-line elements.
    :type lines: list of str
    :return: The epoch of the elements in seconds past J2000, and the
        elements converted to SPICE units.
    :rtype: tuple
    """
    first_year = ctypes.c_int(frstyr)
    line_len = ctypes.c_int(lineln)
    line_buf = stypes.listToCharArrayPtr(lines, xLen=line_len, yLen=2)
    epoch = ctypes.c_double()
    elems = stypes.emptyDoubleVector(10)
    libspice.getelm_c(first_year, line_len, line_buf,
                      ctypes.byref(epoch), elems)
    return epoch.value, stypes.cVectorToPython(elems)
def _resize_to_minimum(worksheet, rows=None, cols=None): current_cols, current_rows = ( worksheet.col_count, worksheet.row_count ) if rows is not None and rows <= current_rows: rows = None if cols is not None and cols <= current_cols: cols = None if cols is not None or rows is not None: worksheet.resize(rows, cols)
Resize the worksheet to guarantee a minimum size, either in rows, or columns, or both. Both rows and cols are optional.
def list(self):
    """Return the list of purchasable, not-yet-entitled products."""
    return [prod for prod in self.product_list
            if self.purchasable(prod) and not self.entitled(prod)]
def subscribe(self, clock_name: str = None,
              clock_slots: Iterable[str] = None,
              subscriptions: Dict[str, Any] = None):
    """Subscribe this Area to the given Areas and optionally given
    Slots.  Must be called before the Area is run.

    Args:
        clock_name: The name of the Area used as synchronizing Clock.
        clock_slots: The slots of the Clock relevant to this Area.
        subscriptions: A dictionary with the relevant Area names as
            keys and optionally their Slots as values.
    """
    # BUG FIX: the original used a mutable default ({}) that is mutated
    # below, so subscriptions leaked between calls.  Use None sentinel.
    if subscriptions is None:
        subscriptions = {}
    for area in subscriptions:
        init_full(self, area, subscriptions[area])
        subscriptions[area] = {'slots': subscriptions[area]}
    if clock_name is not None:
        self.clock_name = clock_name
        self.clock_slots = clock_slots
        subscriptions[clock_name] = {'slots': clock_slots,
                                     'buffer-length': 1}
    self.setup(puller=True, subscriptions=subscriptions)
def get_std_dev_area(self, mag, rake):
    """Standard deviation of area for WC1994; magnitude is ignored.

    The value depends only on the faulting style implied by ``rake``.
    """
    assert rake is None or -180 <= rake <= 180
    if rake is None:
        # Their "All" case.
        return 0.24
    if (-45 <= rake <= 45) or rake >= 135 or rake <= -135:
        # Strike-slip.
        return 0.22
    if rake > 0:
        # Thrust / reverse.
        return 0.26
    # Normal faulting.
    return 0.22
def step(self, t, x_im1, v_im1_2, dt):
    """Leapfrog step: advance positions and velocities by ``dt``.

    Parameters
    ----------
    dt : numeric
        The timestep to move forward.

    Returns
    -------
    tuple
        ``(x_i, v_i, v_ip1_2)``: the new position, the synchronized
        velocity, and the half-step velocity for the next step.
    """
    # Drift: position moves with the half-step velocity.
    x_i = x_im1 + v_im1_2 * dt
    # Acceleration at the new position (force components after ndim).
    F_i = self.F(t, np.vstack((x_i, v_im1_2)), *self._func_args)
    a_i = F_i[self.ndim:]

    half_kick = a_i * dt / 2
    v_i = v_im1_2 + half_kick
    v_ip1_2 = v_i + half_kick

    return x_i, v_i, v_ip1_2
def iter_grants(self, as_json=True):
    """Yield grant records fetched from the SQLite database.

    XML rows are converted to JSON when ``as_json`` is True; JSON
    source rows cannot be converted to XML output.
    """
    self._connect()
    rows = self.db_connection.cursor().execute(
        "SELECT data, format FROM grants"
    )
    for data, data_format in rows:
        if not as_json and data_format == 'json':
            raise Exception("Cannot convert JSON source to XML output.")
        if as_json and data_format == 'xml':
            yield self.grantxml2json(data)
        elif as_json and data_format == 'json':
            yield json.loads(data)
        else:
            yield data
    self._disconnect()
def convert_args(self, command, args):
    """Convert ``str -> int`` or ``register -> int``.

    Yields one converted value per argument, driven by the command's
    declared argument types.
    """
    for wanted, arg in zip(command.argtypes(), args):
        kind = wanted.type_
        if kind == "const":
            try:
                yield to_int(arg)
            # BUG FIX: a bare ``except:`` also swallowed SystemExit and
            # KeyboardInterrupt; catch only real errors.
            except Exception:
                if arg in self.processor.constants:
                    yield self.processor.constants[arg]
                else:
                    # Unknown constant: pass the raw string through.
                    yield arg
        if kind == "register":
            yield self.register_indices[arg]
def wait_for_completion(report, interval=10):
    """Wait for asynchronous jobs still running in the given campaign.

    :param report: memory representation of a campaign report
    :type report: ReportNode
    :param interval: wait interval
    :type interval: int or float
    :return: generator of asynchronous job description dicts
    """
    for jobid in report.collect('jobid'):
        try:
            if not Job.finished(jobid):
                logging.info('waiting for SLURM job %s', jobid)
                time.sleep(interval)
                while not Job.finished(jobid):
                    time.sleep(interval)
            yield Job.fromid(jobid)._asdict()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
            # Job already purged from SLURM: report the id only.
            yield dict(id=str(jobid))
def emit_event(self, event_name, event_body):
    """Publish an event of type ``event_name`` to all subscribers.

    The event is pushed through all available event transports.  The
    body must be a Python object that can be represented as JSON.

    :param event_name: a ``str`` representing the event type
    :param event_body: a Python object that can be represented as JSON

    .. versionadded:: 0.5.0
    """
    for channel in self.event_transports:
        channel.emit_event(event_name, event_body)
def read_path(source, path, separator='/'):
    """Read a value from a dict supporting a deep path as a key.

    :param source: a dict to read data from
    :param path: a key or path to a key (path is delimited by
        `separator`)
    :keyword separator: the separator used in the path (ex. could be
        "." for a json/mongodb type of value)
    :return: the value, or None when any path component is missing or
        leads through a non-dict.
    """
    *branches, leaf = path.strip(separator).split(separator)
    node = source
    for branch in branches:
        if branch not in node:
            return None
        node = node[branch]
        if not isinstance(node, dict):
            return None
    return node.get(leaf)
def get_by_id(self, institution_id, _options=None):
    """Fetch a single institution by id.

    :param str institution_id: the institution to look up
    :param _options: optional request options dict
    """
    payload = {
        'institution_id': institution_id,
        'options': _options or {},
    }
    return self.client.post_public_key('/institutions/get_by_id', payload)
def paranoidconfig(**kwargs):
    """A function decorator to set a local setting.

    Settings may be set either globally (using
    settings.Settings.set()) or locally using this decorator: pass the
    setting name as a keyword argument with the value to assign.  See
    settings.Settings for the available settings.

    Example usage:

    | @returns(Number)
    | @paranoidconfig(enabled=False)
    | def slow_function():
    |     ...
    """
    def _decorator(func):
        for setting_name, setting_value in kwargs.items():
            Settings._set(setting_name, setting_value, function=func)
        return _wrap(func)
    return _decorator
def reset(self):
    """Reset the list of terms and the y-variable to a clean state."""
    self.y = self.backend = self.clean_data = None
    self.terms = OrderedDict()
    self.added_terms = []
    self._added_priors = {}
    self.completes = []
def wait_all_tasks_done(self, timeout=None, delay=0.5, interval=0.1):
    """Block until all tasks are done or ``timeout`` elapses.

    Only to be used while the loop is running in a single non-main
    thread.  Returns all tasks on success, or the done tasks on
    timeout.
    """
    effective = self._timeout if timeout is None else timeout
    # Zero/None means wait forever.
    deadline = time.time() + (effective or float("inf"))
    time.sleep(delay)
    while True:
        if not self.todo_tasks:
            return self.all_tasks
        if time.time() > deadline:
            return self.done_tasks
        time.sleep(interval)
def CreateSmartShoppingAd(client, ad_group_id):
    """Add a new Smart Shopping ad.

    Args:
        client: an AdWordsClient instance.
        ad_group_id: an integer ID for an ad group.
    """
    ad_group_ad_service = client.GetService('AdGroupAdService',
                                            version='v201809')
    adgroup_ad = {
        'adGroupId': ad_group_id,
        # Smart Shopping ads use the GoalOptimizedShoppingAd type.
        'ad': {'xsi_type': 'GoalOptimizedShoppingAd'}
    }
    ad_operation = {'operator': 'ADD', 'operand': adgroup_ad}
    ad_result = ad_group_ad_service.mutate([ad_operation])
    for adgroup_ad in ad_result['value']:
        # Parenthesized print works on both Python 2 and 3; the bare
        # print statement was a syntax error under Python 3.
        print('Smart Shopping ad with ID "%s" was added.'
              % adgroup_ad['ad']['id'])
def file_view(self, request, field_entry_id):
    """Output the uploaded file for the requested field entry as an
    attachment download."""
    model = self.fieldentry_model
    field_entry = get_object_or_404(model, id=field_entry_id)
    path = join(fs.location, field_entry.value)
    response = HttpResponse(content_type=guess_type(path)[0])
    # Context manager guarantees the handle is closed even if the
    # response write fails (the original leaked it on error).
    with open(path, "r+b") as f:
        response["Content-Disposition"] = \
            "attachment; filename=%s" % f.name
        response.write(f.read())
    return response
def fail(self, group, message):
    """Mark this job as failed with the provided group and message.

    ``group`` is a categorical failure mode so similar errors can be
    grouped together; ``message`` is job-specific (e.g. a traceback).
    Use this for jobs with something really wrong that must be
    remedied, not for transient failures.  Failed jobs are kept until
    canceled or completed.

    Returns the id of the failed job if successful, or ``False``.
    """
    logger.warn('Failing %s (%s): %s', self.jid, group, message)
    result = self.client('fail', self.jid, self.client.worker_name,
                         group, message, json.dumps(self.data))
    return result or False
def from_cwl(cls, data, __reference__=None):
    """Return an instance built from CWL data.

    The 'class' key selects the concrete registered subclass;
    remaining keys become constructor arguments.
    """
    class_name = data.get('class', None)
    cls = cls.registry.get(class_name, cls)
    kwargs = {k: v for k, v in iteritems(data) if k != 'class'}
    if __reference__:
        with with_reference(__reference__):
            self = cls(**kwargs)
    else:
        self = cls(**kwargs)
    return self
def _check_all_devices_in_sync(self): if len(self._get_devices_by_failover_status('In Sync')) != \ len(self.devices): msg = "Expected all devices in group to have 'In Sync' status." raise UnexpectedDeviceGroupState(msg)
Wait until all devices have failover status of 'In Sync'. :raises: UnexpectedClusterState
def get(number, locale):
    """Return the plural position to use for the given locale and number.

    @type number: int
    @param number: The number
    @type locale: str
    @param locale: The locale
    @rtype: int
    @return: The plural position
    """
    # Brazilian Portuguese has its own rule entry.
    if locale == 'pt_BR':
        locale = 'xbr'
    # Reduce e.g. "en_US" to its language part.
    if len(locale) > 3:
        locale = locale.split("_")[0]
    rule = PluralizationRules._rules.get(locale, lambda _: 0)
    position = rule(number)
    # Guard against misbehaving rules.
    if not isinstance(position, int) or position < 0:
        return 0
    return position
def paginate_resources(cls, request, resources, on_fail_status):
    """Truncate a list of resources based on ClientPagingControls.

    Args:
        request (object): The parsed protobuf request object
        resources (list of objects): The resources to be paginated
        on_fail_status: Status sent back when paging controls are
            invalid.

    Returns:
        list: The paginated list of resources
        object: The ClientPagingResponse to be sent back to the client
    """
    if not resources:
        return (resources, client_list_control_pb2.ClientPagingResponse())

    paging = request.paging
    # Clamp the requested limit; fall back to the default when zero.
    limit = min(paging.limit, MAX_PAGE_SIZE) or DEFAULT_PAGE_SIZE

    try:
        if paging.start:
            start_index = cls.index_by_id(paging.start, resources)
        else:
            start_index = 0

        # An out-of-range start is reported to the client as a paging
        # failure rather than raised directly.
        if start_index < 0 or start_index >= len(resources):
            raise AssertionError
    except AssertionError:
        raise _ResponseFailed(on_fail_status)

    paged_resources = resources[start_index: start_index + limit]

    # Only include a "next" token when more resources remain.
    if start_index + limit < len(resources):
        paging_response = client_list_control_pb2.ClientPagingResponse(
            next=cls.id_by_index(start_index + limit, resources),
            start=cls.id_by_index(start_index, resources),
            limit=limit)
    else:
        paging_response = client_list_control_pb2.ClientPagingResponse(
            start=cls.id_by_index(start_index, resources),
            limit=limit)

    return paged_resources, paging_response
def verify_hmac_sha1(request, client_secret=None, resource_owner_secret=None):
    """Verify an HMAC-SHA1 signature per `section 3.4`_ of the spec.

    .. _`section 3.4`: https://tools.ietf.org/html/rfc5849#section-3.4

    To satisfy `RFC2616 section 5.2`_ item 1, the request argument's
    uri attribute MUST be an absolute URI whose netloc part identifies
    the origin server or gateway on which the resource resides.  Any
    Host item of the request argument's headers dict attribute will be
    ignored.

    .. _`RFC2616 section 5.2`: https://tools.ietf.org/html/rfc2616#section-5.2
    """
    sig_base_str = signature_base_string(
        request.http_method,
        base_string_uri(request.uri),
        normalize_parameters(request.params))
    expected = sign_hmac_sha1(sig_base_str, client_secret,
                              resource_owner_secret)
    # Constant-time comparison guards against timing attacks.
    match = safe_string_equals(expected, request.signature)
    if not match:
        log.debug('Verify HMAC-SHA1 failed: signature base string: %s',
                  sig_base_str)
    return match
def register_payload(self, *payloads, flavour: ModuleType):
    """Queue one or more payloads for execution after their runner is
    started."""
    runner = self.runners[flavour]
    for payload in payloads:
        self._logger.debug('registering payload %s (%s)',
                           NameRepr(payload), NameRepr(flavour))
        runner.register_payload(payload)
def add_repo(name,
             description=None,
             homepage=None,
             private=None,
             has_issues=None,
             has_wiki=None,
             has_downloads=None,
             auto_init=None,
             gitignore_template=None,
             license_template=None,
             profile="github"):
    """Create a new github repository.

    name
        The name of the repository to be created.
    description
        The description of the repository.
    homepage
        The URL with more information about the repository.
    private
        The visibility of the repository.  Note that private
        repositories require a paid GitHub account.
    has_issues
        Whether to enable issues for this repository.
    has_wiki
        Whether to enable the wiki for this repository.
    has_downloads
        Whether to enable downloads for this repository.
    auto_init
        Whether to create an initial commit with an empty README.
    gitignore_template
        The desired language or platform for a .gitignore, e.g "Haskell".
    license_template
        The desired LICENSE template to apply, e.g "mit" or "mozilla".
    profile
        The name of the profile configuration to use.  Defaults to
        ``github``.

    CLI Example:

    .. code-block:: bash

        salt myminion github.add_repo 'repo_name'

    .. versionadded:: 2016.11.0
    """
    try:
        client = _get_client(profile)
        organization = client.get_organization(
            _get_config_value(profile, 'org_name')
        )
        given_params = {
            'description': description,
            'homepage': homepage,
            'private': private,
            'has_issues': has_issues,
            'has_wiki': has_wiki,
            'has_downloads': has_downloads,
            'auto_init': auto_init,
            'gitignore_template': gitignore_template,
            'license_template': license_template
        }
        parameters = {'name': name}
        # Only forward parameters the caller actually supplied.
        for param_name, param_value in six.iteritems(given_params):
            if param_value is not None:
                parameters[param_name] = param_value
        # POST directly via the requester so optional fields not
        # exposed by the high-level API can be passed through.
        organization._requester.requestJsonAndCheck(
            "POST",
            organization.url + "/repos",
            input=parameters
        )
        return True
    except github.GithubException:
        log.exception('Error creating a repo')
        return False
def list(self, **kwargs):
    """Display all projects from the database."""
    projects = Project.select().order_by(Project.name)

    if len(projects) == 0:
        self._print('No projects available', 'yellow')
        return

    for project in projects:
        # (The original computed this formatting twice, assigning an
        # unused ``project_repr`` local; the dead code is removed.)
        row = '- {}'.format(self._PROJECT_ITEM.format(project.name,
                                                      project.path))
        six.print_(row)
def _fill_file_path(line, data):
    """Fill in a full file path in the configuration file from the
    data dictionary."""
    def _find_file(xs, target):
        # Recursively search nested dicts/lists for an existing path
        # ending in "/<target>".
        if isinstance(xs, dict):
            for v in xs.values():
                f = _find_file(v, target)
                if f:
                    return f
        elif isinstance(xs, (list, tuple)):
            for x in xs:
                f = _find_file(x, target)
                if f:
                    return f
        elif (isinstance(xs, six.string_types) and os.path.exists(xs)
              and xs.endswith("/%s" % target)):
            return xs

    orig_file = line.split("=")[-1].replace('"', '').strip()
    full_file = _find_file(data, os.path.basename(orig_file))
    if not full_file and os.path.exists(os.path.abspath(orig_file)):
        # Fall back to the literal path when it exists locally.
        full_file = os.path.abspath(orig_file)
    assert full_file, "Did not find vcfanno input file %s" % (orig_file)
    return 'file="%s"\n' % full_file
def postappend(self):
    """Run checks after this element is added to another.

    Associates the parent's document when this element lacks one, and
    triggers deep validation when the document requests it.  Mostly
    for internal use; may raise exceptions to prevent the addition.
    """
    if not self.doc and self.parent.doc:
        self.setdocument(self.parent.doc)
    if self.doc and self.doc.deepvalidation:
        self.deepvalidation()
def reconcileLimits(self):
    """Ensure ``self.minValue < self.maxValue``, adjusting as needed.

    Consistent limits are left untouched. A bound whose source marks it
    as user-fixed is preserved; the other bound is moved by
    ``chooseDelta``. If both bounds are fixed, raise GraphError. If
    neither is fixed, both are recentered around their average.
    """
    if self.minValue < self.maxValue:
        return  # already consistent

    min_is_fixed = self.minValueSource in ['min']
    max_is_fixed = self.maxValueSource in ['max', 'limit']

    if min_is_fixed and max_is_fixed:
        raise GraphError('The %s must be less than the %s'
                         % (self.minValueSource, self.maxValueSource))
    if min_is_fixed:
        self.maxValue = self.minValue + self.chooseDelta(self.minValue)
    elif max_is_fixed:
        self.minValue = self.maxValue - self.chooseDelta(self.maxValue)
    else:
        spread = self.chooseDelta(max(abs(self.minValue), abs(self.maxValue)))
        midpoint = (self.minValue + self.maxValue) / 2.0
        self.minValue = midpoint - spread
        self.maxValue = midpoint + spread
If self.minValue is not less than self.maxValue, fix the problem. If self.minValue is not less than self.maxValue, adjust self.minValue and/or self.maxValue (depending on which was not specified explicitly by the user) to make self.minValue < self.maxValue. If the user specified both limits explicitly, then raise GraphError.
def validate(self, r):
    """Validate a result and decide whether it should be displayed.

    Called automatically by self.result. A result is invalidated when it
    has no description, when its size or jump target extends past the end
    of the file, or when it reports a location different from its offset.
    Consecutive valid results sharing the same id ("one of many") are
    collapsed: only the first occurrence stays displayed.
    """
    if self.show_invalid:
        # Override mode: treat everything as valid.
        r.valid = True
    elif r.valid:
        if not r.description:
            r.valid = False
        # Reject results whose declared size runs past EOF.
        if r.size and (r.size + r.offset) > r.file.size:
            r.valid = False
        # Reject results whose jump target runs past EOF.
        if r.jump and (r.jump + r.offset) > r.file.size:
            r.valid = False
        if hasattr(r, "location") and (r.location != r.offset):
            r.valid = False
    if r.valid:
        # Track repeated ids so only the first of a run is displayed.
        if r.id == self.one_of_many:
            r.display = False
        elif r.many:
            self.one_of_many = r.id
        else:
            self.one_of_many = None
Called automatically by self.result.
def pdebug(*args, **kwargs):
    """Print formatted debug output to stderr with indentation control.

    Output is emitted only when the message groups (``kwargs['groups']``,
    defaulting to ``["debug"]``) pass ``should_msg``. Text is rendered in
    cyan; colorama is initialized lazily on first use.
    """
    if should_msg(kwargs.get("groups", ["debug"])):
        global colorama_init
        if not colorama_init:
            # One-time colorama setup, deferred until first debug message.
            colorama_init = True
            colorama.init()
        args = indent_text(*args, **kwargs)
        sys.stderr.write(colorama.Fore.CYAN)
        sys.stderr.write("".join(args))
        sys.stderr.write(colorama.Fore.RESET)
        sys.stderr.write("\n")
print formatted output to stderr with indentation control
def linear_regression(self, target, regression_length, mask=NotSpecified):
    """Construct a Factor performing OLS regressions of `self` on `target`.

    Only factors deemed safe as inputs to other factors (e.g. `Returns`,
    or factors created via `Factor.rank` / `Factor.zscore`) may call this.

    Parameters
    ----------
    target : zipline.pipeline.Term with a numeric dtype
        Predictor/independent variable for each regression. May be a
        Factor, a BoundColumn or a Slice; two-dimensional targets are
        regressed asset-wise.
    regression_length : int
        Length of the lookback window over which to compute each
        regression.
    mask : zipline.pipeline.Filter, optional
        Filter describing which assets should be regressed with the
        target slice each day.

    Returns
    -------
    regressions : zipline.pipeline.factors.RollingLinearRegression
        A new Factor computing linear regressions of `target` against the
        columns of `self`.

    See Also
    --------
    :func:`scipy.stats.linregress`
    :class:`zipline.pipeline.factors.RollingLinearRegressionOfReturns`
    """
    # Local import; RollingLinearRegression itself depends on Factor.
    from .statistical import RollingLinearRegression
    regression_kwargs = dict(
        dependent=self,
        independent=target,
        regression_length=regression_length,
        mask=mask,
    )
    return RollingLinearRegression(**regression_kwargs)
Construct a new Factor that performs an ordinary least-squares regression predicting the columns of `self` from `target`. This method can only be called on factors which are deemed safe for use as inputs to other factors. This includes `Returns` and any factors created from `Factor.rank` or `Factor.zscore`. Parameters ---------- target : zipline.pipeline.Term with a numeric dtype The term to use as the predictor/independent variable in each regression. This may be a Factor, a BoundColumn or a Slice. If `target` is two-dimensional, regressions are computed asset-wise. regression_length : int Length of the lookback window over which to compute each regression. mask : zipline.pipeline.Filter, optional A Filter describing which assets should be regressed with the target slice each day. Returns ------- regressions : zipline.pipeline.factors.RollingLinearRegression A new Factor that will compute linear regressions of `target` against the columns of `self`. Examples -------- Suppose we want to create a factor that regresses AAPL's 10-day returns against the 10-day returns of all other assets, computing each regression over 30 days. This can be achieved by doing the following:: returns = Returns(window_length=10) returns_slice = returns[sid(24)] aapl_regressions = returns.linear_regression( target=returns_slice, regression_length=30, ) This is equivalent to doing:: aapl_regressions = RollingLinearRegressionOfReturns( target=sid(24), returns_length=10, regression_length=30, ) See Also -------- :func:`scipy.stats.linregress` :class:`zipline.pipeline.factors.RollingLinearRegressionOfReturns`
def Parse(self, stat, file_obj, knowledge_base):
    """Identify the path variables set within a shell configuration file.

    Expands paths within the context of the file, but does not infer
    fully expanded paths from external states (e.g. sourced files).
    csh-family files get csh variable parsing; everything else is parsed
    as sh-style variables.

    Args:
      stat: statentry for the file being parsed.
      file_obj: VFSFile to read.
      knowledge_base: unused.

    Yields:
      An AttributedDict per path variable: 'name' is the variable name,
      'vals' its values, 'config' the originating file path.
    """
    _ = knowledge_base  # unused
    lines = self.parser.ParseEntries(utils.ReadFileBytesAsUnicode(file_obj))
    # Dispatch on the file's basename: csh-style vs sh-style syntax.
    if os.path.basename(stat.pathspec.path) in self._CSH_FILES:
        paths = self._ParseCshVariables(lines)
    else:
        paths = self._ParseShVariables(lines)
    for path_name, path_vals in iteritems(paths):
        yield rdf_protodict.AttributedDict(
            config=stat.pathspec.path, name=path_name, vals=path_vals)
Identifies the paths set within a file. Expands paths within the context of the file, but does not infer fully expanded paths from external states. There are plenty of cases where path attributes are unresolved, e.g. sourcing other files. Lines are not handled literally. A field parser is used to: - Break lines with multiple distinct statements into separate lines (e.g. lines with a ';' separating stanzas. - Strip out comments. - Handle line continuations to capture multi-line configurations into one statement. Args: stat: statentry file_obj: VFSFile knowledge_base: unused Yields: An attributed dict for each env vars. 'name' contains the path name, and 'vals' contains its vals.
def find_ids(self, element_ids):
    """Find elements with the given IDs and wrap them in a new Panel.

    Parameters
    ----------
    element_ids : list of strings
        IDs to look up.

    Returns
    -------
    A new `Panel` object containing all the found elements.
    """
    found = []
    for eid in element_ids:
        found.append(_transform.FigureElement.find_id(self, eid))
    return Panel(*found)
Find elements with given IDs. Parameters ---------- element_ids : list of strings list of IDs to find Returns ------- a new `Panel` object which contains all the found elements.
def tokenize(string):
    """Yield a Token for every match of TOKENS_REGEX in the input string."""
    for m in TOKENS_REGEX.finditer(string):
        token_text = m.group().strip()
        yield Token(m.lastgroup, token_text, m.span())
Match and yield all the tokens of the input string.
def choices_validator(choices):
    """Return a validator function checking that a value is in *choices*.

    Args:
        choices (list, set, tuple): allowed choices for the new validator.

    The returned callable raises ValidationError for any value outside
    *choices* and returns None otherwise.
    """
    def validator(value):
        if value in choices:
            return
        raise ValidationError(
            "{} is not in {}".format(value, list(choices))
        )
    return validator
Return validator function that will check if ``value in choices``. Args: choices (list, set, tuple): allowed choices for new validator
def wait(timeout: Optional[float] = None) -> Iterator[Any]:
    """Block until at least one event is pending, then return an event iterator.

    `timeout` is the maximum number of seconds to wait (float, millisecond
    precision), or None to wait forever. Returns the same iterator as a
    call to :any:`tcod.event.get`.
    """
    if timeout is None:
        tcod.lib.SDL_WaitEvent(tcod.ffi.NULL)
    else:
        tcod.lib.SDL_WaitEventTimeout(tcod.ffi.NULL, int(timeout * 1000))
    return get()
Block until events exist, then return an event iterator. `timeout` is the maximum number of seconds to wait as a floating point number with millisecond precision, or it can be None to wait forever. Returns the same iterator as a call to :any:`tcod.event.get`. Example:: for event in tcod.event.wait(): if event.type == "QUIT": print(event) raise SystemExit() elif event.type == "KEYDOWN": print(event) elif event.type == "MOUSEBUTTONDOWN": print(event) elif event.type == "MOUSEMOTION": print(event) else: print(event)
def asyncPipeStringtokenizer(context=None, _INPUT=None, conf=None, **kwargs):
    """Asynchronously split strings into tokens delimited by separators.

    Loopable. Parameters
    ----------
    context : pipe2py.Context object
    _INPUT : twisted Deferred iterable of items or strings
    conf : {
        'to-str': {'value': <delimiter>},
        'dedupe': {'type': 'bool', value': <1>},
        'sort': {'type': 'bool', value': <1>}
    }

    Returns
    -------
    _OUTPUT : twisted.internet.defer.Deferred generator of items
    """
    # Normalize the delimiter key: Yahoo Pipes configs use 'to-str'.
    conf['delimiter'] = conf.pop('to-str', dict.get(conf, 'delimiter'))
    splits = yield asyncGetSplits(_INPUT, conf, **cdicts(opts, kwargs))
    parsed = yield asyncDispatch(splits, *get_async_dispatch_funcs())
    items = yield asyncStarMap(partial(maybeDeferred, parse_result), parsed)
    _OUTPUT = utils.multiplex(items)
    returnValue(_OUTPUT)
A string module that asynchronously splits a string into tokens delimited by separators. Loopable. Parameters ---------- context : pipe2py.Context object _INPUT : twisted Deferred iterable of items or strings conf : { 'to-str': {'value': <delimiter>}, 'dedupe': {'type': 'bool', value': <1>}, 'sort': {'type': 'bool', value': <1>} } Returns ------- _OUTPUT : twisted.internet.defer.Deferred generator of items
def recurse_module(
    overall_record, index, shared, stop_types=STOP_TYPES, already_seen=None,
    min_size=0
):
    """Create a has-a recursive-cost hierarchy.

    Mutates records in-place to produce a hierarchy of memory usage based
    on reference-holding cost assignment. Records that already carry a
    'totsize' are skipped. Each child's total size is divided by the
    number of holders sharing it (looked up in ``shared``) before being
    summed into the parent's recursive size.

    NOTE(review): ``min_size`` is currently unused — confirm whether size
    filtering was intended here.
    """
    for record in recurse(
        overall_record, index,
        stop_types=stop_types, already_seen=already_seen, type_group=True,
    ):
        if record.get('totsize') is not None:
            continue
        rinfo = record
        rinfo['module'] = overall_record.get('name', NON_MODULE_REFS)
        if not record['refs']:
            rinfo['rsize'] = 0
            rinfo['children'] = []
        else:
            rinfo['children'] = rinfo_children = list(
                children(record, index, stop_types=stop_types)
            )
            # Split each child's cost evenly among all holders sharing it
            # so shared objects are not double-counted.
            rinfo['rsize'] = sum([
                (
                    child.get('totsize', 0.0) / float(len(shared.get(
                        child['address'], [])) or 1)
                )
                for child in rinfo_children
            ], 0.0)
        rinfo['totsize'] = record['size'] + rinfo['rsize']
    return None
Creates a has-a recursive-cost hierarchy Mutates objects in-place to produce a hierarchy of memory usage based on reference-holding cost assignment
def read_sql(self, code: str) -> pandas.DataFrame:
    """Evaluate a Spark SQL statement and return the result as a DataFrame.

    :param code: The Spark SQL statement to evaluate.
    :raises ValueError: if this session is not a SQL session.
    :raises RuntimeError: if the statement produced no JSON output.
    """
    if self.kind != SessionKind.SQL:
        raise ValueError("not a SQL session")
    output = self._execute(code)
    output.raise_for_status()
    payload = output.json
    if payload is None:
        raise RuntimeError("statement had no JSON output")
    return dataframe_from_json_output(payload)
Evaluate a Spark SQL statement and retrieve the result. :param code: The Spark SQL statement to evaluate.
def split_input(val, mapper=None):
    """Split *val* into a list, applying *mapper* to every element.

    Lists are mapped element-wise as-is; any other value is split on
    commas (falling back to its text representation when it is not
    string-like), with surrounding whitespace stripped from each part.

    :param val: list or (string-like) scalar to split.
    :param mapper: optional callable applied to each element; identity by
        default.
    :returns: list of (mapped) elements.
    """
    if mapper is None:
        mapper = lambda x: x  # identity
    if isinstance(val, list):
        return [mapper(x) for x in val]
    try:
        parts = val.split(',')
    except AttributeError:
        # Non-string scalars (ints, bools, ...): coerce to text first.
        parts = six.text_type(val).split(',')
    # Strip and map outside the try block so an AttributeError raised by
    # the mapper is never masked (the original wrapped the whole map in
    # the except and silently re-ran it on the coerced value).
    return [mapper(part.strip()) for part in parts]
Take an input value and split it into a list, returning the resulting list
def charm_icon(self, charm_id, channel=None):
    """Fetch the raw icon content for a charm.

    @param charm_id The ID of the charm.
    @param channel Optional channel name.
    """
    icon_url = self.charm_icon_url(charm_id, channel=channel)
    return self._get(icon_url).content
Get the charm icon. @param charm_id The ID of the charm. @param channel Optional channel name.
def render_secretfile(opt):
    """Render and return the Secretfile construct for the given options."""
    LOG.debug("Using Secretfile %s", opt.secretfile)
    secretfile = abspath(opt.secretfile)
    return render(secretfile, load_vars(opt))
Renders and returns the Secretfile construct
def _check_tcpdump():
    """Return True if the tcpdump command can be started."""
    with open(os.devnull, 'wb') as devnull:
        try:
            proc = subprocess.Popen(
                [conf.prog.tcpdump, "--version"],
                stdout=devnull,
                stderr=subprocess.STDOUT,
            )
        except OSError:
            return False
        # OpenBSD's tcpdump exits with status 1 on --version.
        expected_status = 1 if OPENBSD else 0
        return proc.wait() == expected_status
Return True if the tcpdump command can be started
def updateFromKwargs(self, properties, kwargs, collector, **unused):
    """Primary entry point to turn 'kwargs' into 'properties'.

    Stores the value extracted by ``getFromKwargs`` under this
    parameter's name in the ``properties`` dict.
    """
    properties[self.name] = self.getFromKwargs(kwargs)
Primary entry point to turn 'kwargs' into 'properties
def poll(self, id):
    """Fetch information about the poll with the given id.

    Returns a `poll dict`_.
    """
    poll_id = self.__unpack_id(id)
    url = '/api/v1/polls/{0}'.format(str(poll_id))
    return self.__api_request('GET', url)
Fetch information about the poll with the given id Returns a `poll dict`_.
def url_paths(self):
    """Mapping of mocked url paths to their handlers.

    Each templated path from ``self._url_module.url_paths`` is formatted
    with an empty string to produce the concrete path to mock.
    """
    templated_paths = self._url_module.url_paths
    return {
        template.format(""): handler
        for template, handler in templated_paths.items()
    }
A dictionary of the paths of the urls to be mocked with this service and the handlers that should be called in their place
def _to_ndarray(self, a):
    """Coerce *a* to a numpy ndarray.

    Lists and tuples are converted; anything else must already satisfy
    ``is_ndarray`` or a TypeError is raised.
    """
    if isinstance(a, (list, tuple)):
        a = numpy.array(a)
    if is_ndarray(a):
        return a
    raise TypeError("Expected an ndarray but got object of type '{}' instead".format(type(a)))
Casts Python lists and tuples to a numpy array or raises a TypeError.
def sanitize_random(value):
    """Return a random string of the same length as the given value.

    Falsy values (empty string, None, ...) are returned unchanged.
    """
    if not value:
        return value
    picks = [random.choice(CHARACTERS) for _ in value]
    return ''.join(picks)
Random string of same length as the given value.
def sobol(N, dim, scrambled=1):
    """Sobol sequence.

    Parameters
    ----------
    N : int
        length of sequence
    dim : int
        dimension
    scrambled : int
        which scrambling method to use:
        + 0: no scrambling
        + 1: Owen's scrambling
        + 2: Faure-Tezuka
        + 3: Owen + Faure-Tezuka

    Returns
    -------
    (N, dim) numpy array.

    Notes
    -----
    The scrambling seed is set randomly. The underlying Fortran code
    occasionally returns numbers outside (0, 1) for a very small number
    of seeds; when that happens we simply retry with a new seed.
    """
    while(True):
        seed = np.random.randint(2**32)
        out = lowdiscrepancy.sobol(N, dim, scrambled, seed, 1, 0)
        # Accept unscrambled output as-is; otherwise retry until every
        # sample lies strictly inside (0, 1).
        if (scrambled == 0) or ((out < 1.).all() and (out > 0.).all()):
            return out
Sobol sequence. Parameters ---------- N : int length of sequence dim: int dimension scrambled: int which scrambling method to use: + 0: no scrambling + 1: Owen's scrambling + 2: Faure-Tezuka + 3: Owen + Faure-Tezuka Returns ------- (N, dim) numpy array. Notes ----- For scrambling, seed is set randomly. Fun fact: this venerable but playful piece of Fortran code occasionally returns numbers above 1. (i.e. for a very small number of seeds); when this happen we just start over (since the seed is randomly generated).
def subscriptions(self):
    """Return a list of the active COV subscriptions across all detections."""
    if _debug: ChangeOfValueServices._debug("subscriptions")

    active = []
    for cov_detection in self.cov_detections.values():
        active.extend(cov_detection.cov_subscriptions)
    return active
Return a list of the active subscriptions.
def get_convex_hull(self):
    """Get a convex polygon object that contains projections of all the
    points of the mesh.

    :returns:
        Instance of :class:`openquake.hazardlib.geo.polygon.Polygon` that
        is a convex hull around all the points in this mesh. If the
        original mesh had only one point, the resulting polygon has a
        square shape with a side length of 10 meters. If there were only
        two points, resulting polygon is a stripe 10 meters wide.
    """
    proj, polygon2d = self._get_proj_convex_hull()
    # Degenerate hulls (single point or collinear points) come back as
    # Point/LineString; buffer them into a proper 2D polygon.
    if isinstance(polygon2d, (shapely.geometry.LineString,
                              shapely.geometry.Point)):
        polygon2d = polygon2d.buffer(self.DIST_TOLERANCE, 1)
    # NOTE(review): local import — presumably avoids a circular import
    # with the polygon module; confirm before moving to module level.
    from openquake.hazardlib.geo.polygon import Polygon
    return Polygon._from_2d(polygon2d, proj)
Get a convex polygon object that contains projections of all the points of the mesh. :returns: Instance of :class:`openquake.hazardlib.geo.polygon.Polygon` that is a convex hull around all the points in this mesh. If the original mesh had only one point, the resulting polygon has a square shape with a side length of 10 meters. If there were only two points, resulting polygon is a stripe 10 meters wide.
def stdout(self):
    """Return an open read handle to the stdout artifact of the Ansible run.

    :raises AnsibleRunnerException: if the stdout artifact does not exist.
    """
    stdout_path = os.path.join(self.config.artifact_dir, 'stdout')
    if not os.path.exists(stdout_path):
        raise AnsibleRunnerException("stdout missing")
    # Reuse the already-computed path (the original rebuilt it with a
    # second os.path.join).
    return open(stdout_path, 'r')
Returns an open file handle to the stdout representing the Ansible run
def remote_file_exists(self, url):
    """Check that the remote file at *url* exists.

    Issues a HEAD request and raises RemoteFileDoesntExist unless the
    response status is 200.

    :param url: The url that has to be checked.
    :type url: String
    """
    response_status = requests.head(url).status_code
    if response_status != 200:
        raise RemoteFileDoesntExist
Checks whether the remote file exists. :param url: The url that has to be checked. :type url: String :raises RemoteFileDoesntExist: if the HEAD request does not return status 200.
def get(self, udid):
    """Pop the next task for *udid* from its queue and write it back.

    Waits up to ``timeout`` seconds (query argument, default 20.0) for an
    item; writes an empty body on timeout.
    """
    timeout = self.get_argument('timeout', 20.0)
    if timeout is not None:
        timeout = float(timeout)
    que = self.ques[udid]
    try:
        # The queue deadline is an absolute timestamp, not a duration.
        item = yield que.get(timeout=time.time()+timeout)
        print 'get from queue:', item
        self.write(item)
        que.task_done()
    except gen.TimeoutError:
        print 'timeout'
        self.write('')
    finally:
        self.finish()
get new task
def fix_client_permissions(portal):
    """Fix workflow role mappings (permissions) for all client objects."""
    wfs = get_workflows()
    start = time.time()
    clients = portal.clients.objectValues()
    total = len(clients)
    # Count from 1 so progress reads 1/N .. N/N (it previously logged
    # 0/N .. N-1/N).
    for num, client in enumerate(clients, start=1):
        logger.info("Fixing permission for client {}/{} ({})"
                    .format(num, total, client.getName()))
        update_role_mappings(client, wfs=wfs)
    end = time.time()
    logger.info("Fixing client permissions took %.2fs" % float(end-start))
    transaction.commit()
Fix client permissions
def profile():
    """View for editing a profile.

    Dispatches on the submitted form name ('profile' or 'verification')
    and re-renders the profile template with both forms.
    """
    verification_form = VerificationForm(formdata=None, prefix="verification")
    profile_form = profile_form_factory()
    # Only the form named in the 'submit' field is processed.
    form = request.form.get('submit', None)
    if form == 'profile':
        handle_profile_form(profile_form)
    elif form == 'verification':
        handle_verification_form(verification_form)
    return render_template(
        current_app.config['USERPROFILES_PROFILE_TEMPLATE'],
        profile_form=profile_form,
        verification_form=verification_form,)
View for editing a profile.
def _get_default_annual_spacing(nyears): if nyears < 11: (min_spacing, maj_spacing) = (1, 1) elif nyears < 20: (min_spacing, maj_spacing) = (1, 2) elif nyears < 50: (min_spacing, maj_spacing) = (1, 5) elif nyears < 100: (min_spacing, maj_spacing) = (5, 10) elif nyears < 200: (min_spacing, maj_spacing) = (5, 25) elif nyears < 600: (min_spacing, maj_spacing) = (10, 50) else: factor = nyears // 1000 + 1 (min_spacing, maj_spacing) = (factor * 20, factor * 100) return (min_spacing, maj_spacing)
Returns a default spacing between consecutive ticks for annual data.
def extendedboldqc(auth, label, scan_ids=None, project=None, aid=None):
    """Get ExtendedBOLDQC data as a sequence of dictionaries.

    :param auth: XNAT authentication object (:mod:`yaxil.XnatAuth`)
    :param label: XNAT MR Session label
    :param scan_ids: Scan numbers to return (None returns all scans)
    :param project: XNAT MR Session project
    :param aid: XNAT Accession ID (looked up from *label* when not given)
    :returns: Generator of scan data dictionaries
    """
    if not aid:
        aid = accession(auth, label, project)
    path = '/data/experiments'
    params = {
        'xsiType': 'neuroinfo:extendedboldqc',
        'columns': ','.join(extendedboldqc.columns.keys())
    }
    if project:
        params['project'] = project
    params['xnat:mrSessionData/ID'] = aid
    # Keep the response payload distinct from the per-row loop variable;
    # the original reused the name 'result' for both.
    _, payload = _get(auth, path, 'json', autobox=True, params=params)
    for row in payload['ResultSet']['Result']:
        # 'is None' instead of '== None'; None means "all scans".
        if scan_ids is None or row['neuroinfo:extendedboldqc/scan/scan_id'] in scan_ids:
            data = dict()
            for k, v in extendedboldqc.columns.items():
                data[v] = row[k]
            yield data
Get ExtendedBOLDQC data as a sequence of dictionaries. Example: >>> import yaxil >>> import json >>> auth = yaxil.XnatAuth(url='...', username='...', password='...') >>> for eqc in yaxil.extendedboldqc2(auth, 'AB1234C') ... print(json.dumps(eqc, indent=2)) :param auth: XNAT authentication object :type auth: :mod:`yaxil.XnatAuth` :param label: XNAT MR Session label :type label: str :param scan_ids: Scan numbers to return :type scan_ids: list :param project: XNAT MR Session project :type project: str :param aid: XNAT Accession ID :type aid: str :returns: Generator of scan data dictionaries :rtype: :mod:`dict`
def make_input_from_multiple_strings(sentence_id: SentenceId, strings: List[str]) -> TranslatorInput:
    """Build a TranslatorInput from multiple strings.

    The first string provides the surface tokens; the remaining strings
    provide additional factors. All strings must parse into token
    sequences of the same length, otherwise a bad-input marker is
    returned.

    :param sentence_id: Sentence id.
    :param strings: A list of strings representing a factored input sequence.
    :return: A TranslatorInput.
    """
    if not strings:
        return TranslatorInput(sentence_id=sentence_id, tokens=[], factors=None)

    surface, *factor_strings = strings
    tokens = list(data_io.get_tokens(surface))
    factors = [list(data_io.get_tokens(fs)) for fs in factor_strings]
    expected_length = len(tokens)
    if any(len(factor) != expected_length for factor in factors):
        logger.error("Length of string sequences do not match: '%s'", strings)
        return _bad_input(sentence_id, reason=str(strings))
    return TranslatorInput(sentence_id=sentence_id, tokens=tokens, factors=factors)
Returns a TranslatorInput object from multiple strings, where the first element corresponds to the surface tokens and the remaining elements to additional factors. All strings must parse into token sequences of the same length. :param sentence_id: Sentence id. :param strings: A list of strings representing a factored input sequence. :return: A TranslatorInput.
def get(name, defval=None):
    """Return an object from the embedded synapse data folder.

    Example:

        for tld in synapse.data.get('iana.tlds'):
            dostuff(tld)

    NOTE: Files are named synapse/data/<name>.mpk

    NOTE(review): the ``defval`` parameter is currently ignored — confirm
    whether it should be returned when the data file is missing.
    """
    with s_datfile.openDatFile('synapse.data/%s.mpk' % name) as fd:
        return s_msgpack.un(fd.read())
Return an object from the embedded synapse data folder. Example: for tld in synapse.data.get('iana.tlds'): dostuff(tld) NOTE: Files are named synapse/data/<name>.mpk
def all(self):
    """Return a chained generator response containing all matching records.

    :return: Iterable response. Streamed responses are flattened into a
        single iterable; buffered responses yield the first element of
        the buffered result.
    """
    if not self._stream:
        return self._get_buffered_response()[0]
    return chain.from_iterable(self._get_streamed_response())
Returns a chained generator response containing all matching records :return: - Iterable response
def from_string(cls, s):
    """Create an instance from string *s* containing a YAML dictionary."""
    # yaml.safe_load accepts a string directly; the previous
    # cStringIO wrapper plus seek(0) round-trip was redundant.
    return cls(**yaml.safe_load(s))
Create an instance from string s containing a YAML dictionary.
def mri_head_reco_op_32_channel():
    """Reconstruction operator for 32 channel MRI of a head.

    This is a T2 weighted TSE scan of a healthy volunteer. The
    reconstruction operator is the sum of the modulus of each channel.

    See the data source with DOI `10.5281/zenodo.800527`_ or the
    `project webpage`_ for further information.

    See Also
    --------
    mri_head_data_32_channel

    References
    ----------
    .. _10.5281/zenodo.800527: https://zenodo.org/record/800527
    .. _project webpage: http://imsc.uni-graz.at/mobis/internal/\
platform_aktuell.html
    """
    # 256x256 complex image grid over a 230.4 x 230.4 field of view.
    space = odl.uniform_discr(min_pt=[-115.2, -115.2], max_pt=[115.2, 115.2],
                              shape=[256, 256], dtype=complex)
    trafo = odl.trafos.FourierTransform(space)
    # One modulus-of-inverse-transform branch per receive channel (32),
    # combined with a reduction (sum) operator.
    return odl.ReductionOperator(odl.ComplexModulus(space) * trafo.inverse, 32)
Reconstruction operator for 32 channel MRI of a head. This is a T2 weighted TSE scan of a healthy volunteer. The reconstruction operator is the sum of the modulus of each channel. See the data source with DOI `10.5281/zenodo.800527`_ or the `project webpage`_ for further information. See Also -------- mri_head_data_32_channel References ---------- .. _10.5281/zenodo.800527: https://zenodo.org/record/800527 .. _project webpage: http://imsc.uni-graz.at/mobis/internal/\ platform_aktuell.html
def predict(self, parameters, viterbi):
    """Run the forward algorithm to find the predicted distribution over classes.

    :param parameters: weight matrix multiplied into the feature tensor
        ``self.x`` (shapes (I, J, K) x (K, L) -> (I, J, L)).
    :param viterbi: if True, use max-product forward scores instead of
        sum-product.
    :returns: dict mapping class label to predicted probability.
    """
    # Project features onto the parameters for every lattice position.
    x_dot_parameters = np.einsum('ijk,kl->ijl', self.x, parameters)
    if not viterbi:
        alpha = forward_predict(self._lattice, x_dot_parameters,
                                self.state_machine.n_states)
    else:
        alpha = forward_max_predict(self._lattice, x_dot_parameters,
                                    self.state_machine.n_states)
    I, J, _ = self.x.shape
    class_Z = {}
    # Accumulate in log space; Z is the log partition over final states.
    Z = -np.inf
    for state, predicted_class in self.states_to_classes.items():
        weight = alpha[I - 1, J - 1, state]
        class_Z[self.states_to_classes[state]] = weight
        Z = np.logaddexp(Z, weight)
    # Normalize out of log space into per-class probabilities.
    return {label: np.exp(class_z - Z) for label, class_z in class_Z.items()}
Run forward algorithm to find the predicted distribution over classes.
def ping(self, reconnect=True):
    """Check if the server is alive.

    :param reconnect: If the connection is closed, reconnect.
    :raise Error: If the connection is closed and reconnect=False.
    """
    if self._sock is None:
        if reconnect:
            self.connect()
            # Only retry the ping once after a fresh connect.
            reconnect = False
        else:
            raise err.Error("Already closed")
    try:
        self._execute_command(COMMAND.COM_PING, "")
        self._read_ok_packet()
    except Exception:
        if reconnect:
            # The connection may have dropped mid-command; reconnect and
            # retry exactly once (reconnect=False on the recursive call).
            self.connect()
            self.ping(False)
        else:
            raise
Check if the server is alive. :param reconnect: If the connection is closed, reconnect. :raise Error: If the connection is closed and reconnect=False.
def addEmptyTab(self, text=''):
    """Append a new default tab widget and make it the current tab.

    When *text* is empty, the tab bar's inline editor is opened so the
    user can name the tab. Emits ``sigTabAdded`` and returns the widget.
    """
    new_tab = self.defaultTabWidget()
    index = self.count()
    self.addTab(new_tab, text)
    self.setCurrentIndex(index)
    if not text:
        self.tabBar().editTab(index)
    self.sigTabAdded.emit(new_tab)
    return new_tab
Add a new DEFAULT_TAB_WIDGET, open editor to set text if no text is given
def _apply_index_days(self, i, roll): nanos = (roll % 2) * Timedelta(days=self.day_of_month - 1).value return i + nanos.astype('timedelta64[ns]')
Add days portion of offset to DatetimeIndex i. Parameters ---------- i : DatetimeIndex roll : ndarray[int64_t] Returns ------- result : DatetimeIndex
def circle(branch: str):
    """Check that this CircleCI build is one that should create releases.

    :param branch: The branch the environment should be running against.
    """
    current_branch = os.environ.get('CIRCLE_BRANCH')
    assert current_branch == branch
    assert not os.environ.get('CI_PULL_REQUEST')
Performs necessary checks to ensure that the circle build is one that should create releases. :param branch: The branch the environment should be running against.