code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def tokenizer(self):
    """Expose the tokenizer of the default domain's IntentEngine.

    Warning: kept only for backwards compatibility; avoid this when you
    intend to use multiple domains.

    :return: the tokenizer of domain 0's IntentEngine.
    """
    default_domain = 0
    if default_domain not in self.domains:
        self.register_domain(domain=default_domain)
    return self.domains[default_domain].tokenizer
A property to link into IntentEngine's tokenizer. Warning: this is only for backwards compatibility and should not be used if you intend on using domains. Return: the domain's tokenizer from its IntentEngine
def update_redirect(self):
    """Update the parent PID's redirect to point at the current last child.

    Call this on the parent PID node after a child's status changes
    (e.g. a draft moved from RESERVED to REGISTERED).

    :raises PIDRelationConsistencyError: if there is no last child and any
        child PID is in a status other than DELETED, REGISTERED or RESERVED.
    """
    if self.last_child:
        self._resolved_pid.redirect(self.last_child)
    # no eligible last child: verify no child is in an unsupported state
    elif any(map(lambda pid: pid.status not in [PIDStatus.DELETED,
                                                PIDStatus.REGISTERED,
                                                PIDStatus.RESERVED],
                 super(PIDNodeVersioning, self).children.all())):
        raise PIDRelationConsistencyError(
            "Invalid relation state. Only REGISTERED, RESERVED "
            "and DELETED PIDs are supported."
        )
Update the parent redirect to the current last child. This method should be called on the parent PID node. Use this method when the status of a PID changed (ex: draft changed from RESERVED to REGISTERED)
def ReadItems(self, collection_link, feed_options=None):
    """Read all documents in a collection.

    :param str collection_link: link to the document collection.
    :param dict feed_options: optional feed options (defaults to empty).
    :return: query iterable over the documents.
    :rtype: query_iterable.QueryIterable
    """
    options = {} if feed_options is None else feed_options
    return self.QueryItems(collection_link, None, options)
Reads all documents in a collection. :param str collection_link: The link to the document collection. :param dict feed_options: :return: Query Iterable of Documents. :rtype: query_iterable.QueryIterable
def object(self, key):
    """Return a Storage Object handle for ``key`` in this bucket.

    The object need not exist yet.

    Args:
      key: the key of the object within the bucket.
    Returns:
      An Object instance representing the specified key.
    """
    bucket_name = self._name
    return _object.Object(bucket_name, key, context=self._context)
Retrieves a Storage Object for the specified key in this bucket. The object need not exist. Args: key: the key of the object within the bucket. Returns: An Object instance representing the specified key.
def _get_distance_term(self, C, rrup, backarc): distance_scale = -np.log10(np.sqrt(rrup ** 2 + 3600.0)) distance_scale[backarc] += (C["c2"] * rrup[backarc]) idx = np.logical_not(backarc) distance_scale[idx] += (C["c1"] * rrup[idx]) return distance_scale
Returns the distance scaling term, which varies depending on whether the site is in the forearc or the backarc
def appendContour(self, contour, offset=None):
    """Append a contour containing the same data as ``contour`` to this
    glyph.

    ``offset`` is an (x, y) shift applied to the appended data; ``None``
    means ``(0, 0)``. Returns a BaseContour representing the new contour
    in the glyph.
    """
    contour = normalizers.normalizeContour(contour)
    if offset is None:
        offset = (0, 0)
    offset = normalizers.normalizeTransformationOffset(offset)
    return self._appendContour(contour, offset)
Append a contour containing the same data as ``contour`` to this glyph. >>> contour = glyph.appendContour(contour) This will return a :class:`BaseContour` object representing the new contour in the glyph. ``offset`` indicates the x and y shift values that should be applied to the appended data. It must be a :ref:`type-coordinate` value or ``None``. If ``None`` is given, the offset will be ``(0, 0)``. >>> contour = glyph.appendContour(contour, (100, 0))
def delete_dataset(self, dataset):
    """Delete a single dataset, including all features it contains.

    Parameters
    ----------
    dataset : str
        The dataset id.

    Returns
    -------
    The HTTP response of the DELETE request.
    """
    url = URITemplate(self.baseuri + '/{owner}/{id}').expand(
        owner=self.username, id=dataset)
    return self.session.delete(url)
Deletes a single dataset, including all of the features that it contains. Parameters ---------- dataset : str The dataset id. Returns ------- HTTP status code.
def is_equal(self, another, limit=0.8):
    """Determine whether two simhashes are similar.

    :param another: an int hash value or another Simhash instance.
    :param limit: similarity threshold in [0, 1].
    :return: True if similarity is strictly greater than ``limit``.
    :raises ValueError: if ``another`` is None.
    :raises TypeError: if ``another`` has an unsupported type.
    """
    # ValueError/TypeError are Exception subclasses, so callers that
    # caught the previous generic Exception still work.
    if another is None:
        raise ValueError("Parameter another is null")
    if isinstance(another, int):
        distance = self.hamming_distance(another)
    elif isinstance(another, Simhash):
        assert self.hash_bit_number == another.hash_bit_number
        distance = self.hamming_distance(another.hash)
    else:
        raise TypeError("Unsupported parameter type %s" % type(another))
    similarity = float(self.hash_bit_number - distance) / self.hash_bit_number
    return similarity > limit
Determine two simhash are similar or not similar. :param another: another simhash. :param limit: a limit of the similarity. :return: if similarity greater than limit return true and else return false.
def exit(self):
    """Overwrite the exit method to close the GPU API."""
    if self.nvml_ready:
        try:
            pynvml.nvmlShutdown()
        except Exception as e:
            # best effort: a pynvml shutdown failure must not block exit
            logger.debug("pynvml failed to shutdown correctly ({})".format(e))
    super(Plugin, self).exit()
Overwrite the exit method to close the GPU API.
def _translate_limit(self, len_, start, num): if start > len_ or num <= 0: return 0, 0 return min(start, len_), num
Translate limit to valid bounds.
def get_remote_executors(hub_ip, port=4444):
    """Get remote executor endpoints from a Selenium Grid hub console.

    @param hub_ip: hub ip of selenium grid hub
    @param port: hub port of selenium grid hub
    @return: list of "<host>/wd/hub" endpoint URLs (empty on non-200)
    """
    resp = requests.get("http://%s:%s/grid/console" % (hub_ip, port))
    # was a tuple literal; a list matches the re.findall result type
    remote_hosts = []
    if resp.status_code == 200:
        # raw string avoids invalid-escape-sequence warnings on Python 3.12+
        remote_hosts = re.findall(r"remoteHost: ([\w/\.:]+)", resp.text)
    return [host + "/wd/hub" for host in remote_hosts]
Get remote hosts from Selenium Grid Hub Console @param hub_ip: hub ip of selenium grid hub @param port: hub port of selenium grid hub
def set_quality_index(self):
    """Set the current signal quality in the combobox.

    Reads the quality recorded for the epoch at the current window
    position; selects index -1 (nothing) when no quality is recorded.
    """
    window_start = self.parent.value('window_start')
    window_length = self.parent.value('window_length')
    qual = self.annot.get_stage_for_epoch(window_start, window_length,
                                          attr='quality')
    if qual is None:
        self.idx_quality.setCurrentIndex(-1)
    else:
        self.idx_quality.setCurrentIndex(QUALIFIERS.index(qual))
Set the current signal quality in combobox.
def send_slack_message(channel, text):
    """Send a message to Slack via an HTTP POST."""
    payload = json.dumps({'channel': channel, 'text': text})
    return httplib2.Http().request(SLACK_MESSAGE_URL, 'POST', body=payload)
Send a message to Slack
def make_datastore_api(client):
    """Create an instance of the GAPIC Datastore API.

    :type client: :class:`~google.cloud.datastore.client.Client`
    :param client: The client that holds configuration details.

    :rtype: :class:`.datastore.v1.datastore_client.DatastoreClient`
    :returns: A datastore API instance with the proper credentials.
    """
    parse_result = six.moves.urllib_parse.urlparse(client._base_url)
    host = parse_result.netloc
    # only build an authenticated channel when the endpoint is HTTPS
    if parse_result.scheme == "https":
        channel = make_secure_channel(client._credentials, DEFAULT_USER_AGENT,
                                      host)
    else:
        channel = insecure_channel(host)
    return datastore_client.DatastoreClient(
        channel=channel,
        client_info=client_info.ClientInfo(
            client_library_version=__version__, gapic_version=__version__
        ),
    )
Create an instance of the GAPIC Datastore API. :type client: :class:`~google.cloud.datastore.client.Client` :param client: The client that holds configuration details. :rtype: :class:`.datastore.v1.datastore_client.DatastoreClient` :returns: A datastore API instance with the proper credentials.
def get_all_keys(self):
    """Get all keys indexed.

    :return: all keys from every index bucket, in insertion order
    :rtype: list(str)
    """
    # flatten with a comprehension instead of a manual extend loop
    return [key for keys in self._index.values() for key in keys]
Get all keys indexed. :return: All keys :rtype: list(str)
def get_service_uid_from(self, analysis):
    """Return the UID of the service assigned to the analysis."""
    obj = api.get_object(analysis)
    service = obj.getAnalysisService()
    return api.get_uid(service)
Return the service from the analysis
def table(self, name=DEFAULT_TABLE, **options):
    """Get access to a specific table.

    Creates the table on first access, otherwise returns the cached
    table object.

    :param name: The name of the table.
    :type name: str
    :param cache_size: How many query results to cache.
    :param table_class: Which table class to use.
    """
    try:
        return self._table_cache[name]
    except KeyError:
        pass
    cls = options.pop('table_class', self._cls_table)
    storage = self._cls_storage_proxy(self._storage, name)
    new_table = cls(storage, name, **options)
    self._table_cache[name] = new_table
    return new_table
Get access to a specific table. Creates a new table, if it hasn't been created before, otherwise it returns the cached :class:`~tinydb.Table` object. :param name: The name of the table. :type name: str :param cache_size: How many query results to cache. :param table_class: Which table class to use.
def search(self, title=None, libtype=None, **kwargs):
    """Search within this library section.

    Attributes on the media objects can be targeted to filter the
    search (e.g. ``studio=...``, ``year=1999``, ``title=...``); extra
    keyword arguments are passed straight through as filters.
    """
    args = {}
    if title:
        args['title'] = title
    if libtype:
        args['type'] = utils.searchType(libtype)
    # kwargs last, so explicit filters may override title/type
    args.update(kwargs)
    key = '/library/all%s' % utils.joinArgs(args)
    return self.fetchItems(key)
Searching within a library section is much more powerful. It seems certain attributes on the media objects can be targeted to filter this search down a bit, but I haven't found the documentation for it. Example: "studio=Comedy%20Central" or "year=1999" "title=Kung Fu" all work. Other items such as actor=<id> seem to work, but require you to already know the id of the actor. TLDR: This is untested but seems to work. Use library section search when you can.
def get_all_netting_channel_events(
        chain: BlockChainService,
        token_network_address: TokenNetworkAddress,
        netting_channel_identifier: ChannelID,
        contract_manager: ContractManager,
        from_block: BlockSpecification = GENESIS_BLOCK_NUMBER,
        to_block: BlockSpecification = 'latest',
) -> List[Dict]:
    """Helper to get all events of a NettingChannelContract.

    Builds the channel-specific topic filter, then queries the token
    network contract for matching events in [from_block, to_block].
    """
    filter_args = get_filter_args_for_all_events_from_channel(
        token_network_address=token_network_address,
        channel_identifier=netting_channel_identifier,
        contract_manager=contract_manager,
        from_block=from_block,
        to_block=to_block,
    )
    return get_contract_events(
        chain,
        contract_manager.get_contract_abi(CONTRACT_TOKEN_NETWORK),
        typing.Address(token_network_address),
        filter_args['topics'],
        from_block,
        to_block,
    )
Helper to get all events of a NettingChannelContract.
def _and_join(self, close_group=False): if not self.initialized: raise ValueError("You must add a search term before adding an operator.") else: self._operator("AND", close_group=close_group) return self
Combine terms with AND. There must be a term added before using this method. Arguments: close_group (bool): If ``True``, will end the current group and start a new one. If ``False``, will continue current group. Example:: If the current query is "(term1" .and(close_group=True) => "(term1) AND (" .and(close_group=False) => "(term1 AND " Returns: SearchHelper: Self
def copy_model_instance(obj):
    """Copy a Django model instance into a dict, excluding automatically
    created fields (auto PKs, auto-created reverse relations).

    :param obj: Django model object
    :return: copy of the model instance as a dictionary
    """
    fields = getattr(obj, '_meta').get_fields(include_parents=False)
    result = {}
    for field in fields:
        if not field.auto_created:
            result[field.name] = getattr(obj, field.name)
    return result
Copy Django model instance as a dictionary excluding automatically created fields like an auto-generated sequence as a primary key or an auto-created many-to-one reverse relation. :param obj: Django model object :return: copy of model instance as dictionary
def listen(manifest, config, model_mock=False):
    """IRC listening process.

    Stores the manifest and the model_mock flag on the config, then runs
    the IRC bot until interrupted (Ctrl-C exits cleanly).
    """
    config['manifest'] = manifest
    config['model_mock'] = model_mock
    IRC = IrcBot(config)
    try:
        IRC.start()
    except KeyboardInterrupt:
        pass
IRC listening process.
def _fix_syscall_ip(state):
    """Resolve syscall information from the state, get the address of the
    syscall SimProcedure, and set the state's IP accordingly.

    Does nothing if resolution fails.

    :param SimState state: the program state.
    :return: None
    """
    try:
        bypass = o.BYPASS_UNSUPPORTED_SYSCALL in state.options
        stub = state.project.simos.syscall(state, allow_unsupported=bypass)
        if stub:
            # jump to the syscall stub
            state.ip = stub.addr
    except AngrUnsupportedSyscallError:
        # unsupported syscall and bypass disabled: leave the IP unchanged
        pass
Resolve syscall information from the state, get the IP address of the syscall SimProcedure, and set the IP of the state accordingly. Don't do anything if the resolution fails. :param SimState state: the program state. :return: None
def rolling_count(self, window_start, window_end):
    """Count the number of non-NULL values over a rolling window of this
    SArray.

    The window for each element is the inclusive range
    [position + window_start, position + window_end].

    Parameters
    ----------
    window_start : int
        Start of the subset to count, relative to the current value.
    window_end : int
        End of the subset, relative to the current value. Must be
        greater than `window_start`.

    Returns
    -------
    out : SArray
    """
    # builtin aggregator counting non-null values; the trailing 0 is the
    # minimum number of observations required per window
    agg_op = '__builtin__nonnull__count__'
    return SArray(_proxy=self.__proxy__.builtin_rolling_apply(agg_op, window_start, window_end, 0))
Count the number of non-NULL values of different subsets over this SArray. The subset that the count is executed on is defined as an inclusive range relative to the position to each value in the SArray, using `window_start` and `window_end`. For a better understanding of this, see the examples below. Parameters ---------- window_start : int The start of the subset to count relative to the current value. window_end : int The end of the subset to count relative to the current value. Must be greater than `window_start`. Returns ------- out : SArray Examples -------- >>> import pandas >>> sa = SArray([1,2,3,None,5]) >>> series = pandas.Series([1,2,3,None,5]) A rolling count with a window including the previous 2 entries including the current: >>> sa.rolling_count(-2,0) dtype: int Rows: 5 [1, 2, 3, 2, 2] Pandas equivalent: >>> pandas.rolling_count(series, 3) 0 1 1 2 2 3 3 2 4 2 dtype: float64 A rolling count with a size of 3, centered around the current: >>> sa.rolling_count(-1,1) dtype: int Rows: 5 [2, 3, 2, 2, 1] Pandas equivalent: >>> pandas.rolling_count(series, 3, center=True) 0 2 1 3 2 2 3 2 4 1 dtype: float64 A rolling count with a window including the current and the 2 entries following: >>> sa.rolling_count(0,2) dtype: int Rows: 5 [3, 2, 2, 1, 1] A rolling count with a window including the previous 2 entries NOT including the current: >>> sa.rolling_count(-2,-1) dtype: int Rows: 5 [0, 1, 2, 2, 1]
async def _get(self, key: Text) -> Dict[Text, Any]:
    """Get the value for ``key``.

    The stored value is deserialized from JSON; an empty dict is
    returned when the key is absent or the payload is not valid JSON.
    """
    try:
        with await self.pool as r:
            return ujson.loads(await r.get(self.register_key(key)))
    except (ValueError, TypeError):
        # missing key -> r.get returns None -> TypeError from ujson;
        # malformed payload -> ValueError
        return {}
Get the value for the key. It is automatically deserialized from JSON and returns an empty dictionary by default.
def require_int(self, key: str) -> int:
    """Return the configuration value for ``key``, as an int.

    :param str key: The requested configuration key.
    :return: The configuration key's value.
    :rtype: int
    :raises ConfigMissingError: The configuration value did not exist.
    :raises ConfigTypeError: The value could not be coerced to int.
    """
    value = self.get_int(key)
    if value is not None:
        return value
    raise ConfigMissingError(self.full_key(key))
Returns a configuration value, as an int, by its given key. If it doesn't exist, or the configuration value is not a legal int, an error is thrown. :param str key: The requested configuration key. :return: The configuration key's value. :rtype: int :raises ConfigMissingError: The configuration value did not exist. :raises ConfigTypeError: The configuration value existed but couldn't be coerced to int.
def create(self, port, value, timestamp=None):
    """Post a new reading to a timeseries.

    A reading is a `port` (a tag indicating the meaning of the value),
    a `value` (any valid JSON value), and an optional timestamp (when
    omitted, the server assigns one on receipt).

    Args:
        port(string): The port to use for the new data-point
        value: The value for the new data-point

    Keyword Args:
        timestamp(:class:`datetime`): optional reading time
    """
    session = self._session
    datapoint_class = self._datapoint_class
    attributes = {
        'port': port,
        'value': value,
    }
    if timestamp is not None:
        attributes['timestamp'] = to_iso_date(timestamp)
    attributes = build_request_body('data-point', None,
                                    attributes=attributes)

    def _process(json):
        # wrap the created resource returned by the API
        data = json.get('data')
        return datapoint_class(data, session)

    # 201 Created is the expected success status
    return session.post(self._base_url, CB.json(201, _process),
                        json=attributes)
Post a new reading to a timeseries. A reading is comprised of a `port`, a `value` and a timestamp. A port is like a tag for the given reading and gives an indication of the meaning of the value. The value of the reading can be any valid json value. The timestamp is considered the time the reading was taken, as opposed to the `created` time of the data-point which represents when the data-point was stored in the Helium API. If the timestamp is not given the server will construct a timestamp upon receiving the new reading. Args: port(string): The port to use for the new data-point value: The value for the new data-point Keyword Args: timestamp(:class:`datetime`): An optional :class:`datetime` object
def rebuildtable(cls):
    """Regenerate the entire closuretree.

    Deletes all closure rows, recreates the depth-0 self-links in bulk,
    then rebuilds ancestor links node by node.
    """
    cls._closure_model.objects.all().delete()
    cls._closure_model.objects.bulk_create([cls._closure_model(
        parent_id=x['pk'],
        child_id=x['pk'],
        depth=0
    ) for x in cls.objects.values("pk")])
    for node in cls.objects.all():
        node._closure_createlink()
Regenerate the entire closuretree.
def _create_column(data, col, value):
    """Create column ``col`` in dataframe ``data`` from ``value``.

    Handles problematic values: a series whose index does not match the
    dataframe's, and scalar values assigned to a (possibly empty)
    dataframe.

    Parameters
    ----------
    data : pandas.DataFrame
        Dataframe in which to insert the value.
    col : column label
        Column name.
    value : object
        Value to assign to the column.

    Returns
    -------
    data : pandas.DataFrame
        The modified original dataframe.
    """
    # only index-bearing values (series) have .index; scalars/lists skip this
    with suppress(AttributeError):
        if not value.index.equals(data.index):
            if len(value) == len(data):
                # same length: reuse the dataframe's index positionally
                value.index = data.index
            else:
                value.reset_index(drop=True, inplace=True)

    if data.index.empty:
        # for an empty frame, a bare scalar must be wrapped in a list so
        # the assignment creates a one-row column
        try:
            len(value)
        except TypeError:
            scalar = True
        else:
            # strings have a length but are still scalar column values
            scalar = isinstance(value, str)

        if scalar:
            value = [value]

    data[col] = value
    return data
Create column in dataframe Helper method meant to deal with problematic column values. e.g When the series index does not match that of the data. Parameters ---------- data : pandas.DataFrame dataframe in which to insert value col : column label Column name value : object Value to assign to column Returns ------- data : pandas.DataFrame Modified original dataframe >>> df = pd.DataFrame({'x': [1, 2, 3]}) >>> y = pd.Series([11, 12, 13], index=[21, 22, 23]) Data index and value index do not match >>> _create_column(df, 'y', y) x y 0 1 11 1 2 12 2 3 13 Non-empty dataframe, scalar value >>> _create_column(df, 'z', 3) x y z 0 1 11 3 1 2 12 3 2 3 13 3 Empty dataframe, scalar value >>> df = pd.DataFrame() >>> _create_column(df, 'w', 3) w 0 3 >>> _create_column(df, 'z', 'abc') w z 0 3 abc
def read_sources_from_numpy_file(npfile):
    """Open a numpy pickle file and read all the sources into a table.

    Parameters
    ----------
    npfile : file name
        The input numpy pickle file.

    Returns
    -------
    tab : `~astropy.table.Table`
    """
    # the pickle stores a 0-d object array; .flat[0] unwraps the dict
    srcs = np.load(npfile).flat[0]['sources']
    roi = ROIModel()
    roi.load_sources(srcs.values())
    return roi.create_table()
Open a numpy pickle file and read all the new sources into a dictionary Parameters ---------- npfile : file name The input numpy pickle file Returns ------- tab : `~astropy.table.Table`
def windowed_run_count_ufunc(x, window):
    """Dask-parallel version of windowed_run_count_1d, i.e. the number of
    consecutive true values in array for runs at least as long as the
    given duration.

    Parameters
    ----------
    x : bool array
        Input array.
    window : int
        Minimum duration of consecutive run to accumulate values.

    Returns
    -------
    out : DataArray
        Result of applying the ufunc along the time dimension.
    """
    return xr.apply_ufunc(windowed_run_count_1d,
                          x,
                          input_core_dims=[['time'], ],
                          vectorize=True,
                          dask='parallelized',
                          # np.int was removed in NumPy 1.24; the builtin
                          # `int` is the documented equivalent dtype spec
                          output_dtypes=[int, ],
                          keep_attrs=True,
                          kwargs={'window': window})
Dask-parallel version of windowed_run_count_1d, ie the number of consecutive true values in array for runs at least as long as given duration. Parameters ---------- x : bool array Input array window : int Minimum duration of consecutive run to accumulate values. Returns ------- out : func A function operating along the time dimension of a dask-array.
def reset_tag(self, name):
    """Reset the tag and return the new tag identifier.

    :param name: The tag
    :type name: str
    :rtype: str
    """
    # 32 hex chars, identical to str(uuid4()).replace('-', '')
    new_id = uuid.uuid4().hex
    self._store.forever(self.tag_key(name), new_id)
    return new_id
Reset the tag and return the new tag identifier. :param name: The tag :type name: str :rtype: str
def remove_boards_gui(hwpack=''):
    """Remove boards by GUI.

    When no hardware package is given, prompts the user to pick one
    (or uses the only one available), then lets the user multi-select
    boards to remove.
    """
    if not hwpack:
        if len(hwpack_names()) > 1:
            hwpack = psidialogs.choice(hwpack_names(),
                                       'select hardware package to select board from!',
                                       title='select')
        else:
            hwpack = hwpack_names()[0]
        print('%s selected' % hwpack)
    if hwpack:
        sel = psidialogs.multi_choice(board_names(hwpack),
                                      'select boards to remove from %s!' % boards_txt(hwpack),
                                      title='remove boards')
        print('%s selected' % sel)
        if sel:
            for x in sel:
                remove_board(x)
                print('%s was removed' % x)
Remove boards by GUI.
def shutdown(self):
    """Close the socket immediately and mark the connection as down."""
    if not self.sock:
        return
    self.sock.close()
    self.sock = None
    self.connected = False
close socket, immediately.
def _get_zoom_mat(sw:float, sh:float, c:float, r:float)->AffineMatrix:
    "`sw`,`sh` scale width,height - `c`,`r` focus col,row."
    scale_row = [sw, 0, c]
    shear_row = [0, sh, r]
    homogeneous_row = [0, 0, 1.]
    return [scale_row, shear_row, homogeneous_row]
`sw`,`sh` scale width,height - `c`,`r` focus col,row.
def create_logger(name, formatter=None, handler=None, level=None):
    """Return a new logger for the specified name.

    Any existing handlers on the logger are discarded; a new handler
    (a stdout StreamHandler by default) is attached at ``level``
    (DEBUG by default), with ``formatter`` applied when given.
    """
    logger = logging.getLogger(name)
    logger.handlers = []
    effective_level = logging.DEBUG if level is None else level
    stream_handler = handler if handler is not None else logging.StreamHandler(sys.stdout)
    if formatter is not None:
        stream_handler.setFormatter(formatter)
    stream_handler.setLevel(effective_level)
    logger.setLevel(effective_level)
    logger.addHandler(stream_handler)
    return logger
Returns a new logger for the specified name.
def _handle_exc(exception):
    """Record an exception with stack trace to FogBugz via BugzScout,
    asynchronously (a celery worker must process the task before it is
    reported).

    :param exception: uncaught exception thrown in app
    :return: a list containing one empty string
    """
    bugzscout.ext.celery_app.submit_error.delay(
        'http://fogbugz/scoutSubmit.asp',
        'error-user',
        'MyAppProject',
        'Errors',
        'An error occurred in MyApp: {0}'.format(exception.message),
        extra=traceback.extract_tb(*sys.exc_info()))
    return ['']
Record exception with stack trace to FogBugz via BugzScout, asynchronously. Returns an empty string. Note that this will not be reported to FogBugz until a celery worker processes this task. :param exception: uncaught exception thrown in app
def format_label(self, field, counter):
    """Render the HTML ``<label>`` for a form field.

    A ``class="required"`` attribute is added only for required fields.
    Previously, the boolean ``False`` was interpolated straight into the
    markup for optional fields (``... False>``); use an empty string
    instead.
    """
    required_attr = 'class="required"' if field.field.required else ''
    return '<label for="id_formfield_%s" %s>%s</label>' % (
        counter, required_attr, field.label)
Format the label for each field
def range(self, count):
    """Create a list of colors evenly spaced along this scale's domain.

    :param int count: The number of colors to return (must be > 1).
    :rtype: list
    :returns: A list of spectra.Color objects.
    """
    if count <= 1:
        raise ValueError("Range size must be greater than 1.")
    dom = self._domain
    span = dom[-1] - dom[0]
    colors = []
    pos = 0
    while pos < count:
        colors.append(self(dom[0] + span * float(pos) / (count - 1)))
        pos += 1
    return colors
Create a list of colors evenly spaced along this scale's domain. :param int count: The number of colors to return. :rtype: list :returns: A list of spectra.Color objects.
def add(self, value):
    """Add value to the Redis set, (re)arming the expiry while the set
    holds fewer than two members."""
    result = self.redis.sadd(self.key, value)
    if self.redis.scard(self.key) < 2:
        self.redis.expire(self.key, self.expire)
    return result
Add value to set.
def get_grade_system_ids_by_gradebooks(self, gradebook_ids):
    """Gets the list of ``GradeSystem Ids`` corresponding to a list of
    ``Gradebooks``.

    arg:    gradebook_ids (osid.id.IdList): list of gradebook ``Ids``
    return: (osid.id.IdList) - list of grade system ``Ids``
    raise:  NullArgument - ``gradebook_ids`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    systems = self.get_grade_systems_by_gradebooks(gradebook_ids)
    return IdList([system.get_id() for system in systems])
Gets the list of ``GradeSystem Ids`` corresponding to a list of ``Gradebooks``. arg: gradebook_ids (osid.id.IdList): list of gradebook ``Ids`` return: (osid.id.IdList) - list of grade systems ``Ids`` raise: NullArgument - ``gradebook_ids`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def nsuriLogic(self):
    """Return code assigning variable "ns" to the targetNamespace schema
    in which this item is defined. Used for namespacing local elements."""
    prefix = self.parentClass if self.parentClass else self.getNSAlias()
    return 'ns = %s.%s.schema' % (prefix, self.getClassName())
set a variable "ns" that represents the targetNamespace in which this item is defined. Used for namespacing local elements.
def flip(self, reactions):
    """Toggle the flipped state of each of the specified reactions."""
    for rxn in reactions:
        if rxn in self._flipped:
            self._flipped.discard(rxn)
        else:
            self._flipped.add(rxn)
Flip the specified reactions.
def setup_fields_processors(config, model_cls, schema):
    """Set up model fields' processors.

    :param config: Pyramid Configurator instance.
    :param model_cls: Model class for the fields of which processors
        should be set up.
    :param schema: Dict of model JSON schema.
    """
    properties = schema.get('properties', {})
    for field_name, props in properties.items():
        if not props:
            continue

        processors = props.get('_processors')
        backref_processors = props.get('_backref_processors')

        if processors:
            processors = [resolve_to_callable(val) for val in processors]
            setup_kwargs = {'model': model_cls, 'field': field_name}
            config.add_field_processors(processors, **setup_kwargs)

        if backref_processors:
            db_settings = props.get('_db_settings', {})
            is_relationship = db_settings.get('type') == 'relationship'
            document = db_settings.get('document')
            backref_name = db_settings.get('backref_name')
            # backref processors only make sense on a relationship that
            # names both the target document and the backref field
            if not (is_relationship and document and backref_name):
                continue

            backref_processors = [
                resolve_to_callable(val) for val in backref_processors]
            setup_kwargs = {
                'model': engine.get_document_cls(document),
                'field': backref_name
            }
            config.add_field_processors(
                backref_processors, **setup_kwargs)
Set up model fields' processors. :param config: Pyramid Configurator instance. :param model_cls: Model class for field of which processors should be set up. :param schema: Dict of model JSON schema.
def usernames(self):
    """Return the list of unique usernames for the tweets stored in self.

    Returns None (after logging) if the stored tweets are malformed.
    """
    try:
        return list({tweet.username for tweet in self})
    except Exception:
        # was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the best-effort behavior
        log.error("error -- possibly a problem with tweets stored")
This function returns the list of unique usernames corresponding to the tweets stored in self.
def _ProcessGrepSource(self, source):
    """Find files fulfilling regex conditions.

    Interpolates the source's path patterns against the knowledge base,
    builds a single disjunction of all content regexes, and yields one
    FileFinder request matching files whose contents match.
    """
    attributes = source.base_source.attributes
    paths = artifact_utils.InterpolateListKbAttributes(
        attributes["paths"], self.knowledge_base,
        self.ignore_interpolation_errors)
    # one combined regex: a file matches if any content_regex matches
    regex = utils.RegexListDisjunction(attributes["content_regex_list"])
    condition = rdf_file_finder.FileFinderCondition.ContentsRegexMatch(
        regex=regex, mode="ALL_HITS")
    file_finder_action = rdf_file_finder.FileFinderAction.Stat()
    request = rdf_file_finder.FileFinderArgs(
        paths=paths,
        action=file_finder_action,
        conditions=[condition],
        follow_links=True)
    action = file_finder.FileFinderOSFromClient
    yield action, request
Find files fulfilling regex conditions.
def get_consumers(self, _Consumer, channel):
    """| ConsumerMixin requirement.
    | Get the consumers list.

    :returns: All the consumers.
    :rtype: list.
    """
    consumer = _Consumer(queues=[self.queue(channel)],
                         callbacks=[self.main_callback],
                         prefetch_count=self.prefetch_count)
    return [consumer]
| ConsumerMixin requirement. | Get the consumers list. :returns: All the consumers. :rtype: list.
def list_labels(self, bucket):
    """List labels for the given bucket.

    Expensive: zip entries have no inherent order, so the entire archive
    member list is scanned for entries belonging to the bucket.

    :param bucket: bucket to list labels for.
    :return: iterator over the labels in the specified bucket.
    """
    for member in self.z.namelist():
        container, label = self._nf(member.encode("utf-8"))
        if container != bucket or label == MD_FILE:
            continue
        yield label
List labels for the given bucket. Due to zipfiles inherent arbitrary ordering, this is an expensive operation, as it walks the entire archive searching for individual 'buckets' :param bucket: bucket to list labels for. :return: iterator for the labels in the specified bucket.
def advice_dcv_method(cls, csr, package, altnames, dcv_method, cert_id=None):
    """Display dcv_method information.

    Queries the API for domain-control-validation parameters; for the
    'dns' method, prints the records the user must add to their zone.
    NOTE(review): ``altnames`` is accepted but unused here -- confirm
    whether it should be forwarded in ``params``.
    """
    params = {'csr': csr,
              'package': package,
              'dcv_method': dcv_method}
    if cert_id:
        params['cert_id'] = cert_id
    result = cls.call('cert.get_dcv_params', params)
    if dcv_method == 'dns':
        cls.echo('You have to add these records in your domain zone :')
        cls.echo('\n'.join(result['message']))
Display dcv_method information.
def auth(username, password):
    """Simple Django auth.

    Adds the configured Django project to sys.path, configures Django
    settings, then authenticates the credentials against Django's auth
    backend. Returns True only for an active, authenticated user.
    """
    django_auth_path = __opts__['django_auth_path']
    if django_auth_path not in sys.path:
        sys.path.append(django_auth_path)
    os.environ.setdefault('DJANGO_SETTINGS_MODULE',
                          __opts__['django_auth_settings'])
    __django_auth_setup()

    # stale DB connections must be closed so Django reconnects cleanly
    if not is_connection_usable():
        connection.close()

    import django.contrib.auth
    user = django.contrib.auth.authenticate(username=username,
                                            password=password)
    if user is not None:
        if user.is_active:
            log.debug('Django authentication successful')
            return True
        else:
            log.debug('Django authentication: the password is valid but the account is disabled.')
    else:
        log.debug('Django authentication failed.')
    return False
Simple Django auth
def _handle_tag_scriptlimits(self):
    """Handle the ScriptLimits tag."""
    src = self._src
    limits = _make_object("ScriptLimits")
    # both fields are unsigned 16-bit values read from the source stream
    limits.MaxRecursionDepth = unpack_ui16(src)
    limits.ScriptTimeoutSeconds = unpack_ui16(src)
    return limits
Handle the ScriptLimits tag.
def _make_bridge_request_msg(self, channel, netfn, command):
    """Generate the message for a bridge request (part of an IPMI payload).

    :param channel: target channel (OR-ed with the tracking bit 0x40).
    :param netfn: NOTE(review): accepted but unused here -- confirm
        whether it should appear in the message body.
    :param command: NOTE(review): accepted but unused here as well.
    :return: the assembled message as a bytearray.
    """
    head = bytearray((constants.IPMI_BMC_ADDRESS,
                      constants.netfn_codes['application'] << 2))
    check_sum = _checksum(*head)
    # fixed the `boday` typo; byte content is unchanged
    body = bytearray((0x81, self.seqlun, constants.IPMI_SEND_MESSAGE_CMD,
                      0x40 | channel))
    self._add_request_entry((constants.netfn_codes['application'] + 1,
                             self.seqlun, constants.IPMI_SEND_MESSAGE_CMD))
    return head + bytearray((check_sum,)) + body
This function generate message for bridge request. It is a part of ipmi payload.
def _route(self, attr, args, kwargs, **fkwargs): return self.cluster.hosts.keys()
Perform routing and return db_nums
def find_cached_job(jid):
    """Return the data for a specific cached job id.

    Only works if ``cache_jobs`` has previously been enabled on the
    minion.

    CLI Example:

    .. code-block:: bash

        salt '*' saltutil.find_cached_job <job id>
    """
    serial = salt.payload.Serial(__opts__)
    proc_dir = os.path.join(__opts__['cachedir'], 'minion_jobs')
    job_dir = os.path.join(proc_dir, six.text_type(jid))
    if not os.path.isdir(job_dir):
        # distinguish "caching disabled" from "this job not cached"
        if not __opts__.get('cache_jobs'):
            return ('Local jobs cache directory not found; you may need to'
                    ' enable cache_jobs on this minion')
        else:
            return 'Local jobs cache directory {0} not found'.format(job_dir)
    path = os.path.join(job_dir, 'return.p')
    with salt.utils.files.fopen(path, 'rb') as fp_:
        buf = fp_.read()
    if buf:
        try:
            data = serial.loads(buf)
        except NameError:
            # msgpack error loading the data
            pass
        else:
            if isinstance(data, dict):
                return data
    return None
Return the data for a specific cached job id. Note this only works if cache_jobs has previously been set to True on the minion. CLI Example: .. code-block:: bash salt '*' saltutil.find_cached_job <job id>
def apply_ants_transform(transform, data, data_type="point", reference=None, **kwargs):
    """Apply an ANTsTransform to data (point, vector, or image).

    ANTsR function: `applyAntsrTransform`. Thin wrapper delegating to
    ``transform.apply``; ``reference`` is the target space when
    transforming an image, and extra kwargs are forwarded unchanged.
    """
    apply_fn = transform.apply
    return apply_fn(data, data_type, reference, **kwargs)
Apply ANTsTransform to data ANTsR function: `applyAntsrTransform` Arguments --------- transform : ANTsTransform transform to apply to image data : ndarray/list/tuple data to which transform will be applied data_type : string type of data Options : 'point' 'vector' 'image' reference : ANTsImage target space for transforming image kwargs : kwargs additional options passed to `apply_ants_transform_to_image` Returns ------- ANTsImage if data_type == 'point' OR tuple if data_type == 'point' or data_type == 'vector'
def dirichlet_like(x, theta):
    R"""Dirichlet log-likelihood (multivariate continuous distribution).

    :Parameters:
      x : (n, k-1) array with :math:`0 < x_i < 1`,
          :math:`\sum_{i=1}^{k-1} x_i < 1`
      theta : an (n, k) or (1, k) array > 0

    .. note:: Only the first `k-1` elements of `x` are expected.
    """
    x = np.atleast_2d(x)
    theta = np.atleast_2d(theta)
    if np.shape(x)[-1] + 1 != np.shape(theta)[-1]:
        raise ValueError('The dimension of x in dirichlet_like must be k-1.')
    return flib.dirichlet(x, theta)
R""" Dirichlet log-likelihood. This is a multivariate continuous distribution. .. math:: f(\mathbf{x}) = \frac{\Gamma(\sum_{i=1}^k \theta_i)}{\prod \Gamma(\theta_i)}\prod_{i=1}^{k-1} x_i^{\theta_i - 1} \cdot\left(1-\sum_{i=1}^{k-1}x_i\right)^\theta_k :Parameters: x : (n, k-1) array Array of shape (n, k-1) where `n` is the number of samples and `k` the dimension. :math:`0 < x_i < 1`, :math:`\sum_{i=1}^{k-1} x_i < 1` theta : array An (n,k) or (1,k) array > 0. .. note:: Only the first `k-1` elements of `x` are expected. Can be used as a parent of Multinomial and Categorical nevertheless.
def SetValue(self, row, col, value, refresh=True):
    """Set the value of a cell, merging line breaks.

    Only writes the code when it differs from the current cell content.
    NOTE(review): ``refresh`` is currently unused -- confirm whether it
    is still needed by callers.
    """
    # collapse embedded newlines into a single line
    value = "".join(value.split("\n"))
    key = row, col, self.grid.current_table

    old_code = self.grid.code_array(key)
    if old_code is None:
        old_code = ""

    if value != old_code:
        self.grid.actions.set_code(key, value)
Set the value of a cell, merge line breaks
def dictionary_merge(a, b):
    """Merge dictionary ``b`` into ``a`` and return ``a``.

    Like dict.update, but nested dicts present in both are merged
    recursively instead of being replaced.
    """
    for key, incoming in b.items():
        current = a.get(key)
        if isinstance(current, dict) and isinstance(incoming, dict):
            dictionary_merge(current, incoming)
        else:
            a[key] = incoming
    return a
merges dictionary b into a Like dict.update, but recursive
def add(self, word):
    """Add a word to the dictionary; empty or whitespace-only words are
    ignored."""
    if not word or not word.strip():
        return
    self.words[word] = word
Add a word to the dictionary
def saveVarsInMat(filename, varNamesStr, outOf=None, **opts):
    """Hacky convenience function to dump a couple of python variables
    into a .mat file via Matlab (mlabwrap). See `awmstools.saveVars`.
    """
    from mlabwrap import mlab
    filename, varnames, outOf = __saveVarsHelper(
        filename, varNamesStr, outOf, '.mat', **opts)
    try:
        # copy each variable into the Matlab workspace, then save
        for varname in varnames:
            mlab._set(varname, outOf[varname])
        mlab._do("save('%s','%s')" % (filename, "', '".join(varnames)), nout=0)
    finally:
        # always clean the Matlab workspace, even on failure
        assert varnames
        mlab._do("clear('%s')" % "', '".join(varnames), nout=0)
Hacky convinience function to dump a couple of python variables in a .mat file. See `awmstools.saveVars`.
def set_disk_timeout(timeout, power='ac', scheme=None):
    """Set the disk timeout in minutes for the given power scheme.

    Args:
        timeout (int): minutes before the disk times out.
        power (str): ``ac`` (AC power) or ``dc`` (battery).
        scheme (str): scheme GUID or alias; ``None`` uses the current.

    Returns:
        bool: ``True`` if successful, otherwise ``False``.
    """
    settings = dict(scheme=scheme,
                    sub_group='SUB_DISK',
                    setting_guid='DISKIDLE',
                    power=power,
                    value=timeout)
    return _set_powercfg_value(**settings)
Set the disk timeout in minutes for the given power scheme Args: timeout (int): The amount of time in minutes before the disk will timeout power (str): Set the value for AC or DC power. Default is ``ac``. Valid options are: - ``ac`` (AC Power) - ``dc`` (Battery) scheme (str): The scheme to use, leave as ``None`` to use the current. Default is ``None``. This can be the GUID or the Alias for the Scheme. Known Aliases are: - ``SCHEME_BALANCED`` - Balanced - ``SCHEME_MAX`` - Power saver - ``SCHEME_MIN`` - High performance Returns: bool: ``True`` if successful, otherwise ``False`` CLI Example: .. code-block:: bash # Sets the disk timeout to 30 minutes on battery salt '*' powercfg.set_disk_timeout 30 power=dc
def __setRouterSelectionJitter(self, iRouterJitter):
    """Set the ROUTER_SELECTION_JITTER parameter for a REED to upgrade
    to Router.

    Args:
        iRouterJitter: a random period prior to requesting a Router ID.

    Returns:
        True: successfully set ROUTER_SELECTION_JITTER
        False: failed to set ROUTER_SELECTION_JITTER
    """
    # modernized from Python 2 syntax (print statement, `except E, e`)
    # to match the rest of the codebase
    print('call _setRouterSelectionJitter')
    try:
        cmd = 'routerselectionjitter %s' % str(iRouterJitter)
        print(cmd)
        return self.__sendCommand(cmd) == 'Done'
    except Exception as e:
        ModuleHelper.WriteIntoDebugLogger("setRouterSelectionJitter() Error: " + str(e))
set ROUTER_SELECTION_JITTER parameter for REED to upgrade to Router Args: iRouterJitter: a random period prior to request Router ID for REED Returns: True: successful to set the ROUTER_SELECTION_JITTER False: fail to set ROUTER_SELECTION_JITTER
def GetNumberOfRows(self):
    """Retrieve the number of rows of the table, caching it after the
    first lookup.

    Returns:
        int: number of rows.

    Raises:
        IOError: if the file-like object has not been opened.
        OSError: if the file-like object has not been opened.
    """
    database = self._database_object
    if not database:
        raise IOError('Not opened.')
    if self._number_of_rows is None:
        self._number_of_rows = database.GetNumberOfRows(self._table_name)
    return self._number_of_rows
Retrieves the number of rows of the table. Returns: int: number of rows. Raises: IOError: if the file-like object has not been opened. OSError: if the file-like object has not been opened.
def parse_statements(self, end_tokens, drop_needle=False):
    """Parse multiple statements into a list until one of the end tokens
    is reached.

    Skips an optional leading colon and the block end, then subparses
    until an end token. By default, the matched end token remains the
    active token; with ``drop_needle=True`` it is consumed.
    """
    # an optional colon before the block end is tolerated
    self.stream.skip_if('colon')
    self.stream.expect('block_end')
    result = self.subparse(end_tokens)
    # hitting EOF without an end token means an unclosed block
    if self.stream.current.type == 'eof':
        self.fail_eof(end_tokens)
    if drop_needle:
        next(self.stream)
    return result
Parse multiple statements into a list until one of the end tokens is reached. This is used to parse the body of statements as it also parses template data if appropriate. The parser checks first if the current token is a colon and skips it if there is one. Then it checks for the block end and parses until if one of the `end_tokens` is reached. Per default the active token in the stream at the end of the call is the matched end token. If this is not wanted `drop_needle` can be set to `True` and the end token is removed.
def _translate_str(self, oprnd1, oprnd2, oprnd3):
    """Return a formula representation of a STR instruction.

    The source operand is zero-extended or truncated to the destination
    size as needed, and equated to the destination variable.
    """
    assert oprnd1.size and oprnd3.size

    op1_var = self._translate_src_oprnd(oprnd1)
    op3_var, op3_var_constrs = self._translate_dst_oprnd(oprnd3)

    if oprnd3.size > oprnd1.size:
        # widen: pad the source with zeros
        result = smtfunction.zero_extend(op1_var, op3_var.size)
    elif oprnd3.size < oprnd1.size:
        # narrow: keep the low bits of the source
        result = smtfunction.extract(op1_var, 0, op3_var.size)
    else:
        result = op1_var

    return [op3_var == result] + op3_var_constrs
Return a formula representation of a STR instruction.
def write_error(self, status_code, **kwargs):
    """Log halt_reason in the service log and output an error page."""
    # NOTE(review): `httplib` is Python 2; on Python 3 this module must
    # alias http.client as httplib for this lookup to work -- confirm.
    message = default_message = httplib.responses.get(status_code, '')
    if 'exc_info' in kwargs:
        (_, exc, _) = kwargs['exc_info']
        # prefer the handler-provided log message when one exists
        if hasattr(exc, 'log_message'):
            message = str(exc.log_message) or default_message
    self.logvalue('halt_reason', message)
    title = "{}: {}".format(status_code, default_message)
    body = "{}: {}".format(status_code, message)
    self.finish("<html><title>" + title + "</title>"
                "<body>" + body + "</body></html>")
Log halt_reason in service log and output error page
def issue(self, issuance_spec, metadata, fees):
    """Create a transaction for issuing an asset.

    :param TransferParameters issuance_spec: The parameters of the issuance.
    :param bytes metadata: The metadata to be embedded in the transaction.
    :param int fees: The fees to include in the transaction.
    :return: An unsigned transaction for issuing an asset.
    :rtype: CTransaction
    """
    required = 2 * self._dust_amount + fees
    inputs, total_amount = self._collect_uncolored_outputs(
        issuance_spec.unspent_outputs, required)
    vin = [
        bitcoin.core.CTxIn(item.out_point, item.output.script)
        for item in inputs
    ]
    change = total_amount - self._dust_amount - fees
    vout = [
        self._get_colored_output(issuance_spec.to_script),
        self._get_marker_output([issuance_spec.amount], metadata),
        self._get_uncolored_output(issuance_spec.change_script, change),
    ]
    return bitcoin.core.CTransaction(vin=vin, vout=vout)
Creates a transaction for issuing an asset. :param TransferParameters issuance_spec: The parameters of the issuance. :param bytes metadata: The metadata to be embedded in the transaction. :param int fees: The fees to include in the transaction. :return: An unsigned transaction for issuing an asset. :rtype: CTransaction
def _event_to_pb(event):
    """Convert internal TaskData/WorkerData, as well as celery Task and
    Worker objects, to protobuf messages.

    Args:
        event (Union[TaskData|Task|WorkerData|Worker])

    Returns:
        tuple: the message kind ('task' or 'worker') and the ProtoBuf object
    """
    if isinstance(event, (TaskData, Task)):
        key, klass = 'task', clearly_pb2.TaskMessage
    elif isinstance(event, (WorkerData, Worker)):
        key, klass = 'worker', clearly_pb2.WorkerMessage
    else:
        raise ValueError('unknown event')
    # namedtuple-like events expose _asdict; otherwise fall back to _fields.
    if hasattr(event, '_asdict'):
        raw = event._asdict()
    else:
        raw = {f: getattr(event, f) for f in event._fields}
    allowed = klass.DESCRIPTOR.fields_by_name.keys()
    data = {k: v for k, v in raw.items() if k in allowed}
    return key, klass(**data)
Supports converting internal TaskData and WorkerData, as well as celery Task and Worker to proto buffers messages. Args: event (Union[TaskData|Task|WorkerData|Worker]): Returns: ProtoBuf object
def get_queryset(self):
    """Return only the approved topics or posts."""
    return super().get_queryset().filter(approved=True)
Returns all the approved topics or posts.
def chunks(data, chunk_size):
    """Yield successive ``chunk_size``-sized chunks from ``data``.

    Args:
        data: any sliceable sequence (list, str, bytes, ...).
        chunk_size: maximum length of each chunk; must be positive.

    Yields:
        Consecutive slices of ``data``; the final chunk may be shorter.
    """
    # ``xrange`` exists only on Python 2 and raises NameError on Python 3;
    # ``range`` is the lazy equivalent there.
    for start in range(0, len(data), chunk_size):
        yield data[start:start + chunk_size]
Yield chunk_size chunks from data.
def add_header(self, name, value):
    """Append *value* as an additional response header, keeping duplicates."""
    values = self._headers.setdefault(_hkey(name), [])
    values.append(_hval(value))
Add an additional response header, not removing duplicates.
def _put_attributes_using_post(self, domain_or_name, item_name, attributes, replace=True, expected_value=None):
    """Monkey-patched SDBConnection.put_attributes that uses POST instead of GET.

    The GET version is subject to the URL length limit, which kicks in
    before the 256 x 1024 limit for attribute values; POST avoids that.
    See https://github.com/BD2KGenomics/toil/issues/502
    """
    domain, domain_name = self.get_domain_and_name(domain_or_name)
    params = {
        'DomainName': domain_name,
        'ItemName': item_name,
    }
    self._build_name_value_list(params, attributes, replace)
    if expected_value:
        self._build_expected_value(params, expected_value)
    return self.get_status('PutAttributes', params, verb='POST')
Monkey-patched version of SDBConnection.put_attributes that uses POST instead of GET The GET version is subject to the URL length limit which kicks in before the 256 x 1024 limit for attribute values. Using POST prevents that. https://github.com/BD2KGenomics/toil/issues/502
def request_exception(sender, request, **kwargs):
    """Automated request exception logging (signal receiver)."""
    logger = logging.getLogger(__name__)
    if isinstance(request, WSGIRequest):
        # A bare WSGIRequest carries neither status_code nor reason_phrase.
        logger.log(WARNING, 'WSGIResponse exception occured')
        return
    # NOTE(review): CRITICAL for status codes <= 500 looks inverted (5xx are
    # usually the critical ones) -- confirm intent before changing.
    if request.status_code <= 500:
        level = CRITICAL
    else:
        level = WARNING
    logger.log(level, '%s exception occured (%s)',
               request.status_code, request.reason_phrase)
Automated request exception logging. The function may also receive a WSGIRequest exception, which supplies neither status_code nor reason_phrase.
def do_gh(self, arg):
    """gh - go with exception handled"""
    if self.cmdprefix:
        raise CmdError("prefix not allowed")
    if arg:
        raise CmdError("too many arguments")
    event = self.lastEvent
    if event:
        # Mark the pending exception as handled before resuming.
        event.continueStatus = win32.DBG_EXCEPTION_HANDLED
    return self.do_go(arg)
gh - go with exception handled
def set_params(self, arg_params, aux_params, allow_extra=False):
    """Copy parameters into every executor.

    Parameters
    ----------
    arg_params : dict
        A dictionary of name to `NDArray` parameter mapping.
    aux_params : dict
        A dictionary of name to `NDArray` auxiliary variable mapping.
    allow_extra : boolean, optional
        If True, entries not needed by the executors are silently ignored
        instead of raising an error.
    """
    for executor in self.execs:
        executor.copy_params_from(
            arg_params, aux_params, allow_extra_params=allow_extra)
Assign, i.e. copy parameters to all the executors. Parameters ---------- arg_params : dict A dictionary of name to `NDArray` parameter mapping. aux_params : dict A dictionary of name to `NDArray` auxiliary variable mapping. allow_extra : boolean, optional Whether allow extra parameters that are not needed by symbol. If this is True, no error will be thrown when arg_params or aux_params contain extra parameters that is not needed by the executor.
def from_file(cls, file_path, compressed=False, encoded=False):
    """Create a content object from a file path."""
    base_name = path.basename(file_path)
    # The id is the basename without its final extension.
    file_id = '.'.join(base_name.split('.')[:-1])
    # NOTE: the format is taken from the *full* path's last dot segment,
    # matching the original behavior.
    file_format = file_path.split('.')[-1]
    content = cls(file_id, file_format, compressed, encoded)
    content.file_exists = True
    content._location = path.dirname(file_path)
    return content
Create a content object from a file path.
def write_ply(self, output_file):
    """Export ``PointCloud`` to a PLY file for viewing in MeshLab."""
    vertex_data = np.hstack([self.coordinates, self.colors])
    header = self.ply_header.format(vertex_count=len(self.coordinates))
    with open(output_file, 'w') as handle:
        handle.write(header)
        # x y z as floats, r g b as integers.
        np.savetxt(handle, vertex_data, '%f %f %f %d %d %d')
Export ``PointCloud`` to PLY file for viewing in MeshLab.
def render(self, doc):
    """Render all elements using the specified document.

    @param doc: the writable document to render to.
    @type doc: document.IWritableDocument
    @return: a deferred fired with the document once rendering is done.
    @rtype: defer.Deferred
    """
    deferred = defer.succeed(doc)
    # Chain each element's render so they run sequentially.
    for element in self._elements:
        deferred.addCallback(element.render)
    return deferred
Render all elements using specified document. @param doc: the writable document to render to. @type doc: document.IWritableDocument @return: a deferred fired with the specified document when the rendering is done. @rtype: defer.Deferred
def __cancel_timer(self):
    """Cancel the pending timer and invoke the unbind call immediately."""
    if self.__timer is None:
        return
    self.__timer.cancel()
    self.__unbind_call(True)
    # Clear the timer state so it cannot fire again.
    self.__timer_args = None
    self.__timer = None
Cancels the timer, and calls its target method immediately
def post_change_receiver(self, instance: Model, action: Action, **kwargs):
    """Send messages to groups based on how the instance's observed
    group memberships changed (left, stayed, joined)."""
    try:
        previous = instance.__instance_groups.observers[self]
    except (ValueError, KeyError):
        previous = set()
    # A deleted instance belongs to no groups any more.
    current = set() if action == Action.DELETE else set(self.group_names(instance))
    self.send_messages(instance, previous - current, Action.DELETE, **kwargs)
    self.send_messages(instance, previous & current, Action.UPDATE, **kwargs)
    self.send_messages(instance, current - previous, Action.CREATE, **kwargs)
Triggers the old_binding to possibly send to its group.
def dependencies(self, kwargs=None, expand_only=False):
    """Return all dependencies of this assistant for the given kwargs.

    With ``expand_only=False`` this returns a list of mappings of
    dependency types to actual dependencies (order kept, types may
    repeat), e.g. ``[{'rpm': ['rubygems']}, {'gem': ['mygem']}, ...]``.
    With ``expand_only=True`` it returns a structure usable as a
    "dependencies" section with all "use: foo" commands expanded
    (conditions untouched, variables not substituted).
    """
    if not kwargs:
        kwargs = {}
    self.proper_kwargs('dependencies', kwargs)
    collected = []
    for section in self._get_dependency_sections_to_use(kwargs):
        if expand_only:
            collected.extend(lang.expand_dependencies_section(section, kwargs))
        else:
            collected.extend(lang.dependencies_section(section, kwargs, runner=self))
    return collected
Returns all dependencies of this assistant with regards to specified kwargs. If expand_only == False, this method returns list of mappings of dependency types to actual dependencies (keeps order, types can repeat), e.g. Example: [{'rpm', ['rubygems']}, {'gem', ['mygem']}, {'rpm', ['spam']}, ...] If expand_only == True, this method returns a structure that can be used as "dependencies" section and has all the "use: foo" commands expanded (but conditions are left untouched and variables are not substituted).
def get_output_error(cmd):
    """Run *cmd* and return (exit status, stdout, stderr) as text."""
    if not isinstance(cmd, list):
        cmd = [cmd]
    logging.debug("Running: %s", ' '.join(map(quote, cmd)))
    try:
        proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
    except IOError as e:
        # Could not even start the process.
        return -1, u(''), u('Failed to run %r: %r' % (cmd, e))
    stdout, stderr = proc.communicate()
    stdout = stdout.decode('utf8', 'replace')
    stderr = stderr.decode('utf8', 'replace')
    return proc.returncode, stdout, stderr
Return the exit status, stdout, stderr of a command
def spop(self, name, count=None):
    "Remove and return a random member of set ``name``"
    if count is not None:
        extra = [count]
    else:
        extra = []
    return self.execute_command('SPOP', name, *extra)
Remove and return a random member of set ``name``
def register(self):
    """Transact with server."""
    enqueue = self._queue.put
    enqueue(hello_packet(socket.gethostname(), mac(), __version__))
    # Request everything needed to start streaming.
    for msg in (MSG_SERVER_SETTINGS, MSG_SAMPLE_FORMAT, MSG_HEADER):
        enqueue(request_packet(msg))
Transact with server.
def close(self):
    """Close the AVR device connection and don't try to reconnect."""
    self.log.warning('Closing connection to AVR')
    self._closing = True
    transport = self.protocol.transport
    if transport:
        transport.close()
Close the AVR device connection and don't try to reconnect.
def __insert_action(self, revision):
    """Handle the insert action type: stage the creation of a new
    document in this collection.

    :param dict revision: The revision dictionary
    """
    patch = revision["patch"]
    patch["_id"] = ObjectId(revision.get("master_id"))
    response = yield self.collection.insert(patch)
    # The driver returns the inserted id as a string on success.
    if not isinstance(response, str):
        raise DocumentRevisionInsertFailed()
Handle the insert action type. Creates new document to be created in this collection. This allows you to stage a creation of an object :param dict revision: The revision dictionary
def update_payment_request(self, tid, currency=None, amount=None, action=None, ledger=None, callback_uri=None, display_message_uri=None, capture_id=None, additional_amount=None, text=None, refund_id=None, required_scope=None, required_scope_text=None, line_items=None):
    """Update payment request: reauthorize, capture, release or abort.

    Ledger and callback URIs of a payment request can be updated; capture
    and reauthorization are performed via ``action``.  The call is
    idempotent: repeating the same capture (same amount,
    additional_amount and capture_id) performs only one capture.

    Arguments:
        tid: Transaction id assigned by mCASH.
        currency: 3 chars, ISO 4217.
        amount: The base amount of the payment.
        additional_amount: Typically cash withdrawal or gratuity.
        action: Action to perform.
        ledger: Log entries are added to the open report on this ledger.
        callback_uri: POSTed to when the payment request status changes.
        display_message_uri: POS operator progress messages are POSTed
            here if provided.
        capture_id: Local id for capture; must be set iff amount is set.
        text: For example the reason for a refund.
        refund_id: Needed when doing a partial refund.
        required_scope: Scopes required to fulfill payment.
        required_scope_text: Text shown to the user when asked for
            permission.
        line_items: Updated line items; fails if line_items is already
            set or the sum of totals differs from the original amount.
    """
    arguments = {
        'ledger': ledger,
        'display_message_uri': display_message_uri,
        'callback_uri': callback_uri,
        'currency': currency,
        'amount': amount,
        'additional_amount': additional_amount,
        'capture_id': capture_id,
        'action': action,
        'text': text,
        'refund_id': refund_id,
    }
    if required_scope:
        arguments['required_scope'] = required_scope
        arguments['required_scope_text'] = required_scope_text
    if line_items:
        arguments['line_items'] = line_items
    # Drop everything the caller left unset.
    payload = {key: value for key, value in arguments.items()
               if value is not None}
    url = self.merchant_api_base_url + '/payment_request/' + tid + '/'
    return self.do_req('PUT', url, payload)
Update payment request, reauthorize, capture, release or abort It is possible to update ledger and the callback URIs for a payment request. Changes are always appended to the open report of a ledger, and notifications are sent to the callback registered at the time of notification. Capturing an authorized payment or reauthorizing is done with the action field. The call is idempotent; that is, if one posts the same amount, additional_amount and capture_id twice with action CAPTURE, only one capture is performed. Similarly, if one posts twice with action CAPTURE without any amount stated, to capture the full amount, only one full capture is performed. Arguments: ledger: Log entries will be added to the open report on the specified ledger display_message_uri: Messages that can be used to inform the POS operator about the progress of the payment request will be POSTed to this URI if provided callback_uri: If provided, mCASH will POST to this URI when the status of the payment request changes, using the message mechanism described in the introduction. The data in the "object" part of the message is the same as what can be retrieved by calling GET on the "/payment_request/<tid>/outcome/" resource URI. currency: 3 chars https://en.wikipedia.org/wiki/ISO_4217 amount: The base amount of the payment additional_amount: Typically cash withdrawal or gratuity capture_id: Local id for capture. Must be set if amount is set, otherwise capture_id must be unset. tid: Transaction id assigned by mCASH refund_id: Refund id needed when doing partial refund text: For example reason for refund. action: Action to perform. required_scope: Scopes required to fulfill payment line_items: An updated line_items. Will fail if line_items already set in the payment request or if the sum of the totals is different from the original amount. required_scope_text: Text that is shown to user when asked for permission.
def fromML(vec):
    """Convert a vector from the new mllib-local representation.

    This does NOT copy the data; it copies references.

    :param vec: a :py:class:`pyspark.ml.linalg.Vector`
    :return: a :py:class:`pyspark.mllib.linalg.Vector`

    .. versionadded:: 2.0.0
    """
    if isinstance(vec, newlinalg.DenseVector):
        return DenseVector(vec.array)
    if isinstance(vec, newlinalg.SparseVector):
        return SparseVector(vec.size, vec.indices, vec.values)
    raise TypeError("Unsupported vector type %s" % type(vec))
Convert a vector from the new mllib-local representation. This does NOT copy the data; it copies references. :param vec: a :py:class:`pyspark.ml.linalg.Vector` :return: a :py:class:`pyspark.mllib.linalg.Vector` .. versionadded:: 2.0.0
def AsRegEx(self):
    """Return the current glob as a simple, case-insensitive anchored regex.

    Note: No interpolation is performed.

    Returns:
      A RegularExpression() object.
    """
    pieces = self.__class__.REGEX_SPLIT_PATTERN.split(self._value)
    translated = u"".join(self._ReplaceRegExPart(piece) for piece in pieces)
    return rdf_standard.RegularExpression(u"(?i)\\A%s\\Z" % translated)
Return the current glob as a simple regex. Note: No interpolation is performed. Returns: A RegularExpression() object.
def get_form_kwargs(self):
    """Mirror the initial data into 'data' so the form can be validated
    on a GET request."""
    form_kwargs = super(ApiFormView, self).get_form_kwargs()
    form_kwargs['data'] = form_kwargs.get('initial')
    return form_kwargs
Add the 'data' to the form args so you can validate the form data on a get request.
def is_pk_descriptor(descriptor, include_alt=False):
    """Return true if `descriptor` is a primary key.

    A field counts as a primary key when its ``pk`` attribute is ``True``
    or an int; with *include_alt*, ``alt_pk`` is checked the same way.
    """
    pk = descriptor.pk
    if pk is True or type(pk) is int:
        return True
    if not include_alt:
        return False
    alt = descriptor.alt_pk
    return alt is True or type(alt) is int
Return true if `descriptor` is a primary key.
def create_parameter_group(name, db_parameter_group_family, description, tags=None, region=None, key=None, keyid=None, profile=None):
    """Create an RDS parameter group.

    CLI example to create an RDS parameter group::

        salt myminion boto_rds.create_parameter_group my-param-group mysql5.6 \\
                "group description"
    """
    res = __salt__['boto_rds.parameter_group_exists'](name, tags, region, key,
                                                      keyid, profile)
    if res.get('exists'):
        # Nothing to do; report existing group.
        return {'exists': bool(res)}
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        if not conn:
            return {'results': bool(conn)}
        rds = conn.create_db_parameter_group(
            DBParameterGroupName=name,
            DBParameterGroupFamily=db_parameter_group_family,
            Description=description,
            Tags=_tag_doc(tags))
        if rds:
            return {'exists': bool(rds),
                    'message': 'Created RDS parameter group {0}'.format(name)}
        return {'created': False,
                'message': 'Failed to create RDS parameter group {0}'.format(name)}
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
Create an RDS parameter group CLI example to create an RDS parameter group:: salt myminion boto_rds.create_parameter_group my-param-group mysql5.6 \ "group description"
def set_nest_transactions_with_savepoints(self, nest_transactions_with_savepoints):
    """Set whether nested transactions should use savepoints.

    :param nest_transactions_with_savepoints: `True` or `False`
    """
    # The strategy cannot be changed while a transaction is open.
    if self._transaction_nesting_level > 0:
        raise DBALConnectionError.may_not_alter_nested_transaction_with_savepoints_in_transaction()
    if not self._platform.is_savepoints_supported():
        raise DBALConnectionError.savepoints_not_supported()
    self._nest_transactions_with_savepoints = bool(nest_transactions_with_savepoints)
Sets if nested transactions should use savepoints. :param nest_transactions_with_savepoints: `True` or `False`
def _build_fluent_table(self):
    """Build the (pvariable, size) lookup table for every RDDL fluent."""
    domain = self.domain
    # (ordering, sizes, fluent mapping) for each fluent category, in the
    # same order the original table was populated.
    groups = [
        (domain.non_fluent_ordering, self.non_fluent_size, domain.non_fluents),
        (domain.state_fluent_ordering, self.state_size, domain.state_fluents),
        (domain.action_fluent_ordering, self.action_size, domain.action_fluents),
        (domain.interm_fluent_ordering, self.interm_size, domain.intermediate_fluents),
    ]
    self.fluent_table = collections.OrderedDict()
    for ordering, sizes, fluents in groups:
        for name, size in zip(ordering, sizes):
            self.fluent_table[name] = (fluents[name], size)
Builds the fluent table for each RDDL pvariable.
def connection_lost(self, exc: Optional[Exception]) -> None:
    """7.1.4. The WebSocket Connection is Closed."""
    logger.debug("%s - event = connection_lost(%s)", self.side, exc)
    self.state = State.CLOSED
    logger.debug("%s - state = CLOSED", self.side)
    # If the close handshake never completed, fall back to the abnormal
    # closure defaults.
    if not hasattr(self, "close_code"):
        self.close_code = 1006
    if not hasattr(self, "close_reason"):
        self.close_reason = ""
    logger.debug(
        "%s x code = %d, reason = %s",
        self.side,
        self.close_code,
        self.close_reason or "[no reason]",
    )
    self.abort_keepalive_pings()
    # Unblock any coroutine waiting for the connection to terminate.
    self.connection_lost_waiter.set_result(None)
    super().connection_lost(exc)
7.1.4. The WebSocket Connection is Closed.
def allocate_stack(size=DEFAULT_STACK_SIZE):
    """Allocate some memory that can be used as a stack.

    @return: a ctypes void pointer to the *top* of the stack.
    """
    total = size + GUARD_PAGE_SIZE
    flags = (libc.MAP_PRIVATE | libc.MAP_ANONYMOUS |
             libc.MAP_GROWSDOWN | libc.MAP_STACK)
    base = libc.mmap(None, total, libc.PROT_READ | libc.PROT_WRITE,
                     flags, -1, 0)
    try:
        # Make the lowest page inaccessible so overflows fault immediately.
        libc.mprotect(base, GUARD_PAGE_SIZE, libc.PROT_NONE)
        yield ctypes.c_void_p(base + total)
    finally:
        libc.munmap(base, total)
Allocate some memory that can be used as a stack. @return: a ctypes void pointer to the *top* of the stack.
def predict_from_variants( self, variants, transcript_expression_dict=None, gene_expression_dict=None): variants = apply_variant_expression_filters( variants, transcript_expression_dict=transcript_expression_dict, transcript_expression_threshold=self.min_transcript_expression, gene_expression_dict=gene_expression_dict, gene_expression_threshold=self.min_gene_expression) effects = variants.effects(raise_on_error=self.raise_on_error) return self.predict_from_mutation_effects( effects=effects, transcript_expression_dict=transcript_expression_dict, gene_expression_dict=gene_expression_dict)
Predict epitopes from a Variant collection, filtering options, and optional gene and transcript expression data. Parameters ---------- variants : varcode.VariantCollection transcript_expression_dict : dict Maps from Ensembl transcript IDs to FPKM expression values. gene_expression_dict : dict, optional Maps from Ensembl gene IDs to FPKM expression values. Returns DataFrame with the following columns: - variant - gene - gene_id - transcript_id - transcript_name - effect - effect_type - peptide - peptide_offset - peptide_length - allele - affinity - percentile_rank - prediction_method_name - contains_mutant_residues - mutation_start_in_peptide - mutation_end_in_peptide Optionall will also include the following columns if corresponding expression dictionary inputs are provided: - gene_expression - transcript_expression
def ParseMessage(descriptor, byte_str):
    """Generate a new Message instance from this Descriptor and a byte string.

    Args:
      descriptor: Protobuf Descriptor object
      byte_str: Serialized protocol buffer byte string

    Returns:
      Newly created protobuf Message object.
    """
    message = MakeClass(descriptor)()
    message.ParseFromString(byte_str)
    return message
Generate a new Message instance from this Descriptor and a byte string. Args: descriptor: Protobuf Descriptor object byte_str: Serialized protocol buffer byte string Returns: Newly created protobuf Message object.
def get(self, block=True, timeout=None):
    """Pop a node id off the inner priority queue and return its node.

    Blocks by default; the lock is held only while marking the node in
    progress.  See `queue.PriorityQueue` for `get()` blocking/timeout
    behavior and exceptions.

    :param bool block: If True, block until the inner queue has data.
    :param Optional[float] timeout: Seconds to wait for data, if set.
    :return ParsedNode: The node as present in the manifest.
    """
    _priority, node_id = self.inner.get(block=block, timeout=timeout)
    with self.lock:
        self._mark_in_progress(node_id)
    return self.get_node(node_id)
Get a node off the inner priority queue. By default, this blocks. This takes the lock, but only for part of it. :param bool block: If True, block until the inner queue has data :param Optional[float] timeout: If set, block for timeout seconds waiting for data. :return ParsedNode: The node as present in the manifest. See `queue.PriorityQueue` for more information on `get()` behavior and exceptions.
def find_all(self, prefix):
    """Return the child nodes whose prefixes overlap *prefix*.

    Raises NotAuthoritativeError when this node's prefix does not fully
    contain the requested prefix.
    """
    prefix = ip_network(prefix)
    authoritative = (
        self.prefix.overlaps(prefix)
        and self.prefix[0] <= prefix[0]
        and self.prefix[-1] >= prefix[-1]
    )
    if not authoritative:
        raise NotAuthoritativeError('This node is not authoritative for %r' % prefix)
    return {child for child in self.children if prefix.overlaps(child.prefix)}
Find everything in the given prefix