code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def GetQueryValuesFromDict(cls, d, version=sorted(_SERVICE_MAP.keys())[-1]):
    """Convert a dict of python values into a list of PQL {key, value} pairs.

    Args:
      d: dict mapping variable names to python values.
      version: str identifying the Ad Manager API version; defaults to the
        latest version in _SERVICE_MAP (evaluated once, at definition time).

    Returns:
      A list of dicts with 'key' and 'value' entries, each value converted
      to its PQL representation for the given API version.
    """
    # d.items() works on both Python 2 and 3; the original d.iteritems()
    # raises AttributeError under Python 3.
    return [{'key': key, 'value': cls.GetValueRepresentation(value, version)}
            for key, value in d.items()]
Converts a dict of python types into a list of PQL types. Args: d: A dictionary of variable names to python types. version: A string identifying the Ad Manager version the values object is compatible with. This defaults to what is currently the latest version. This will be updated in future releases to point to what is then the latest version. Returns: A list of variables formatted for PQL statements which are compatible with a particular API version.
def _selectedLinesRange(self): (startLine, startCol), (endLine, endCol) = self._qpart.selectedPosition start = min(startLine, endLine) end = max(startLine, endLine) return start, end
Selected lines range for line manipulation methods
def getBitmap(self):
    """Capture the on-screen pixels of this region.

    Delegates to PlatformManager.getBitmapFromRect with this region's
    x/y origin and w/h extent, returning whatever that call returns
    (presumably a numpy image array — confirm with PlatformManager).
    """
    return PlatformManager.getBitmapFromRect(self.x, self.y, self.w, self.h)
Captures screen area of this region, at least the part that is on the screen Returns image as numpy array
def _typedef_code(t, base=0, refs=None, kind=_kind_static, heap=False):
    """Add a new typedef for a code-only type *t* and return it.

    Builds a _Typedef whose base size comes from _basicsize(t), registers
    it for *t* via .save(), and returns the new _Typedef instance.
    """
    v = _Typedef(base=_basicsize(t, base=base), refs=refs,
                 both=False, kind=kind, type=t)
    v.save(t, base=base, heap=heap)
    return v
Add new typedef for code only.
def get_tags(self, name):
    """Return the values of all stored tags whose name equals *name*.

    :param str name: tag name to look up.
    :rtype: list[str]
    """
    return [tag[1] for tag in self._tags if tag[0] == name]
Return the values of all tags matching the given name. :param str name: The name of the tag. :rtype: list[str]
def set_goterm(self, go2obj):
    """Set self.goterm from the go2obj lookup and copy its name/depth/NS.

    go2obj: mapping from GO id -> GOTerm object.  If self.GO is not a
    key of the mapping, nothing is changed.
    """
    if self.GO in go2obj:
        goterm = go2obj[self.GO]
        self.goterm = goterm
        self.name = goterm.name
        self.depth = goterm.depth
        # namespace2NS maps the term's namespace string to its short NS code.
        self.NS = self.namespace2NS[self.goterm.namespace]
Set goterm and copy GOTerm's name and namespace.
def fdfilter(data, *filt, **kwargs):
    """Filter a frequency-domain data object by a transfer function.

    Parses *filt* into an LTI filter, evaluates its magnitude response on
    the data's frequency grid, and multiplies the data by that response.

    Keyword args: inplace (bool, modify *data* in place), analog (bool),
    sample_rate (float, defaults to twice the frequency span of the data).
    """
    # pop recognised kwargs so any leftover can be reported as unexpected
    inplace = kwargs.pop('inplace', False)
    analog = kwargs.pop('analog', False)
    fs = kwargs.pop('sample_rate', None)
    if kwargs:
        raise TypeError("filter() got an unexpected keyword argument '%s'"
                        % list(kwargs.keys())[0])
    if fs is None:
        # default sample rate: twice the highest frequency covered by the data
        fs = 2 * (data.shape[-1] * data.df).to('Hz').value
    form, filt = parse_filter(filt, analog=analog, sample_rate=fs)
    lti = signal.lti(*filt)
    freqs = data.frequencies.value.copy()
    # magnitude of the frequency response; NaNs (e.g. at poles) become 0
    fresp = numpy.nan_to_num(abs(lti.freqresp(w=freqs)[1]))
    if inplace:
        data *= fresp
        return data
    new = data * fresp
    return new
Filter a frequency-domain data object See Also -------- gwpy.frequencyseries.FrequencySeries.filter gwpy.spectrogram.Spectrogram.filter
def callback(self):
    """Invoke the configured callback with this object, if one is set."""
    func = self._callback_func
    if func and callable(func):
        func(self)
Run callback.
def build_param_schema(schema, param_type):
    """Build a jsonschema 'object' schema for an endpoint's params of one type.

    Extracts the swagger parameters of *param_type* (e.g. 'query',
    'header') and maps each parameter name to its spec.  Returns None
    when the endpoint has no parameters of that type.
    """
    properties = filter_params_by_type(schema, param_type)
    if not properties:
        return
    return {
        'type': 'object',
        'properties': dict((p['name'], p) for p in properties),
        # only headers may carry extra entries (User-Agent, etc.)
        'additionalProperties': param_type == 'header',
    }
Turn a swagger endpoint schema into an equivalent one to validate our request. As an example, this would take this swagger schema: { "paramType": "query", "name": "query", "description": "Location to query", "type": "string", "required": true } To this jsonschema: { "type": "object", "additionalProperties": "False", "properties:": { "description": "Location to query", "type": "string", "required": true } } Which we can then validate against a JSON object we construct from the pyramid request.
def find_tie_breaker(self, candidate_ids):
    """Find a round where the given candidates all had distinct vote counts.

    Walks the count history backwards and returns the first (most recent)
    candidate_aggregates in which no two of *candidate_ids* share a vote
    count; returns None (implicitly) if every round has a coincidence.
    """
    for candidate_aggregates in reversed(self.round_candidate_aggregates):
        candidates_on_vote = defaultdict(int)
        for candidate_id in candidate_ids:
            votes = candidate_aggregates.get_vote_count(candidate_id)
            candidates_on_vote[votes] += 1
        # a maximum multiplicity of 1 means every vote total was unique
        if max(candidates_on_vote.values()) == 1:
            return candidate_aggregates
finds a round in the count history in which the candidate_ids each had different vote counts if no such round exists, returns None
def clear_cache(self):
    """Clear the raw packet cache for this packet and all nested sub-packets.

    Recurses into any Packet-valued field, then clears the payload chain.
    """
    self.raw_packet_cache = None
    for _, fval in six.iteritems(self.fields):
        if isinstance(fval, Packet):
            fval.clear_cache()
    self.payload.clear_cache()
Clear the raw packet cache for the field and all its subfields
def predict(self, text):
    """Predict tags for *text*.

    Args:
        text: string, the input text.

    Returns:
        list of predicted tags, one per word.
    """
    probabilities = self.predict_proba(text)
    return self._get_tags(probabilities)
Predict using the model. Args: text: string, the input text. Returns: tags: list, shape = (num_words,) Returns predicted values.
def quick_send(self, send, echo=None, loglevel=logging.INFO):
    """Quick-and-dirty send that ignores background tasks (internal use).

    Sends the command with exit-code checking and command recording
    disabled; if the send reports failure, waits on the default expect.
    """
    shutit = self.shutit
    shutit.log('Quick send: ' + send, level=loglevel)
    res = self.sendline(ShutItSendSpec(self,
                                       send=send,
                                       check_exit=False,
                                       echo=echo,
                                       fail_on_empty_before=False,
                                       record_command=False,
                                       ignore_background=True))
    if not res:
        self.expect(self.default_expect)
Quick and dirty send that ignores background tasks. Intended for internal use.
def requiredIdr(self, idr: Identifier=None, alias: str=None):
    """Resolve the signer identifier to use.

    Uses *idr* if given (stripping any 'prefix:' qualifier), otherwise
    looks *alias* up in aliasesToIds, otherwise falls back to defaultId.

    :raises EmptyIdentifier: if no identifier could be resolved.
    """
    if idr:
        if ':' in idr:
            # qualified form "scheme:identifier" -> keep the identifier part
            idr = idr.split(':')[1]
    else:
        idr = self.aliasesToIds[alias] if alias else self.defaultId
    if not idr:
        raise EmptyIdentifier
    return idr
Checks whether signer identifier specified, or can it be inferred from alias or can be default used instead :param idr: :param alias: :param other: :return: signer identifier
def load_newsgroups():
    """20 News Groups Dataset.

    The data of this dataset is a 1d numpy array vector containing the
    texts from 11314 newsgroups posts, and the target is a 1d numpy
    integer array containing the label of one of the 20 topics that they
    are about.
    """
    # NOTE: the docstring above is passed through as the Dataset
    # description via __doc__ below — keep it meaningful.
    dataset = datasets.fetch_20newsgroups()
    return Dataset(load_newsgroups.__doc__, np.array(dataset.data),
                   dataset.target, accuracy_score, stratify=True)
20 News Groups Dataset. The data of this dataset is a 1d numpy array vector containing the texts from 11314 newsgroups posts, and the target is a 1d numpy integer array containing the label of one of the 20 topics that they are about.
def get_market_tops(symbols=None, **kwargs):
    """Deprecated: MOVED to iexfinance.iexdata.get_tops.

    Emits a deprecation warning, then delegates to TOPS(...).fetch().
    """
    import warnings
    warnings.warn(WNG_MSG % ("get_market_tops", "iexdata.get_tops"))
    return TOPS(symbols, **kwargs).fetch()
MOVED to iexfinance.iexdata.get_tops
def opens(self, date="", page=1, page_size=1000, order_field="date",
          order_direction="asc"):
    """Retrieve the (paged) opens for this campaign.

    :param date: date filter passed to the API ('' = no filter).
    :param page: page number; :param page_size: entries per page.
    :param order_field: server-side sort field.
    :param order_direction: 'asc' or 'desc'.
    """
    params = {
        "date": date,
        "page": page,
        "pagesize": page_size,
        "orderfield": order_field,
        "orderdirection": order_direction}
    response = self._get(self.uri_for("opens"), params=params)
    return json_to_py(response)
Retrieves the opens for this campaign.
def fix_insert_size(in_bam, config):
    """Rewrite the RG 'PI' header field of a Tophat BAM to be the insert size.

    Tophat stores the inner distance in PI, but the SAM spec expects the
    full insert size, i.e. inner distance + 2 * read length.  Returns the
    path to the fixed BAM (or the input path when no PI tag is present).
    """
    fixed_file = os.path.splitext(in_bam)[0] + ".pi_fixed.bam"
    if file_exists(fixed_file):
        # already fixed on a previous run
        return fixed_file
    header_file = os.path.splitext(in_bam)[0] + ".header.sam"
    read_length = bam.estimate_read_length(in_bam)
    # NOTE(review): this handle is only used to copy the header and is
    # never closed — confirm whether that is intentional.
    bam_handle= bam.open_samfile(in_bam)
    header = bam_handle.header.copy()
    rg_dict = header['RG'][0]
    if 'PI' not in rg_dict:
        return in_bam
    PI = int(rg_dict.get('PI'))
    PI = PI + 2*read_length
    rg_dict['PI'] = PI
    header['RG'][0] = rg_dict
    # rewrite all records under the corrected header, then atomically move
    with pysam.Samfile(header_file, "wb", header=header) as out_handle:
        with bam.open_samfile(in_bam) as in_handle:
            for record in in_handle:
                out_handle.write(record)
    shutil.move(header_file, fixed_file)
    return fixed_file
Tophat sets PI in the RG to be the inner distance size, but the SAM spec states should be the insert size. This fixes the RG in the alignment file generated by Tophat header to match the spec
def call_closers(self, client, clients_list):
    """Invoke every registered closer callback with (client, clients_list)."""
    for closer in self.closers:
        closer(client, clients_list)
Calls closers callbacks
def _extract_command_with_args(cmd): def _isint(value): try: int(value) return True except ValueError: return False equal_sign = cmd.find('=') if equal_sign == -1: return cmd, [] command = cmd[0:equal_sign] args = cmd[equal_sign+1:].split(',') converted = [x if not _isint(x) else int(x) for x in args] return command, converted
Parse input command with arguments. Parses the input command in such a way that the user may provide additional argument to the command. The format used is this: command=arg1,arg2,arg3,... all the additional arguments are passed as arguments to the target method.
def _calcCTRBUF(self):
    """Generate one block of CTR keystream and reset the stream position.

    Encrypts the counter (packed as a native-order unsigned 64-bit int),
    then advances the counter and rewinds ctr_pos to the block start.
    """
    self.ctr_cks = self.encrypt(struct.pack("Q", self.ctr_iv))
    self.ctr_iv += 1
    self.ctr_pos = 0
Calculates one block of CTR keystream
def translate(
    self,
    values,
    target_language=None,
    format_=None,
    source_language=None,
    customization_ids=(),
    model=None,
):
    """Translate a string or list of strings via the Translate API.

    A single string in yields a single result dict; a list yields a list
    of dicts.  Each result dict carries 'input' (the original value) plus
    the API's fields (e.g. translatedText, detectedSourceLanguage, model).

    :raises ValueError: if the API returns a different number of
        translations than values submitted.
    """
    single_value = False
    if isinstance(values, six.string_types):
        # normalise to a list; remember to unwrap the result at the end
        single_value = True
        values = [values]
    if target_language is None:
        target_language = self.target_language
    if isinstance(customization_ids, six.string_types):
        customization_ids = [customization_ids]
    data = {
        "target": target_language,
        "q": values,
        "cid": customization_ids,
        "format": format_,
        "source": source_language,
        "model": model,
    }
    response = self._connection.api_request(method="POST", path="", data=data)
    translations = response.get("data", {}).get("translations", ())
    if len(values) != len(translations):
        raise ValueError(
            "Expected iterations to have same length", values, translations
        )
    # attach the original input to each translation result
    for value, translation in six.moves.zip(values, translations):
        translation["input"] = value
    if single_value:
        return translations[0]
    else:
        return translations
Translate a string or list of strings. See https://cloud.google.com/translate/docs/translating-text :type values: str or list :param values: String or list of strings to translate. :type target_language: str :param target_language: The language to translate results into. This is required by the API and defaults to the target language of the current instance. :type format_: str :param format_: (Optional) One of ``text`` or ``html``, to specify if the input text is plain text or HTML. :type source_language: str :param source_language: (Optional) The language of the text to be translated. :type customization_ids: str or list :param customization_ids: (Optional) ID or list of customization IDs for translation. Sets the ``cid`` parameter in the query. :type model: str :param model: (Optional) The model used to translate the text, such as ``'base'`` or ``'nmt'``. :rtype: str or list :returns: A list of dictionaries for each queried value. Each dictionary typically contains three keys (though not all will be present in all cases) * ``detectedSourceLanguage``: The detected language (as an ISO 639-1 language code) of the text. * ``translatedText``: The translation of the text into the target language. * ``input``: The corresponding input value. * ``model``: The model used to translate the text. If only a single value is passed, then only a single dictionary will be returned. :raises: :class:`~exceptions.ValueError` if the number of values and translations differ.
def true_num_genes(model, custom_spont_id=None):
    """Count the genes in a model, excluding spontaneously labeled genes.

    Args:
        model (Model): model whose genes are counted.
        custom_spont_id (str): optional custom spontaneous-gene ID if it
            does not match the default pattern.

    Returns:
        int: number of non-spontaneous genes.
    """
    return sum(1 for gene in model.genes
               if not is_spontaneous(gene, custom_id=custom_spont_id))
Return the number of genes in a model ignoring spontaneously labeled genes. Args: model (Model): custom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001`` Returns: int: Number of genes excluding spontaneous genes
def load_data(self):
    """Fill the list widget with tab titles, most recent (stack top) first.

    Iterates stack_history in reverse, stripping accelerator '&' markers
    from each tab's text before adding an item with a text-file icon.
    """
    for index in reversed(self.stack_history):
        text = self.tabs.tabText(index)
        text = text.replace('&', '')  # drop keyboard-accelerator markers
        item = QListWidgetItem(ima.icon('TextFileIcon'), text)
        self.addItem(item)
Fill ListWidget with the tabs texts. Add elements in inverse order of stack_history.
def status_delete(self, id):
    """Delete a status via the API.

    *id* may be a bare status id or (presumably) a status dict that
    __unpack_id normalises — confirm against __unpack_id.
    """
    id = self.__unpack_id(id)
    url = '/api/v1/statuses/{0}'.format(str(id))
    self.__api_request('DELETE', url)
Delete a status
def children(self, **kwargs):
    """Retrieve the children of this Part.

    Without kwargs, the unfiltered children are fetched once and cached
    (returned as a list).  With kwargs, a fresh filtered API query is
    made on every call and the cache is bypassed.

    :param kwargs: additional search arguments for Client.parts
    :raises APIError: when the underlying parts() call fails.
    """
    if not kwargs:
        # cache the unfiltered children to avoid repeated API calls
        if not self._cached_children:
            self._cached_children = list(self._client.parts(parent=self.id, category=self.category))
        return self._cached_children
    else:
        return self._client.parts(parent=self.id, category=self.category, **kwargs)
Retrieve the children of this `Part` as `Partset`. When you call the :func:`Part.children()` method without any additional filtering options for the children, the children are cached to help speed up subsequent calls to retrieve the children. The cached children are returned as a list and not as a `Partset`. When you *do provide* additional keyword arguments (kwargs) that act as a specific children filter, the cached children are _not_ used and a separate API call is made to retrieve only those children. :param kwargs: Additional search arguments to search for, check :class:`pykechain.Client.parts` for additional info :type kwargs: dict :return: a set of `Parts` as a :class:`PartSet`. Will be empty if no children. Will be a `List` if the children are retrieved from the cached children. :raises APIError: When an error occurs. Example ------- A normal call, which caches all children of the bike. If you call `bike.children` twice only 1 API call is made. >>> bike = project.part('Bike') >>> direct_descendants_of_bike = bike.children() An example with providing additional part search parameters 'name__icontains'. Children are retrieved from the API, not the bike's internal (already cached in previous example) cache. >>> bike = project.part('Bike') >>> wheel_children_of_bike = bike.children(name__icontains='wheel')
def scale_v2(vec, amount):
    """Return a new Vec2 with vec's x and y each multiplied by *amount*."""
    return Vec2(vec.x * amount, vec.y * amount)
Return a new Vec2 with x and y from vec and multiplied by amount.
def spectrum_to_xyz100(spectrum, observer):
    """Compute tristimulus values XYZ (scaled by 100) for spectrum/observer.

    Integrates spectrum(lambda) * observer_i(lambda) over the union of the
    two wavelength grids using trapezoidal quadrature weights.  Both
    arguments are (wavelengths, data) pairs with wavelengths in meters;
    the combined grid must cover at least [361e-9, 829e-9].
    """
    lambda_o, data_o = observer
    lambda_s, data_s = spectrum
    # merge both wavelength grids into one sorted, duplicate-free grid
    grid = numpy.sort(numpy.unique(numpy.concatenate([lambda_o, lambda_s])))
    assert grid[0] < 361e-9
    assert grid[-1] > 829e-9
    # resample the observer (one row per tristimulus channel) and spectrum
    obs_interp = numpy.array([numpy.interp(grid, lambda_o, row)
                              for row in data_o])
    spec_interp = numpy.interp(grid, lambda_s, data_s)
    # trapezoidal quadrature weights for the (possibly non-uniform) grid
    weights = numpy.zeros(len(grid))
    gaps = grid[1:] - grid[:-1]
    weights[1:] += gaps
    weights[:-1] += gaps
    weights /= 2
    return numpy.dot(obs_interp, spec_interp * weights) * 100
Computes the tristimulus values XYZ from a given spectrum for a given observer via X_i = int_lambda spectrum_i(lambda) * observer_i(lambda) dlambda. In section 7, the technical report CIE Standard Illuminants for Colorimetry, 1999, gives a recommendation on how to perform the computation.
def infix(tokens, operator_table):
    """Match an infix operator at the head of *tokens*.

    Returns a TokenMatch (with no value yet) when the operator table's
    infix matcher recognises the tokens; otherwise returns None implicitly.
    """
    operator, matched_tokens = operator_table.infix.match(tokens)
    if operator:
        return TokenMatch(operator, None, matched_tokens)
Match an infix of an operator.
def repr(tick, pack=False):
    """Return a date string for an epoch-millis timestamp.

    Args:
        tick (int): milliseconds since the Unix epoch; the sentinel
            0x7fffffffffffffff renders as '?'.
        pack (bool): if True, return compact 'YYYYMMDDhhmmssmmm' instead
            of 'YYYY/MM/DD hh:mm:ss.mmm'.

    Returns:
        (str): formatted date-time string.
    """
    # NOTE: this intentionally shadows the builtin repr() within its module.
    if tick == 0x7fffffffffffffff:
        return '?'
    dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(milliseconds=tick)
    # Integer division: '%.3d' needs an integral value; the original '/'
    # produces a float on Python 3 and loses the zero-padded formatting intent.
    millis = dt.microsecond // 1000
    if pack:
        return '%d%.2d%.2d%.2d%.2d%.2d%.3d' % (dt.year, dt.month, dt.day,
                                               dt.hour, dt.minute, dt.second,
                                               millis)
    return '%d/%.2d/%.2d %.2d:%.2d:%.2d.%.3d' % (dt.year, dt.month, dt.day,
                                                 dt.hour, dt.minute, dt.second,
                                                 millis)
Return a date string for an epoch-millis timestamp. Args: tick (int): The timestamp in milliseconds since the epoch. Returns: (str): A date time string
def _check_rename_constraints(self, old_key, new_key):
    """Validate a cache rename; return whether the rename may proceed.

    :param _ReferenceKey old_key: key to rename from.
    :param _ReferenceKey new_key: key to rename to.
    :return bool: True if old_key exists; False (with a debug log) when it
        is absent, assumed to be a temporary relation.
    :raises: via dbt.exceptions when new_key is already cached.
    """
    if new_key in self.relations:
        # renaming onto an existing relation means the cache is inconsistent
        dbt.exceptions.raise_cache_inconsistent(
            'in rename, new key {} already in cache: {}'
            .format(new_key, list(self.relations.keys()))
        )
    if old_key not in self.relations:
        logger.debug(
            'old key {} not found in self.relations, assuming temporary'
            .format(old_key)
        )
        return False
    return True
Check the rename constraints, and return whether or not the rename can proceed. If the new key is already present, that is an error. If the old key is absent, we debug log and return False, assuming it's a temp table being renamed. :param _ReferenceKey old_key: The existing key, to rename from. :param _ReferenceKey new_key: The new key, to rename to. :return bool: If the old relation exists for renaming. :raises InternalError: If the new key is already present.
def addCallback(cls, eventType, func, record=None, once=False):
    """Register *func* to run when events of *eventType* are triggered.

    :param eventType: <str>
    :param func: <callable>
    :param record: optional record associated with the callback
    :param once: when True the callback is stored as one-shot
    """
    cls.callbacks().setdefault(eventType, []).append((func, record, once))
Adds a callback method to the class. When an event of the given type is triggered, any registered callback will be executed. :param eventType: <str> :param func: <callable>
def delete_grade_entry(self, grade_entry_id):
    """Delete the GradeEntry identified by the given Id.

    arg: grade_entry_id (osid.id.Id): Id of the GradeEntry to delete.
    raise: errors.InvalidArgument - argument is not a valid OSID Id.
    Other failures (not found, permission) surface from the collection.
    *compliance: mandatory*
    """
    collection = JSONClientValidated('grading',
                                     collection='GradeEntry',
                                     runtime=self._runtime)
    if not isinstance(grade_entry_id, ABCId):
        raise errors.InvalidArgument('the argument is not a valid OSID Id')
    grade_entry_map = collection.find_one(
        dict({'_id': ObjectId(grade_entry_id.get_identifier())},
             **self._view_filter()))
    # construct the object and run its _delete() hook before removing the doc
    objects.GradeEntry(osid_object_map=grade_entry_map,
                       runtime=self._runtime,
                       proxy=self._proxy)._delete()
    collection.delete_one({'_id': ObjectId(grade_entry_id.get_identifier())})
Deletes the ``GradeEntry`` identified by the given ``Id``. arg: grade_entry_id (osid.id.Id): the ``Id`` of the ``GradeEntry`` to delete raise: NotFound - a ``GradeEntry`` was not found identified by the given ``Id`` raise: NullArgument - ``grade_entry_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def addDelay(self, urlPattern="", delay=0, httpMethod=None):
    """Deprecated wrapper around delays(); registers a single delay entry."""
    print("addDelay is deprecated please use delays instead")
    entry = {"urlPattern": urlPattern, "delay": delay}
    if httpMethod:
        entry["httpMethod"] = httpMethod
    return self.delays(delays={"data": [entry]})
Adds delays.
def query_versions_pypi(self, package_name):
    """Fetch the available versions of *package_name* from PyPI.

    Refreshes the cached package list when the name is unknown, matches
    case-insensitively, and returns (canonical_name, versions) where
    canonical_name is PyPI's spelling; versions is empty if not found.
    """
    if not package_name in self.pkg_list:
        self.logger.debug("Package %s not in cache, querying PyPI..."
                          % package_name)
        self.fetch_pkg_list()
    versions = []
    for pypi_pkg in self.pkg_list:
        if pypi_pkg.lower() == package_name.lower():
            if self.debug:
                self.logger.debug("DEBUG: %s" % package_name)
            versions = self.package_releases(pypi_pkg)
            package_name = pypi_pkg  # adopt PyPI's canonical casing
            break
    return (package_name, versions)
Fetch list of available versions for a package from The CheeseShop
def _get_freq(freqfunc, m1, m2, s1z, s2z):
    """Return the frequency (Hz) from LALSimulation for masses and spins.

    freqfunc is a lalsimulation FrequencyFunction value; m1/m2 are in
    solar masses (converted to kg here); s1z/s2z are the dimensionless
    aligned spin components.  The zeroed arguments are the in-plane spin
    components.
    """
    m1kg = float(m1) * lal.MSUN_SI
    m2kg = float(m2) * lal.MSUN_SI
    return lalsimulation.SimInspiralGetFrequency(
        m1kg, m2kg, 0, 0, float(s1z), 0, 0, float(s2z), int(freqfunc))
Wrapper of the LALSimulation function returning the frequency for a given frequency function and template parameters. Parameters ---------- freqfunc : lalsimulation FrequencyFunction wrapped object e.g. lalsimulation.fEOBNRv2RD m1 : float-ish, i.e. castable to float First component mass in solar masses m2 : float-ish Second component mass in solar masses s1z : float-ish First component dimensionless spin S_1/m_1^2 projected onto L s2z : float-ish Second component dimensionless spin S_2/m_2^2 projected onto L Returns ------- f : float Frequency in Hz
def build_docs(directory):
    """Build Sphinx HTML docs by running `make html` in *directory*.

    Blocks until make finishes; the exit status is not checked.
    """
    # NOTE(review): the chdir looks redundant given cwd= below — confirm no
    # caller relies on the working-directory side effect before removing it.
    os.chdir(directory)
    process = subprocess.Popen(["make", "html"], cwd=directory)
    process.communicate()
Builds sphinx docs from a given directory.
def imap_unordered(self, jobs, timeout=0.5):
    """Iterate over *jobs*, yielding results as soon as they are available.

    Feeds the (bounded) input queue while draining finished results every
    *timeout* seconds, so memory use stays controlled; terminates once the
    input is exhausted and all items have drained.

    NOTE(review): jobs_iter.next() and Queue.Full are Python-2 spellings
    (next(jobs_iter) / queue.Full on Python 3) — confirm target version.
    """
    timeout = max(timeout, 0.5)  # enforce a minimum poll interval
    jobs_iter = iter(jobs)
    out_jobs = 0
    job = None
    while True:
        # pull the next job only once the previous one has been enqueued
        if not self.closed and job is None:
            try:
                job = jobs_iter.next()
            except StopIteration:
                job = None
                self.close()
        if job is not None:
            try:
                self.put(job, True, timeout)
            except Queue.Full:
                # input queue full — retry the same job next cycle
                pass
            else:
                job = None
        for result in self.get_finished():
            yield result
        if self.closed and self._items == 0:
            break
        sleep(timeout)
A iterator over a set of jobs. :param jobs: the items to pass through our function :param timeout: timeout between polling queues Results are yielded as soon as they are available in the output queue (up to the discretisation provided by timeout). Since the queues can be specified to have a maximum length, the consumption of both the input jobs iterable and memory use in the output queues are controlled.
def add_predicate(self, pred_obj):
    """Add a predicate object to the layer, keyed by its id.

    @type pred_obj: L{Cpredicate}
    @param pred_obj: the predicate object
    """
    pred_id = pred_obj.get_id()
    if pred_id in self.idx:
        # duplicate ids are rejected, matching the original behaviour
        print('Error: trying to add new element, but id has already been given')
        return
    pred_node = pred_obj.get_node()
    self.node.append(pred_node)
    self.idx[pred_id] = pred_node
Adds a predicate object to the layer @type pred_obj: L{Cpredicate} @param pred_obj: the predicate object
def fuzzy_index_match(possiblities, label, **kwargs):
    """Find the closest matching column label, key, or integer-indexed value.

    str label  -> fuzzy string match via fuzzy_get;
    int label  -> positional index into the possibilities;
    list label -> fuzzy match each element, returning a list.

    NOTE(review): `basestring` is Python-2 only — confirm target version.
    """
    possibilities = list(possiblities)
    if isinstance(label, basestring):
        return fuzzy_get(possibilities, label, **kwargs)
    if isinstance(label, int):
        return possibilities[label]
    if isinstance(label, list):
        return [fuzzy_get(possibilities, lbl) for lbl in label]
Find the closest matching column label, key, or integer indexed value Returns: type(label): sequence of immutable objects corresponding to best matches to each object in label if label is an int returns the object (value) in the list of possibilities at that index if label is a str returns the closest str match in possibilities >>> from collections import OrderedDict as odict >>> fuzzy_index_match(pd.DataFrame(pd.np.random.randn(9,4), columns=list('ABCD'), index=range(9)), 'b') 'B' >>> fuzzy_index_match(odict(zip('12345','ABCDE')), 'r2d2') '2' >>> fuzzy_index_match(odict(zip('12345','ABCDE')), 1) '2' >>> fuzzy_index_match(odict(zip('12345','ABCDE')), -1) '5' >>> fuzzy_index_match(odict(zip(range(4),'FOUR')), -4) 0
def call_pre_hook(awsclient, cloudformation):
    """Invoke the cloudformation module's pre_hook BEFORE the config is read.

    :param awsclient: unused here; kept for interface symmetry.
    :param cloudformation: module that may define a zero-argument pre_hook().
    """
    if not hasattr(cloudformation, 'pre_hook'):
        return
    hook_func = getattr(cloudformation, 'pre_hook')
    # __code__ works on Python 2.6+ and 3; func_code is Python-2 only.
    if not hook_func.__code__.co_argcount:
        hook_func()
    else:
        # pre_hook runs before the config exists, so it cannot take arguments.
        # (Also fixes the 'pre_hock' typo in the original message.)
        log.error('pre_hook can not have any arguments. The pre_hook it is ' +
                  'executed BEFORE config is read')
Invoke the pre_hook BEFORE the config is read. :param awsclient: :param cloudformation:
def get_targets(self, config):
    """Return the set of outgoing target URLs, filtered by *config*.

    Keeps only links whose rel attributes pass the config's whitelist and
    blacklist and whose domain differs from this page's own.
    """
    return {urllib.parse.urljoin(self.url, attrs['href'])
            for attrs in self._targets
            if self._check_rel(attrs, config.rel_whitelist, config.rel_blacklist)
            and self._domain_differs(attrs['href'])}
Return all of this page's outgoing link targets, filtered by the given config's rel whitelist/blacklist and restricted to external domains.
def center_of_mass(self):
    """Calculate the center of mass of the slab in fractional coordinates.

    Site fractional coordinates are averaged, weighted by species weight.
    """
    weights = [s.species.weight for s in self]
    center_of_mass = np.average(self.frac_coords,
                                weights=weights, axis=0)
    return center_of_mass
Calculates the center of mass of the slab
def get_argument_parser():
    """Return the argument parser for the FASTA-by-chromosome filter script.

    NOTE(review): every textwrap.dedent() call below is missing its
    help-string argument (apparently stripped) and will raise TypeError
    when executed — the original help texts need to be restored.
    """
    desc = 'Filter FASTA file by chromosome names.'
    parser = cli.get_argument_parser(desc=desc)
    parser.add_argument(
        '-f', '--fasta-file', default='-', type=str,
        help=textwrap.dedent(
        ))
    parser.add_argument(
        '-s', '--species', type=str,
        choices=sorted(ensembl.SPECIES_CHROMPAT.keys()),
        default='human',
        help=textwrap.dedent(
        )
    )
    parser.add_argument(
        '-c', '--chromosome-pattern', type=str, required=False,
        default=None,
        help=textwrap.dedent(
        )
    )
    parser.add_argument(
        '-o', '--output-file', type=str, required=True,
        help=textwrap.dedent(
        ))
    parser = cli.add_reporting_args(parser)
    return parser
Returns an argument parser object for the script.
def get_web_auth_session_key(self, url, token=""):
    """Return just the session key of a web-auth process, dropping the username."""
    return self.get_web_auth_session_key_username(url, token)[0]
Retrieves the session key of a web authorization process by its URL.
def _to_json_default(obj): if isinstance(obj, datetime.datetime): return obj.isoformat() if isinstance(obj, uuid.UUID): return str(obj) if hasattr(obj, 'item'): return obj.item() try: return obj.id except Exception: raise TypeError('{obj} is not JSON serializable'.format(obj=repr(obj)))
Helper to convert non default objects to json. Usage: simplejson.dumps(data, default=_to_json_default)
async def _send_sysex(self, sysex_command, sysex_data=None):
    """Send a sysex command (with optional data bytes) to Firmata.

    Builds START_SYSEX + command + data + END_SYSEX and writes the
    message out one character at a time.

    :param sysex_command: sysex command
    :param sysex_data: data for command
    :returns: No return value.
    """
    payload = sysex_data or []
    message = [chr(PrivateConstants.START_SYSEX), chr(sysex_command)]
    message.extend(chr(d) for d in payload)
    message.append(chr(PrivateConstants.END_SYSEX))
    for ch in ''.join(message):
        await self.write(ch)
This is a private utility method. This method sends a sysex command to Firmata. :param sysex_command: sysex command :param sysex_data: data for command :returns : No return value.
def _extract_lookup(self, key): parts = key.split('__') op = 'exact' if len(parts) == 1 else parts[1] return parts[0], self.get_lookup(op)
Extract lookup method based on key name format
def S(Document, *fields):
    """Generate a MongoDB sort-order list from Django-ORM-style field names.

    A leading '-' means DESCENDING, '+' (or nothing) ASCENDING; '__' acts
    as a path separator unless the name starts with '__'.  Fields may also
    be explicit (name, direction) tuples, which pass through untouched.
    Names resolvable on Document are translated via traverse(); unknown
    names are kept as-is.
    """
    result = []
    for field in fields:
        if isinstance(field, tuple):
            # explicit (field, direction) pair — pass through unchanged
            field, direction = field
            result.append((field, direction))
            continue
        direction = ASCENDING
        if not field.startswith('__'):
            field = field.replace('__', '.')
        if field[0] == '-':
            direction = DESCENDING
        if field[0] in ('+', '-'):
            field = field[1:]
        _field = traverse(Document, field, default=None)
        result.append(((~_field) if _field else field, direction))
    return result
Generate a MongoDB sort order list using the Django ORM style.
def write(self, pkt):
    """Write a Packet, an iterable of Packets, or raw bytes to the pcap file.

    Raw bytes become a single record; an iterable yields one record per
    packet.  The file header is emitted before the first record.

    :param pkt: iterable[Packet], Packet or bytes
    """
    if isinstance(pkt, bytes):
        if not self.header_present:
            self._write_header(pkt)
        self._write_packet(pkt)
        return
    for packet in pkt.__iter__():
        if not self.header_present:
            self._write_header(packet)
        self._write_packet(packet)
Writes a Packet or bytes to a pcap file. :param pkt: Packet(s) to write (one record for each Packet), or raw bytes to write (as one record). :type pkt: iterable[Packet], Packet or bytes
def _send_batch(self):
    """Send the waiting messages, unless a batch send is already in flight.

    Resolves each request's partition asynchronously first, then sends;
    the waiting counters are reset immediately so new messages can start
    accumulating while this batch is in progress.
    """
    # nothing waiting, or a previous batch send hasn't completed yet
    if (not self._batch_reqs) or self._batch_send_d:
        return
    requests, self._batch_reqs = self._batch_reqs, []
    self._waitingByteCount = 0
    self._waitingMsgCount = 0
    d_list = []
    for req in requests:
        # kick off partition resolution for every request
        d_list.append(self._next_partition(req.topic, req.key))
    d = self._batch_send_d = Deferred()
    d.addCallback(lambda r: DeferredList(d_list, consumeErrors=True))
    d.addCallback(self._send_requests, requests)
    # always clean up, then check whether another batch became ready
    d.addBoth(self._complete_batch_send)
    d.addBoth(self._check_send_batch)
    d.callback(None)
Send the waiting messages, if there are any, and we can... This is called by our LoopingCall every send_every_t interval, and from send_messages everytime we have enough messages to send. This is also called from py:method:`send_messages` via py:method:`_check_send_batch` if there are enough messages/bytes to require a send. Note, the send will be delayed (triggered by completion or failure of previous) if we are currently trying to complete the last batch send.
def register_user_type(self, keyspace, user_type, klass):
    """Register *klass* to represent the UDT keyspace.user_type.

    Query parameters and result sets for the UDT will use instances of
    klass, whose constructor must accept the UDT's fields as kwargs.
    Only effective with native protocol v3+; on older protocols a warning
    is logged and named tuples are returned when reading.
    """
    if self.protocol_version < 3:
        log.warning("User Type serialization is only supported in native protocol version 3+ (%d in use). "
                    "CQL encoding for simple statements will still work, but named tuples will "
                    "be returned when reading type %s.%s.",
                    self.protocol_version, keyspace, user_type)
    self._user_types[keyspace][user_type] = klass
    # propagate to live sessions and evict any cached namedtuple class
    for session in tuple(self.sessions):
        session.user_type_registered(keyspace, user_type, klass)
    UserType.evict_udt_class(keyspace, user_type)
Registers a class to use to represent a particular user-defined type. Query parameters for this user-defined type will be assumed to be instances of `klass`. Result sets for this user-defined type will be instances of `klass`. If no class is registered for a user-defined type, a namedtuple will be used for result sets, and non-prepared statements may not encode parameters for this type correctly. `keyspace` is the name of the keyspace that the UDT is defined in. `user_type` is the string name of the UDT to register the mapping for. `klass` should be a class with attributes whose names match the fields of the user-defined type. The constructor must accepts kwargs for each of the fields in the UDT. This method should only be called after the type has been created within Cassandra. Example:: cluster = Cluster(protocol_version=3) session = cluster.connect() session.set_keyspace('mykeyspace') session.execute("CREATE TYPE address (street text, zipcode int)") session.execute("CREATE TABLE users (id int PRIMARY KEY, location address)") # create a class to map to the "address" UDT class Address(object): def __init__(self, street, zipcode): self.street = street self.zipcode = zipcode cluster.register_user_type('mykeyspace', 'address', Address) # insert a row using an instance of Address session.execute("INSERT INTO users (id, location) VALUES (%s, %s)", (0, Address("123 Main St.", 78723))) # results will include Address instances results = session.execute("SELECT * FROM users") row = results[0] print row.id, row.location.street, row.location.zipcode
def transform(self, Y):
    r"""Compute all pairwise distances between `self.X_fit_` and `Y`.

    Parameters
    ----------
    Y : array-like, shape = (n_samples_y, n_features)

    Returns
    -------
    kernel : ndarray, shape = (n_samples_y, n_samples_X_fit\_)
        Kernel matrix. Values are normalized to lie within [0, 1].
    """
    check_is_fitted(self, 'X_fit_')
    n_samples_x, n_features = self.X_fit_.shape
    Y = numpy.asarray(Y)
    if Y.shape[1] != n_features:
        raise ValueError('expected array with %d features, but got %d'
                         % (n_features, Y.shape[1]))
    n_samples_y = Y.shape[0]
    mat = numpy.zeros((n_samples_y, n_samples_x), dtype=float)
    # numeric columns: continuous/ordinal kernel using the stored value ranges
    continuous_ordinal_kernel_with_ranges(
        Y[:, self._numeric_columns].astype(numpy.float64),
        self.X_fit_[:, self._numeric_columns].astype(numpy.float64),
        self._numeric_ranges, mat)
    if len(self._nominal_columns) > 0:
        _nominal_kernel(Y[:, self._nominal_columns],
                        self.X_fit_[:, self._nominal_columns],
                        mat)
    mat /= n_features  # normalize entries to [0, 1]
    return mat
r"""Compute all pairwise distances between `self.X_fit_` and `Y`. Parameters ---------- y : array-like, shape = (n_samples_y, n_features) Returns ------- kernel : ndarray, shape = (n_samples_y, n_samples_X_fit\_) Kernel matrix. Values are normalized to lie within [0, 1].
def _compute_nearest_weights_edge(idcs, ndist, variant): lo = (ndist < 0) hi = (ndist > 1) if variant == 'left': w_lo = np.where(ndist <= 0.5, 1.0, 0.0) else: w_lo = np.where(ndist < 0.5, 1.0, 0.0) w_lo[lo] = 0 w_lo[hi] = 1 if variant == 'left': w_hi = np.where(ndist <= 0.5, 0.0, 1.0) else: w_hi = np.where(ndist < 0.5, 0.0, 1.0) w_hi[lo] = 1 w_hi[hi] = 0 edge = [idcs, idcs + 1] edge[0][hi] = -1 edge[1][lo] = 0 return w_lo, w_hi, edge
Helper for nearest interpolation mimicing the linear case.
def setOverlayFlag(self, ulOverlayHandle, eOverlayFlag, bEnabled):
    """Set the given flag on an overlay via the OpenVR function table."""
    return self.function_table.setOverlayFlag(ulOverlayHandle, eOverlayFlag, bEnabled)
Specify flag setting for a given overlay
def valid_backbone_bond_lengths(self, atol=0.1):
    """True if all backbone bond lengths are within *atol* Å of ideal values.

    Checks the per-residue N-CA, CA-C and C-O bonds and the inter-residue
    C-N bond (hence len(self) - 1 of those) against
    ideal_backbone_bond_lengths.

    Parameters
    ----------
    atol : float, optional
        Absolute tolerance, in Angstroms, for deviation from ideal lengths.
    """
    bond_lengths = self.backbone_bond_lengths
    a1 = numpy.allclose(bond_lengths['n_ca'],
                        [ideal_backbone_bond_lengths['n_ca']] * len(self),
                        atol=atol)
    a2 = numpy.allclose(bond_lengths['ca_c'],
                        [ideal_backbone_bond_lengths['ca_c']] * len(self),
                        atol=atol)
    a3 = numpy.allclose(bond_lengths['c_o'],
                        [ideal_backbone_bond_lengths['c_o']] * len(self),
                        atol=atol)
    a4 = numpy.allclose(bond_lengths['c_n'],
                        [ideal_backbone_bond_lengths['c_n']] * (len(self) - 1),
                        atol=atol)
    return all([a1, a2, a3, a4])
True if all backbone bonds are within atol Angstroms of the expected distance. Notes ----- Ideal bond lengths taken from [1]. References ---------- .. [1] Schulz, G. E, and R. Heiner Schirmer. Principles Of Protein Structure. New York: Springer-Verlag, 1979. Parameters ---------- atol : float, optional Tolerance value in Angstoms for the absolute deviation away from ideal backbone bond lengths.
def postprocess_periodical(marc_xml, mods, uuid, counter, url):
    """Basic postprocessing of periodical MODS records.

    Args:
        marc_xml (str): original Aleph record (unused in this step).
        mods (str): XML produced by the XSLT template.
        uuid (str): package UUID; skipped when falsy.
        counter (int): record number, added to the XML headers.
        url (str): publication URL (unused in this step).

    Returns:
        str: updated, prettified XML.
    """
    dom = double_linked_dom(mods)
    add_missing_xml_attributes(dom, counter)
    if uuid:
        add_uuid(dom, uuid)
    return dom.prettify()
Some basic postprocessing of the periodical publications. Args: marc_xml (str): Original Aleph record. mods (str): XML string generated by XSLT template. uuid (str): UUID of the package. counter (int): Number of record, is added to XML headers. url (str): URL of the publication (public or not). Returns: str: Updated XML.
def _get_nsymop(self): if self.centrosymmetric: return 2 * len(self._rotations) * len(self._subtrans) else: return len(self._rotations) * len(self._subtrans)
Returns total number of symmetry operations.
def find_path(self, test_function=None, on_targets=False):
    """Yield paths (lists of names) through the referential domain's cells
    for which test_function(name, structure) is True.

    With on_targets=True, each singleton referent is searched instead and
    yielded paths are prefixed with 'target'.
    """
    assert self.has_referential_domain(), "need context set"
    if not test_function:
        # default predicate: match every part
        test_function = lambda x, y: True

    def find_path_inner(part, prefix):
        # recursively yield matching paths underneath `part`
        name, structure = part
        if test_function(name, structure):
            yield prefix + [name]
        if isinstance(structure, DictCell):
            for sub_structure in structure:
                for prefix2 in find_path_inner(sub_structure,
                                               prefix[:] + [name]):
                    yield prefix2

    prefix = []
    if on_targets:
        results = []
        for _, instance in self.iter_singleton_referents():
            for part in instance:
                for entry in find_path_inner(part, prefix[:]):
                    results.append(['target'] + entry)
            # NOTE(review): results are drained (in reverse collection
            # order) and the loop breaks after the first referent —
            # confirm that only one referent is intended here.
            while results:
                yield results.pop()
            break
    else:
        for part in self:
            for entry in find_path_inner(part, prefix[:]):
                yield entry
General helper method that iterates breadth-first over the referential_domain's cells and returns a path where the test_function is True
def tags():
    """Get a set of tags for the current git repo."""
    # NOTE(review): `git tag` output ends with a newline, so the split
    # yields a trailing '' entry that lands in the returned set — confirm
    # callers expect that.
    result = [t.decode('ascii') for t in subprocess.check_output([
        'git', 'tag'
    ]).split(b"\n")]
    assert len(set(result)) == len(result)  # tag names are expected unique
    return set(result)
Get a set of tags for the current git repo.
def closeTab(self, item):
    """Request a close for the given tab item.

    :param item | <XViewPanelItem>
    """
    tab_index = self.indexOf(item)
    if tab_index == -1:
        return
    self.tabCloseRequested.emit(tab_index)
Requests a close for the inputed tab item. :param item | <XViewPanelItem>
def create(self, verbose=None):
    """POST a mailing-list creation request and return the response.

    :param verbose: when truthy, report the attempt on stdout.
    :raises EmailNotEnabledError: if email is disabled in settings.
    """
    if not self.email_enabled:
        raise EmailNotEnabledError("See settings.EMAIL_ENABLED")
    payload = {
        "address": self.address,
        "name": self.name,
        "description": self.display_name,
    }
    response = requests.post(self.api_url, auth=("api", self.api_key), data=payload)
    if verbose:
        sys.stdout.write(
            f"Creating mailing list {self.address}. "
            f"Got response={response.status_code}.\n"
        )
    return response
Returns a response after attempting to create the list.
def refreshLabels(self):
    """Refresh the items label with the proper title and count information."""
    count = self.itemCount()
    title = self.itemsTitle()
    if count:
        text = ' %s per page, %i %s total' % (title, count, title)
    else:
        text = ' %s per page' % title
    self._itemsLabel.setText(text)
Refreshes the labels to display the proper title and count information.
def parent(self):
    """Return the parent of the name.

    @rtype: dns.name.Name object
    @raises NoParent: the name is either the root name or the empty
        name, and thus has no parent.
    """
    if self == root or self == empty:
        raise NoParent
    # Drop the leftmost label to obtain the parent name.
    return Name(self.labels[1:])
Return the parent of the name. @rtype: dns.name.Name object @raises NoParent: the name is either the root name or the empty name, and thus has no parent.
def sync(self, expectedThreads=0):
    """Block until at most *expectedThreads* async threads remain unfinished."""
    while True:
        if len(self.unfinishedThreads) <= expectedThreads:
            break
        time.sleep(.3)
        self.checkForFinishedThreads()
Wait for all but expectedThreads async threads to finish.
def delete(self, force=False):
    """Delete a record, removing its RecordsBuckets rows when forced.

    :param force: True to also remove the
        :class:`~invenio_records_files.models.RecordsBuckets` object.
    :returns: Deleted record.
    """
    if force:
        query = RecordsBuckets.query.filter_by(
            record=self.model, bucket=self.files.bucket)
        query.delete()
    return super(Record, self).delete(force)
Delete a record and also remove the RecordsBuckets if necessary. :param force: True to remove also the :class:`~invenio_records_files.models.RecordsBuckets` object. :returns: Deleted record.
def start(self):
    """Determine what data file and suffix to contribute to and start coverage."""
    # A slave is "collocated" when it runs on the same host and from the
    # same top-level directory as the coverage master.
    self.is_collocated = bool(socket.gethostname() == self.config.slaveinput['cov_master_host'] and
                              self.topdir == self.config.slaveinput['cov_master_topdir'])

    # When not collocated, rewrite master-relative paths into this
    # slave's directory layout so coverage finds the right files.
    if not self.is_collocated:
        master_topdir = self.config.slaveinput['cov_master_topdir']
        slave_topdir = self.topdir
        self.cov_source = [source.replace(master_topdir, slave_topdir)
                           for source in self.cov_source]
        self.cov_data_file = self.cov_data_file.replace(master_topdir, slave_topdir)
        self.cov_config = self.cov_config.replace(master_topdir, slave_topdir)

    # Each node gets its own data-file suffix so slaves don't collide.
    self.cov_data_file += '.%s' % self.nodeid

    self.cov = coverage.coverage(source=self.cov_source,
                                 data_file=self.cov_data_file,
                                 config_file=self.cov_config)
    # erase() drops stale data from a previous run before starting.
    self.cov.erase()
    self.cov.start()
    self.set_env()
Determine what data file and suffix to contribute to and start coverage.
def iter_features(self, stanza=None):
    """Yield the :xep:`30` features of this node, static ones first.

    :param stanza: the IQ request stanza (may be :data:`None`); it is
        accepted for interface compatibility but not consulted here.
    :rtype: iterable of :class:`str`
    """
    for feature in self.STATIC_FEATURES:
        yield feature
    for feature in self._features:
        yield feature
Return an iterator which yields the features of the node. :param stanza: The IQ request stanza :type stanza: :class:`~aioxmpp.IQ` :rtype: iterable of :class:`str` :return: :xep:`30` features of this node `stanza` is the :class:`aioxmpp.IQ` stanza of the request. This can be used to filter the list according to who is asking (not recommended). `stanza` may be :data:`None` if the features are queried without a specific request context. In that case, implementors should assume that the result is visible to everybody. .. note:: Subclasses must allow :data:`None` for `stanza` and default it to :data:`None`. The features are returned as strings. The features demanded by :xep:`30` are always returned.
def clean_new_password2(self):
    """Validate the new password when set: both entries must match and
    pass the configured password validators."""
    password1 = self.cleaned_data.get('new_password1')
    password2 = self.cleaned_data.get('new_password2')
    if not (password1 or password2):
        # Neither field was filled in; nothing to validate.
        return password2
    if password1 != password2:
        raise forms.ValidationError(
            self.error_messages['password_mismatch'],
            code='password_mismatch',
        )
    password_validation.validate_password(password2, self.instance)
    return password2
Validate password when set
def disable_constant(parameterized):
    """Context manager: temporarily set every parameter of *parameterized*
    to ``constant=False``, restoring the original flags on exit.

    :param parameterized: a Parameterized object exposing ``params()``.
    """
    params = parameterized.params().values()
    # Remember each parameter's original flag so it can be restored.
    constants = [p.constant for p in params]
    for p in params:
        p.constant = False
    try:
        yield
    finally:
        # The pointless bare `except: raise` was removed; try/finally
        # alone guarantees restoration on both success and error.
        for (p, const) in zip(params, constants):
            p.constant = const
Temporarily set parameters on Parameterized object to constant=False.
def zrange(key, start, stop, host=None, port=None, db=None, password=None):
    """Get a range of values from a sorted set in Redis by index

    CLI Example:

    .. code-block:: bash

        salt '*' redis.zrange foo_sorted 0 10
    """
    conn = _connect(host, port, db, password)
    return conn.zrange(key, start, stop)
Get a range of values from a sorted set in Redis by index CLI Example: .. code-block:: bash salt '*' redis.zrange foo_sorted 0 10
def cached(cache, key=keys.hashkey, lock=None):
    """Decorator wrapping a function with a memoizing callable that
    saves results in *cache*.

    :param cache: mapping used as the cache, or None to disable caching.
    :param key: callable building the cache key from the call arguments.
    :param lock: optional context manager guarding cache access.
    """
    def decorator(func):
        if cache is None:
            # Caching disabled: plain pass-through wrapper.
            def wrapper(*args, **kwargs):
                return func(*args, **kwargs)
            return _update_wrapper(wrapper, func)

        if lock is None:
            def wrapper(*args, **kwargs):
                k = key(*args, **kwargs)
                try:
                    return cache[k]
                except KeyError:
                    pass  # key not present in cache
                v = func(*args, **kwargs)
                try:
                    cache[k] = v
                except ValueError:
                    pass  # value too large to store
                return v
            return _update_wrapper(wrapper, func)

        def wrapper(*args, **kwargs):
            k = key(*args, **kwargs)
            try:
                with lock:
                    return cache[k]
            except KeyError:
                pass  # key not present in cache
            v = func(*args, **kwargs)
            try:
                with lock:
                    cache[k] = v
            except ValueError:
                pass  # value too large to store
            return v
        return _update_wrapper(wrapper, func)

    return decorator
Decorator to wrap a function with a memoizing callable that saves results in a cache.
def validate_wavetable(self):
    """Enforce monotonic wavelength array with no zero or negative values.

    Raises
    ------
    pysynphot.exceptions.ZeroWavelength
        Wavelength array has zero or negative value(s).
    pysynphot.exceptions.UnsortedWavelength
        Wavelength array is not monotonic ascending or descending.
    pysynphot.exceptions.DuplicateWavelength
        Wavelength array contains duplicate entries.
    """
    wave = self._wavetable

    # No zero or negative wavelengths allowed.
    if N.any(wave <= 0):
        wrong = N.where(wave <= 0)[0]
        raise exceptions.ZeroWavelength(
            'Negative or Zero wavelength occurs in wavelength array',
            rows=wrong)

    # Must be monotonic, ascending or descending.
    # (renamed local from `sorted`, which shadowed the builtin;
    # N.alltrue was removed in NumPy 2.0 — use N.all instead)
    ascending = N.sort(wave)
    if not N.all(ascending == wave):
        if not N.all(ascending[::-1] == wave):
            wrong = N.where(ascending != wave)[0]
            raise exceptions.UnsortedWavelength(
                'Wavelength array is not monotonic',
                rows=wrong)

    # No duplicate values allowed.
    dw = ascending[1:] - ascending[:-1]
    if N.any(dw == 0):
        wrong = N.where(dw == 0)[0]
        raise exceptions.DuplicateWavelength(
            "Wavelength array contains duplicate entries",
            rows=wrong)
Enforce monotonic, ascending wavelength array with no zero or negative values. Raises ------ pysynphot.exceptions.DuplicateWavelength Wavelength array contains duplicate entries. pysynphot.exceptions.UnsortedWavelength Wavelength array is not monotonic ascending or descending. pysynphot.exceptions.ZeroWavelength Wavelength array has zero or negative value(s).
def str_cast(maybe_bytes, encoding='utf-8'):
    """Convert bytes-like input to a string; anything else passes through.

    Parameters
    ----------
    maybe_bytes :
        decoded with *encoding* when bytes-like; returned unchanged otherwise
    encoding : str, default='utf-8'
        encoding used when decoding bytes
    """
    if not isinstance(maybe_bytes, bytes_):
        return maybe_bytes
    return maybe_bytes.decode(encoding)
Converts any bytes-like input to a string-like output, with respect to python version Parameters ---------- maybe_bytes : if this is a bytes-like object, it will be converted to a string encoding : str, default='utf-8' encoding to be used when decoding bytes
def split_sequence_as_iterable(self, values):
    """Group sequence into iterables, consuming the sorter lazily.

    Parameters
    ----------
    values : sequence indexable by the sorter's indices
        values to be grouped

    Yields
    ------
    generator over the items of each group, in group order

    Notes
    -----
    Preferred when *values* has random access but should not be pulled
    fully into memory (e.g. a large memory-mapped file).
    """
    # (removed stray debug `print(self.count)` left in the original)
    s = iter(self.index.sorter)
    for c in self.count:
        # islice consumes exactly `c` indices from the shared iterator.
        yield (values[i] for i in itertools.islice(s, int(c)))
Group sequence into iterables Parameters ---------- values : iterable of length equal to keys iterable of values to be grouped Yields ------ iterable of items in values Notes ----- This is the preferred method if values has random access, but we don't want it completely in memory — like a big memory-mapped file, for instance.
def attr(**context):
    """Decorator that adds attributes to a function.

    Added attributes can be accessed outside via the function's
    ``__dict__`` (``func_dict``) property.

    The original body was broken: it used a Python-2 print statement,
    never set any attribute, and returned ``wraps(func)(decorator)``
    where ``func`` was undefined in that scope.
    """
    def decorator(func):
        for key, value in context.items():
            setattr(func, key, value)
        return func
    return decorator
Decorator that adds attributes to a function. Added attributes can be accessed outside via the function's `func_dict` property.
def get_response_headers(self, *args, **kwargs):
    """Return the unpacked headers sent by the S3 server, or None when
    no response headers have been recorded.

    Used by the head_object API call for reading an S3 object's metadata.
    """
    if not self.response_headers:
        return None
    return self._unpack_headers(self.response_headers)
A convenience method for obtaining the headers that were sent from the S3 server. The AWS S3 API depends upon setting headers. This method is used by the head_object API call for getting a S3 object's metadata.
def lookup_string(conn, kstr):
    """Find the keycode for the string representation of a keysym.

    Tries the name as given; for multi-character names, also tries the
    capitalized form.

    :param kstr: English representation of a keysym.
    :return: Keycode, if one exists, else None.
    :rtype: int
    """
    candidates = [kstr]
    if len(kstr) > 1:
        candidates.append(kstr.capitalize())
    for name in candidates:
        if name in keysyms:
            return get_keycode(conn, keysyms[name])
    return None
Finds the keycode associated with a string representation of a keysym. :param kstr: English representation of a keysym. :return: Keycode, if one exists. :rtype: int
def all(cls):
    """List all queues in MongoDB via aggregation, with their queued-job
    counts. Might be slow."""
    # Start every configured queue at zero so empty queues still appear.
    counts = {name: 0 for name in Queue.get_queues_config()}
    pipeline = [
        {"$match": {"status": "queued"}},
        {"$group": {"_id": "$queue", "jobs": {"$sum": 1}}}
    ]
    for row in context.connections.mongodb_jobs.mrq_jobs.aggregate(pipeline):
        counts[row["_id"]] = row["jobs"]
    return counts
List all queues in MongoDB via aggregation, with their queued jobs counts. Might be slow.
def import_private_key_from_file(filename, passphrase=None):
    """Read a private Elliptic Curve key from a PEM file.

    :param filename: The name of the file
    :param passphrase: A pass phrase to use to unpack the PEM file.
    :return: A cryptography EllipticCurvePrivateKey instance
    """
    with open(filename, "rb") as key_file:
        pem_data = key_file.read()
    return serialization.load_pem_private_key(
        pem_data, password=passphrase, backend=default_backend())
Read a private Elliptic Curve key from a PEM file. :param filename: The name of the file :param passphrase: A pass phrase to use to unpack the PEM file. :return: A cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey instance
def _str_desc(self, reader): data_version = reader.data_version if data_version is not None: data_version = data_version.replace("releases/", "") desc = "{OBO}: fmt({FMT}) rel({REL}) {N:,} GO Terms".format( OBO=reader.obo_file, FMT=reader.format_version, REL=data_version, N=len(self)) if reader.optobj: desc = "{D}; optional_attrs({A})".format(D=desc, A=" ".join(sorted(reader.optobj.optional_attrs))) return desc
String containing information about the current GO DAG.
def repay_funding(self, amount, currency):
    """Repay funding. Repays the older funding records first.

    Args:
        amount (int): Amount of currency to repay
        currency (str): The currency, example USD

    Returns:
        Not specified by cbpro.
    """
    body = json.dumps({'amount': amount, 'currency': currency})
    return self._send_message('post', '/funding/repay', data=body)
Repay funding. Repays the older funding records first. Args: amount (int): Amount of currency to repay currency (str): The currency, example USD Returns: Not specified by cbpro.
def _serialize_v1(self, macaroon):
    """Serialize the macaroon in JSON format v1.

    @param macaroon the macaroon to serialize.
    @return JSON macaroon.
    """
    out = {
        'identifier': utils.convert_to_string(macaroon.identifier),
        'signature': macaroon.signature,
    }
    if macaroon.location:
        out['location'] = macaroon.location
    if macaroon.caveats:
        out['caveats'] = [_caveat_v1_to_dict(cav) for cav in macaroon.caveats]
    return json.dumps(out)
Serialize the macaroon in JSON format v1. @param macaroon the macaroon to serialize. @return JSON macaroon.
def ssh_check_mic(self, mic_token, session_id, username=None):
    """Verify the MIC token for a SSH2 message.

    :param str mic_token: The MIC token received from the client
    :param str session_id: The SSH session ID
    :param str username: The name of the user who attempts to login
    :return: None if the MIC check was successful
    :raises: the underlying GSS/SSPI error if the MIC check failed
    """
    self._session_id = session_id
    self._username = username
    if username is None:
        # No user context yet: verify directly against the session id.
        self._gss_ctxt.verify(self._session_id, mic_token)
    else:
        mic_field = self._ssh_build_mic(
            self._session_id,
            self._username,
            self._service,
            self._auth_method,
        )
        self._gss_srv_ctxt.verify(mic_field, mic_token)
Verify the MIC token for a SSH2 message. :param str mic_token: The MIC token received from the client :param str session_id: The SSH session ID :param str username: The name of the user who attempts to login :return: None if the MIC check was successful :raises: ``sspi.error`` -- if the MIC check failed
def bootstrap(array, n_boot=1000):
    """Bootstrap resampling of an array to estimate estimator variance.

    For each of *n_boot* iterations, rows of *array* are resampled with
    replacement and the regression statistic (third right-singular
    vector of the SVD) is recomputed.

    The original body recursed into itself unconditionally and returned
    undefined names (``yhat``, ``yhat_boots``); this implements the
    resampling described by its docstring.

    :param array: 2-D data array (rows are observations).
    :param n_boot: number of bootstrap resamples (new, defaulted param).
    :return: tuple ``(estimate, boot_estimates)`` where ``estimate`` is
        the statistic on the full data and ``boot_estimates`` stacks the
        per-resample statistics.
    """
    reg_func = lambda a: N.linalg.svd(a, full_matrices=False)[2][2]
    yhat = reg_func(array)
    n_rows = array.shape[0]
    yhat_boots = N.array([
        reg_func(array[N.random.randint(0, n_rows, n_rows)])
        for _ in range(n_boot)])
    return yhat, yhat_boots
Provides a bootstrap resampling of an array. Provides another statistical method to estimate the variance of a dataset. For a `PCA` object in this library, it should be applied to `Orientation.array` method.
def _encode(self, value): "Return a bytestring representation of the value. Taken from redis-py connection.py" if isinstance(value, bytes): return value elif isinstance(value, (int, long)): value = str(value).encode('utf-8') elif isinstance(value, float): value = repr(value).encode('utf-8') elif not isinstance(value, basestring): value = str(value).encode('utf-8') else: value = value.encode('utf-8', 'strict') return value
Return a bytestring representation of the value. Taken from redis-py connection.py
def addPointScalars(self, scalars, name):
    """Add point scalars to the actor's polydata assigning it a name.

    .. hint:: |mesh_coloring| |mesh_coloring.py|_
    """
    poly = self.polydata(False)
    if len(scalars) != poly.GetNumberOfPoints():
        colors.printc('~times pointScalars Error: Number of scalars != nr. of points',
                      len(scalars), poly.GetNumberOfPoints(), c=1)
        exit()
    vtk_arr = numpy_to_vtk(np.ascontiguousarray(scalars), deep=True)
    vtk_arr.SetName(name)
    point_data = poly.GetPointData()
    point_data.AddArray(vtk_arr)
    point_data.SetActiveScalars(name)
    self.mapper.SetScalarRange(np.min(scalars), np.max(scalars))
    self.mapper.ScalarVisibilityOn()
    return self
Add point scalars to the actor's polydata assigning it a name. .. hint:: |mesh_coloring| |mesh_coloring.py|_
def sbo_list():
    """Return all installed SBo packages (directory entries ending in "_SBo")."""
    return [pkg for pkg in os.listdir(_meta_.pkg_path) if pkg.endswith("_SBo")]
Return all SBo packages
def validate(self, model, validator=None):
    """Check the model against all filters and, if it should be
    validated, run the validation.

    If any filter rejects the model, validation is skipped and the model
    is treated as valid. On failure an error is added to the model, and
    the validity value is returned.
    """
    if any(not flt(model) for flt in self.filters):
        return True
    is_valid, message = self.is_valid(model, validator)
    if not is_valid:
        model.add_error(self.pretty_property_name or self.property_name, message)
    return is_valid
Checks the model against all filters, and if it shoud be validated, runs the validation. if the model is invalid, an error is added to the model. Then the validity value is returned.
def fingerprint(value):
    """Return a hex hash value that uniquely identifies the GCL value."""
    h = hashlib.sha256()
    _digest(value, h)
    # hexdigest() replaces digest().encode('hex'), which only worked on
    # Python 2; the returned hex string is identical.
    return h.hexdigest()
Return a hash value that uniquely identifies the GCL value.
def to_date_or_datetime(value, ctx):
    """Try conversion of any value to a date or datetime.

    Strings are parsed with the context's date parser; datetimes are
    localized to the context timezone; plain dates pass through.
    """
    if isinstance(value, str):
        parsed = ctx.get_date_parser().auto(value)
        if parsed is not None:
            return parsed
    else:
        # Exact type check so datetime instances (a date subclass)
        # don't match the plain-date branch.
        if type(value) == datetime.date:
            return value
        if isinstance(value, datetime.datetime):
            return value.astimezone(ctx.timezone)
    raise EvaluationError("Can't convert '%s' to a date or datetime" % str(value))
Tries conversion of any value to a date or datetime
def sort(x, axis=-1, reverse=False, with_index=False, only_index=False):
    """Sort the elements of `x` along `axis` in ascending order by value.

    A negative `axis` counts from the last dimension (default -1). When
    `reverse` is True, elements are sorted in descending order. When
    `with_index` is True the result is ``(sorted, indices)``; when
    `only_index` is True only the indices are returned.

    Args:
        x(~nnabla.Variable): N-D array
        axis(int): Axis along which to sort.
        reverse(bool): Sort in descending order.
        with_index(bool): Return sorted values and index.
        only_index(bool): Return only the sort index.

    Returns:
        :obj:`~nnabla.Variable` `sorted` or `indices`, or a tuple of both.
    """
    from .function_bases import sort as sort_base
    if with_index and not only_index:
        n_outputs = 2
    else:
        n_outputs = 1
    return sort_base(x, axis, reverse, with_index, only_index, n_outputs)
Sorts the elements of `x` along a given `axis` in ascending order by value. A negative `axis` counts from the last dimension of `x`, so the default of -1 sorts along the last dimension. If `reverse` is True, then the elements are soreted in descending order. If `with_index` is True, result is a tuple ``(sorted, indices)`` or only ``indices`` if `only_index` is True. Setting `only_index` to True implies that `with_index` is also True. .. code-block:: python import numpy as np import nnabla as nn import nnabla.functions as F nn.set_auto_forward(True) x = nn.Variable.from_numpy_array(np.random.rand(2, 3, 4)) sorted = F.sort(x) assert np.allclose(sorted.d, np.sort(x.d)) sorted, indices = F.sort(x, with_index=True) assert np.allclose(sorted.d, np.sort(x.d)) assert np.all(indices.d == np.argsort(x.d)) indices = F.sort(x, only_index=True) assert np.all(indices.d == np.argsort(x.d)) Args: x(~nnabla.Variable): N-D array axis(int): Axis along which to sort. reverse(bool): Sort in descending order. with_index(bool): Return sorted values and index. only_index(bool): Return only the sort index. Returns: :obj:`~nnabla.Variable` `sorted` or :obj:`~nnabla.Variable` `indices` or (:obj:`~nnabla.Variable` `sorted`, :obj:`~nnabla.Variable` `indices`)
def put(self, uri, data, **kwargs):
    """PUT the provided data to the specified path.

    See :meth:`request` for additional details. The `data` parameter
    here is expected to be a string type.
    """
    return self.request("PUT", uri, data=data, **kwargs)
PUT the provided data to the specified path See :meth:`request` for additional details. The `data` parameter here is expected to be a string type.
def getIndicesFromBigIndex(self, bigIndex):
    """Map a flat (big) index to the per-dimension index set.

    @param bigIndex flat index
    @return numpy array of per-dimension indices
    @note no checks are performed to ensure the index is in range
    """
    indices = numpy.zeros((self.ndims,), dtype=int)
    for dim in range(self.ndims):
        indices[dim] = bigIndex // self.dimProd[dim] % self.dims[dim]
    return indices
Get index set from given big index @param bigIndex @return index set @note no checks are performed to ensure that the returned big index is valid
def openCurrentItem(self):
    """Open (expand) the current item in the repository."""
    logger.debug("openCurrentItem")
    _currentItem, currentIndex = self.getCurrentItem()
    if currentIndex.isValid():
        self.expand(currentIndex)
Opens the current item in the repository.
def fill_in_table(self, table, worksheet, flags):
    """Pad short rows with empty cells so all rows have equal length.

    Rows are modified in place by extending them with ``None``.

    :param table: list of row lists; mutated in place.
    :param worksheet: unused, kept for interface compatibility.
    :param flags: unused, kept for interface compatibility.
    """
    if not table:
        return
    # sys.maxint no longer exists on Python 3; compute lengths directly.
    max_row = max(len(row) for row in table)
    for row in table:
        if len(row) < max_row:
            row.extend([None] * (max_row - len(row)))
Fills in any rows with missing right hand side data with empty cells.
def create_objects(self, raw_objects):
    """Create all the objects got after the post configuration file
    initialization.

    :param raw_objects: dict with all object with str values
    :type raw_objects: dict
    :return: None
    """
    early_created = self.__class__.early_created_types
    logger.info("Creating objects...")

    self.add_self_defined_objects(raw_objects)
    # Early-created types were already built during post-configuration
    # initialization, so skip them here.
    for obj_type in sorted(self.__class__.types_creations):
        if obj_type in early_created:
            continue
        self.create_objects_for_type(raw_objects, obj_type)

    logger.info("Done")
Create all the objects got after the post configuration file initialization :param raw_objects: dict with all object with str values :type raw_objects: dict :return: None
async def get_link_secret_label(self) -> str:
    """Get current link secret label from non-secret storage records;
    return None for no match.

    :return: latest non-secret storage record for link secret label
    :raises WalletState: if the wallet is closed
    """
    LOGGER.debug('Wallet.get_link_secret_label >>>')

    if not self.handle:
        LOGGER.debug('Wallet.get_link_secret <!< Wallet %s is closed', self.name)
        raise WalletState('Wallet {} is closed'.format(self.name))

    records = await self.get_non_secret(TYPE_LINK_SECRET_LABEL)
    label = None
    if records:
        # Keys are stringified integers; the highest key is the latest.
        latest = max(int(k) for k in records)
        label = records[str(latest)].value

    LOGGER.debug('Wallet.get_link_secret_label <<< %s', label)
    return label
Get current link secret label from non-secret storage records; return None for no match. :return: latest non-secret storage record for link secret label
def datefmt_to_regex(datefmt):
    """Convert a strftime format string to a compiled regex.

    :param datefmt: strftime format string
    :type datefmt: ``str``
    :returns: Equivalent compiled regex (one capture group)
    :rtype: ``re.Pattern``
    """
    pattern = datefmt
    for strftime_token, regex_fragment in PATTERN_MATCHNG:
        pattern = pattern.replace(strftime_token, regex_fragment)
    return re.compile(r'(%s)' % pattern)
Convert a strftime format string to a regex. :param datefmt: strftime format string :type datefmt: ``str`` :returns: Equivalent regex :rtype: ``re.compile``
def _save_trace(self): stack_trace = stack() try: self.trace = [] for frm in stack_trace[5:]: self.trace.insert(0, frm[1:]) finally: del stack_trace
Save current stack trace as formatted string.