code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def warehouse_query(line, cell):
    """Cell magic: evaluate the first token of ``line`` as a warehouse
    object in the IPython user namespace and run ``cell`` as a query.

    The query result is closed immediately, so this is fire-and-forget
    from the notebook's point of view.
    """
    from IPython import get_ipython
    parts = line.split()
    w_var_name = parts.pop(0)
    # Resolve the variable name to the live object in the user namespace.
    w = get_ipython().ev(w_var_name)
    w.query(cell).close()
def umi_histogram(fastq):
    """Count the number of reads for each UMI and write a tab-separated
    barcode/count histogram to stdout, most common first.

    Expects formatted fastq files (the UMI is captured by the ``MB``
    group of the transformed-read regex).
    """
    annotations = detect_fastq_annotations(fastq)
    re_string = construct_transformed_regex(annotations)
    parser_re = re.compile(re_string)
    counter = collections.Counter()
    for read in read_fastq(fastq):
        match = parser_re.search(read)
        if match is None:
            # Skip reads that do not match instead of crashing with
            # AttributeError on ``None.groupdict()``.
            continue
        counter[match.groupdict()['MB']] += 1
    for bc, count in counter.most_common():
        sys.stdout.write('{}\t{}\n'.format(bc, count))
def readSB(self, bits):
    """Read a signed int using the specified number of bits.

    Sign-extends by shifting the raw value to the top of a 32-bit word
    and arithmetically shifting it back down.
    """
    shift = 32 - bits
    return int32(self.readbits(bits) << shift) >> shift
def unmarshaller(self, typed=True):
    """Get the appropriate XML decoder.

    @param typed: use the typed unmarshaller when True, the basic one
        (delegated to RPC) otherwise.
    @return: Either the (basic|typed) unmarshaller.
    @rtype: L{UmxTyped}
    """
    if typed:
        return UmxEncoded(self.schema())
    else:
        return RPC.unmarshaller(self, typed)
def write_frame(self):
    """Write a single frame (the current ``self.image``) to the open
    movie/GIF file.

    Raises:
        AssertionError: if no movie writer has been opened yet.
    """
    if not hasattr(self, 'mwriter'):
        raise AssertionError('This plotter has not opened a movie or GIF file.')
    self.mwriter.append_data(self.image)
def get_assessment_metadata(self):
    """Gets the metadata for an assessment.

    return: (osid.Metadata) - metadata for the assessment
    *compliance: mandatory -- This method must be implemented.*
    """
    metadata = dict(self._mdata['assessment'])
    # Surface any already-stored assessment id on the metadata.
    metadata.update({'existing_id_values': self._my_map['assessmentId']})
    return Metadata(**metadata)
def upload_submit(self, upload_request):
    """Submit a dataset upload request to the API.

    :param upload_request: the upload payload to save.
    :return: a ``DatasetUploadResponse`` parsed from the API reply.
    """
    path = '/api/1.0/upload/save'
    return self._api_post(definition.DatasetUploadResponse, path, upload_request)
def write(self, data, show_progress=False, invalid_data_behavior='warn'):
    """Write a stream of minute data.

    Parameters
    ----------
    data : iterable[(int, pd.DataFrame)]
        The data to write. Each element is a tuple of sid and a frame
        with OHLCV columns indexed by market minutes. A given sid may
        appear more than once, but dates must be strictly increasing.
    show_progress : bool, optional
        Whether or not to show a progress bar while writing.
    invalid_data_behavior : str, optional
        Forwarded to ``write_sid``.
    """
    ctx = maybe_show_progress(
        data,
        show_progress=show_progress,
        item_show_func=lambda e: e if e is None else str(e[0]),
        label="Merging minute equity files:",
    )
    # Hoist the bound method lookup out of the loop.
    write_sid = self.write_sid
    with ctx as it:
        for e in it:
            write_sid(*e, invalid_data_behavior=invalid_data_behavior)
def formfield(self, **kwargs):
    """Return a custom form field (forms.TimeZoneField) for this model
    field; caller-supplied kwargs override the default form class."""
    defaults = {'form_class': forms.TimeZoneField}
    defaults.update(**kwargs)
    return super(TimeZoneField, self).formfield(**defaults)
def get_resource_bin_session(self, proxy):
    """Gets the session for retrieving resource to bin mappings.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.resource.ResourceBinSession)
    raise:  Unimplemented - ``supports_resource_bin()`` is ``false``
    *compliance: optional -- implemented if ``supports_resource_bin()``
    is ``true``.*
    """
    if not self.supports_resource_bin():
        raise errors.Unimplemented()
    return sessions.ResourceBinSession(proxy=proxy, runtime=self._runtime)
def server_poweroff(host=None, admin_username=None, admin_password=None,
                    module=None):
    """Power down the managed server via ``serveraction powerdown``.

    host/admin_username/admin_password identify the chassis; ``module``
    selects an element (e.g. a blade) to power off — when omitted the
    whole chassis is powered off.

    CLI Example:

    .. code-block:: bash

        salt dell dracr.server_poweroff
        salt dell dracr.server_poweroff module=server-1
    """
    return __execute_cmd('serveraction powerdown', host=host,
                         admin_username=admin_username,
                         admin_password=admin_password,
                         module=module)
def pause(self, scaling_group):
    """Pause all execution of the policies for the specified scaling
    group.

    :param scaling_group: the scaling group (or its id) to pause.
    :returns: None — the API response body is not meaningful here.
    """
    uri = "/%s/%s/pause" % (self.uri_base, utils.get_id(scaling_group))
    # Response intentionally discarded (the original unpacked it into
    # unused locals); only a successful POST matters.
    self.api.method_post(uri)
    return None
def _install_hiero(use_threaded_wrapper):
    """Set up The Foundry Hiero support.

    Raises ImportError when not actually running inside Hiero
    (detected via the ``--hiero`` flag in nuke's raw arguments).
    """
    import hiero
    import nuke

    if "--hiero" not in nuke.rawArgs:
        raise ImportError

    def threaded_wrapper(func, *args, **kwargs):
        # GUI work must run on Hiero's main thread.
        return hiero.core.executeInMainThreadWithResult(
            func, args, kwargs)

    _common_setup("Hiero", threaded_wrapper, use_threaded_wrapper)
def calc_point_distance(self, chi_coords):
    """Calculate distance between point and the bank; return the
    closest distance.

    Parameters
    ----------
    chi_coords : numpy.array
        The position of the point in the chi coordinates.

    Returns
    -------
    min_dist : float
        The smallest **SQUARED** metric distance between the test
        point and the bank.
    indexes : tuple
        The chi1_bin, chi2_bin and position within that bin at which
        the closest matching point lies.
    """
    chi1_bin, chi2_bin = self.find_point_bin(chi_coords)
    min_dist = 1000000000
    indexes = None
    # Scan this bin and its neighbours in the precomputed order.
    for chi1_bin_offset, chi2_bin_offset in self.bin_loop_order:
        curr_chi1_bin = chi1_bin + chi1_bin_offset
        curr_chi2_bin = chi2_bin + chi2_bin_offset
        for idx, bank_chis in \
                enumerate(self.bank[curr_chi1_bin][curr_chi2_bin]):
            dist = coord_utils.calc_point_dist(chi_coords, bank_chis)
            if dist < min_dist:
                min_dist = dist
                indexes = (curr_chi1_bin, curr_chi2_bin, idx)
    return min_dist, indexes
def find_field_by_name(browser, field_type, name):
    """Locate the control input with the given ``name``.

    :param browser: ``world.browser``
    :param string field_type: a field type (i.e. `button`)
    :param string name: ``name`` attribute

    Returns: an :class:`ElementSelector`
    """
    return ElementSelector(
        browser,
        field_xpath(field_type, 'name') % string_literal(name),
        filter_displayed=True,
    )
def get_changelog_file_for_database(database=DEFAULT_DB_ALIAS):
    """Get the changelog filename for the given `database` DB alias.

    Prefers the per-database dict setting; falls back to the single
    file setting only for the default alias, raising
    ImproperlyConfigured with guidance otherwise.
    """
    from django.conf import settings
    try:
        return settings.LIQUIMIGRATE_CHANGELOG_FILES[database]
    except AttributeError:
        # Dict setting is absent entirely.
        if database == DEFAULT_DB_ALIAS:
            try:
                return settings.LIQUIMIGRATE_CHANGELOG_FILE
            except AttributeError:
                raise ImproperlyConfigured(
                    'Please set LIQUIMIGRATE_CHANGELOG_FILE or '
                    'LIQUIMIGRATE_CHANGELOG_FILES in your '
                    'project settings')
        else:
            raise ImproperlyConfigured(
                'LIQUIMIGRATE_CHANGELOG_FILES dictionary setting '
                'is required for multiple databases support')
    except KeyError:
        # Dict exists but has no entry for this alias.
        raise ImproperlyConfigured(
            "Liquibase changelog file is not set for database: %s"
            % database)
def _group_sql(self, quote_char=None, groupby_alias=True, **kwargs):
    """Produce the GROUP BY part of the query from ``self._groupbys``.

    If a groupby field also appears in the select clause (matched by
    alias) and ``groupby_alias`` is True, the alias is used; otherwise
    the entire field is rendered as SQL.
    """
    clauses = []
    selected_aliases = {s.alias for s in self._selects}
    for field in self._groupbys:
        if groupby_alias and field.alias and field.alias in selected_aliases:
            clauses.append("{quote}{alias}{quote}".format(
                alias=field.alias,
                quote=quote_char or '',
            ))
        else:
            clauses.append(field.get_sql(quote_char=quote_char, **kwargs))

    sql = ' GROUP BY {groupby}'.format(groupby=','.join(clauses))
    if self._with_totals:
        return sql + ' WITH TOTALS'
    return sql
def build_target_areas(entry):
    """Clean up the raw ``cap:areaDesc`` string into a list of
    stripped area names (the raw value is ';'-separated)."""
    raw_desc = str(entry['cap:areaDesc'])
    return [chunk.strip() for chunk in raw_desc.split(';')]
def minutes(self, start_date=None, end_date=None, grouping=None):
    """Get a detailed report of encoded and billable minutes for a
    date range.

    **Warning**: ``start_date`` and ``end_date`` must be
    ``datetime.date`` objects.

    https://app.zencoder.com/docs/api/reports/minutes
    """
    # NOTE(review): ``grouping`` is accepted but never forwarded —
    # confirm whether it should be passed to __format or the request.
    data = self.__format(start_date, end_date)
    url = self.base_url + '/minutes'
    return self.get(url, data=data)
def _get_pattern(self, pys_style):
    """Return an xlwt.Pattern for the given pyspread style, or None
    (implicit) when the style defines no background colour."""
    if "bgcolor" not in pys_style:
        return

    pattern = xlwt.Pattern()
    pattern.pattern = xlwt.Pattern.SOLID_PATTERN

    # Convert the stored RGB value to an xlwt palette index.
    bgcolor = wx.Colour()
    bgcolor.SetRGB(pys_style["bgcolor"])
    pattern.pattern_fore_colour = self.color2idx(*bgcolor.Get())

    return pattern
def _check_if_must_download(request_list, redownload):
    """Update the ``will_download`` attribute of each request.

    **Note:** the function mutates the elements of the list!

    :param request_list: a list of ``DownloadRequest`` instances
    :param redownload: whether to download the data again or not
    """
    for request in request_list:
        # Download only when a response/data is wanted AND it is not
        # already on disk (unless a redownload is forced).
        request.will_download = (request.save_response or request.return_data) \
            and (not request.is_downloaded() or redownload)
def replace_media(self, src_file, dst_file):
    """Replace one media file with another inside the docx.

    Intended for header/footer images (which cannot be added directly):
    put a dummy picture there, then register its replacement here. The
    replacement is matched by the CRC of ``src_file``, so the original
    source media file must be available. For images, the aspect ratio
    of the replaced image is kept.
    """
    with open(dst_file, 'rb') as fh:
        crc = self.get_file_crc(src_file)
        self.crc_to_new_media[crc] = fh.read()
def try_storage(self, identifier, req, resp, resource, uri_kwargs):
    """Try to find the user in the configured user storage object.

    Args:
        identifier: user identifier (None means "not identified").

    Returns:
        The user object, a minimal identity dict when no storage is
        configured and ``only_with_storage`` is off, or None.
    """
    if identifier is None:
        return None
    if self.user_storage is not None:
        return self.user_storage.get_user(
            self, identifier, req, resp, resource, uri_kwargs
        )
    if not self.only_with_storage:
        # No storage configured: hand back a minimal identity record.
        return {
            'identified_with': self,
            'identifier': identifier,
        }
    return None
def _handle_tag_removeobject(self):
    """Handle the RemoveObject tag: read CharacterId and Depth as
    unsigned 16-bit ints from the source stream."""
    obj = _make_object("RemoveObject")
    obj.CharacterId = unpack_ui16(self._src)
    obj.Depth = unpack_ui16(self._src)
    return obj
def change_exteditor(self):
    """Prompt for a new external editor executable path and store it
    in the options when the dialog is confirmed."""
    path, valid = QInputDialog.getText(self, _('External editor'),
                                       _('External editor executable path:'),
                                       QLineEdit.Normal,
                                       self.get_option('external_editor/path'))
    if valid:
        self.set_option('external_editor/path', to_text_string(path))
def _save_token_cache(self, new_cache):
    """Write out to the filesystem a cache of the OAuth2 information.

    Invalid cache values are rejected (logged) rather than written;
    filesystem errors are logged and swallowed so callers are never
    interrupted by a failed cache write.
    """
    logging.debug('Looking to write to local authentication cache...')
    if not self._check_token_cache_type(new_cache):
        logging.error('Attempt to save a bad value: %s', new_cache)
        return
    try:
        logging.debug('About to write to fs cache file: %s',
                      self.token_cache_file)
        with open(self.token_cache_file, 'wb') as f:
            cPickle.dump(new_cache, f, protocol=cPickle.HIGHEST_PROTOCOL)
        logging.debug('Finished dumping cache_value to fs cache file.')
    except Exception:
        # Was a bare ``except:``, which would also swallow
        # KeyboardInterrupt/SystemExit; narrowed to Exception.
        logging.exception(
            'Could not successfully cache OAuth2 secrets on the file '
            'system.')
def _post_clean(self):
    """Run password validation after clean methods.

    The user instance does not exist until after ``_post_clean``, and
    UserAttributeSimilarityValidator needs an instance to compare
    against — hence validating here instead of in a clean method.
    See https://code.djangoproject.com/ticket/28127
    """
    super()._post_clean()
    password = self.cleaned_data.get('password1')
    if password:
        try:
            password_validation.validate_password(password, self.instance)
        except ValidationError as error:
            self.add_error('password1', error)
def stop_threadsafe(self):
    """Stop this task from another thread and wait for it to finish.

    Must not be called from within the BackgroundEventLoop; it injects
    ``self.stop()`` into the event loop and blocks until it returns.

    Raises:
        TimeoutExpiredError: if the task does not stop within the
            timeout configured in ``__init__``.
    """
    if self.stopped:
        return

    try:
        self._loop.run_coroutine(self.stop())
    except asyncio.TimeoutError:
        raise TimeoutExpiredError("Timeout stopping task {} with {} subtasks".format(self.name, len(self.subtasks)))
def set(self, key, val):
    """Return a new PMap with ``key``/``val`` inserted; the original
    map is unchanged (persistent update via a one-shot evolver).

    >>> m1 = m(a=1, b=2)
    >>> m1.set('a', 3)
    pmap({'a': 3, 'b': 2})
    """
    return self.evolver().set(key, val).persistent()
def make_metatiles(size, tiles, date_time=None):
    """Group tiles by layer and make metatiles relative to the common
    parent tile of each group.

    Provide a 6-tuple ``date_time`` to set the timestamp on each tile
    within the metatile, or leave it as None to use the current time.
    """
    groups = defaultdict(list)
    for tile in tiles:
        key = tile['layer']
        groups[key].append(tile)

    metatiles = []
    # ``values()`` works on both Python 2 and 3; the original used the
    # Python-2-only ``itervalues()``.
    for group in groups.values():
        parent = _parent_tile(t['coord'] for t in group)
        metatiles.extend(make_multi_metatile(parent, group, date_time))

    return metatiles
def by_interval_lookup(self, style_key, style_value):
    """Return a processor for an "interval" style value.

    ``style_value["interval"]`` is a sequence of ``(start, end, x)``
    tuples: start inclusive, end exclusive (None means unbounded), and
    ``x`` either a style attribute name or a truthy flag meaning "use
    the attribute named by ``style_key``".
    """
    # For boolean style types the attribute is fixed up front and the
    # per-interval value acts only as an on/off flag.
    style_attr = style_key if self.style_types[style_key] is bool else None
    intervals = style_value["interval"]

    def proc(value, result):
        try:
            value = float(value)
        except TypeError:
            # Non-numeric values can never fall inside an interval.
            return result
        for start, end, lookup_value in intervals:
            if start is None:
                start = float("-inf")
            if end is None:
                end = float("inf")
            if start <= value < end:
                if not lookup_value:
                    return result
                return self.render(style_attr or lookup_value, result)
        return result

    return proc
def get_scores(self, y_i, y_j):
    """Same function as get_zeta_i_j_given_separate_counts.

    Parameters
    ----------
    y_i, y_j : np.array(int)
        Arrays of word counts of words occurring in each class.

    Returns
    -------
    np.array of z-scores
    """
    z_scores = self.get_zeta_i_j_given_separate_counts(y_i, y_j)
    return z_scores
def strip_py(arg):
    """Strip a trailing .py or .pyi suffix.

    Return None if no such suffix is found.
    """
    for ext in PY_EXTENSIONS:
        if arg.endswith(ext):
            return arg[:-len(ext)]
    return None
def _get_representative(self, obj):
    """Find and return the root of the set containing `obj`
    (union-find lookup with path compression).

    Unseen objects are lazily initialised as singleton sets.
    """
    if obj not in self._parents:
        # First sighting: obj becomes its own one-element set.
        self._parents[obj] = obj
        self._weights[obj] = 1
        self._prev_next[obj] = [obj, obj]
        self._min_values[obj] = obj
        return obj

    # Walk up to the root...
    path = [obj]
    root = self._parents[obj]
    while root != path[-1]:
        path.append(root)
        root = self._parents[root]

    # ...then compress the path so future lookups are near O(1).
    for ancestor in path:
        self._parents[ancestor] = root
    return root
def error_asymptotes(pca, **kwargs):
    """Plot asymptotic error bounds for a hyperbola on a stereonet.

    Draws the upper and lower plane-error traces plus the plane given
    by the PCA's strike/dip on ``ax`` (defaults to the current axes).
    """
    ax = kwargs.pop("ax", current_axes())

    lon, lat = pca.plane_errors('upper', n=1000)
    ax.plot(lon, lat, '-')
    lon, lat = pca.plane_errors('lower', n=1000)
    ax.plot(lon, lat, '-')

    ax.plane(*pca.strike_dip())
def _collapse_state(args: Dict[str, Any]):
    """Project state shards onto the appropriate post-measurement
    state, renormalised by the observed outcome's probability.

    This function makes no assumptions about the interpretation of
    quantum theory.

    Args:
        args: The args from shard_num_args (must contain 'index',
            'result' and 'prob_one').
    """
    index = args['index']
    result = args['result']
    prob_one = args['prob_one']

    state = _state_shard(args)
    # Normalise by sqrt(P(observed outcome)).
    normalization = np.sqrt(prob_one if result else 1 - prob_one)
    # Keep only the component consistent with the measured result.
    state *= (_one_projector(args, index) * result
              + (1 - _one_projector(args, index)) * (1 - result))
    state /= normalization
def _handle_author(author): lname = author.split(' ') try: auinit = lname[0][0] final = lname[-1].upper() if final in ['JR.', 'III']: aulast = lname[-2].upper() + " " + final.strip(".") else: aulast = final except IndexError: raise ValueError("malformed author name") return aulast, auinit
Yields aulast and auinit from an author's full name. Parameters ---------- author : str or unicode Author fullname, e.g. "Richard L. Nixon". Returns ------- aulast : str Author surname. auinit : str Author first-initial.
def count_sci_extensions(filename):
    """Return the number of SCI extensions and the EXTNAME from an
    input MEF file.

    Simple FITS files with no SCI extensions report ``(1, 'PRIMARY')``.
    """
    num_sci = 0
    extname = 'SCI'

    hdu_list = fileutil.openImage(filename, memmap=False)
    for extn in hdu_list:
        if 'extname' in extn.header and extn.header['extname'] == extname:
            num_sci += 1
    if num_sci == 0:
        # No SCI extensions: treat the primary HDU as the science data.
        extname = 'PRIMARY'
        num_sci = 1
    hdu_list.close()

    return num_sci, extname
def check_power(self):
    """Return the power state of the smart plug (True when on).

    Returns None implicitly when the device reports an error status.
    """
    packet = bytearray(16)
    packet[0] = 1
    response = self.send_packet(0x6a, packet)
    err = response[0x22] | (response[0x23] << 8)
    if err == 0:
        payload = self.decrypt(bytes(response[0x38:]))
        state_byte = payload[0x4]
        # On Python 2 indexing a str yields a 1-char str; normalise to
        # an int so a single membership test replaces the original's
        # duplicated int/str branches.
        if not isinstance(state_byte, int):
            state_byte = ord(state_byte)
        # Values 1, 3 and 0xFD were treated as "on" by the original logic.
        return state_byte in (1, 3, 0xFD)
def OnContextMenu(self, event):
    """Context menu event handler: pop up the grid's context menu."""
    self.grid.PopupMenu(self.grid.contextmenu)
    event.Skip()
def get_default_keystore(prefix='AG_'):
    """Get the default keystore information from environment variables
    with the given prefix, falling back to the configured defaults.

    Looks up ``%s_KEYSTORE_PATH/STOREPASS/KEYPASS/ALIAS`` formatted
    with ``prefix``.

    :param prefix: environment-variable prefix, default ``AG_``.
    Returns: tuple ``(path, storepass, keypass, alias)``.
    """
    # NOTE(review): with the default prefix 'AG_' the formatted name is
    # 'AG__KEYSTORE_PATH' (double underscore) — confirm the intended
    # variable names against the deployment docs.
    path = os.environ.get('%s_KEYSTORE_PATH' % prefix, config.keystore.path)
    storepass = os.environ.get('%s_KEYSTORE_STOREPASS' % prefix,
                               config.keystore.storepass)
    keypass = os.environ.get('%s_KEYSTORE_KEYPASS' % prefix,
                             config.keystore.keypass)
    alias = os.environ.get('%s_KEYSTORE_ALIAS' % prefix,
                           config.keystore.alias)
    return (path, storepass, keypass, alias)
def from_text(text):
    """Convert text into a DNS rdata class value.

    @param text: the text
    @type text: string
    @rtype: int
    @raises dns.rdataclass.UnknownRdataClass: the class is unknown
    @raises ValueError: the rdata class value is not >= 0 and <= 65535
    """
    value = _by_text.get(text.upper())
    if value is None:
        match = _unknown_class_pattern.match(text)
        if match is None:  # was ``match == None``; identity test is correct
            raise UnknownRdataclass
        value = int(match.group(1))
        if value < 0 or value > 65535:
            raise ValueError("class must be between >= 0 and <= 65535")
    return value
def get_precision(self):
    """Get the current precision from the sensor.

    :returns: sensor resolution from 9-12 bits
    :rtype: int
    """
    # Fifth whitespace-separated token of the second raw string holds
    # the config byte (hex); bits 5-6 encode the resolution.
    config_hex = self.raw_sensor_strings[1].split()[4]
    return (int(config_hex, 16) >> 5) + 9
def find_slot(self, wanted, slots=None):
    """Search the given slots (or, if not given, the default order:
    active hotbar slot, hotbar, inventory, open window) for ``wanted``.

    Args:
        wanted: function(Slot) or Slot or itemID or (itemID, metadata)

    Returns:
        Optional[Slot]: the first slot containing the item, or None.
    """
    return next(iter(self.find_slots(wanted, slots)), None)
def read_file(path):
    """Read a ctm file into a dict keyed by utterance/wave id.

    Lines starting with ';;' are ignored. Indices 1, 2 and 4 of each
    sliced record (after dropping the id column) are converted to
    float; other columns are kept as strings.

    Args:
        path (str): Path to the file.

    Returns:
        dict: wave-id -> list of entry lists.
    """
    gen = textfile.read_separated_lines_generator(path, max_columns=6,
                                                  ignore_lines_starting_with=[';;'])

    utterances = collections.defaultdict(list)
    for record in gen:
        values = record[1:len(record)]
        for i in range(len(values)):
            # Numeric fields within the sliced record.
            if i == 1 or i == 2 or i == 4:
                values[i] = float(values[i])
        utterances[record[0]].append(values)

    return utterances
def iter_files(self):
    """Iterate over files: call ``file_iter`` when it is callable,
    otherwise wrap it in an iterator."""
    source = self.file_iter
    if callable(source):
        return source()
    return iter(source)
def crossover(cross):
    """Return an inspyred crossover function based on ``cross``.

    ``cross`` operates on a single (mom, dad) pair and yields
    offspring. The generated function pairs up the selected candidates
    (even indexes as moms, odd as dads, dropping the last candidate of
    an odd-length list), calls ``cross`` per pair and collects all
    offspring. Typically used as a decorator; the original single-pair
    function remains available as the ``single_crossover`` attribute.
    """
    @functools.wraps(cross)
    def inspyred_crossover(random, candidates, args):
        if len(candidates) % 2 == 1:
            candidates = candidates[:-1]
        moms = candidates[::2]
        dads = candidates[1::2]
        children = []
        for i, (mom, dad) in enumerate(zip(moms, dads)):
            # Expose the pair index to the wrapped function.
            cross.index = i
            offspring = cross(random, mom, dad, args)
            for o in offspring:
                children.append(o)
        return children
    inspyred_crossover.single_crossover = cross
    return inspyred_crossover
def struct(self):
    """XML-RPC-friendly representation of the current object state:
    merged outputs of each defined field that is actually set."""
    data = {}
    for var, fmap in self._def.items():
        if hasattr(self, var):
            data.update(fmap.get_outputs(getattr(self, var)))
    return data
def jsonobjlen(self, name, path=Path.rootPath()):
    """Return the length of the dictionary JSON value under ``path``
    at key ``name`` (ReJSON ``JSON.OBJLEN`` command)."""
    return self.execute_command('JSON.OBJLEN', name, str_path(path))
def send_mail(subject, body, email_from, emails_to):
    """Send an email through Gmail's SMTP server (STARTTLS, port 587),
    authenticating with the module-level SMTP_USERNAME/SMTP_PASSWORD."""
    msg = MIMEText(body)
    msg['Subject'] = subject
    msg['From'] = email_from
    msg['To'] = ", ".join(emails_to)

    s = smtplib.SMTP('smtp.gmail.com', 587)
    s.ehlo()
    s.starttls()
    s.ehlo()
    s.login(SMTP_USERNAME, SMTP_PASSWORD)
    s.sendmail(email_from, emails_to, msg.as_string())
    s.quit()
def init_database(connection=None, dbname=None):
    """Initialize the configured backend for use with BigchainDB:
    create database ``dbname`` with required tables and indexes.

    Args:
        connection: an existing backend connection; one is created if
            not given.
        dbname (str): database name to create; defaults to the name in
            the BigchainDB configuration.
    """
    connection = connection or connect()
    dbname = dbname or bigchaindb.config['database']['name']

    create_database(connection, dbname)
    create_tables(connection, dbname)
def make_noise_surface(dims=DEFAULT_DIMS, blur=10, seed=None):
    """Make a surface by generating random noise and blurring it.

    Args:
        dims (pair): the dimensions of the surface to create
        blur (float): the amount of Gaussian blur to apply
        seed (int): a random seed to use (optional)

    Returns:
        surface: the blurred Gaussian-noise array.
    """
    if seed is not None:
        np.random.seed(seed)
    return gaussian_filter(np.random.normal(size=dims), blur)
def push_state(self, new_file=''):
    """Saves the current error state to parse subpackages.

    The current detection/message/resource/metadata state is pushed
    onto ``self.subpackages`` and replaced with a fresh state for the
    nested package named ``new_file``.
    """
    self.subpackages.append({'detected_type': self.detected_type,
                             'message_tree': self.message_tree,
                             'resources': self.pushable_resources,
                             'metadata': self.metadata})

    self.message_tree = {}
    self.pushable_resources = {}
    # 'listed' is inherited by the subpackage; everything else resets.
    self.metadata = {'requires_chrome': False,
                     'listed': self.metadata.get('listed'),
                     'validator_version': validator.__version__}
    self.package_stack.append(new_file)
async def async_set_operation_mode(
        self, operation_mode: OperationMode, password: str = '') -> None:
    """Set the operation mode on the base unit.

    :param operation_mode: the operation mode to change to
    :param password: if specified, will be used instead of the
        password property when issuing the command
    """
    await self._protocol.async_execute(
        SetOpModeCommand(operation_mode), password=password)
def startResponse(self, status, headers, excInfo=None):
    """Extends startResponse to also log the response in colour from a
    worker thread, then hand back the write callable."""
    self.status = status
    self.headers = headers
    # Coloured logging is pushed off the reactor thread.
    self.reactor.callInThread(
        responseInColor, self.request, status, headers
    )
    return self.write
def __remove_activity(self, id):
    """Remove an activity, soft-deleting when facts reference it.

    If any facts use this activity it is only marked deleted = 1;
    otherwise the row is removed outright.
    """
    query = "select count(*) as count from facts where activity_id = ?"
    bound_facts = self.fetchone(query, (id,))['count']

    if bound_facts > 0:
        self.execute("UPDATE activities SET deleted = 1 WHERE id = ?", (id,))
    else:
        self.execute("delete from activities where id = ?", (id,))
def orientnii(imfile):
    """Print the axis orientation derived from a NIfTI sform.

    Not fully functional yet: only reports the dominant axis label
    (L-R / S-I / A-P) per voxel axis, ignoring sign/direction. Does
    nothing when ``imfile`` does not exist.
    """
    strorient = ['L-R', 'S-I', 'A-P']
    niiorient = []
    niixyz = np.zeros(3, dtype=np.int8)
    if os.path.isfile(imfile):
        nim = nib.load(imfile)
        A = nim.get_sform()
        for i in range(3):
            # Dominant world direction of each voxel axis.
            niixyz[i] = np.argmax(abs(A[i, :-1]))
            niiorient.append(strorient[niixyz[i]])
        # print() call replaces the Python-2-only print statement,
        # which is a syntax error on Python 3; also dropped the unused
        # ``pct = nim.get_data()`` local.
        print(niiorient)
def password_get(username=None):
    """Retrieve a password from the keychain based on the
    environment:parameter ``username`` pair.

    Raises LookupError (with setup instructions) when no credential is
    stored; otherwise returns the password as ascii-encoded bytes.
    """
    password = keyring.get_password('supernova', username)
    if password is None:
        split_username = tuple(username.split(':'))
        msg = ("Couldn't find a credential for {0}:{1}. You need to set one "
               "with: supernova-keyring -s {0} {1}").format(*split_username)
        raise LookupError(msg)
    else:
        return password.encode('ascii')
def body(self):
    """Return the Message Body.

    If auto_decode is enabled, the body is decoded via utf-8 when
    possible and the result memoised in ``self._decode_cache``.

    :rtype: bytes|str|unicode
    """
    if not self._auto_decode:
        return self._body
    if 'body' in self._decode_cache:
        return self._decode_cache['body']
    body = try_utf8_decode(self._body)
    self._decode_cache['body'] = body
    return body
def slice_indexer(self, start=None, end=None, step=None, kind=None):
    """For an ordered or unique index, compute the slice indexer for
    input labels and step.

    Parameters
    ----------
    start, end : label, default None
        Default to the beginning/end of the index.
    step : int, default None
    kind : string, default None

    Returns
    -------
    indexer : slice

    Raises
    ------
    KeyError : If a key does not exist, or is not unique and the index
        is not ordered.

    Notes
    -----
    Assumes the data is sorted; use at your own peril.
    """
    start_slice, end_slice = self.slice_locs(start, end, step=step,
                                             kind=kind)

    if not is_scalar(start_slice):
        raise AssertionError("Start slice bound is non-scalar")
    if not is_scalar(end_slice):
        raise AssertionError("End slice bound is non-scalar")

    return slice(start_slice, end_slice, step)
def add_parent(self, parent):
    """Add self as a child of ``parent``, record it as our parent, and
    return it (convenient for chaining)."""
    parent.add_child(self)
    self.parent = parent
    return parent
def get_output(self, index):
    """Get the index-th output of the predictor.

    Parameters
    ----------
    index : int
        The index of the output.

    Returns
    -------
    out : numpy array
        The output array (float32, shape queried from the C API).
    """
    # Query the output shape first...
    pdata = ctypes.POINTER(mx_uint)()
    ndim = mx_uint()
    _check_call(_LIB.MXPredGetOutputShape(
        self.handle, index,
        ctypes.byref(pdata),
        ctypes.byref(ndim)))
    shape = tuple(pdata[:ndim.value])
    # ...then copy the output into a freshly allocated buffer.
    data = np.empty(shape, dtype=np.float32)
    _check_call(_LIB.MXPredGetOutput(
        self.handle, mx_uint(index),
        data.ctypes.data_as(mx_float_p),
        mx_uint(data.size)))
    return data
def _quadratic_sum_cost(self, state: _STATE) -> float:
    """Cost function that sums squares of lengths of sequences.

    Args:
        state: Search state, not mutated.

    Returns:
        Minus the normalized quadratic sum of each linear sequence
        section. Promotes single, long sequences; converges towards
        -1 (every node in one sequence) and is always below 0.
    """
    total = float(len(self._c))
    seqs, _ = state
    fractions = [len(seq) / total for seq in seqs]
    return -sum(f * f for f in fractions)
def get_resource_url(resource):
    """Return the URL for the given resource: its model path with the
    network location blanked out."""
    path = model_path(resource)
    parsed = list(urlparse.urlparse(path))
    parsed[1] = ""  # strip the netloc component
    return urlparse.urlunparse(parsed)
def hpx_to_axes(h, npix):
    """Generate the bin-edge vectors for the axes of a HPX object:
    the energy bin edges and the pixel-index edges 0..npix[-1]."""
    energy_edges = h.ebins
    pixel_edges = np.arange(npix[-1] + 1)
    return energy_edges, pixel_edges
def remove_droppable(self, droppable_id):
    """Remove a droppable, given its id, from the form's map.

    Droppables whose 'id' differs from ``droppable_id`` are kept in
    their original order.
    """
    form = self.my_osid_object_form
    form._my_map['droppables'] = [
        entry for entry in form._my_map['droppables']
        if entry['id'] != droppable_id
    ]
def tail(self, n=5):
    """Return a MultiIndex with the last ``n`` values in each column.

    Parameters
    ----------
    n : int
        Number of values.
    """
    return MultiIndex([v.tail(n) for v in self.values], self.names)
def _printUUID(uuid, detail='word'):
    """Return a friendly abbreviated string for ``uuid``.

    Detail levels above 'word' return the full uuid; otherwise only
    the first and last four characters. None passes through.
    """
    if not isinstance(detail, int):
        detail = detailNum[detail]
    if detail > detailNum['word']:
        return uuid
    if uuid is None:
        return None
    return "%s...%s" % (uuid[:4], uuid[-4:])
def set_regressor_interface_params(spec, features, output_features):
    """Common utilities to set the regressor interface params.

    Validates that exactly one Double output feature is provided
    (defaulting to 'predicted_class'), records it as the predicted
    feature name, and registers all inputs and the output on the spec
    description. Returns the mutated spec.
    """
    if output_features is None:
        output_features = [("predicted_class", datatypes.Double())]
    else:
        output_features = _fm.process_or_validate_features(output_features, 1)

    if len(output_features) != 1:
        raise ValueError("Provided output features for a regressor must be "
                         "one Double feature.")

    if output_features[0][1] != datatypes.Double():
        raise ValueError("Output type of a regressor must be a Double.")

    prediction_name = output_features[0][0]
    spec.description.predictedFeatureName = prediction_name

    # Normalise and register the input features.
    features = _fm.process_or_validate_features(features)
    for cur_input_name, feature_type in features:
        input_ = spec.description.input.add()
        input_.name = cur_input_name
        datatypes._set_datatype(input_.type, feature_type)

    output_ = spec.description.output.add()
    output_.name = prediction_name
    datatypes._set_datatype(output_.type, 'Double')
    return spec
def find_parent_id_for_component(self, component_id):
    """Given the URL to a component, return the parent component's
    type and URL.

    :param string component_id: The URL of the component.
    :return: tuple of (ArchivesSpaceClient.RESOURCE or
        .RESOURCE_COMPONENT, parent URL). If the provided URL
        references a resource itself, the same URL is returned.
    :rtype: tuple
    """
    response = self.get_record(component_id)
    if "parent" in response:
        return (ArchivesSpaceClient.RESOURCE_COMPONENT,
                response["parent"]["ref"])
    elif "resource" in response:
        return (ArchivesSpaceClient.RESOURCE, response["resource"]["ref"])
    else:
        # No parent info: the record is itself a resource.
        return (ArchivesSpaceClient.RESOURCE, component_id)
def _requires_refresh_token(self): expires_on = datetime.datetime.strptime( self.login_data['token']['expiresOn'], '%Y-%m-%dT%H:%M:%SZ') refresh = datetime.datetime.utcnow() + datetime.timedelta(seconds=30) return expires_on < refresh
Check if a refresh of the token is needed
def queryWorkitems(self, query_str, projectarea_id=None,
                   projectarea_name=None, returned_properties=None,
                   archived=False):
    """Query workitems with the query string in a certain project area.

    Pure delegation to ``self.query.queryWorkitems``; see that method for
    parameter details.

    :return: list of queried Workitem objects
    :rtype: list
    """
    return self.query.queryWorkitems(query_str=query_str,
                                     projectarea_id=projectarea_id,
                                     projectarea_name=projectarea_name,
                                     returned_properties=returned_properties,
                                     archived=archived)
Query workitems with the query string in a certain project area At least either of `projectarea_id` and `projectarea_name` is given :param query_str: a valid query string :param projectarea_id: the :class:`rtcclient.project_area.ProjectArea` id :param projectarea_name: the project area name :param returned_properties: the returned properties that you want. Refer to :class:`rtcclient.client.RTCClient` for more explanations :param archived: (default is False) whether the workitems are archived :return: a :class:`list` that contains the queried :class:`rtcclient.workitem.Workitem` objects :rtype: list
def delete(self, refobj):
    """Delete the given refobj and the contents of the entity.

    :param refobj: the refobj to delete
    :returns: None
    """
    # Dispatch to the type-specific interface first, then drop the refobj.
    interface = self.get_typ_interface(self.get_typ(refobj))
    interface.delete(refobj)
    self.delete_refobj(refobj)
Delete the given refobj and the contents of the entity :param refobj: the refobj to delete :type refobj: refobj :returns: None :rtype: None :raises: None
def restricted_to_files(self, filenames: List[str]
                        ) -> 'Spectra':
    """Return a variant of this spectra containing only entries for lines
    in files whose names appear in *filenames*."""
    wanted = set(filenames)
    passing = {fn: entries
               for fn, entries in self.__tally_passing.items()
               if fn in wanted}
    failing = {fn: entries
               for fn, entries in self.__tally_failing.items()
               if fn in wanted}
    return Spectra(self.__num_passing, self.__num_failing,
                   passing, failing)
Returns a variant of this spectra that only contains entries for lines that appear in any of the files whose name appear in the given list.
def add_campaign(self, name, device_filter, **kwargs):
    """Add a new update campaign with a name and device filter.

    :param str name: name of the update campaign
    :param device_filter: device filter; converted to the legacy filter
        wire format before being sent
    :param kwargs: extra Campaign fields (mapped through
        ``Campaign._create_request_map``); a ``when`` value is normalised
        to UTC
    :return: the newly created Campaign object
    """
    # Convert the caller's filter into the legacy formatted filter the
    # update service expects, using Device's attribute mapping.
    device_filter = filters.legacy_filter_formatter(
        dict(filter=device_filter),
        Device._get_attributes_map()
    )
    # Map user-facing kwargs onto API request field names.
    campaign = Campaign._create_request_map(kwargs)
    if 'when' in campaign:
        # Timestamps must be sent as UTC.
        campaign['when'] = force_utc(campaign['when'])
    body = UpdateCampaignPostRequest(
        name=name,
        device_filter=device_filter['filter'],
        **campaign)
    api = self._get_api(update_service.DefaultApi)
    return Campaign(api.update_campaign_create(body))
Add new update campaign. Add an update campaign with a name and device filtering. Example: .. code-block:: python device_api, update_api = DeviceDirectoryAPI(), UpdateAPI() # Get a filter to use for update campaign query_obj = device_api.get_query(query_id="MYID") # Create the campaign new_campaign = update_api.add_campaign( name="foo", device_filter=query_obj.filter ) :param str name: Name of the update campaign (Required) :param str device_filter: The device filter to use (Required) :param str manifest_id: ID of the manifest with description of the update :param str description: Description of the campaign :param int scheduled_at: The timestamp at which update campaign is scheduled to start :param str state: The state of the campaign. Values: "draft", "scheduled", "devicefetch", "devicecopy", "publishing", "deploying", "deployed", "manifestremoved", "expired" :return: newly created campaign object :rtype: Campaign
def try_set_count(self, count):
    """Set the latch count to *count* if the current count is zero.

    :param count: (int) number of count_down() calls required before
        waiters pass through await()
    :return: (bool) True if the new count was set
    """
    check_not_negative(count, "count can't be negative")
    return self._encode_invoke(count_down_latch_try_set_count_codec,
                               count=count)
Sets the count to the given value if the current count is zero. If count is not zero, this method does nothing and returns ``false``. :param count: (int), the number of times count_down() must be invoked before threads can pass through await(). :return: (bool), ``true`` if the new count was set, ``false`` if the current count is not zero.
def get_product_url(self, force_http=False):
    """Create the base url of the product location on AWS.

    :param force_http: use the HTTP base URL instead of the default one
    :return: url of the product location
    :rtype: str
    """
    base = self.base_http_url if force_http else self.base_url
    # Dates are stored as YYYY-MM-DD but appear as path segments in the URL.
    date_path = self.date.replace('-', '/')
    return '{}products/{}/{}'.format(base, date_path, self.product_id)
Creates base url of product location on AWS. :param force_http: True if HTTP base URL should be used and False otherwise :type force_http: str :return: url of product location :rtype: str
def _update_hasher(hasher, data):
    """Incrementally update *hasher* with (possibly nested) *data*.

    Containers (tuple/list/zip, and object-dtype numpy arrays when numpy
    is available) are walked element by element with separator/prefix
    bytes mixed in; scalar data is converted once via
    ``_covert_to_hashable``.

    :param hasher: a hashlib-style object with an ``update`` method
    :param data: the value to fold into the hash
    """
    if isinstance(data, (tuple, list, zip)):
        needs_iteration = True
    elif (util_type.HAVE_NUMPY and isinstance(data, np.ndarray) and
          data.dtype.kind == 'O'):
        # Object arrays hold arbitrary Python objects; walk them like lists.
        needs_iteration = True
    else:
        needs_iteration = False
    if needs_iteration:
        SEP = b'SEP'
        iter_prefix = b'ITER'
        iter_ = iter(data)
        hasher.update(iter_prefix)
        try:
            for item in iter_:
                # BUG FIX: the original converted `data` (the whole
                # container) on every iteration instead of `item`.
                prefix, hashable = _covert_to_hashable(item)
                binary_data = SEP + prefix + hashable
                hasher.update(binary_data)
        except TypeError:
            # `item` was not directly hashable: recurse into it, then
            # continue with the rest of the iterator recursively.
            _update_hasher(hasher, item)
            for item in iter_:
                hasher.update(SEP)
                _update_hasher(hasher, item)
    else:
        prefix, hashable = _covert_to_hashable(data)
        binary_data = prefix + hashable
        hasher.update(binary_data)
This is the clear winner over the generate version. Used by hash_data Ignore: import utool rng = np.random.RandomState(0) # str1 = rng.rand(0).dumps() str1 = b'SEP' str2 = rng.rand(10000).dumps() for timer in utool.Timerit(100, label='twocall'): hasher = hashlib.sha256() with timer: hasher.update(str1) hasher.update(str2) a = hasher.hexdigest() for timer in utool.Timerit(100, label='concat'): hasher = hashlib.sha256() with timer: hasher.update(str1 + str2) b = hasher.hexdigest() assert a == b # CONCLUSION: Faster to concat in case of prefixes and seps nested_data = {'1': [rng.rand(100), '2', '3'], '2': ['1', '2', '3', '4', '5'], '3': [('1', '2'), ('3', '4'), ('5', '6')]} data = list(nested_data.values()) for timer in utool.Timerit(1000, label='cat-generate'): hasher = hashlib.sha256() with timer: hasher.update(b''.join(_bytes_generator(data))) for timer in utool.Timerit(1000, label='inc-generate'): hasher = hashlib.sha256() with timer: for b in _bytes_generator(data): hasher.update(b) for timer in utool.Timerit(1000, label='inc-generate'): hasher = hashlib.sha256() with timer: for b in _bytes_generator(data): hasher.update(b) for timer in utool.Timerit(1000, label='chunk-inc-generate'): hasher = hashlib.sha256() import ubelt as ub with timer: for chunk in ub.chunks(_bytes_generator(data), 5): hasher.update(b''.join(chunk)) for timer in utool.Timerit(1000, label='inc-update'): hasher = hashlib.sha256() with timer: _update_hasher(hasher, data) data = ut.lorium_ipsum() hash_data(data) ut.hashstr27(data) %timeit hash_data(data) %timeit ut.hashstr27(repr(data)) for timer in utool.Timerit(100, label='twocall'): hasher = hashlib.sha256() with timer: hash_data(data) hasher = hashlib.sha256() hasher.update(memoryview(np.array([1]))) print(hasher.hexdigest()) hasher = hashlib.sha256() hasher.update(np.array(['1'], dtype=object)) print(hasher.hexdigest())
def safe_compare_digest(val1, val2):
    """Constant-time comparison of *val1* and *val2*.

    :param val1: string or bytes to compare
    :param val2: string or bytes to compare
    :return: True when both values are equal
    """
    if len(val1) != len(val2):
        return False
    # Accumulate differences bit-wise so timing does not depend on where
    # (or whether) the values differ.
    mismatch = 0
    if PY3 and isinstance(val1, bytes) and isinstance(val2, bytes):
        # Iterating bytes on Python 3 already yields ints.
        for a, b in zip(val1, val2):
            mismatch |= a ^ b
    else:
        for a, b in zip(val1, val2):
            mismatch |= (ord(a) ^ ord(b))
    return mismatch == 0
safe_compare_digest method. :param val1: string or bytes for compare :type val1: str | bytes :param val2: string or bytes for compare :type val2: str | bytes
def normnorm(self):
    """Return a unit-length vector normal (perpendicular) to this one.

    :return: V2
    """
    length = self.norm()
    # Rotate (x, y) by 90 degrees and scale to unit length.
    return V2(-self.y / length, self.x / length)
Return a vector normal to this one with a norm of one :return: V2
def get_default_base_name(self, viewset):
    """Determine the base name via the model's `get_url_name`, when present;
    otherwise defer to the default router behavior."""
    queryset = getattr(viewset, 'queryset', None)
    if queryset is not None:
        url_name_fn = getattr(queryset.model, 'get_url_name', None)
        if url_name_fn is not None:
            return url_name_fn()
    return super(SortedDefaultRouter, self).get_default_base_name(viewset)
Attempt to automatically determine base name using `get_url_name`.
def __step4(self):
    """Find a noncovered zero and prime it (appears to be step 4 of the
    Munkres assignment algorithm — TODO confirm against the full class).

    If the row of the primed zero has no starred zero, remember the
    primed zero's position and go to step 5.  Otherwise cover that row
    and uncover the starred zero's column, and repeat until no uncovered
    zeros remain, then go to step 6.

    :return: the number of the next step (5 or 6)
    """
    step = 0
    done = False
    row = -1
    col = -1
    star_col = -1
    while not done:
        (row, col) = self.__find_a_zero()
        if row < 0:
            # No uncovered zero left: proceed to step 6.
            done = True
            step = 6
        else:
            # Prime the zero (marked == 2 means "primed").
            self.marked[row][col] = 2
            star_col = self.__find_star_in_row(row)
            if star_col >= 0:
                # Row has a starred zero: cover the row, uncover the
                # starred zero's column, and keep scanning.
                col = star_col
                self.row_covered[row] = True
                self.col_covered[col] = False
            else:
                # No starred zero in this row: save the primed zero's
                # position for step 5.
                done = True
                self.Z0_r = row
                self.Z0_c = col
                step = 5
    return step
Find a noncovered zero and prime it. If there is no starred zero in the row containing this primed zero, Go to Step 5. Otherwise, cover this row and uncover the column containing the starred zero. Continue in this manner until there are no uncovered zeros left. Save the smallest uncovered value and Go to Step 6.
def _unregister_service(self):
    """Unregister the provided service, if one is currently registered."""
    if self._registration is None:
        return
    try:
        self._registration.unregister()
    except BundleException as ex:
        # Log but continue: the post-unregistration callback and the
        # state reset must still happen.
        logger = logging.getLogger(
            "-".join((self._ipopo_instance.name, "ServiceRegistration"))
        )
        logger.error("Error unregistering a service: %s", ex)
    self._ipopo_instance.safe_callback(
        ipopo_constants.IPOPO_CALLBACK_POST_UNREGISTRATION,
        self._svc_reference,
    )
    self._registration = None
    self._svc_reference = None
Unregisters the provided service, if needed
def backness(self, value):
    """Set the backness of the vowel.

    :param str value: the value to be set, or None to clear it
    :raises ValueError: if the value is not a recognized backness
    """
    # `value not in` is the idiomatic form of the original `not value in`.
    if (value is not None) and (value not in DG_V_BACKNESS):
        raise ValueError("Unrecognized value for backness: '%s'" % value)
    self.__backness = value
Set the backness of the vowel. :param str value: the value to be set
def _op(self, line, op=None, offset=0): if op is None: op = self.op_count[line] return "line{}_gate{}".format(line, op + offset)
Returns the gate name for placing a gate on a line. :param int line: Line number. :param int op: Operation number or, by default, uses the current op count. :return: Gate name. :rtype: string
def get_version():
    """Get the version number of this package.

    Tries a development-checkout ``.semver`` file first, then the
    installed distribution metadata; falls back to "Unknown".

    :return: the version string
    """
    if PackageHelper.__version:
        return PackageHelper.__version
    PackageHelper.__version = "Unknown"
    file = os.path.realpath(__file__)
    folder = os.path.dirname(file)
    try:
        # Development checkout: read the version from the `.semver` file.
        # `with` guarantees the file is closed even if reading fails
        # (the original leaked the handle on a read error).
        with open(folder + "/../../.semver", "r") as semver:
            PackageHelper.__version = semver.read().rstrip()
        return PackageHelper.__version
    except (IOError, OSError):
        # Narrowed from a bare `except:`; only file access may fail here.
        pass
    try:
        # Installed package: ask the distribution metadata.
        distribution = pkg_resources.get_distribution(PackageHelper.get_alias())
        if distribution.version:
            PackageHelper.__version = distribution.version
            return PackageHelper.__version
    except Exception:
        # Narrowed from a bare `except:`; still best-effort by design.
        pass
    return PackageHelper.__version
Get the version number of this package. Returns: str: The version number (major.minor.patch). Note: When this package is installed, the version number will be available through the package resource details. Otherwise this method will look for a ``.semver`` file. Note: In rare cases corrupt installs can cause the version number to be unknown. In this case the version number will be set to the string "Unknown".
def create(self, query_name, saved_query):
    """Create the saved query via a PUT to the Keen IO Saved Query endpoint.

    Master key must be set.

    :param query_name: name appended to the saved-query URL
    :param saved_query: query definition; serialized to JSON unless it is
        already a string
    :return: the parsed JSON response
    """
    url = "{0}/{1}".format(self.saved_query_url, query_name)
    if isinstance(saved_query, str):
        payload = saved_query
    else:
        payload = json.dumps(saved_query)
    return self._get_json(HTTPMethods.PUT, url, self._get_master_key(),
                          data=payload)
Creates the saved query via a PUT request to Keen IO Saved Query endpoint. Master key must be set.
def save(self, force_insert=False, force_update=False, using=None,
         update_fields=None):
    """Save the model; when this language is flagged as main, demote every
    other TransLanguage so only one main language exists at a time."""
    super().save(force_insert, force_update, using, update_fields)
    if not self.main_language:
        return
    TransLanguage.objects.exclude(pk=self.pk).update(main_language=False)
Overwrite of the save method in order that when setting the language as main we deactivate any other model selected as main before :param force_insert: :param force_update: :param using: :param update_fields: :return:
def get_outputs(self, input_value):
    """Generate a set of output values for a given input.

    Every declared output name maps to the XML-RPC converted input value.
    """
    converted = self.convert_to_xmlrpc(input_value)
    return {name: converted for name in self.output_names}
Generate a set of output values for a given input.
def which(self):
    """Figure out which binary this will execute.

    Returns None when no binary is configured or it cannot be found.
    """
    binary = self.binary
    return None if binary is None else which(binary, path=self.env_path)
Figure out which binary this will execute. Returns None if the binary is not found.
def get_access_control_function():
    """Return a predicate deciding whether a user may access the Rosetta views.

    Uses the dotted path in ``settings.ROSETTA_ACCESS_CONTROL_FUNCTION``
    when set; otherwise the default superuser/staff/translators check.
    """
    dotted_path = getattr(settings, 'ROSETTA_ACCESS_CONTROL_FUNCTION', None)
    if dotted_path is None:
        return is_superuser_staff_or_in_translators_group
    module_path, func_name = dotted_path.rsplit('.', 1)
    module = importlib.import_module(module_path)
    return getattr(module, func_name)
Return a predicate for determining if a user can access the Rosetta views
def tag_dssp_solvent_accessibility(self, force=False):
    """Tag each monomer with its DSSP solvent accessibility.

    Runs DSSP on ``self.pdb`` and stores the accessibility value under
    the ``'dssp_acc'`` tag of each monomer.

    :param bool force: re-run tagging even if all monomers already carry
        a ``'dssp_acc'`` tag
    """
    tagged = ['dssp_acc' in x.tags.keys() for x in self._monomers]
    # Skip the (expensive) external DSSP run when everything is tagged,
    # unless the caller forces a refresh.
    if (not all(tagged)) or force:
        dssp_out = run_dssp(self.pdb, path=False)
        if dssp_out is None:
            # DSSP failed or produced nothing; leave tags untouched.
            return
        dssp_acc_list = extract_solvent_accessibility_dssp(
            dssp_out, path=False)
        for monomer, dssp_acc in zip(self._monomers, dssp_acc_list):
            # The accessibility value is the last element of each entry.
            monomer.tags['dssp_acc'] = dssp_acc[-1]
    return
Tags each `Residues` Polymer with its solvent accessibility. Notes ----- For more about DSSP's solvent accessibilty metric, see: http://swift.cmbi.ru.nl/gv/dssp/HTML/descrip.html#ACC References ---------- .. [1] Kabsch W, Sander C (1983) "Dictionary of protein secondary structure: pattern recognition of hydrogen-bonded and geometrical features", Biopolymers, 22, 2577-637. Parameters ---------- force : bool, optional If `True` the tag will be run even if `Residues` are already tagged.
def alloc_vpcid(nexus_ips):
    """Allocate a vpc id for the given list of switch ips.

    Tries each vpc id that is free on all listed switches and claims the
    first one it can reserve; returns 0 when none could be claimed.

    :param nexus_ips: list of switch ip addresses
    :return: the allocated vpc id, or 0 on failure
    """
    LOG.debug("alloc_vpc() called")
    vpc_id = 0
    # Candidate ids free on every switch in the list.
    intersect = _get_free_vpcids_on_switches(nexus_ips)
    for intersect_tuple in intersect:
        try:
            # Attempt to reserve this id; another controller may win the
            # race, in which case we simply try the next candidate.
            update_vpc_entry(nexus_ips, intersect_tuple.vpc_id, False, True)
            vpc_id = intersect_tuple.vpc_id
            break
        except Exception:
            LOG.exception(
                "This exception is expected if another controller "
                "beat us to vpcid %(vpcid)s for nexus %(ip)s",
                {'vpcid': intersect_tuple.vpc_id,
                 'ip': ', '.join(map(str, nexus_ips))})
    return vpc_id
Allocate a vpc id for the given list of switch_ips.
def temperature_data_from_csv(
    filepath_or_buffer,
    tz=None,
    date_col="dt",
    temp_col="tempF",
    gzipped=False,
    freq=None,
    **kwargs
):
    """Load temperature data from a CSV file.

    :param filepath_or_buffer: file path or file-like object
    :param tz: timezone name (defaults to ``'UTC'``)
    :param date_col: name of the date column (becomes the index)
    :param temp_col: name of the temperature column
    :param gzipped: whether the file is gzip-compressed
    :param freq: if ``'hourly'``, resample the data hourly (summed)
    :param kwargs: extra keyword arguments forwarded to ``pandas.read_csv``
    :return: temperature series indexed by timezone-aware timestamps
    """
    csv_options = {
        "usecols": [date_col, temp_col],
        "dtype": {temp_col: np.float64},
        "parse_dates": [date_col],
        "index_col": date_col,
    }
    if gzipped:
        csv_options["compression"] = "gzip"
    # Caller-supplied options win over the defaults above.
    csv_options.update(kwargs)
    if tz is None:
        tz = "UTC"
    df = pd.read_csv(filepath_or_buffer, **csv_options).tz_localize(tz)
    if freq == "hourly":
        df = df.resample("H").sum()
    return df[temp_col]
Load temperature data from a CSV file. Default format:: dt,tempF 2017-01-01T00:00:00+00:00,21 2017-01-01T01:00:00+00:00,22.5 2017-01-01T02:00:00+00:00,23.5 Parameters ---------- filepath_or_buffer : :any:`str` or file-handle File path or object. tz : :any:`str`, optional E.g., ``'UTC'`` or ``'US/Pacific'`` date_col : :any:`str`, optional, default ``'dt'`` Date period start column. temp_col : :any:`str`, optional, default ``'tempF'`` Temperature column. gzipped : :any:`bool`, optional Whether file is gzipped. freq : :any:`str`, optional If given, apply frequency to data using :any:`pandas.Series.resample`. **kwargs Extra keyword arguments to pass to :any:`pandas.read_csv`, such as ``sep='|'``.
def get_task_module(feature):
    """Return the imported task module of *feature*.

    Imports the feature itself (raising FeatureNotFound when that fails),
    then looks for ``<feature>.apetasks`` and ``<feature>.tasks`` in that
    order; the last importable one wins.

    :param feature: name of the feature to fetch the task module for
    :raises: FeatureNotFound if the feature itself cannot be imported
    :return: imported task module, or None when neither submodule exists
    """
    try:
        importlib.import_module(feature)
    except ImportError:
        raise FeatureNotFound(feature)
    module = None
    for suffix in ('.apetasks', '.tasks'):
        try:
            module = importlib.import_module(feature + suffix)
        except ImportError:
            pass
    return module
Return imported task module of feature. This function first tries to import the feature and raises FeatureNotFound if that is not possible. Thereafter, it looks for a submodules called ``apetasks`` and ``tasks`` in that order. If such a submodule exists, it is imported and returned. :param feature: name of feature to fet task module for. :raises: FeatureNotFound if feature_module could not be imported. :return: imported module containing the ape tasks of feature or None, if module cannot be imported.
def save_html(out_file, plot_html):
    """Save html plots to an output file.

    :param out_file: output path string, or an already-open writable
        text-mode file object (left open for the caller)
    :param plot_html: object with a ``.data`` attribute holding the plot
        HTML fragment
    """
    internal_open = False
    # `isinstance` is the idiomatic type test (original used `type(...) ==`).
    if isinstance(out_file, str):
        out_file = open(out_file, "w")
        internal_open = True
    try:
        out_file.write("<html><head><script>\n")
        # Inline the bundled javascript so the output is self-contained.
        bundle_path = os.path.join(os.path.split(__file__)[0],
                                   "resources", "bundle.js")
        with io.open(bundle_path, encoding="utf-8") as f:
            bundle_data = f.read()
        out_file.write(bundle_data)
        out_file.write("</script></head><body>\n")
        out_file.write(plot_html.data)
        out_file.write("</body></html>\n")
    finally:
        # Close only files we opened ourselves, even when a write fails
        # (the original leaked the handle on error).
        if internal_open:
            out_file.close()
Save html plots to an output file.
def remove(name, rc_file='~/.odoorpcrc'):
    """Remove the session configuration identified by `name` from the
    `rc_file` file.

    :param name: name of the session section to remove
    :param rc_file: path of the configuration file (``~`` is expanded)
    :raise: `ValueError` when no session named `name` exists
    """
    conf = ConfigParser()
    conf.read([os.path.expanduser(rc_file)])
    if not conf.has_section(name):
        raise ValueError(
            "'%s' session does not exist in %s" % (name, rc_file))
    conf.remove_section(name)
    # ConfigParser.write() needs a text-mode stream on Python 3; the
    # original opened the file in 'wb', which raises TypeError.
    with open(os.path.expanduser(rc_file), 'w') as file_:
        conf.write(file_)
Remove the session configuration identified by `name` from the `rc_file` file. >>> import odoorpc >>> odoorpc.session.remove('foo') # doctest: +SKIP .. doctest:: :hide: >>> import odoorpc >>> session = '%s_session' % DB >>> odoorpc.session.remove(session) :raise: `ValueError` (wrong session name)
def results_history(history_log, no_color):
    """Display a list of ipa test results history."""
    try:
        with open(history_log, 'r') as log_file:
            lines = log_file.readlines()
    except Exception as error:
        echo_style(
            'Unable to process results history log: %s' % error,
            no_color,
            fg='red'
        )
        sys.exit(1)
    # Most recent entries get the highest index, counting down to 1.
    total = len(lines)
    for position, item in enumerate(lines):
        click.echo('{} {}'.format(total - position, item), nl=False)
Display a list of ipa test results history.
def to_bytes(self):
    """Pack the list of IPOption objects into a byte string, zero-padded
    to a 32-bit boundary as RFC 791 requires.

    :return: packed options, or ``b''`` when there are no options
    """
    raw = b''
    if not self._options:
        return raw
    for ipopt in self._options:
        raw += ipopt.to_bytes()
    # Pad only when not already aligned.  The original computed
    # `4 - (len(raw) % 4)`, which appends a full extra zero word when
    # the options already end on a 32-bit boundary.
    padbytes = -len(raw) % 4
    raw += b'\x00' * padbytes
    return raw
Takes a list of IPOption objects and returns a packed byte string of options, appropriately padded if necessary.
def changed(self, message=None, *args):
    """Mark the object as changed.

    The optional message is debug logged.  When a `parent` attribute is
    set, the parent's `changed()` is called, propagating the notification
    up the chain; otherwise, for Mutable instances, the superclass is
    notified.
    """
    if message is not None:
        self.logger.debug('%s: %s', self._repr(), message % args)
    self.logger.debug('%s: changed', self._repr())
    parent = self.parent
    if parent is not None:
        parent.changed()
    elif isinstance(self, Mutable):
        super(TrackedObject, self).changed()
Marks the object as changed. If a `parent` attribute is set, the `changed()` method on the parent will be called, propagating the change notification up the chain. The message (if provided) will be debug logged.