code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def httpapi_request(client, **params) -> 'Response':
    """Send a request to the AniDB HTTP API.

    https://wiki.anidb.net/w/HTTP_API_Definition
    """
    query = {'client': client.name, 'clientver': client.version, 'protover': 1}
    query.update(params)
    return requests.get(_HTTPAPI, params=query)
Send a request to AniDB HTTP API. https://wiki.anidb.net/w/HTTP_API_Definition
def update_glances(self):
    """Fetch stats from the Glances server.

    Returns the client/server connection status string:
    "Connected" on success, "Disconnected" on socket or XML-RPC fault.
    """
    try:
        server_stats = json.loads(self.client.getAll())
    except (socket.error, Fault):
        return "Disconnected"
    self.stats.update(server_stats)
    return "Connected"
Get stats from Glances server. Return the client/server connection status: - Connected: Connection OK - Disconnected: Connection NOK
def findViewsWithAttribute(self, attr, val, root="ROOT"):
    """Find the Views with the specified attribute and value.

    Usage: buttons = v.findViewsWithAttribute("class", "android.widget.Button")
    """
    # Delegate to the tree-walking implementation.
    return self.__findViewsWithAttributeInTree(attr, val, root)
Finds the Views with the specified attribute and value. This allows you to see all items that match your criteria in the view hierarchy Usage: buttons = v.findViewsWithAttribute("class", "android.widget.Button")
def match(self, dom, act):
    """Check if the given domain and action are both allowed by this capability."""
    domain_allowed = self.match_domain(dom)
    return domain_allowed and self.match_action(act)
Check if the given `domain` and `act` are allowed by this capability
def parse_time(time):
    """Convert a time string like '5m' or '2h' to seconds.

    Prints an error and exits on an unknown unit or non-numeric value.
    """
    seconds_per_unit = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}
    unit = time[-1]
    if unit not in seconds_per_unit:
        print_error('the unit of time could only from {s, m, h, d}')
        exit(1)
    value = time[:-1]
    if not value.isdigit():
        print_error('time format error!')
        exit(1)
    return int(value) * seconds_per_unit[unit]
Change the time to seconds
def sampleLocation(self):
    """Sample uniformly from the cylinder, choosing disc vs. side by area ratio."""
    disc_probability = self.radius / (self.radius + self.height)
    if random.random() < disc_probability:
        return self._sampleLocationOnDisc()
    return self._sampleLocationOnSide()
Simple method to sample uniformly from a cylinder.
def output_channels(self):
    """Return the number of output channels, resolving a lazy callable once."""
    channels = self._output_channels
    if callable(channels):
        channels = channels()
    # Cache the normalized int so subsequent calls are cheap.
    self._output_channels = int(channels)
    return self._output_channels
Returns the number of output channels.
def edit_ticket_links(self, ticket_id, **kwargs):
    """Edit ticket links (deprecated in favour of edit_link due to an RT 3.8 bug).

    Returns True if the REST response reports the links as updated.
    """
    post_data = ''.join('{}: {}\n'.format(key, str(value))
                        for key, value in kwargs.items())
    msg = self.__request('ticket/{}/links'.format(str(ticket_id)),
                         post_data={'content': post_data})
    state = msg.split('\n')[2]
    return self.RE_PATTERNS['links_updated_pattern'].match(state) is not None
Edit ticket links. .. warning:: This method is deprecated in favour of edit_link method, because there exists bug in RT 3.8 REST API causing mapping created links to ticket/1. The only drawback is that edit_link cannot process multiple links all at once. :param ticket_id: ID of ticket to edit :keyword kwargs: Other arguments possible to set: DependsOn, DependedOnBy, RefersTo, ReferredToBy, Members, MemberOf. Each value should be either ticker ID or external link. Int types are converted. Use empty string as value to delete existing link. :returns: ``True`` Operation was successful ``False`` Ticket with given ID does not exist or unknown parameter was set (in this case all other valid fields are changed)
def to_dict(self):
    """Return the stats as a dictionary."""
    return {key: getattr(self, key)
            for key in ('mean', 'var', 'min', 'max', 'num')}
Return the stats as a dictionary.
def get_duration(self, matrix_name):
    """Return the total duration (seconds) of the named matrix, 0.0 if unknown."""
    if matrix_name not in self.data:
        return 0.0
    return sum(stage.duration() for stage in self.data[matrix_name])
Get duration for a concrete matrix. Args: matrix_name (str): name of the Matrix. Returns: float: duration of concrete matrix in seconds.
def move(self, folder):
    """Move this message to *folder* (Folder object or folder id string).

    Returns True on success, False on a failed POST; raises RuntimeError for
    an unsaved message or an invalid folder argument.
    """
    if self.object_id is None:
        raise RuntimeError('Attempting to move an unsaved Message')
    url = self.build_url(
        self._endpoints.get('move_message').format(id=self.object_id))
    folder_id = folder if isinstance(folder, str) else getattr(folder, 'folder_id', None)
    if not folder_id:
        raise RuntimeError('Must Provide a valid folder_id')
    response = self.con.post(url, data={self._cc('destinationId'): folder_id})
    if not response:
        return False
    self.folder_id = folder_id
    return True
Move the message to a given folder :param folder: Folder object or Folder id or Well-known name to move this message to :type folder: str or mailbox.Folder :return: Success / Failure :rtype: bool
def _decode(image_data):
    """Internal helper to decode a single Image or an SArray of Images."""
    from ...data_structures.sarray import SArray as _SArray
    from ... import extensions as _extensions
    # Exact-type checks (not isinstance) preserved from the original dispatch.
    if type(image_data) is _SArray:
        return _extensions.decode_image_sarray(image_data)
    if type(image_data) is _Image:
        return _extensions.decode_image(image_data)
Internal helper function for decoding a single Image or an SArray of Images
def interval_tree(intervals):
    """Construct an interval tree from sorted half-open [left, right) intervals.

    Returns the root _Node, or None for an empty list. O(n) assuming the
    input is lexicographically sorted.
    """
    if intervals == []:
        return None
    center = intervals[len(intervals) // 2][0]
    left_ivs, right_ivs, center_ivs = [], [], []
    for iv in intervals:
        if iv[1] <= center:
            left_ivs.append(iv)
        elif center < iv[0]:
            right_ivs.append(iv)
        else:
            center_ivs.append(iv)
    by_low = sorted((iv[0], iv) for iv in center_ivs)
    by_high = sorted((iv[1], iv) for iv in center_ivs)
    return _Node(center, by_low, by_high,
                 interval_tree(left_ivs), interval_tree(right_ivs))
Construct an interval tree :param intervals: list of half-open intervals encoded as value pairs *[left, right)* :assumes: intervals are lexicographically ordered ``>>> assert intervals == sorted(intervals)`` :returns: the root of the interval tree :complexity: :math:`O(n)`
def struct_member_error(err, sid, name, offset, size):
    """Create and format a struct-member exception from STRUCT_ERROR_MAP."""
    exception_cls, msg = STRUCT_ERROR_MAP[err]
    struct_name = idc.GetStrucName(sid)
    description = ('AddStructMember(struct="{}", member="{}", offset={}, size={}) '
                   'failed: {}').format(struct_name, name, offset, size, msg)
    return exception_cls(description)
Create and format a struct member exception. Args: err: The error value returned from struct member creation sid: The struct id name: The member name offset: Member offset size: Member size Returns: A ``SarkErrorAddStructMemeberFailed`` derivative exception, with an informative message.
def getTicker(pair, connection=None, info=None):
    """Retrieve the ticker for the given pair as a Ticker instance.

    Prints a message and returns None if the API reports an error; raises
    TypeError when the response is not a dict.
    """
    if info is not None:
        info.validate_pair(pair)
    if connection is None:
        connection = common.BTCEConnection()
    response = connection.makeJSONRequest("/api/3/ticker/%s" % pair)
    if type(response) is not dict:
        raise TypeError("The response is a %r, not a dict." % type(response))
    if u'error' in response:
        print("There is a error \"%s\" while obtaining ticker %s"
              % (response['error'], pair))
        return None
    return Ticker(**response[pair])
Retrieve the ticker for the given pair. Returns a Ticker instance.
def list(path, ext=None, start=None, stop=None, recursive=False):
    """Get a sorted list of file paths matching *path* and extension *ext*."""
    if recursive:
        files = listrecursive(path, ext)
    else:
        files = listflat(path, ext)
    if not files:
        raise FileNotFoundError('Cannot find files of type "%s" in %s'
                                % (ext if ext else '*', path))
    return select(files, start, stop)
Get sorted list of file paths matching path and extension
def check_future(self, fut):
    """Record completion of *fut*; stamp done_since on the not-done -> done edge."""
    done = fut.done()
    self.done = done
    if done and not self.prev_done:
        self.done_since = self.ioloop.time()
    self.prev_done = done
Call with each future that is to be yielded on
def get_stoch(self, symbol, interval='daily', fastkperiod=None, slowkperiod=None,
              slowdperiod=None, slowkmatype=None, slowdmatype=None):
    """Return the STOCH function key plus the JSON data and metadata section names.

    The parameters mirror the Alpha Vantage STOCH endpoint; they are consumed
    by the decorator/wrapper machinery, not by this body.
    """
    return "STOCH", 'Technical Analysis: STOCH', 'Meta Data'
Return the stochatic oscillator values in two json objects as data and meta_data. It raises ValueError when problems arise Keyword Arguments: symbol: the symbol for the equity we want to get its data interval: time interval between two conscutive values, supported values are '1min', '5min', '15min', '30min', '60min', 'daily', 'weekly', 'monthly' (default 'daily') fastkperiod: The time period of the fastk moving average. Positive integers are accepted (default=None) slowkperiod: The time period of the slowk moving average. Positive integers are accepted (default=None) slowdperiod: The time period of the slowd moving average. Positive integers are accepted (default=None) slowkmatype: Moving average type for the slowk moving average. By default, fastmatype=0. Integers 0 - 8 are accepted (check down the mappings) or the string containing the math type can also be used. slowdmatype: Moving average type for the slowd moving average. By default, slowmatype=0. Integers 0 - 8 are accepted (check down the mappings) or the string containing the math type can also be used. * 0 = Simple Moving Average (SMA), * 1 = Exponential Moving Average (EMA), * 2 = Weighted Moving Average (WMA), * 3 = Double Exponential Moving Average (DEMA), * 4 = Triple Exponential Moving Average (TEMA), * 5 = Triangular Moving Average (TRIMA), * 6 = T3 Moving Average, * 7 = Kaufman Adaptive Moving Average (KAMA), * 8 = MESA Adaptive Moving Average (MAMA)
def thumbnail(parser, token):
    """Template tag supporting both sorl and easy-thumbnails declaration syntaxes."""
    thumb = None
    if SORL:
        try:
            thumb = sorl_thumb(parser, token)
        except Exception:
            # Mark sorl as attempted-and-failed so easy-thumbnails is tried.
            thumb = False
    if EASY and not thumb:
        thumb = easy_thumb(parser, token)
    return thumb
This template tag supports both syntaxes for declaring a thumbnail in a template
def context_menu_requested(self, event):
    """Popup the figure context menu at the event position."""
    if not self.fig:
        return
    pos = QPoint(event.x(), event.y())
    menu = QMenu(self)
    menu.addAction(ima.icon('editcopy'), "Copy Image", self.copy_figure,
                   QKeySequence(get_shortcut('plots', 'copy')))
    menu.popup(self.mapToGlobal(pos))
Popup context menu.
async def spawn_slaves(self, slave_addrs, slave_env_cls, slave_mgr_cls,
                       slave_kwargs=None):
    """Spawn slave environments.

    :param slave_addrs: list of (HOST, PORT) addresses for the slaves
    :param slave_env_cls: class for the slave environments
    :param slave_mgr_cls: class of the slave environment managers
    :param slave_kwargs: optional per-slave parameter dicts (same length as addrs)
    """
    self._pool, self._r = spawn_containers(slave_addrs,
                                           env_cls=slave_env_cls,
                                           env_params=slave_kwargs,
                                           mgr_cls=slave_mgr_cls)
    self._manager_addrs = ["{}{}".format(_get_base_url(addr), 0)
                           for addr in slave_addrs]
Spawn slave environments. :param slave_addrs: List of (HOST, PORT) addresses for the slave-environments. :param slave_env_cls: Class for the slave environments. :param slave_kwargs: If not None, must be a list of the same size as *addrs*. Each item in the list containing parameter values for one slave environment. :param slave_mgr_cls: Class of the slave environment managers.
def get_fit_failed_candidate_model(model_type, formula):
    """Return a CalTRACK candidate model with status 'ERROR' and a traceback warning.

    :param model_type: model type string (e.g. 'cdd_hdd')
    :param formula: the candidate model formula
    """
    warning = EEMeterWarning(
        qualified_name="eemeter.caltrack_daily.{}.model_results".format(model_type),
        description=(
            "Error encountered in statsmodels.formula.api.ols method. (Empty data?)"
        ),
        data={"traceback": traceback.format_exc()},
    )
    return CalTRACKUsagePerDayCandidateModel(
        model_type=model_type, formula=formula, status="ERROR", warnings=[warning]
    )
Return a Candidate model that indicates the fitting routine failed. Parameters ---------- model_type : :any:`str` Model type (e.g., ``'cdd_hdd'``). formula : :any:`float` The candidate model formula. Returns ------- candidate_model : :any:`eemeter.CalTRACKUsagePerDayCandidateModel` Candidate model instance with status ``'ERROR'``, and warning with traceback.
def by_prefix(self, prefix, zipcode_type=ZipcodeType.Standard,
              sort_by=SimpleZipcode.zipcode.name, ascending=True,
              returns=DEFAULT_LIMIT):
    """Search zipcode information by the first N digits; returns multiple results."""
    return self.query(prefix=prefix,
                      sort_by=sort_by,
                      zipcode_type=zipcode_type,
                      ascending=ascending,
                      returns=returns)
Search zipcode information by first N digits. Returns multiple results.
def search_image(name=None, path=['.']):
    """Look up the real path of image *name* under the *path* directories.

    Returns a system-encoded path string, or None when not found.
    NOTE: mutable default `path=['.']` kept for interface compatibility;
    it is never mutated here.
    """
    name = strutils.decode(name)
    for image_dir in path:
        if not os.path.isdir(image_dir):
            continue
        image_dir = strutils.decode(image_dir)
        candidate = os.path.join(image_dir, name)
        if os.path.isfile(candidate):
            return strutils.encode(candidate)
        for image_path in list_all_image(image_dir):
            if image_name_match(name, image_path):
                return strutils.encode(image_path)
    return None
Look for the image's real path; if name is None, then return all images under path. @return system encoded path string FIXME(ssx): this code is just looking weird.
def command(self, name=None):
    """A decorator to register a function as a subcommand."""
    def decorator(func):
        self.add_command(func, name)
        return func
    return decorator
A decorator to add subcommands.
def setup_step_out(self, frame):
    """Configure the debugger state for a "stepOut" from *frame*."""
    self.frame_calling = None
    self.frame_stop = None
    # Stop when control returns to the caller of the current frame.
    self.frame_return = frame.f_back
    self.frame_suspend = False
    self.pending_stop = True
Setup debugger for a "stepOut"
def get_command(arguments):
    """Extract the single active command from docopt *arguments*.

    Raises Exception unless exactly one non-option, non-positional key is truthy.
    """
    cmds = [key for key in arguments
            if not key.startswith(('-', '<')) and arguments[key]]
    if len(cmds) != 1:
        raise Exception('invalid command line!')
    return cmds[0]
Utility function to extract command from docopt arguments. :param arguments: :return: command
def filenames(self):
    """Return just the file names of the non-empty source paths."""
    names = []
    for source in self.sources:
        if source:
            names.append(os.path.basename(source))
    return names
Assuming sources are paths to VCF or MAF files, trim their directory path and return just the file names.
def get_config_var_data(self, index, offset):
    """Return [error_code, chunk] with up to 16 bytes of a config variable.

    Index is 1-based; invalid index/offset yields INVALID_ARRAY_KEY, an
    invalidated entry yields OBSOLETE_ENTRY.
    """
    if index == 0 or index > len(self.config_database.entries):
        return [Error.INVALID_ARRAY_KEY, b'']
    entry = self.config_database.entries[index - 1]
    if not entry.valid:
        return [ConfigDatabaseError.OBSOLETE_ENTRY, b'']
    if offset >= len(entry.data):
        return [Error.INVALID_ARRAY_KEY, b'']
    return [Error.NO_ERROR, entry.data[offset:offset + 16]]
Get a chunk of data for a config variable.
def expect_file_hash_to_equal(self, value, hash_alg='md5', result_format=None,
                              include_config=False, catch_exceptions=None,
                              meta=None):
    """Expect the computed hash of the file at self._path to equal *value*.

    Returns a dict with a single "success" key.
    NOTE(review): reconstructed from a one-line source — the outer try appears
    to guard hashlib.new (ValueError for unknown algorithms) and the inner try
    the file I/O (IOError); both handlers simply re-raise, so they are no-ops.
    Confirm the original nesting before relying on this layout.
    """
    success = False
    try:
        hash = hashlib.new(hash_alg)
        BLOCKSIZE = 65536  # read the file in 64 KiB chunks
        try:
            with open(self._path, 'rb') as file:
                file_buffer = file.read(BLOCKSIZE)
                while file_buffer:
                    hash.update(file_buffer)
                    file_buffer = file.read(BLOCKSIZE)
                success = hash.hexdigest() == value
        except IOError:
            raise
    except ValueError:
        raise
    return {"success": success}
Expect computed file hash to equal some given value. Args: value: A string to compare with the computed hash value Keyword Args: hash_alg (string): Indicates the hash algorithm to use result_format (str or None): \ Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. For more detail, see :ref:`result_format <result_format>`. include_config (boolean): \ If True, then include the expectation config as part of the result object. For more detail, see :ref:`include_config`. catch_exceptions (boolean or None): \ If True, then catch exceptions and include them as part of the result object. \ For more detail, see :ref:`catch_exceptions`. meta (dict or None): \ A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. For more detail, see :ref:`meta`. Returns: A JSON-serializable expectation result object. Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
def relocate_instance(part, target_parent, name=None, include_children=True):
    """Move the `Part` instance under *target_parent*, relocating its model first.

    :param part: Part instance to move
    :param target_parent: Part instance to move under
    :param name: name for the moved top-level Part (defaults to "CLONE - <name>")
    :param include_children: also move descendants when True
    :return: the moved Part instance
    """
    if not name:
        name = "CLONE - {}".format(part.name)
    part_model = part.model()
    relocate_model(part=part_model,
                   target_parent=target_parent.model(),
                   name=part_model.name,
                   include_children=include_children)
    if include_children:
        part.populate_descendants()
    return move_part_instance(part_instance=part,
                              target_parent=target_parent,
                              part_model=part_model,
                              name=name,
                              include_children=include_children)
Move the `Part` instance to target parent. .. versionadded:: 2.3 :param part: `Part` object to be moved :type part: :class:`Part` :param target_parent: `Part` object under which the desired `Part` is moved :type target_parent: :class:`Part` :param name: how the moved top-level `Part` should be called :type name: basestring :param include_children: True to move also the descendants of `Part`. If False, the children will be lost. :type include_children: bool :return: moved :class: `Part` instance
def app_start_service(self, *args) -> None:
    """Start a service via `adb shell am startservice`; raise on an Error reply."""
    output = self._execute('-s', self.device_sn, 'shell', 'am', 'startservice', *args)
    error = output[1]
    if error and error.startswith('Error'):
        raise ApplicationsException(error.split(':', 1)[-1].strip())
Start a service.
def create_ini(self, board, project_dir='', sayyes=False):
    """Create a new apio project file, prompting before overwrite unless sayyes."""
    project_dir = util.check_dir(project_dir)
    ini_path = util.safe_join(project_dir, PROJECT_FILENAME)
    if board not in Resources().boards:
        click.secho('Error: no such board \'{}\''.format(board), fg='red')
        sys.exit(1)
    if not isfile(ini_path):
        self._create_ini_file(board, ini_path, PROJECT_FILENAME)
        return
    if sayyes:
        self._create_ini_file(board, ini_path, PROJECT_FILENAME)
        return
    click.secho('Warning: {} file already exists'.format(PROJECT_FILENAME),
                fg='yellow')
    if click.confirm('Do you want to replace it?'):
        self._create_ini_file(board, ini_path, PROJECT_FILENAME)
    else:
        click.secho('Abort!', fg='red')
Creates a new apio project file
def emit(signal, *args, **kwargs):
    """Emit a signal by serially calling each live registered receiver.

    Receivers must accept the *args/**kwargs passed through unchanged.
    """
    if signal not in __receivers:
        return
    for receiver in __live_receivers(signal):
        receiver(*args, **kwargs)
Emit a signal by serially calling each registered signal receiver for the `signal`. Note: The receiver must accept the *args and/or **kwargs that have been passed to it. There expected parameters are not dictated by mixbox. Args: signal: A signal identifier or name. *args: A variable-length argument list to pass to the receiver. **kwargs: Keyword-arguments to pass to the receiver.
def check_failhard(self, low, running):
    """Return True when the low data chunk should send a failhard signal."""
    tag = _gen_tag(low)
    if self.opts.get('test', False):
        return False
    if not low.get('failhard', self.opts['failhard']) or tag not in running:
        return False
    result = running[tag]['result']
    if result is None:
        return False
    return not result
Check if the low data chunk should send a failhard signal
def _convert(lines): def parse(line): line = line.replace("from PySide2 import", "from Qt import QtCompat,") line = line.replace("QtWidgets.QApplication.translate", "QtCompat.translate") if "QtCore.SIGNAL" in line: raise NotImplementedError("QtCore.SIGNAL is missing from PyQt5 " "and so Qt.py does not support it: you " "should avoid defining signals inside " "your ui files.") return line parsed = list() for line in lines: line = parse(line) parsed.append(line) return parsed
Convert compiled .ui file from PySide2 to Qt.py Arguments: lines (list): Each line of of .ui file Usage: >> with open("myui.py") as f: .. lines = _convert(f.readlines())
def map_resnum_a_to_resnum_b(resnums, a_aln, b_aln):
    """Map residue numbers from aligned sequence A to aligned sequence B.

    Returns a dict {resnum_a: resnum_b}; unmappable residues are logged.
    """
    resnums = ssbio.utils.force_list(resnums)
    aln_df = get_alignment_df(a_aln, b_aln)
    hits = aln_df[aln_df.id_a_pos.isin(resnums)]
    mapped = hits[pd.notnull(hits.id_b_pos)]
    mapping = {int(a): int(b) for a, b in zip(mapped.id_a_pos, mapped.id_b_pos)}
    unmapped = list(set(resnums).difference(mapped.id_a_pos.values.tolist()))
    if len(unmapped) > 0:
        log.warning('Unable to map residue numbers {} in first sequence to second'.format(unmapped))
    return mapping
Map a residue number in a sequence to the corresponding residue number in an aligned sequence. Examples: >>> map_resnum_a_to_resnum_b([1,2,3], '--ABCDEF', 'XXABCDEF') {1: 3, 2: 4, 3: 5} >>> map_resnum_a_to_resnum_b(5, '--ABCDEF', 'XXABCDEF') {5: 7} >>> map_resnum_a_to_resnum_b(5, 'ABCDEF', 'ABCD--') {} >>> map_resnum_a_to_resnum_b(5, 'ABCDEF--', 'ABCD--GH') {} >>> map_resnum_a_to_resnum_b([9,10], '--MKCDLHRLE-E', 'VSNEYSFEGYKLD') {9: 11, 10: 13} Args: resnums (int, list): Residue number or numbers in the first aligned sequence a_aln (str, Seq, SeqRecord): Aligned sequence string b_aln (str, Seq, SeqRecord): Aligned sequence string Returns: int: Residue number in the second aligned sequence
def translate_message_tokens(message_tokens):
    """Translate alias tokens to their defined values.

    The first token is a channel alias; the rest are value aliases.
    Unknown tokens are parsed as ints.
    """
    channel_aliases = cv_dict[channels_key]
    value_aliases = cv_dict[values_key]
    first = message_tokens[0]
    trans_tokens = [channel_aliases[first] if first in channel_aliases
                    else int(first)]
    for token in message_tokens[1:]:
        if token in value_aliases:
            trans_tokens.extend(value_aliases[token])
        else:
            trans_tokens.append(int(token))
    return trans_tokens
Translates alias references to their defined values. The first token is a channel alias. The remaining tokens are value aliases.
def get_mouse_pos(self, window_pos=None):
    """Return a MousePos (world point, surface) for the surface under the cursor.

    Implicitly returns None when no non-chrome surface contains the point.
    """
    window_pos = window_pos or pygame.mouse.get_pos()
    window_pt = point.Point(*window_pos) + 0.5
    for surf in reversed(self._surfaces):
        if surf.surf_type == SurfType.CHROME:
            continue
        if not surf.surf_rect.contains_point(window_pt):
            continue
        surf_rel_pt = window_pt - surf.surf_rect.tl
        return MousePos(surf.world_to_surf.back_pt(surf_rel_pt), surf)
Return a MousePos filled with the world position and surf it hit.
def cdhit_from_seqs(seqs, moltype, params=None):
    """Cluster *seqs* with CD-HIT (protein) or CD-HIT-EST (RNA/DNA).

    seqs    : dict-like collection of sequences
    moltype : cogent.core.moltype object (PROTEIN, RNA, or DNA)
    params  : optional cd-hit parameters dict
    Returns a SequenceCollection of the representative sequences.
    Raises ValueError for any other moltype.
    """
    seqs = SequenceCollection(seqs, MolType=moltype)
    if params is None:
        params = {}
    if '-o' not in params:
        _, params['-o'] = mkstemp()
    working_dir = mkdtemp()
    if moltype is PROTEIN:
        app = CD_HIT(WorkingDir=working_dir, params=params)
    elif moltype is RNA or moltype is DNA:
        app = CD_HIT_EST(WorkingDir=working_dir, params=params)
    else:
        # Fixed: the original used Python 2 `raise ValueError, "..."` syntax,
        # which is a SyntaxError on Python 3; the call form works on both.
        raise ValueError("Moltype must be either PROTEIN, RNA, or DNA")
    res = app(seqs.toFasta())
    new_seqs = dict(parse_fasta(res['FASTA']))
    res.cleanUp()
    shutil.rmtree(working_dir)
    remove(params['-o'] + '.bak.clstr')
    return SequenceCollection(new_seqs, MolType=moltype)
Returns the CD-HIT results given seqs seqs : dict like collection of sequences moltype : cogent.core.moltype object params : cd-hit parameters NOTE: This method will call CD_HIT if moltype is PROTIEN, CD_HIT_EST if moltype is RNA/DNA, and raise if any other moltype is passed.
def natural_name(self):
    """Return the natural name, lazily resolved from attrs["name"].

    Fixed: the original used `assert` for control flow (stripped under -O,
    breaking the lazy init) and `return` inside `finally`, which silently
    swallowed any exception from the attrs lookup. An explicit check
    preserves the lazy-caching behavior without those hazards.
    """
    if getattr(self, "_natural_name", None) is None:
        self._natural_name = self.attrs["name"]
    return self._natural_name
Natural name.
def uriunsplit(parts):
    """Combine (scheme, authority, path, query, fragment) into a URI string."""
    scheme, authority, path, query, fragment = parts
    # Choose the bytes or unicode result type based on the path component.
    result_cls = SplitResultBytes if isinstance(path, bytes) else SplitResultUnicode
    return result_cls(scheme, authority, path, query, fragment).geturi()
Combine the elements of a five-item iterable into a URI reference's string representation.
def ctor_args(self):
    """Return keyword arguments for constructing a copy."""
    return {
        'config': self._config,
        'search': self._search,
        'echo': self._echo,
        'read_only': self.read_only,
    }
Return arguments for constructing a copy
def create_log2fc_bigwigs(matrix, outdir, args):
    """Create log2 fold-change bigwigs from *matrix*, one per treatment bed file."""
    import os
    # Fixed: replaced `call("mkdir -p {}".format(outdir), shell=True)` —
    # shell-interpolating an output path is fragile/unsafe; os.makedirs does
    # the same job natively.
    os.makedirs(outdir, exist_ok=True)
    genome_size_dict = args.chromosome_sizes
    outpaths = [join(outdir, splitext(basename(bed_file))[0] + "_log2fc.bw")
                for bed_file in matrix[args.treatment]]
    data = create_log2fc_data(matrix, args)
    Parallel(n_jobs=args.number_cores)(
        delayed(_create_bigwig)(bed_column, outpath, genome_size_dict)
        for outpath, bed_column in zip(outpaths, data))
Create bigwigs from matrix.
def delete_all(self, criteria: Q = None):
    """Delete objects matching *criteria*, or drop the whole schema when falsy.

    Returns the number of deleted items when criteria is given, else None.
    """
    if not criteria:
        with self.conn['lock']:
            if self.schema_name in self.conn['data']:
                del self.conn['data'][self.schema_name]
        return None
    items = self._filter(criteria, self.conn['data'][self.schema_name])
    with self.conn['lock']:
        for identifier in items:
            self.conn['data'][self.schema_name].pop(identifier, None)
    return len(items)
Delete the dictionary object by its criteria
def value(self, value):
    """Set the parameter value.

    :param value: new value; must be None or an instance of self.ptype
                  (when ptype is set).
    :raises: TypeError on a ptype mismatch; the error is also stored in
             self._error before being raised.
    """
    if value is not None and self.ptype is not None \
            and not isinstance(value, self.ptype):
        error = TypeError(
            'Wrong value type of {0} ({1}). {2} expected.'.format(
                self.name, value, self.ptype
            )
        )
        self._error = error
        raise error
    self._value = value
Change of parameter value. If an error occured, it is stored in this error attribute. :param value: new value to use. If input value is not an instance of self.ptype, self error :raises: TypeError if input value is not an instance of self ptype.
def find_stateless_by_name(name):
    """Find a stateless app by name: Django ORM first, then the local registry.

    If the app exists only in the registry, an ORM entry is created for it.
    """
    try:
        dsa_app = StatelessApp.objects.get(app_name=name)
        return dsa_app.as_dash_app()
    except Exception:
        # Fixed: the original bare `except:` also swallowed KeyboardInterrupt
        # and SystemExit. Ideally this would catch StatelessApp.DoesNotExist
        # only — confirm whether as_dash_app() failures should also fall through.
        pass
    dash_app = get_stateless_by_name(name)
    dsa_app = StatelessApp(app_name=name)
    dsa_app.save()
    return dash_app
Find stateless app given its name First search the Django ORM, and if not found then look the app up in a local registry. If the app does not have an ORM entry then a StatelessApp model instance is created.
def ToInternal(self):
    """Convert wavelengths to the internal angstrom representation.

    For internal use only; waveunits is restored after the conversion.
    """
    self.validate_units()
    saved_units = self.waveunits
    converted = saved_units.Convert(self._wavetable, 'angstrom')
    self._wavetable = converted.copy()
    self.waveunits = saved_units
Convert wavelengths to the internal representation of angstroms. For internal use only.
def chain_frames(self):
    """Chain the traceback frames via tb_next.

    Requires ctypes or the speedups extension (tb_next is normally read-only).
    Fixed: the original unconditionally executed `prev_tb.tb_next = None`
    after the loop, raising AttributeError when self.frames is empty.
    """
    prev_tb = None
    for tb in self.frames:
        if prev_tb is not None:
            prev_tb.tb_next = tb
        prev_tb = tb
    if prev_tb is not None:
        # Terminate the chain at the last frame.
        prev_tb.tb_next = None
Chains the frames. Requires ctypes or the speedups extension.
def indent(text, indent_string=" "):
    """Indent each line of *text* with the given indent string."""
    prefixed = ("%s%s" % (indent_string, line)
                for line in str(text).splitlines())
    return os.linesep.join(prefixed)
Indent each line of text with the given indent string.
def decrypt(text):
    """Decrypt a string using an encryption key based on the django SECRET_KEY."""
    crypt = EncryptionAlgorithm.new(_get_encryption_key())
    plaintext = crypt.decrypt(text)
    # Strip the padding added at encryption time.
    return plaintext.rstrip(ENCRYPT_PAD_CHARACTER)
Decrypt a string using an encryption key based on the django SECRET_KEY
def names(self, with_namespace=False):
    """Return the list of series names, always of length self.count().

    Missing names are padded as 'unnamedN'; when with_namespace is True and a
    namespace is set, each name is prefixed with it.
    """
    total = self.count()
    names = self.name.split(settings.splittingnames)[:total]
    pad = 0
    while len(names) < total:
        pad += 1
        names.append('unnamed%s' % pad)
    if with_namespace and self.namespace:
        sep = settings.field_separator
        return [self.namespace + sep + field for field in names]
    return names
List of names for series in dataset. It will always return a list or names with length given by :class:`~.DynData.count`.
def run(self, params):
    """Run the PyMOC tool over the given command-line *params*.

    Each token is dispatched as a command or read as a MOC file; unknown
    tokens raise CommandError. With no params, help is shown.
    """
    self.params = list(reversed(params))
    if not self.params:
        self.help()
        return
    while self.params:
        param = self.params.pop()
        if param in self.command:
            self.command[param](self)
        elif os.path.exists(param):
            self.read_moc(param)
        else:
            raise CommandError('file or command {0} not found'.format(param))
Main run method for PyMOC tool. Takes a list of command line arguments to process. Each operation is performed on a current "running" MOC object.
def _asvector(self, arr):
    """Convert ``arr`` to a `domain` element (the inverse of `_asarray`)."""
    reordered = moveaxis(arr, [-2, -1], [0, 1])
    return self.domain.element(reordered)
Convert ``arr`` to a `domain` element. This is the inverse of `_asarray`.
def make_simulated_env_kwargs(real_env, hparams, **extra_kwargs):
    """Extract simulated env kwargs from real_env and loop hparams."""
    kwargs = {
        attr: getattr(real_env, attr)
        for attr in ("reward_range", "observation_space", "action_space",
                     "frame_height", "frame_width")
    }
    for attr in ("frame_stack_size", "intrinsic_reward_scale"):
        kwargs[attr] = getattr(hparams, attr)
    kwargs["model_name"] = hparams.generative_model
    kwargs["model_hparams"] = trainer_lib.create_hparams(
        hparams.generative_model_params)
    if hparams.wm_policy_param_sharing:
        kwargs["model_hparams"].optimizer_zero_grads = True
    kwargs.update(extra_kwargs)
    return kwargs
Extracts simulated env kwargs from real_env and loop hparams.
def find_observatories(self, match=None):
    """Query the LDR host for observatory prefixes.

    :param match: optional regex; restricts the result to matching names
    :returns: sorted list of observatory prefixes
    """
    response = self._requestresponse("GET", "%s/gwf.json" % _url_prefix)
    sitelist = sorted(set(decode(response.read())))
    if match:
        pattern = re.compile(match)
        sitelist = [site for site in sitelist if pattern.search(site)]
    return sitelist
Query the LDR host for observatories. Use match to restrict returned observatories to those matching the regular expression. Example: >>> connection.find_observatories() ['AGHLT', 'G', 'GHLTV', 'GHLV', 'GHT', 'H', 'HL', 'HLT', 'L', 'T', 'V', 'Z'] >>> connection.find_observatories("H") ['H', 'HL', 'HLT'] @type match: L{str} @param match: name to match return observatories against @returns: L{list} of observatory prefixes
def waypoint_request_send(self, seq):
    """Wrapper for waypoint_request_send (mission_request_send on MAVLink 1.0+)."""
    if self.mavlink10():
        send = self.mav.mission_request_send
    else:
        send = self.mav.waypoint_request_send
    send(self.target_system, self.target_component, seq)
wrapper for waypoint_request_send
def filter_nonspellcheckable_tokens(line, block_out_regexes=None):
    """Return *line* with paths, URLs and emails blanked out (length preserved).

    Extra patterns in *block_out_regexes* are blanked as well.
    """
    patterns = [
        r"[^\s]*:[^\s]*[/\\][^\s]*",
        r"[^\s]*[/\\][^\s]*",
        r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]+\b",
    ]
    patterns.extend(block_out_regexes or [])
    for pattern in patterns:
        for match in re.finditer(pattern, line):
            blank = " " * (match.end() - match.start())
            line = line[:match.start()] + blank + line[match.end():]
    return line
Return line with paths, urls and emails filtered out. Block out other strings of text matching :block_out_regexes: if passed in.
def qcut(expr, bins, labels=False, sort=None, ascending=True):
    """Quantile-based bin indices (starting at 0) of a grouped, sorted expression.

    Extra items go into the first group when cuts are unequal. Custom labels
    are not supported.
    """
    if labels is None or labels:
        raise NotImplementedError('Showing bins or customizing labels not supported')
    return _rank_op(expr, QCut, types.int64,
                    sort=sort, ascending=ascending, _bins=bins)
Get quantile-based bin indices of every element of a grouped and sorted expression. The indices of bins start from 0. If cuts are not of equal sizes, extra items will be appended into the first group. :param expr: expression for calculation :param bins: number of bins :param sort: name of the sort column :param ascending: whether to sort in ascending order :return: calculated column
def flip(f):
    """Return a wrapper that calls *f* with its positional arguments reversed."""
    ensure_callable(f)
    def flipped(*args, **kwargs):
        return f(*reversed(args), **kwargs)
    functools.update_wrapper(flipped, f, ('__name__', '__module__'))
    return flipped
Flip the order of positonal arguments of given function.
def add_traits(self, **traits):
    """Dynamically add trait attributes to the Widget, syncing flagged ones."""
    super(Widget, self).add_traits(**traits)
    for name, trait in traits.items():
        if not trait.get_metadata('sync'):
            continue
        self.keys.append(name)
        self.send_state(name)
Dynamically add trait attributes to the Widget.
def show_label(self, text, size=None, color=None, font_desc=None):
    """Display *text*, using the system default font unless font_desc is given."""
    font = pango.FontDescription(font_desc or _font_desc)
    if color:
        self.set_color(color)
    if size:
        font.set_absolute_size(size * pango.SCALE)
    self.show_layout(text, font)
display text. unless font_desc is provided, will use system's default font
def update(self, friendly_name=values.unset, max_size=values.unset):
    """Update the QueueInstance.

    :param friendly_name: description string for the resource
    :param max_size: maximum number of calls allowed in the queue
    :returns: the updated QueueInstance
    """
    return self._proxy.update(
        friendly_name=friendly_name,
        max_size=max_size,
    )
Update the QueueInstance :param unicode friendly_name: A string to describe this resource :param unicode max_size: The max number of calls allowed in the queue :returns: Updated QueueInstance :rtype: twilio.rest.api.v2010.account.queue.QueueInstance
def delete_storage_account(access_token, subscription_id, rgname, account_name):
    """Delete a storage account in the specified resource group.

    Returns the HTTP response of the DELETE call.
    """
    endpoint = (
        '{}/subscriptions/{}/resourcegroups/{}'
        '/providers/Microsoft.Storage/storageAccounts/{}'
        '?api-version={}'
    ).format(get_rm_endpoint(), subscription_id, rgname, account_name,
             STORAGE_API)
    return do_delete(endpoint, access_token)
Delete a storage account in the specified resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. account_name (str): Name of the new storage account. Returns: HTTP response.
def _draw_chars(self, data, to_draw):
    """Draw *to_draw* at the cursor with data.fmt, overwriting existing chars.

    NOTE(review): the `len(to_draw) > 1` guard means a single character is
    inserted without deleting the character under the cursor — presumably
    intentional overwrite semantics; confirm before changing.
    """
    i = 0
    # Delete at most len(to_draw) characters (up to the end of the block)
    # before inserting the replacement text.
    while not self._cursor.atBlockEnd() and i < len(to_draw) and len(to_draw) > 1:
        self._cursor.deleteChar()
        i += 1
    self._cursor.insertText(to_draw, data.fmt)
Draw the specified charachters using the specified format.
def migrate(vm_, target, live=1, port=0, node=-1, ssl=None, change_home_server=0):
    """Migrate the virtual machine *vm_* to another hypervisor via XenAPI.

    Returns True on success, False when the VM is unknown or the migration
    call fails.
    """
    with _get_xapi_session() as xapi:
        vm_uuid = _get_label_uuid(xapi, 'VM', vm_)
        if vm_uuid is False:
            return False
        other_config = {
            'port': port,
            'node': node,
            'ssl': ssl,
            'change_home_server': change_home_server,
        }
        try:
            xapi.VM.migrate(vm_uuid, target, bool(live), other_config)
        except Exception:
            return False
        return True
Migrates the virtual machine to another hypervisor CLI Example: .. code-block:: bash salt '*' virt.migrate <vm name> <target hypervisor> [live] [port] [node] [ssl] [change_home_server] Optional values: live Use live migration port Use a specified port node Use specified NUMA node on target ssl use ssl connection for migration change_home_server change home server for managed domains
def decr(self, key, delta=1):
    """Decrement the cached value under *key* by *delta* in the uWSGI cache.

    :param key: cache key
    :param delta: amount to subtract
    :rtype: bool
    """
    return uwsgi.cache_dec(key, delta, self.timeout, self.name)
Decrements the specified key value by the specified value. :param str|unicode key: :param int delta: :rtype: bool
def get(self, sid):
    """Construct a SyncListContext for the given *sid*.

    :param sid: the sid of the sync list
    :rtype: twilio.rest.sync.v1.service.sync_list.SyncListContext
    """
    return SyncListContext(self._version,
                           service_sid=self._solution['service_sid'],
                           sid=sid, )
Constructs a SyncListContext :param sid: The sid :returns: twilio.rest.sync.v1.service.sync_list.SyncListContext :rtype: twilio.rest.sync.v1.service.sync_list.SyncListContext
def qeuler(yaw, pitch, roll):
    """Convert Euler angles (in degrees) to a quaternion.

    Parameters
    ----------
    yaw, pitch, roll : number
        Rotation angles in degrees.

    Returns
    -------
    np.array
        Quaternion components ``(w, x, y, z)``.
    """
    half_yaw = np.radians(yaw) * 0.5
    half_pitch = np.radians(pitch) * 0.5
    half_roll = np.radians(roll) * 0.5

    cy, sy = np.cos(half_yaw), np.sin(half_yaw)
    cp, sp = np.cos(half_pitch), np.sin(half_pitch)
    cr, sr = np.cos(half_roll), np.sin(half_roll)

    return np.array((
        cy * cr * cp + sy * sr * sp,
        cy * sr * cp - sy * cr * sp,
        cy * cr * sp + sy * sr * cp,
        sy * cr * cp - cy * sr * sp,
    ))
Convert Euler angle to quaternion. Parameters ---------- yaw: number pitch: number roll: number Returns ------- np.array
def extend(self, *iterables):
    """Append the contents of every iterable, then return self for chaining.

    Args:
        iterables: any number of iterables whose items are appended in order.

    Returns:
        This list, so calls can be chained.
    """
    for chunk in iterables:
        list.extend(self, chunk)
    return self
Add all values of all iterables at the end of the list Args: iterables: iterable which content to add at the end Example: >>> from ww import l >>> lst = l([]) >>> lst.extend([1, 2]) [1, 2] >>> lst [1, 2] >>> lst.extend([3, 4]).extend([5, 6]) [1, 2, 3, 4, 5, 6] >>> lst [1, 2, 3, 4, 5, 6]
def makedirs(path):
    """Create the directory tree at ``path`` if it does not already exist.

    Uses ``exist_ok=True`` instead of the original check-then-create
    (``exists()`` followed by ``mkdir``), which could raise
    ``FileExistsError`` when another process created the directory between
    the two calls (TOCTOU race). Idempotent: calling it on an existing
    directory is a no-op.

    Args:
        path: Directory path (``str`` or ``Path``).
    """
    Path(path).mkdir(parents=True, exist_ok=True)
Creates the directory tree if non existing.
def update_api_endpoint():
    """Rewrite the Angular environment file with the deployed API endpoint.

    Asks Runway which deploy environment is active, reads the
    CloudFormation stack named ``STACK_PREFIX + environment``, extracts its
    ``ServiceEndpoint`` output, and substitutes it into the matching
    ``environment*.ts`` file's ``api_url`` entry.
    """
    # Current deploy environment, e.g. "dev" or "prod" (runs via pipenv).
    environment = subprocess.check_output(['pipenv', 'run', 'runway', 'whichenv']).decode().strip()
    # Prod builds read environment.prod.ts; all other envs use environment.ts.
    environment_file = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        'src',
        'environments',
        'environment.prod.ts' if environment == 'prod' else 'environment.ts'
    )
    cloudformation = boto3.resource('cloudformation')
    stack = cloudformation.Stack(STACK_PREFIX + environment)
    # NOTE(review): raises IndexError if the stack exposes no ServiceEndpoint
    # output -- presumably guaranteed by the backend template; confirm.
    endpoint = [i['OutputValue'] for i in stack.outputs if i['OutputKey'] == 'ServiceEndpoint'][0]
    with open(environment_file, 'r') as stream:
        content = stream.read()
    # Replace the whole api_url line (multiline mode); '/api' is the base path.
    content = re.sub(r'api_url: \'.*\'$', "api_url: '%s/api'" % endpoint, content, flags=re.M)
    with open(environment_file, 'w') as stream:
        stream.write(content)
Update app environment file with backend endpoint.
def validate_complex(prop, value, xpath_map=None):
    """Validate a single complex (dict-valued) metadata property.

    Ensures ``value`` is a dict whose keys all belong to the property's
    known definition (or to ``xpath_map`` when the property has none), and
    that each sub-value is a string or list. ``None`` is accepted as-is.
    """
    if value is None:
        return

    validate_type(prop, value, dict)

    # Prefer the built-in definition for this property; otherwise fall back
    # to the caller-supplied xpath map (or no keys at all).
    if prop in _complex_definitions:
        allowed_keys = _complex_definitions[prop]
    elif xpath_map is None:
        allowed_keys = {}
    else:
        allowed_keys = xpath_map

    for sub_prop, sub_val in iteritems(value):
        if sub_prop not in allowed_keys:
            _validation_error(prop, None, value, ('keys: {0}'.format(','.join(allowed_keys))))
        validate_type('.'.join((prop, sub_prop)), sub_val, (string_types, list))
Default validation for single complex data structure
def get_state(self):
    """Return the sampler state plus each step method's current state.

    Rebuilds ``self.step_methods`` from the per-stochastic registry, then
    extends the base ``Sampler`` state with a copy of every step method's
    state keyed by its ``_id``, so sampling can be resumed later.
    """
    methods = set()
    for stochastic in self.stochastics:
        methods.update(self.step_method_dict[stochastic])
    self.step_methods = methods

    state = Sampler.get_state(self)
    state['step_methods'] = dict(
        (sm._id, sm.current_state().copy()) for sm in methods
    )
    return state
Return the sampler and step methods current state in order to restart sampling at a later time.
def facet_query_matching_method(self, facet_query_matching_method):
    """Set the facet_query_matching_method of this FacetSearchRequestContainer.

    The matching method used to filter when 'facetQuery' is used.

    :param facet_query_matching_method: one of CONTAINS, STARTSWITH,
        EXACT or TAGPATH
    :type: str
    :raises ValueError: if the value is not one of the allowed methods
    """
    # Keep this a list: its repr is embedded in the error message below.
    allowed = ["CONTAINS", "STARTSWITH", "EXACT", "TAGPATH"]
    if facet_query_matching_method not in allowed:
        raise ValueError(
            "Invalid value for `facet_query_matching_method` ({0}), must be one of {1}"
            .format(facet_query_matching_method, allowed)
        )
    self._facet_query_matching_method = facet_query_matching_method
Sets the facet_query_matching_method of this FacetSearchRequestContainer. The matching method used to filter when 'facetQuery' is used. Defaults to CONTAINS. # noqa: E501 :param facet_query_matching_method: The facet_query_matching_method of this FacetSearchRequestContainer. # noqa: E501 :type: str
def from_mongo(cls, doc):
    """Convert a raw MongoDB document into a Document instance.

    ``None`` passes through, already-wrapped Documents are returned as-is,
    and when the document records a concrete type (``__type_store__`` key)
    that class is loaded and used instead of ``cls``.
    """
    if doc is None:
        return None
    if isinstance(doc, Document):
        return doc

    store = cls.__type_store__
    if store and store in doc:
        # The stored type name wins over the class this was called on.
        cls = load(doc[store], 'marrow.mongo.document')

    # Bypass default preparation until the raw data is attached.
    instance = cls(_prepare_defaults=False)
    instance.__data__ = doc
    instance._prepare_defaults()
    return instance
Convert data coming in from the MongoDB wire driver into a Document instance.
def parse_args(self, args=None, values=None):
    """Run ``_parse_args`` in a child process and return its result.

    The child pushes its result onto a queue; reading the queue before
    joining avoids blocking on a full pipe buffer.
    """
    result_queue = multiproc.Queue()
    worker = multiproc.Process(
        target=self._parse_args, args=(result_queue, args, values))
    worker.start()
    result = result_queue.get()
    worker.join()
    return result
multiprocessing wrapper around _parse_args
def validate_units(self):
    """Ensure wavelength and flux units belong to the correct classes.

    Raises
    ------
    TypeError
        Wavelength unit is not `~pysynphot.units.WaveUnits` or flux unit
        is not `~pysynphot.units.FluxUnits`.
    """
    checks = (
        (self.waveunits, units.WaveUnits, "WaveUnit"),
        (self.fluxunits, units.FluxUnits, "FluxUnit"),
    )
    for value, unit_class, label in checks:
        if not isinstance(value, unit_class):
            raise TypeError("%s is not a valid %s" % (value, label))
Ensure that wavelenth and flux units belong to the correct classes. Raises ------ TypeError Wavelength unit is not `~pysynphot.units.WaveUnits` or flux unit is not `~pysynphot.units.FluxUnits`.
def validate_week(year, week):
    """Return True if ``week`` is a valid ISO week number for ``year``.

    December 28 always falls in the year's last ISO week, so its week
    number is the total number of ISO weeks (52 or 53). The original code
    checked December 31 and treated "Dec 31 is in week 1 of next year" as
    meaning 53 weeks, which wrongly accepted week 53 in 52-week years
    such as 2018 (Dec 31 2018 is in ISO week 1 of 2019).

    Args:
        year (int): Calendar year.
        week (int): ISO week number to validate.

    Returns:
        bool: True when ``1 <= week <= (weeks in year)``.
    """
    # Dec 28 is guaranteed to lie in the last ISO week of its year.
    max_week = datetime.strptime(
        "12-28-{}".format(year), "%m-%d-%Y").isocalendar()[1]
    return 1 <= week <= max_week
Validate that ``week`` is a legal ISO week number (1 through 52 or 53, depending on the year) for ``year``.
def copy_framebuffer(self, dst, src) -> None:
    """Copy framebuffer content.

    Use this to blit framebuffers, copy framebuffer content into a
    texture, or downsample a framebuffer (directly to a texture if
    desired).

    Args:
        dst (Framebuffer or Texture): Destination framebuffer or texture.
        src (Framebuffer): Source framebuffer.
    """
    destination = dst.mglo
    source = src.mglo
    self.mglo.copy_framebuffer(destination, source)
Copy framebuffer content. Use this method to: - blit framebuffers. - copy framebuffer content into a texture. - downsample framebuffers. (it will allow to read the framebuffer's content) - downsample a framebuffer directly to a texture. Args: dst (Framebuffer or Texture): Destination framebuffer or texture. src (Framebuffer): Source framebuffer.
def _results(self, scheduler_instance_id):
    """Return serialized results of executed actions for one scheduler.

    Only meaningful on passive poller/reactionner daemons.

    :param scheduler_instance_id: instance id of the scheduler
    :type scheduler_instance_id: string
    :return: serialized list of results
    :rtype: str
    """
    # Fetch and serialize under the app lock so the result list cannot be
    # mutated while it is being serialized.
    with self.app.lock:
        results = self.app.get_results_from_passive(scheduler_instance_id)
        return serialize(results, True)
Get the results of the executed actions for the scheduler which instance id is provided Calling this method for daemons that are not configured as passive do not make sense. Indeed, this service should only be exposed on poller and reactionner daemons. :param scheduler_instance_id: instance id of the scheduler :type scheduler_instance_id: string :return: serialized list :rtype: str
def get_compatible_pyplot(backend=None, debug=True):
    """Return pyplot after optionally switching the matplotlib backend.

    On CI Mac builds python is not installed as a framework, so forcing a
    backend (e.g. TkAgg) before importing pyplot avoids the mac OS default.

    Parameters
    ----------
    backend : str, optional (default=None)
        Backend to switch to; when None the current backend is kept.
    debug : bool, optional (default=True)
        Whether to log backend information to stderr.
    """
    import matplotlib

    current = matplotlib.get_backend()
    if backend is not None:
        # Must happen before pyplot is imported below.
        matplotlib.use(backend)
        if debug:
            sys.stderr.write("Currently using '%s' MPL backend, "
                             "switching to '%s' backend%s"
                             % (current, backend, os.linesep))
    elif debug:
        sys.stderr.write("Using '%s' MPL backend%s"
                         % (current, os.linesep))

    from matplotlib import pyplot as plt
    return plt
Make the backend of MPL compatible. In Travis Mac distributions, python is not installed as a framework. This means that using the TkAgg backend is the best solution (so it doesn't try to use the mac OS backend by default). Parameters ---------- backend : str, optional (default="TkAgg") The backend to default to. debug : bool, optional (default=True) Whether to log the existing backend to stderr.
def from_barset(
        cls, barset, name=None, delay=None, use_wrapper=True, wrapper=None):
    """Build a FrameSet by copying frames from an existing BarSet.

    Arguments:
        barset      : Existing BarSet to copy frames from.
        name        : Name for the new FrameSet.
        delay       : Delay for the animation.
        use_wrapper : Reuse the old barset's wrapper on each frame.
        wrapper     : New wrapper pair for each frame; overrides
                      ``use_wrapper`` when given.
    """
    # Explicit wrapper wins over the barset's own wrapper.
    if wrapper:
        frames = tuple(barset.wrap_str(s, wrapper=wrapper) for s in barset)
    elif use_wrapper:
        frames = tuple(barset.wrap_str(s) for s in barset)
    else:
        frames = barset.data
    return cls(frames, name=name, delay=delay)
Copy a BarSet's frames to create a new FrameSet. Arguments: barset : An existing BarSet object to copy frames from. name : A name for the new FrameSet. delay : Delay for the animation. use_wrapper : Whether to use the old barset's wrapper in the frames. wrapper : A new wrapper pair to use for each frame. This overrides the `use_wrapper` option.
def delete(self, request, **kwargs):
    """Cancel the requesting subscriber's current subscription.

    Resolves (or creates) the Customer for the request's subscriber and
    cancels its subscription, honouring the configured
    ``CANCELLATION_AT_PERIOD_END`` behaviour.

    Returns:
        204 on success; 400 with a generic message on any failure.
    """
    try:
        customer, _created = Customer.get_or_create(
            subscriber=subscriber_request_callback(self.request)
        )
        customer.subscription.cancel(at_period_end=CANCELLATION_AT_PERIOD_END)
        return Response(status=status.HTTP_204_NO_CONTENT)
    # NOTE(review): the blanket ``except Exception`` also hides programming
    # errors (e.g. ``customer.subscription`` being None); consider narrowing
    # to the payment-provider error types and logging the exception.
    except Exception:
        return Response(
            "Something went wrong cancelling the subscription.",
            status=status.HTTP_400_BAD_REQUEST,
        )
Mark the customers current subscription as cancelled. Returns with status code 204.
def download(self, sources, output_directory, filename):
    """Download ``filename`` from the first source that succeeds.

    Valid sources (ordered by fewest previous errors by
    ``_filter_sources``) are attempted concurrently; once any attempt
    succeeds the remaining workers skip their download.

    :param sources: A list of dicts with 'host_name' and 'url' keys.
    :type sources: list
    :param output_directory: Directory to save the downloaded file in.
    :type output_directory: str
    :param filename: Filename assigned to the downloaded file.
    :type filename: str
    :returns: A dict with 'host_name' and 'filename' keys on success,
        ``{'error': ...}`` when no valid sources exist, or ``{}`` when
        every source failed.
    :rtype: dict
    """
    valid_sources = self._filter_sources(sources)
    if not valid_sources:
        return {'error': 'no valid sources'}
    # Shared across workers; doubles as a "someone already succeeded" flag.
    manager = Manager()
    successful_downloads = manager.list([])

    def f(source):
        # Skip the attempt entirely once any worker has succeeded.
        if not successful_downloads:
            result = self.download_from_host(
                source, output_directory, filename)
            if 'error' in result:
                # Track per-host failures so later calls deprioritize it.
                self._host_errors[source['host_name']] += 1
            else:
                successful_downloads.append(result)
    # multiprocessing.dummy.Pool is a *thread* pool: one thread per source.
    multiprocessing.dummy.Pool(len(valid_sources)).map(f, valid_sources)
    return successful_downloads[0] if successful_downloads else {}
Download a file from one of the provided sources The sources will be ordered by least amount of errors, so most successful hosts will be tried first. In case of failure, the next source will be attempted, until the first successful download is completed or all sources have been depleted. :param sources: A list of dicts with 'host_name' and 'url' keys. :type sources: list :param output_directory: Directory to save the downloaded file in. :type output_directory: str :param filename: Filename assigned to the downloaded file. :type filename: str :returns: A dict with 'host_name' and 'filename' keys if the download is successful, or an empty dict otherwise. :rtype: dict
def remove_peer_from_bgp_speaker(self, speaker_id, body=None):
    """Remove a peer from the given BGP speaker.

    :param speaker_id: id of the BGP speaker to update
    :param body: optional request body identifying the peer
    """
    speaker_path = self.bgp_speaker_path % speaker_id
    return self.put(speaker_path + "/remove_bgp_peer", body=body)
Removes a peer from BGP speaker.
def decode(self, data, erase_pos=None, only_erasures=False):
    """Repair a message of any length by decoding it in codeword chunks.

    The input is split into chunks of ``self.nsize`` bytes and each chunk
    is corrected independently with ``rs_correct_msg``.

    :param data: str or bytearray codeword stream (str decoded as latin-1)
    :param erase_pos: optional list of known erasure positions indexed over
        the whole message; rebased per chunk as decoding advances
    :param only_erasures: when True, correct erasures only
    :returns: bytearray with the corrected message chunks concatenated
    """
    if isinstance(data, str):
        data = bytearray(data, "latin-1")
    dec = bytearray()
    for i in xrange(0, len(data), self.nsize):
        # One codeword's worth of bytes per iteration.
        chunk = data[i:i+self.nsize]
        e_pos = []
        if erase_pos:
            # Keep the erasures that fall inside this chunk, and rebase the
            # rest so they are relative to the following chunks.
            # NOTE(review): the <= / > boundary with nsize and the nsize+1
            # shift mirror the upstream reedsolo convention -- confirm the
            # off-by-one semantics before changing either.
            e_pos = [x for x in erase_pos if x <= self.nsize]
            erase_pos = [x - (self.nsize+1) for x in erase_pos if x > self.nsize]
        dec.extend(rs_correct_msg(chunk, self.nsym, fcr=self.fcr, generator=self.generator, erase_pos=e_pos, only_erasures=only_erasures)[0])
    return dec
Repair a message, whatever its size is, by using chunking
def moveTab(self, fromIndex, toIndex):
    """
    Moves the tab from the input index to the given index.

    :param      fromIndex | <int>
                toIndex   | <int>
    """
    try:
        item = self.layout().itemAt(fromIndex)
        self.layout().insertItem(toIndex, item.widget())
    # NOTE(review): StandardError is Python-2-only (consistent with this
    # file's other py2 idioms); the blanket catch silently ignores
    # out-of-range indices and missing widgets.
    except StandardError:
        pass
Moves the tab from the input index to the given index.

:param      fromIndex | <int>
            toIndex | <int>
def cmdline(argv=None):
    """Script for merging different collections of stop words.

    Parses ``language`` and one or more source files, merges their stop
    words with any existing collection for that language, and writes the
    result back through the factory.

    Args:
        argv: CLI arguments. Defaults to ``sys.argv[1:]`` *at call time* --
            the original default was evaluated once at import time, so
            later changes to ``sys.argv`` (and repeated test invocations)
            saw a stale value.
    """
    if argv is None:
        argv = sys.argv[1:]
    parser = ArgumentParser(
        description='Create and merge collections of stop words')
    parser.add_argument(
        'language', help='The language used in the collection')
    parser.add_argument('sources', metavar='FILE', nargs='+',
                        help='Source files to parse')
    options = parser.parse_args(argv)

    factory = StopWordFactory()
    language = options.language
    # Start from the existing collection (fail_safe: empty when missing).
    stop_words = factory.get_stop_words(language, fail_safe=True)
    for filename in options.sources:
        stop_words += StopWord(language, factory.read_collection(filename))

    filename = factory.get_collection_filename(stop_words.language)
    factory.write_collection(filename, stop_words.collection)
Script for merging different collections of stop words.
def index(args):
    """
    %prog index database.fasta

    Wrapper for `bwa index`. Same interface.
    """
    # NOTE: index.__doc__ doubles as the CLI usage string for OptionParser,
    # so the docstring text above must stay verbatim.
    parser = OptionParser(index.__doc__)
    opts, args = parser.parse_args(args)

    if len(args) != 1:
        sys.exit(not parser.print_help())

    dbfile, = args
    check_index(dbfile)
%prog index database.fasta Wrapper for `bwa index`. Same interface.
def to_json(self):
    """Serialize this mapreduce spec into a json-compatible dict.

    Returns:
      data in json format.
    """
    return {
        "name": self.name,
        "mapreduce_id": self.mapreduce_id,
        "mapper_spec": self.mapper.to_json(),
        "params": self.params,
        "hooks_class_name": self.hooks_class_name,
    }
Serializes all data in this mapreduce spec into json form. Returns: data in json format.
def start(self):
    """Submit this behaviour's ``_start`` coroutine to the agent's event
    loop and mark the behaviour as running."""
    coro = self._start()
    self.agent.submit(coro)
    self.is_running = True
starts behaviour in the event loop
def _download_and_clean_file(filename, url):
    """Download data from ``url`` and normalize it into CSV at ``filename``.

    Strips whitespace, collapses ', ' separators to ',', drops blank or
    comma-less lines, and removes a trailing period before writing.
    """
    temp_file, _ = urllib.request.urlretrieve(url)
    with tf.gfile.Open(temp_file, 'r') as temp_eval_file:
        with tf.gfile.Open(filename, 'w') as eval_file:
            for raw_line in temp_eval_file:
                line = raw_line.strip().replace(', ', ',')
                # Skip empty lines and lines that are clearly not CSV rows.
                if not line or ',' not in line:
                    continue
                if line.endswith('.'):
                    line = line[:-1]
                eval_file.write(line + '\n')
    tf.gfile.Remove(temp_file)
Downloads data from url, and makes changes to match the CSV format.
def get_model(LAB_DIR):
    """Load saved Cannon model parameters from ``LAB_DIR``.

    Each parameter is stored as ``<name>.npz`` under the key ``arr_0``.

    Returns:
        tuple: (coeffs, scatters, chisqs, pivots) arrays.
    """
    def _load(stem):
        # All arrays were saved positionally, hence the 'arr_0' key.
        return np.load("%s/%s.npz" % (LAB_DIR, stem))['arr_0']

    coeffs = _load("coeffs")
    scatters = _load("scatters")
    chisqs = _load("chisqs")
    pivots = _load("pivots")
    return coeffs, scatters, chisqs, pivots
Load the Cannon model parameters (coefficients, scatters, chi-squared values, pivots) from ``LAB_DIR``.
def _initialize(self, **resource_attributes): self._set_attributes(**resource_attributes) for attribute, attribute_type in list(self._mapper.items()): if attribute in resource_attributes and isinstance(resource_attributes[attribute], dict): setattr(self, attribute, attribute_type(**resource_attributes[attribute]))
Initialize a resource. Default behavior is just to set all the attributes. You may want to override this. :param resource_attributes: The resource attributes
def get(object_ids):
    """Get one or many remote objects from the object store.

    Identical to ``ray.get`` but additionally accepts tuples, ndarrays and
    dicts of ``{key: object ID}``.

    Args:
        object_ids: A single object ID, a list/tuple/ndarray of object IDs,
            or a dict mapping keys to object IDs.

    Returns:
        The resolved Python object(s), mirroring the input container shape.
    """
    if isinstance(object_ids, (tuple, np.ndarray)):
        return ray.get(list(object_ids))

    if isinstance(object_ids, dict):
        # Resolve only the values that are actually object IDs; everything
        # else is passed through unchanged.
        remote_items = [
            (k, v) for k, v in object_ids.items()
            if isinstance(v, ray.ObjectID)
        ]
        fetched = ray.get([v for _, v in remote_items])
        result = object_ids.copy()
        for (key, _), value in zip(remote_items, fetched):
            result[key] = value
        return result

    return ray.get(object_ids)
Get a single or a collection of remote objects from the object store. This method is identical to `ray.get` except it adds support for tuples, ndarrays and dictionaries. Args: object_ids: Object ID of the object to get, a list, tuple, ndarray of object IDs to get or a dict of {key: object ID}. Returns: A Python object, a list of Python objects or a dict of {key: object}.
def add_mrp_service(self, info, address):
    """Add a new MediaRemoteProtocol device to the discovered list.

    Ignored when a different protocol filter is active.
    """
    if self.protocol and self.protocol != PROTOCOL_MRP:
        return

    name = info.properties[b'Name'].decode('utf-8')
    service = conf.MrpService(info.port)
    self._handle_service(address, name, service)
Add a new MediaRemoteProtocol device to discovered list.
def get_attribute(self, name):
    """Retrieves specified attribute from WebElement.

    @type name: str
    @param name: Attribute to retrieve
    @rtype: str
    @return: String representation of the attribute
    """
    description = 'get attribute "' + str(name) + '"'

    def fetch():
        return self.element.get_attribute(name)

    return self.execute_and_handle_webelement_exceptions(fetch, description)
Retrieves specified attribute from WebElement @type name: str @param name: Attribute to retrieve @rtype: str @return: String representation of the attribute
def fields(self):
    """Yield ``(name, field)`` pairs for every model field.

    Only attributes whose value is a ``Field`` instance are produced.
    """
    for name, candidate in self._meta.fields.items():
        if isinstance(candidate, Field):
            yield name, candidate
Provides an iterable for all model fields.
def _save_sign(self, filepath):
    """Sign the saved file so it can later be opened without safe mode.

    In safe mode the file is deliberately left unsigned (unapproved
    content must not be auto-trusted); otherwise ``sign_file`` is called
    and failures are reported on the status bar.
    """
    if self.code_array.safe_mode:
        msg = _("File saved but not signed because it is unapproved.")
        # The status bar may be unavailable (e.g. headless tests); ignore.
        try:
            post_command_event(self.main_window, self.StatusBarMsg, text=msg)
        except TypeError:
            pass
    else:
        try:
            self.sign_file(filepath)
        except ValueError, err:
            msg = "Signing file failed. " + unicode(err)
            post_command_event(self.main_window, self.StatusBarMsg, text=msg)
Sign so that the new file may be retrieved without safe mode