def create_video_transcript(video_id, language_code, file_format, content, provider=TranscriptProviderType.CUSTOM):
    transcript_serializer = TranscriptSerializer(
        data=dict(provider=provider, language_code=language_code, file_format=file_format),
        context=dict(video_id=video_id),
    )
    if transcript_serializer.is_valid():
        transcript_serializer.save(content=content)
        return transcript_serializer.data
    else:
        raise ValCannotCreateError(transcript_serializer.errors)
Create a video transcript.

Arguments:
    video_id(unicode): An Id identifying the Video data model object.
    language_code(unicode): A language code.
    file_format(unicode): Transcript file format.
    content(InMemoryUploadedFile): Transcript content.
    provider(unicode): Transcript provider (it will be 'custom' by default if not selected).
def submit(self):
    u = urlparse(self.url)
    if not self.action:
        self.action = self.url
    elif self.action == u.path:
        self.action = self.url
    else:
        if u.netloc not in self.action:
            path = "/".join(u.path.split("/")[1:-1])
            if self.action.startswith("/"):
                path = path + self.action
            else:
                path = path + "/" + self.action
            self.action = "http://" + u.netloc + "/" + path
    return self.usr.getPage(self.action, self.items, {'Referer': self.url}, self.usePin)
Posts the form's data and returns the resulting Page.

Returns:
    Page - The resulting page
def mdownload(args):
    from jcvi.apps.grid import Jobs

    p = OptionParser(mdownload.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    linksfile, = args
    links = [(x.strip(),) for x in open(linksfile)]
    j = Jobs(download, links)
    j.run()
%prog mdownload links.txt

Download multiple files from a list of links. Use formats.html.links() to extract the links file.
def load_file(self, filename):
    with open(filename, 'r') as sourcefile:
        self.set_string(sourcefile.read())
Read in file contents and set the current string.
def delete_params_s(s, params):
    patt = '(?s)' + '|'.join(
        '(?<=\n)' + s + '\s*:.+?\n(?=\S+|$)' for s in params)
    return re.sub(patt, '', '\n' + s.strip() + '\n').strip()
Delete the given parameters from a string

Same as :meth:`delete_params` but does not use the :attr:`params` dictionary

Parameters
----------
s: str
    The string of the parameters section
params: list of str
    The names of the parameters to delete

Returns
-------
str
    The modified string `s` without the descriptions of `params`
def iterkeys(self):
    def _iterkeys(bin):
        for item in bin:
            yield item.key
    for bin in self.bins:
        yield _iterkeys(bin)
An iterator over the keys of each bin.
def removeOntology(self, ontology):
    # Compare the model's id column; the bare builtin `id` would never match a row.
    q = models.Ontology.delete().where(models.Ontology.id == ontology.getId())
    q.execute()
Removes the specified ontology term map from this repository.
def get_job_info(current_job):
    trials = TrialRecord.objects.filter(job_id=current_job.job_id)
    total_num = len(trials)
    running_num = sum(t.trial_status == Trial.RUNNING for t in trials)
    success_num = sum(t.trial_status == Trial.TERMINATED for t in trials)
    failed_num = sum(t.trial_status == Trial.ERROR for t in trials)

    if total_num == 0:
        progress = 0
    else:
        progress = int(float(success_num) / total_num * 100)

    winner = get_winner(trials)

    job_info = {
        "job_id": current_job.job_id,
        "job_name": current_job.name,
        "user": current_job.user,
        "type": current_job.type,
        "start_time": current_job.start_time,
        "end_time": current_job.end_time,
        "total_num": total_num,
        "running_num": running_num,
        "success_num": success_num,
        "failed_num": failed_num,
        "best_trial_id": current_job.best_trial_id,
        "progress": progress,
        "winner": winner
    }

    return job_info
Get job information for current job.
def get_decomp_and_e_above_hull(self, entry, allow_negative=False):
    if entry in self.stable_entries:
        return {entry: 1}, 0

    comp = entry.composition
    facet, simplex = self._get_facet_and_simplex(comp)
    decomp_amts = simplex.bary_coords(self.pd_coords(comp))
    decomp = {self.qhull_entries[f]: amt
              for f, amt in zip(facet, decomp_amts)
              if abs(amt) > PhaseDiagram.numerical_tol}
    energies = [self.qhull_entries[i].energy_per_atom for i in facet]
    ehull = entry.energy_per_atom - np.dot(decomp_amts, energies)
    if allow_negative or ehull >= -PhaseDiagram.numerical_tol:
        return decomp, ehull
    raise ValueError("No valid decomp found!")
Provides the decomposition and energy above convex hull for an entry.
Due to caching, can be much faster if entries with the same composition
are processed together.

Args:
    entry: A PDEntry like object
    allow_negative: Whether to allow negative e_above_hulls. Used to
        calculate equilibrium reaction energies. Defaults to False.

Returns:
    (decomp, energy above convex hull). Stable entries should have
    energy above hull of 0. The decomposition is provided as a dict of
    {Entry: amount}.
def upload_object(self, instance, bucket_name, object_name, file_obj, content_type=None):
    url = '/buckets/{}/{}/{}'.format(instance, bucket_name, object_name)
    with open(file_obj, 'rb') as f:
        if content_type:
            files = {object_name: (object_name, f, content_type)}
        else:
            files = {object_name: (object_name, f)}
        self._client.request(path=url, method='post', files=files)
Upload an object to a bucket.

:param str instance: A Yamcs instance name.
:param str bucket_name: The name of the bucket.
:param str object_name: The target name of the object.
:param file file_obj: The file (or file-like object) to upload.
:param str content_type: The content type associated to this object.
    This is mainly useful when accessing an object directly via a web
    browser. If unspecified, a content type *may* be automatically
    derived from the specified ``file_obj``.
def odometry2Pose3D(odom):
    pose = Pose3d()
    ori = odom.pose.pose.orientation

    pose.x = odom.pose.pose.position.x
    pose.y = odom.pose.pose.position.y
    pose.z = odom.pose.pose.position.z
    pose.yaw = quat2Yaw(ori.w, ori.x, ori.y, ori.z)
    pose.pitch = quat2Pitch(ori.w, ori.x, ori.y, ori.z)
    pose.roll = quat2Roll(ori.w, ori.x, ori.y, ori.z)
    pose.q = [ori.w, ori.x, ori.y, ori.z]
    pose.timeStamp = odom.header.stamp.secs + (odom.header.stamp.nsecs * 1e-9)

    return pose
Translates from ROS Odometry to JderobotTypes Pose3d.

@param odom: ROS Odometry to translate
@type odom: Odometry
@return a Pose3d translated from odom
def _parse_nodes_section(f, current_section, nodes):
    section = {}
    dimensions = None
    if current_section == 'NODE_COORD_SECTION':
        dimensions = 3
    elif current_section == 'DEMAND_SECTION':
        dimensions = 2
    else:
        raise ParseException('Invalid section {}'.format(current_section))

    n = 0
    for line in f:
        line = strip(line)
        definitions = re.split(r'\s*', line)
        if len(definitions) != dimensions:
            raise ParseException('Invalid dimensions from section {}. Expected: {}'.format(current_section, dimensions))

        node = int(definitions[0])
        values = [int(v) for v in definitions[1:]]
        if len(values) == 1:
            values = values[0]

        section[node] = values

        n = n + 1
        if n == nodes:
            break

    if n != nodes:
        raise ParseException('Missing {} nodes definition from section {}'.format(nodes - n, current_section))

    return section
Parse TSPLIB NODE_COORD_SECTION or DEMAND_SECTION from file descriptor f.

Returns a dict containing the node as key.
def raster_to_projection_coords(self, pixel_x, pixel_y):
    h_px_py = np.array([1, pixel_x, pixel_y])
    gt = np.array([[1, 0, 0], self.geotransform[0:3], self.geotransform[3:6]])
    arr = np.inner(gt, h_px_py)
    return arr[2], arr[1]
Use pixel centers when appropriate. See documentation for the GDAL function GetGeoTransform for details.
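A worked example of the affine mapping the method applies, inlined as a standalone sketch. The geotransform values are hypothetical, in GDAL's (origin_x, pixel_width, row_rotation, origin_y, column_rotation, pixel_height) layout:

import numpy as np

geotransform = (100.0, 10.0, 0.0, 500.0, 0.0, -10.0)  # hypothetical GDAL geotransform
pixel_x, pixel_y = 3, 2
gt = np.array([[1, 0, 0], geotransform[0:3], geotransform[3:6]])
arr = np.inner(gt, np.array([1, pixel_x, pixel_y]))
# Returned in the same (arr[2], arr[1]) order as the method above:
print(arr[2], arr[1])  # 480.0 130.0 (projected y, projected x)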
def _j9SaveCurrent(sDir='.'):
    dname = os.path.normpath(sDir + '/' + datetime.datetime.now().strftime("%Y-%m-%d_J9_AbbreviationDocs"))
    if not os.path.isdir(dname):
        os.mkdir(dname)
    os.chdir(dname)
    for urlID, urlString in j9urlGenerator(nameDict=True).items():
        fname = "{}_abrvjt.html".format(urlID)
        # use a context manager so each downloaded page is closed after writing
        with open(fname, 'wb') as f:
            f.write(urllib.request.urlopen(urlString).read())
Downloads and saves all the webpages. For backend use.
def get_series_episodes(self, id, page=1):
    params = {'page': page}
    r = self.session.get(self.base_url + '/series/{}/episodes'.format(id), params=params)
    if r.status_code == 404:
        return None
    r.raise_for_status()
    return r.json()
Get series episodes
def info(self, abspath=True):
    logger.debug(str(''))
    return self._call_and_parse(['info', '--json'], abspath=abspath)
Return a dictionary with configuration information. No guarantee is made about which keys exist. Therefore this function should only be used for testing and debugging.
def usearch_cluster_error_correction(fasta_filepath,
                                     output_filepath=None,
                                     output_uc_filepath=None,
                                     percent_id_err=0.97,
                                     sizein=True,
                                     sizeout=True,
                                     w=64,
                                     slots=16769023,
                                     maxrejects=64,
                                     log_name="usearch_cluster_err_corrected.log",
                                     usersort=False,
                                     HALT_EXEC=False,
                                     save_intermediate_files=False,
                                     remove_usearch_logs=False,
                                     working_dir=None):
    if not output_filepath:
        _, output_filepath = mkstemp(prefix='usearch_cluster_err_corrected', suffix='.fasta')

    log_filepath = join(working_dir, log_name)

    params = {'--sizein': sizein,
              '--sizeout': sizeout,
              '--id': percent_id_err,
              '--w': w,
              '--slots': slots,
              '--maxrejects': maxrejects}

    app = Usearch(params, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC)

    if usersort:
        app.Parameters['--usersort'].on()

    data = {'--cluster': fasta_filepath,
            '--consout': output_filepath}

    if not remove_usearch_logs:
        data['--log'] = log_filepath

    if output_uc_filepath:
        data['--uc'] = output_uc_filepath

    app_result = app(data)

    return app_result, output_filepath
Cluster for error correction at percent_id_err, output consensus fasta

fasta_filepath = input fasta file, generally a dereplicated fasta
output_filepath = output error corrected fasta filepath
percent_id_err = minimum identity percent
sizein = not defined in usearch helpstring
sizeout = not defined in usearch helpstring
w = Word length for U-sorting
slots = Size of compressed index table. Should be prime, e.g. 40000003.
    Should also specify --w, typical is --w 16 or --w 32.
maxrejects = Max rejected targets, 0=ignore, default 32.
log_name = string specifying output log name
usersort = Enable if input fasta not sorted by length purposefully,
    otherwise usearch will raise an error.
HALT_EXEC: Used for debugging app controller
save_intermediate_files: Preserve all intermediate files created.
def render_to_response(self, template_name, __data, content_type="text/html"):
    resp = self.render(template_name, __data)
    return Response(resp, content_type=content_type)
Given a template name and template data, renders the template and returns a `webob.Response` object.
def get_comments_of_confirmation_per_page(self, confirmation_id, per_page=1000, page=1):
    return self._get_resource_per_page(
        resource=CONFIRMATION_COMMENTS,
        per_page=per_page,
        page=page,
        params={'confirmation_id': confirmation_id},
    )
Get comments of confirmation per page

:param confirmation_id: the confirmation id
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:return: list
def on_new_line(self):
    self.set_cursor_position('eof')
    self.current_prompt_pos = self.get_position('cursor')
    self.new_input_line = False
On new input line
def compute_ecc_hash(ecc_manager, hasher, buf, max_block_size, rate, message_size=None, as_string=False):
    result = []
    if not message_size:
        ecc_params = compute_ecc_params(max_block_size, rate, hasher)
        message_size = ecc_params["message_size"]
    for i in xrange(0, len(buf), message_size):
        mes = buf[i:i + message_size]
        ecc = ecc_manager.encode(mes)
        hash = hasher.hash(mes)
        if as_string:
            result.append("%s%s" % (str(hash), str(ecc)))
        else:
            result.append([hash, ecc])
    return result
Split a string in blocks given max_block_size and compute the hash and ecc for each block, and then return a nice list with both for easy processing.
def context_list_users(zap_helper, context_name):
    with zap_error_handler():
        info = zap_helper.get_context_info(context_name)

    users = zap_helper.zap.users.users_list(info['id'])
    if len(users):
        user_list = ', '.join([user['name'] for user in users])
        console.info('Available users for the context {0}: {1}'.format(context_name, user_list))
    else:
        console.info('No users configured for the context {}'.format(context_name))
List the users available for a given context.
def add_job_from_json(self, job_json, destructive=False):
    logger.debug('Importing job from JSON document: {0}'.format(job_json))
    rec = self.backend.decode_import_json(job_json)
    if destructive:
        try:
            self.delete_job(rec['name'])
        except DagobahError:
            pass
    self._add_job_from_spec(rec, use_job_id=False)
    self.commit(cascade=True)
Construct a new Job from an imported JSON spec.
def get_last_lineno(node):
    max_lineno = 0

    if hasattr(node, "lineno"):
        max_lineno = node.lineno

    for _, field in ast.iter_fields(node):
        if isinstance(field, list):
            for value in field:
                if isinstance(value, ast.AST):
                    max_lineno = max(max_lineno, get_last_lineno(value))
        elif isinstance(field, ast.AST):
            max_lineno = max(max_lineno, get_last_lineno(field))

    return max_lineno
Recursively find the last line number of the ast node.
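A minimal usage sketch, assuming the get_last_lineno function above is in scope; the source snippet here is purely illustrative:

import ast

source = "def f(x):\n    y = x + 1\n    return y\n"
tree = ast.parse(source)
# The function definition spans lines 1-3; the deepest child is the return on line 3.
print(get_last_lineno(tree.body[0]))  # 3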
def drain(iterable):
    if getattr(iterable, "popleft", False):
        def next_item(coll):
            return coll.popleft()
    elif getattr(iterable, "popitem", False):
        def next_item(coll):
            return coll.popitem()
    else:
        def next_item(coll):
            return coll.pop()

    while True:
        try:
            yield next_item(iterable)
        except (IndexError, KeyError):
            # PEP 479: return ends the generator; raising StopIteration here
            # would surface as RuntimeError on Python 3.7+.
            return
Helper method that empties an iterable as it is iterated over.

Works for:

* ``dict``
* ``collections.deque``
* ``list``
* ``set``
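A short usage sketch, assuming the drain generator above is importable:

from collections import deque

queue = deque([1, 2, 3])
print(list(drain(queue)))    # [1, 2, 3]; the deque is empty afterwards

mapping = {"a": 1, "b": 2}
print(list(drain(mapping)))  # [('b', 2), ('a', 1)] on CPython 3.7+; the dict is empty afterwards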
def has_changed(filename):
    key = os.path.abspath(filename)
    mtime = get_mtime(key)
    if key not in _mtime_cache:
        _mtime_cache[key] = mtime
        return True
    return mtime > _mtime_cache[key]
Check if filename has changed since the last check. If this is the first check, assume the file is changed.
def load_entry_points(self):
    if self.entry_point_group:
        task_packages = {}
        for item in pkg_resources.iter_entry_points(group=self.entry_point_group):
            try:
                pkg, related_name = item.module_name.rsplit('.', 1)
            except ValueError:
                warnings.warn(
                    'The celery task module "{}" was not loaded. '
                    'Defining modules in bare Python modules is no longer '
                    'supported due to Celery v4.2 constraints. Please '
                    'move the module into a Python package.'.format(
                        item.module_name
                    ),
                    RuntimeWarning
                )
                continue
            if related_name not in task_packages:
                task_packages[related_name] = []
            task_packages[related_name].append(pkg)

        if task_packages:
            for related_name, packages in task_packages.items():
                self.celery.autodiscover_tasks(
                    packages, related_name=related_name, force=True
                )
Load tasks from entry points.
def _get_jvm_opts(out_file, data):
    resources = config_utils.get_resources("purple", data["config"])
    jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx3500m"])
    jvm_opts = config_utils.adjust_opts(jvm_opts,
                                        {"algorithm": {"memory_adjust": {"direction": "increase",
                                                                         "maximum": "30000M",
                                                                         "magnitude": dd.get_cores(data)}}})
    jvm_opts += broad.get_default_jvm_opts(os.path.dirname(out_file))
    return jvm_opts
Retrieve Java options, adjusting memory for available cores.
def __json(self):
    if self.exclude_list is None:
        self.exclude_list = []

    fields = {}
    for key, item in vars(self).items():
        if hasattr(self, '_sa_instance_state'):
            if len(orm.attributes.instance_state(self).unloaded) > 0:
                mapper = inspect(self)
                for column in mapper.attrs:
                    # Attribute access alone; presumably intended to force
                    # loading of deferred/unloaded columns.
                    column.key
                    column.value
        if str(key).startswith('_') or key in self.exclude_list:
            continue
        fields[key] = item
    obj = Json.safe_object(fields)
    return str(obj)
Using the exclude lists, convert fields to a string.
def _TemplateExists(unused_value, context, args):
    try:
        name = args[0]
    except IndexError:
        raise EvaluationError('The "template" predicate requires an argument.')
    return context.HasTemplate(name)
Returns whether the given name is in the current Template's template group.
def read_comment(self, start: int, line: int, col: int, prev: Token) -> Token:
    body = self.source.body
    body_length = len(body)

    position = start
    while True:
        position += 1
        # Stop at the end of the body; `>=` avoids indexing one past the end.
        if position >= body_length:
            break
        char = body[position]
        if char < " " and char != "\t":
            break

    return Token(
        TokenKind.COMMENT,
        start,
        position,
        line,
        col,
        prev,
        body[start + 1: position],
    )
Read a comment token from the source file.
def create(**data):
    http_client = HttpClient()
    response, _ = http_client.post(routes.url(routes.PAYMENT_RESOURCE), data)
    return resources.Payment(**response)
Create a Payment request.

:param data: data required to create the payment
:return: The payment resource
:rtype: resources.Payment
def related_obj_to_dict(obj, **kwargs):
    kwargs.pop('formatter', None)
    suppress_private_attr = kwargs.get("suppress_private_attr", False)
    suppress_empty_values = kwargs.get("suppress_empty_values", False)
    attrs = fields(obj.__class__)
    return_dict = kwargs.get("dict_factory", OrderedDict)()
    for a in attrs:
        if suppress_private_attr and a.name.startswith("_"):
            continue
        metadata = a.metadata or {}
        formatter = metadata.get('formatter')
        value = getattr(obj, a.name)
        value = to_dict(value, formatter=formatter, **kwargs)
        if suppress_empty_values and value is None:
            continue
        key_name = a.metadata.get('key') or a.name
        return_dict[key_name] = value
    return return_dict
Convert a known related object to a dictionary.
def protocol_str(protocol):
    if protocol == const.PROTOCOL_MRP:
        return 'MRP'
    if protocol == const.PROTOCOL_DMAP:
        return 'DMAP'
    if protocol == const.PROTOCOL_AIRPLAY:
        return 'AirPlay'
    return 'Unknown'
Convert internal API protocol to string.
def _cellrepr(value, allow_formulas):
    if pd.isnull(value) is True:
        return ""
    if isinstance(value, float):
        value = repr(value)
    else:
        value = str(value)
    if (not allow_formulas) and value.startswith('='):
        value = "'%s" % value
    return value
Get a string representation of dataframe value.

:param :value: the value to represent
:param :allow_formulas: if True, allow values starting with '=' to be
    interpreted as formulas; otherwise, escape them with an apostrophe
    to avoid formula interpretation.
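An illustrative sketch of the escaping rule described above, assuming the _cellrepr helper is importable:

print(_cellrepr("=SUM(A1:A3)", allow_formulas=False))  # "'=SUM(A1:A3)"
print(_cellrepr("=SUM(A1:A3)", allow_formulas=True))   # "=SUM(A1:A3)"
print(_cellrepr(3.14, allow_formulas=True))            # "3.14"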
def clean_title(title):
    date_pattern = re.compile(r'\W*'
                              r'\d{1,2}'
                              r'[/\-.]'
                              r'\d{1,2}'
                              r'[/\-.]'
                              r'(?=\d*)(?:.{4}|.{2})'
                              r'\W*')
    title = date_pattern.sub(' ', title)
    title = re.sub(r'\s{2,}', ' ', title)
    title = title.strip()
    return title
Clean title -> remove dates, remove duplicated spaces and strip title.

Args:
    title (str): Title.

Returns:
    str: Clean title without dates, duplicated, trailing and leading spaces.
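A small usage sketch, assuming clean_title above is in scope; the titles are hypothetical:

print(clean_title("Weekly report 12/31/2015  final"))  # "Weekly report final"
print(clean_title("  Meeting   notes 1-2-16 "))        # "Meeting notes"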
def _secret_yaml(loader, node):
    fname = os.path.join(os.path.dirname(loader.name), "secrets.yaml")

    try:
        with open(fname, encoding="utf-8") as secret_file:
            secrets = YAML(typ="safe").load(secret_file)
    except FileNotFoundError:
        raise ValueError("Secrets file {} not found".format(fname)) from None

    try:
        return secrets[node.value]
    except KeyError:
        raise ValueError("Secret {} not found".format(node.value)) from None
Load secrets and embed them into the configuration YAML.
def decimal(self, var, default=NOTSET, force=True):
    return self._get(var, default=default, cast=Decimal, force=force)
Convenience method for casting to a decimal.Decimal Note: Casting
def error(self, reason=None):
    self.set_status(Report.ERROR)
    if reason:
        self.add('reason', reason)
Set the test status to Report.ERROR, and set the error reason

:param reason: error reason (default: None)
def authenticate(self, username=None, password=None):
    u = self.username
    p = self.password
    if username and password:
        u = username
        p = password

    client = self.client()
    query = client.authenticated_query(username=u, password=p)
    res = client.post(query)
    ofx = BeautifulSoup(res, 'lxml')
    sonrs = ofx.find('sonrs')
    code = int(sonrs.find('code').contents[0].strip())
    try:
        status = sonrs.find('message').contents[0].strip()
    except Exception:
        status = ''

    if code == 0:
        return 1

    raise ValueError(status)
Test the authentication credentials

Raises a ``ValueError`` if there is a problem authenticating with the
human readable reason given by the institution.

:param username: optional username (use self.username by default)
:type username: string or None
:param password: optional password (use self.password by default)
:type password: string or None
def slices(src_path):
    pages = list_slices(src_path)
    slices = []
    for page in pages:
        slices.extend(page.slices)
    return slices
Return slices as a flat list
def _get_all_policy_ids(zap_helper):
    policies = zap_helper.zap.ascan.policies()
    return [p['id'] for p in policies]
Get all policy IDs.
def set_defaults(self):
    for key, val in self.c_default.items():
        self._dict[key] = val
Based on the specification, set a parameter's value to the default value.
def _format_method_nodes(self, task_method, modulename, classname):
    methodname = task_method.__name__
    fullname = '.'.join((modulename, classname, methodname))

    signature = Signature(task_method, bound_method=True)
    desc_sig_node = self._format_signature(
        signature, modulename, classname, fullname, 'py:meth')

    content_node = desc_content()
    content_node += self._create_doc_summary(task_method, fullname, 'py:meth')

    desc_node = desc()
    desc_node['noindex'] = True
    desc_node['domain'] = 'py'
    desc_node['objtype'] = 'method'
    desc_node += desc_sig_node
    desc_node += content_node

    return desc_node
Create a ``desc`` node summarizing a method docstring.
def sortByColumn(self, index):
    if self.sort_old == [None]:
        self.header_class.setSortIndicatorShown(True)
    sort_order = self.header_class.sortIndicatorOrder()
    self.sig_sort_by_column.emit()
    if not self.model().sort(index, sort_order):
        if len(self.sort_old) != 2:
            self.header_class.setSortIndicatorShown(False)
        else:
            self.header_class.setSortIndicator(self.sort_old[0], self.sort_old[1])
        return
    self.sort_old = [index, self.header_class.sortIndicatorOrder()]
Implement a column sort.
def buildASNList(rootnames, asnname, check_for_duplicates=True):
    filelist, duplicates = checkForDuplicateInputs(rootnames)

    if check_for_duplicates and duplicates:
        origasn = changeSuffixinASN(asnname, 'flt')
        dupasn = changeSuffixinASN(asnname, 'flc')

        errstr = 'ERROR:\nMultiple valid input files found:\n'
        for fname, dname in zip(filelist, duplicates):
            errstr += '    %s    %s\n' % (fname, dname)
        errstr += ('\nNew association files have been generated for each '
                   'version of these files.\n    %s\n    %s\n\nPlease '
                   're-start astrodrizzle using one of these new ASN files or '
                   'use wildcards for the input to only select one type of '
                   'input file.' % (dupasn, origasn))

        print(textutil.textbox(errstr), file=sys.stderr)

        raise ValueError

    return filelist
Return the list of filenames for a given set of rootnames
def spearmanr(self, target, correlation_length, mask=NotSpecified):
    from .statistical import RollingSpearman
    return RollingSpearman(
        base_factor=self,
        target=target,
        correlation_length=correlation_length,
        mask=mask,
    )
Construct a new Factor that computes rolling spearman rank correlation
coefficients between `target` and the columns of `self`.

This method can only be called on factors which are deemed safe for use
as inputs to other factors. This includes `Returns` and any factors
created from `Factor.rank` or `Factor.zscore`.

Parameters
----------
target : zipline.pipeline.Term with a numeric dtype
    The term used to compute correlations against each column of data
    produced by `self`. This may be a Factor, a BoundColumn or a Slice.
    If `target` is two-dimensional, correlations are computed asset-wise.
correlation_length : int
    Length of the lookback window over which to compute each correlation
    coefficient.
mask : zipline.pipeline.Filter, optional
    A Filter describing which assets should have their correlation with
    the target slice computed each day.

Returns
-------
correlations : zipline.pipeline.factors.RollingSpearman
    A new Factor that will compute correlations between `target` and the
    columns of `self`.

Examples
--------
Suppose we want to create a factor that computes the correlation between
AAPL's 10-day returns and the 10-day returns of all other assets,
computing each correlation over 30 days. This can be achieved by doing
the following::

    returns = Returns(window_length=10)
    returns_slice = returns[sid(24)]
    aapl_correlations = returns.spearmanr(
        target=returns_slice, correlation_length=30,
    )

This is equivalent to doing::

    aapl_correlations = RollingSpearmanOfReturns(
        target=sid(24), returns_length=10, correlation_length=30,
    )

See Also
--------
:func:`scipy.stats.spearmanr`
:class:`zipline.pipeline.factors.RollingSpearmanOfReturns`
:meth:`Factor.pearsonr`
def chain(*steps):
    if not steps:
        return

    def on_done(sprite=None):
        chain(*steps[2:])

    obj, params = steps[:2]

    if len(steps) > 2:
        params['on_complete'] = on_done

    if callable(obj):
        obj(**params)
    else:
        obj.animate(**params)
chains the given list of functions and object animations into a callback string. Expects an interlaced list of object and params, something like: object, {params}, callable, {params}, object, {}, object, {params} Assumes that all callees accept on_complete named param. The last item in the list can omit that. XXX - figure out where to place these guys as they are quite useful
def find(self, name, current_location):
    assert isinstance(name, basestring)
    assert isinstance(current_location, basestring)

    project_module = None

    if name[0] == '/':
        project_module = self.id2module.get(name)

    if not project_module:
        location = os.path.join(current_location, name)
        project_module = self.module_name(location)
        if project_module not in self.jamfile_modules:
            if b2.util.path.glob([location], self.JAMROOT + self.JAMFILE):
                project_module = self.load(location)
            else:
                project_module = None

    return project_module
Given 'name' which can be project-id or plain directory name, return project module corresponding to that id or directory. Returns nothing if the project is not found.
def vn(x):
    if x == []:
        return None
    if isinstance(x, list):
        return '|'.join(x)
    if isinstance(x, datetime):
        return x.isoformat()
    return x
value or none, returns none if x is an empty list
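A small usage sketch, assuming the vn helper above is in scope:

from datetime import datetime

print(vn([]))                          # None
print(vn(["a", "b", "c"]))             # "a|b|c"
print(vn(datetime(2020, 1, 2, 3, 4)))  # "2020-01-02T03:04:00"
print(vn(42))                          # 42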
async def popen_uci(command: Union[str, List[str]], *, setpgrp: bool = False, loop=None,
                    **popen_args: Any) -> Tuple[asyncio.SubprocessTransport, UciProtocol]:
    transport, protocol = await UciProtocol.popen(command, setpgrp=setpgrp, loop=loop, **popen_args)
    try:
        await protocol.initialize()
    except:
        transport.close()
        raise
    return transport, protocol
Spawns and initializes an UCI engine.

:param command: Path of the engine executable, or a list including the
    path and arguments.
:param setpgrp: Open the engine process in a new process group. This
    will stop signals (such as keyboard interrupts) from propagating from
    the parent process. Defaults to ``False``.
:param popen_args: Additional arguments for
    `popen <https://docs.python.org/3/library/subprocess.html#popen-constructor>`_.
    Do not set ``stdin``, ``stdout``, ``bufsize`` or ``universal_newlines``.

Returns a subprocess transport and engine protocol pair.
def insert_row(self, index, row):
    row = self._validate_row(row)
    row_obj = RowData(self, row)
    self._table.insert(index, row_obj)
Insert a row before index in the table.

Parameters
----------
index : int
    List index rules apply
row : iterable
    Any iterable of appropriate length.

Raises
------
TypeError:
    If `row` is not an iterable.
ValueError:
    If size of `row` is inconsistent with the current number of columns.
def no_auth(self):
    old_basic_auth, self.auth = self.auth, None
    old_token_auth = self.headers.pop('Authorization', None)

    yield

    self.auth = old_basic_auth
    if old_token_auth:
        self.headers['Authorization'] = old_token_auth
Unset authentication temporarily as a context manager.
def evolve(self, years):
    world_file = fldr + os.sep + self.name + '.txt'
    self.build_base()
    self.world.add_mountains()
    self.add_life()
    self.world.grd.save(world_file)
    print('TODO - run ' + str(years) + ' years')
run the evolution of the planet to see how it looks after 'years'
def _dump_multipoint(obj, big_endian, meta):
    coords = obj['coordinates']
    vertex = coords[0]
    num_dims = len(vertex)

    wkb_string, byte_fmt, byte_order = _header_bytefmt_byteorder(
        'MultiPoint', num_dims, big_endian, meta
    )

    point_type = _WKB[_INT_TO_DIM_LABEL.get(num_dims)]['Point']
    if big_endian:
        point_type = BIG_ENDIAN + point_type
    else:
        point_type = LITTLE_ENDIAN + point_type[::-1]

    wkb_string += struct.pack('%sl' % byte_order, len(coords))

    for vertex in coords:
        wkb_string += point_type
        wkb_string += struct.pack(byte_fmt, *vertex)

    return wkb_string
Dump a GeoJSON-like `dict` to a multipoint WKB string. Input parameters and output are similar to :func:`_dump_point`.
def find_paths_breadth_first(from_target, to_target, log):
    log.debug('Looking for all paths from {} to {}'.format(from_target.address.reference(),
                                                           to_target.address.reference()))

    if from_target == to_target:
        yield [from_target]
        return

    visited_edges = set()
    to_walk_paths = deque([[from_target]])
    while len(to_walk_paths) > 0:
        cur_path = to_walk_paths.popleft()
        target = cur_path[-1]

        if len(cur_path) > 1:
            prev_target = cur_path[-2]
        else:
            prev_target = None
        current_edge = (prev_target, target)

        if current_edge not in visited_edges:
            for dep in target.dependencies:
                dep_path = cur_path + [dep]
                if dep == to_target:
                    yield dep_path
                else:
                    to_walk_paths.append(dep_path)
            visited_edges.add(current_edge)
Yields the paths from from_target to to_target if they exist. The paths are returned ordered by length, shortest first. If there are cycles, it checks visited edges to prevent recrossing them.
def list_entitlements(owner, repo, page, page_size, show_tokens):
    client = get_entitlements_api()

    with catch_raise_api_exception():
        data, _, headers = client.entitlements_list_with_http_info(
            owner=owner,
            repo=repo,
            page=page,
            page_size=page_size,
            show_tokens=show_tokens,
        )

    ratelimits.maybe_rate_limit(client, headers)
    page_info = PageInfo.from_headers(headers)
    entitlements = [ent.to_dict() for ent in data]
    return entitlements, page_info
Get a list of entitlements on a repository.
def basename(path: Optional[str]) -> Optional[str]:
    if path is not None:
        return os.path.basename(path)
Returns the final component of a pathname, or None if the argument is None
def get_new_session(self):
    session = Session()
    session.headers = self.headers
    session.proxies = self._get_request_proxies()

    return session
Returns a new session using the object's proxies and headers
def get_registration_fields(xmlstream, timeout=60):
    iq = aioxmpp.IQ(
        to=aioxmpp.JID.fromstr(xmlstream._to),
        type_=aioxmpp.IQType.GET,
        payload=xso.Query()
    )
    iq.autoset_id()

    reply = yield from aioxmpp.protocol.send_and_wait_for(xmlstream, [iq], [aioxmpp.IQ], timeout=timeout)
    return reply.payload
A query is sent to the server to obtain the fields that need to be
filled to register with the server.

:param xmlstream: Specifies the stream connected to the server where
    the account will be created.
:type xmlstream: :class:`aioxmpp.protocol.XMLStream`
:param timeout: Maximum time in seconds to wait for an IQ response, or
    :data:`None` to disable the timeout.
:type timeout: :class:`~numbers.Real` or :data:`None`
:return: :attr:`list`
def _update_frozencell(self, frozen):
    toggle_state = frozen is not False
    self.ToggleTool(wx.FONTFLAG_MASK, toggle_state)
Updates frozen cell widget

Parameters
----------
frozen: Bool or string
    Untoggled iff False
def read_magic_file(self, path, sort_by_this_name):
    DATA = {}
    try:
        with open(path, 'r') as finput:
            lines = list(finput.readlines()[1:])
    except FileNotFoundError:
        return []
    line = lines[0]
    header = line.strip('\n').split('\t')
    error_strings = []
    for line in lines[1:]:
        tmp_data = {}
        tmp_line = line.strip('\n').split('\t')
        for i in range(len(tmp_line)):
            tmp_data[header[i]] = tmp_line[i]
        if tmp_data[sort_by_this_name] in list(DATA.keys()):
            error_string = "-E- ERROR: magic file %s has more than one line for %s %s" % (
                path, sort_by_this_name, tmp_data[sort_by_this_name])
            if error_string not in error_strings:
                print(error_string)
                error_strings.append(error_string)
        DATA[tmp_data[sort_by_this_name]] = tmp_data
    return DATA
Reads a magic-formatted data file from path and sorts the keys according
to sort_by_this_name

Parameters
----------
path : path to file to read
sort_by_this_name : variable to sort data by
def remove_template(self, tpl):
    try:
        del self.templates[tpl.uuid]
    except KeyError:
        pass
    self.unindex_template(tpl)
Removes and un-index a template from the `templates` container.

:param tpl: The template to remove
:type tpl: alignak.objects.item.Item
:return: None
def open_netcdf_reader(self, flatten=False, isolate=False, timeaxis=1):
    self._netcdf_reader = netcdftools.NetCDFInterface(
        flatten=bool(flatten),
        isolate=bool(isolate),
        timeaxis=int(timeaxis))
Prepare a new |NetCDFInterface| object for reading data.
def wirevector_subset(self, cls=None, exclude=tuple()):
    if cls is None:
        initial_set = self.wirevector_set
    else:
        initial_set = (x for x in self.wirevector_set if isinstance(x, cls))
    if exclude == tuple():
        return set(initial_set)
    else:
        return set(x for x in initial_set if not isinstance(x, exclude))
Return set of wirevectors, filtered by the type or tuple of types provided as cls. If no cls is specified, the full set of wirevectors associated with the Block are returned. If cls is a single type, or a tuple of types, only those wirevectors of the matching types will be returned. This is helpful for getting all inputs, outputs, or registers of a block for example.
def process(self, checksum, revision=None):
    revert = None

    if checksum in self:
        reverteds = list(self.up_to(checksum))

        if len(reverteds) > 0:
            revert = Revert(revision, reverteds, self[checksum])

    self.insert(checksum, revision)
    return revert
Process a new revision and detect a revert if it occurred. Note that you
can pass whatever you like as `revision` and it will be returned in the
case that a revert occurs.

:Parameters:
    checksum : str
        Any identity-matchable string-based hash of revision content
    revision : `mixed`
        Revision metadata. Note that any data will just be returned in
        the case of a revert.

:Returns:
    a :class:`~mwreverts.Revert` if one occurred or `None`
def get_monkeypatched_pathset(self):
    from pip_shims.shims import InstallRequirement

    uninstall_path = InstallRequirement.__module__.replace(
        "req_install", "req_uninstall"
    )
    req_uninstall = self.safe_import(uninstall_path)
    self.recursive_monkey_patch.monkey_patch(
        PatchedUninstaller, req_uninstall.UninstallPathSet
    )
    return req_uninstall.UninstallPathSet
Returns a monkeypatched `UninstallPathset` used to uninstall packages
from the virtualenv

:return: A patched `UninstallPathset` which enables uninstallation of
    venv packages
:rtype: :class:`pip._internal.req.req_uninstall.UninstallPathset`
def add_current_user_is_applied_representation(func):
    @wraps(func)
    def _impl(self, instance):
        ret = func(self, instance)
        user = self.context["request"].user
        applied = False
        if not user.is_anonymous():
            try:
                applied = models.Apply.objects.filter(user=user, project=instance).count() > 0
            except:
                pass
        ret["current_user_is_applied"] = applied
        return ret
    return _impl
Used to decorate Serializer.to_representation method. It sets the field "current_user_is_applied" if the user is applied to the project
def __set_log_file_name(self):
    dir, _ = os.path.split(self.__logFileBasename)
    if len(dir) and not os.path.exists(dir):
        os.makedirs(dir)
    self.__logFileName = self.__logFileBasename + "." + self.__logFileExtension
    number = 0
    while os.path.isfile(self.__logFileName):
        if os.stat(self.__logFileName).st_size / 1e6 < self.__maxlogFileSize:
            break
        number += 1
        self.__logFileName = self.__logFileBasename + "_" + str(number) + "." + self.__logFileExtension
    self.__logFileStream = None
Automatically set logFileName attribute
def _seconds_to_days(cls, val, **kwargs):
    zero_value = kwargs.get('zero_value', 0)
    if val is not None:
        if val == zero_value:
            return 0
        return val / 86400
    else:
        return 'Not Defined'
converts a number of seconds to days
def bind(self, name, filterset):
    if self.name is not None:
        name = self.name
    self.field.bind(name, self)
Attach the filter to a filterset; gives it a name to use when extracting arguments from the querydict.
def fetch(self):
    params = values.of({})

    payload = self._version.fetch(
        'GET',
        self._uri,
        params=params,
    )

    return ExportInstance(self._version, payload, resource_type=self._solution['resource_type'], )
Fetch a ExportInstance :returns: Fetched ExportInstance :rtype: twilio.rest.preview.bulk_exports.export.ExportInstance
def hide(self, _unhide=False):
    return self.reddit_session.hide(self.fullname, _unhide=_unhide)
Hide object in the context of the logged in user.

:param _unhide: If True, unhide the item instead. Use
    :meth:`~praw.objects.Hideable.unhide` instead of setting this
    manually.

:returns: The json response from the server.
def merge(self, paths):
    topojson_binary = 'node_modules/bin/topojson'
    if not os.path.exists(topojson_binary):
        topojson_binary = 'topojson'

    merge_cmd = '%(binary)s -o %(output_path)s --bbox -p -- %(paths)s' % {
        'output_path': self.args.output_path,
        'paths': ' '.join(paths),
        'binary': topojson_binary
    }

    sys.stdout.write('Merging layers\n')
    if self.args.verbose:
        sys.stdout.write('  %s\n' % merge_cmd)

    r = envoy.run(merge_cmd)

    if r.status_code != 0:
        sys.stderr.write(r.std_err)
Merge data layers into a single topojson file.
def load_projects(self):
    server_config = Config.instance().get_section_config("Server")
    projects_path = os.path.expanduser(server_config.get("projects_path", "~/GNS3/projects"))
    os.makedirs(projects_path, exist_ok=True)
    try:
        for project_path in os.listdir(projects_path):
            project_dir = os.path.join(projects_path, project_path)
            if os.path.isdir(project_dir):
                for file in os.listdir(project_dir):
                    if file.endswith(".gns3"):
                        try:
                            yield from self.load_project(os.path.join(project_dir, file), load=False)
                        except (aiohttp.web_exceptions.HTTPConflict, NotImplementedError):
                            pass
    except OSError as e:
        log.error(str(e))
Preload the list of projects from disk
def name(self) -> Optional[str]:
    _, params = parse_content_disposition(
        self.headers.get(CONTENT_DISPOSITION))
    return content_disposition_filename(params, 'name')
Returns name specified in Content-Disposition header or None if missed or header is malformed.
def router_id(self, **kwargs):
    router_id = kwargs.pop('router_id')
    rbridge_id = kwargs.pop('rbridge_id', '1')
    callback = kwargs.pop('callback', self._callback)

    rid_args = dict(rbridge_id=rbridge_id, router_id=router_id)
    config = self._rbridge.rbridge_id_ip_rtm_config_router_id(**rid_args)

    return callback(config)
Configures device's Router ID.

Args:
    router_id (str): Router ID for the device.
    rbridge_id (str): The rbridge ID of the device on which BGP will be
        configured in a VCS fabric.
    callback (function): A function executed upon completion of the
        method. The only parameter passed to `callback` will be the
        ``ElementTree`` `config`.

Returns:
    Return value of `callback`.

Raises:
    KeyError: if `router_id` is not specified.

Examples:
    >>> import pynos.device
    >>> conn = ('10.24.39.211', '22')
    >>> auth = ('admin', 'password')
    >>> with pynos.device.Device(conn=conn, auth=auth) as dev:
    ...     output = dev.system.router_id(router_id='10.24.39.211',
    ...                                   rbridge_id='225')
    ...     dev.system.router_id() # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    KeyError
def title_line(text):
    columns = shutil.get_terminal_size()[0]
    start = columns // 2 - len(text) // 2
    output = '=' * columns + '\n\n' + \
             ' ' * start + str(text) + "\n\n" + \
             '=' * columns + '\n'
    return output
Returns a string that represents the text as a title blurb
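A small usage sketch, assuming title_line above is importable; the actual banner width comes from shutil.get_terminal_size():

print(title_line("Results"))
# prints a full-width line of '=', a blank line, "Results" roughly centred,
# another blank line, and a closing line of '='.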
def compress(samples, run_parallel):
    to_cram = []
    finished = []
    for data in [x[0] for x in samples]:
        if "cram" in dd.get_archive(data) or "cram-lossless" in dd.get_archive(data):
            to_cram.append([data])
        else:
            finished.append([data])
    crammed = run_parallel("archive_to_cram", to_cram)
    return finished + crammed
Perform compression of output files for long term storage.
def groups_setPurpose(self, *, channel: str, purpose: str, **kwargs) -> SlackResponse:
    kwargs.update({"channel": channel, "purpose": purpose})
    return self.api_call("groups.setPurpose", json=kwargs)
Sets the purpose for a private channel.

Args:
    channel (str): The channel id. e.g. 'G1234567890'
    purpose (str): The new purpose for the channel. e.g. 'My Purpose'
def calc_login_v1(self):
    der = self.parameters.derived.fastaccess
    flu = self.sequences.fluxes.fastaccess
    log = self.sequences.logs.fastaccess
    for idx in range(der.nmb):
        for jdx in range(der.ma_order[idx] - 2, -1, -1):
            log.login[idx, jdx + 1] = log.login[idx, jdx]
    for idx in range(der.nmb):
        log.login[idx, 0] = flu.qpin[idx]
Refresh the input log sequence for the different MA processes.

Required derived parameters:
    |Nmb|
    |MA_Order|

Required flux sequence:
    |QPIn|

Updated log sequence:
    |LogIn|

Example:

    Assume there are three response functions, involving one, two and
    three MA coefficients respectively:

    >>> from hydpy.models.arma import *
    >>> parameterstep()
    >>> derived.nmb(3)
    >>> derived.ma_order.shape = 3
    >>> derived.ma_order = 1, 2, 3
    >>> fluxes.qpin.shape = 3
    >>> logs.login.shape = (3, 3)

    The "memory values" of the different MA processes are defined as
    follows (one row for each process):

    >>> logs.login = ((1.0, nan, nan),
    ...               (2.0, 3.0, nan),
    ...               (4.0, 5.0, 6.0))

    These are the new inflow discharge portions to be included into the
    memories of the different processes:

    >>> fluxes.qpin = 7.0, 8.0, 9.0

    Through applying method |calc_login_v1| all values already existing
    are shifted to the right ("into the past"). Values, which are no
    longer required due to the limited order or the different MA
    processes, are discarded. The new values are inserted in the first
    column:

    >>> model.calc_login_v1()
    >>> logs.login
    login([[7.0, nan, nan],
           [8.0, 2.0, nan],
           [9.0, 4.0, 5.0]])
def aroon_down(data, period):
    catch_errors.check_for_period_error(data, period)
    period = int(period)
    a_down = [((period - list(reversed(data[idx + 1 - period:idx + 1])).index(np.min(data[idx + 1 - period:idx + 1]))) /
               float(period)) * 100
              for idx in range(period - 1, len(data))]
    a_down = fill_for_noncomputable_vals(data, a_down)
    return a_down
Aroon Down. Formula: AROONDWN = (((PERIOD) - (PERIODS SINCE PERIOD LOW)) / (PERIOD)) * 100
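A worked instance of the formula above, inlined as a standalone sketch with hypothetical closing prices:

import numpy as np

period = 5
window = [10.0, 9.0, 7.0, 8.0, 9.5]  # the period low (7.0) occurred 2 periods ago
periods_since_low = list(reversed(window)).index(np.min(window))
print(((period - periods_since_low) / float(period)) * 100)  # 60.0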
def update(self):
    if not os.path.isdir(os.path.join(self.path)):
        os.makedirs(self.path)

    if not os.path.isdir(os.path.join(self.path, 'refs')):
        subprocess.check_output([
            'git', 'clone', '--bare', self.repo_git, self.path
        ])

    self.run(['gc', '--auto', '--prune=all'])
    self.run(['fetch', '-p', 'origin', '+refs/heads/*:refs/heads/*'])
    self.run(['fetch', 'origin', '+refs/pull/*/head:refs/pull/*'])
    self.run(['fetch', 'origin', '+refs/merge-requests/*/head:refs/pull/*'])
Clone the git repository, or update it if it already exists.
def _format_pair_with_equals(explode, separator, escape, key, value):
    if not value:
        return key + '='
    return _format_pair(explode, separator, escape, key, value)
Format a key, value pair including the equals sign when there is no value
def rastrigin(self, x):
    if not isscalar(x[0]):
        N = len(x[0])
        return [10 * N + sum(xi**2 - 10 * np.cos(2 * np.pi * xi)) for xi in x]
    N = len(x)
    return 10 * N + sum(x**2 - 10 * np.cos(2 * np.pi * x))
Rastrigin test objective function
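A quick standalone check of the formula used above: the Rastrigin function has its global minimum of 0 at the origin.

import numpy as np

x = np.zeros(5)
print(10 * len(x) + sum(x**2 - 10 * np.cos(2 * np.pi * x)))  # 0.0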
def fix_attr_encoding(ds):
    def _maybe_del_attr(da, attr):
        if attr in da.attrs:
            del da.attrs[attr]
        return da

    def _maybe_decode_attr(da, attr):
        # The original parenthesisation, `type(da.attrs[attr] == bool)`, was
        # always truthy; an isinstance check is presumably what was intended.
        if (attr in da.attrs) and isinstance(da.attrs[attr], bool):
            da.attrs[attr] = int(da.attrs[attr])
        return da

    for v in ds.data_vars:
        da = ds[v]
        da = _maybe_del_attr(da, 'scale_factor')
        da = _maybe_del_attr(da, 'units')
        da = _maybe_decode_attr(da, 'hydrocarbon')
        da = _maybe_decode_attr(da, 'chemical')

    if hasattr(ds, 'time'):
        times = ds.time
        times = _maybe_del_attr(times, 'units')

    return ds
This is a temporary hot-fix to handle the way metadata is encoded when we read data directly from bpch files. It removes the 'scale_factor' and 'units' attributes we encode with the data we ingest, converts the 'hydrocarbon' and 'chemical' attribute to a binary integer instead of a boolean, and removes the 'units' attribute from the "time" dimension since that too is implicitly encoded. In future versions of this library, when upstream issues in decoding data wrapped in dask arrays is fixed, this won't be necessary and will be removed.
def prune(self):
    target_user_ids = self.get_queryset().values_list('id', flat=True)
    exclude_user_ids = SentDrip.objects.filter(date__lt=conditional_now(),
                                               drip=self.drip_model,
                                               user__id__in=target_user_ids)\
                                       .values_list('user_id', flat=True)
    self._queryset = self.get_queryset().exclude(id__in=exclude_user_ids)
Do an exclude for all Users who have a SentDrip already.
def get_data_by_hex_uuid_or_404(model, hex_uuid, kind=''):
    uuid = UUID(hex_uuid)
    bin_uuid = uuid.get_bytes()
    instance = get_instance_by_bin_uuid(model, bin_uuid)
    if not instance:
        return abort(404)
    return ins2dict(instance, kind)
Get instance data by uuid and kind. Raise 404 Not Found if there is no
data. This requires model has a `bin_uuid` column.

:param model: a string, model name in rio.models
:param hex_uuid: a hex uuid string in 24-bytes human-readable representation.
:return: a dict.
def _to_numpy(nd4j_array):
    buff = nd4j_array.data()
    address = buff.pointer().address()
    dtype = get_context_dtype()
    mapping = {
        'double': ctypes.c_double,
        'float': ctypes.c_float
    }
    Pointer = ctypes.POINTER(mapping[dtype])
    pointer = ctypes.cast(address, Pointer)
    np_array = np.ctypeslib.as_array(pointer, tuple(nd4j_array.shape()))
    return np_array
Convert nd4j array to numpy array
def delete(self):
    params = {"email": self.email_address}
    response = self._delete("/admins.json", params=params)
Deletes the administrator from the account.
def exit_frames(self):
    if self._exit_frames is None:
        exit_frames = []
        for frame in self.frames:
            if any(c.group != self for c in frame.children):
                exit_frames.append(frame)
        self._exit_frames = exit_frames
    return self._exit_frames
Returns a list of frames whose children include a frame outside of the group
def get_new_locations(self, urls):
    seen = set(urls)
    for i in urls:
        for k in self.get_locations(i):
            if k not in seen:
                seen.add(k)
                yield k
Get valid location header values for all given URLs.

The returned values are new, that is: they do not repeat any value
contained in the original input. Only unique values are yielded.

:param urls: a list of URL addresses
:returns: valid location header values from responses to the URLs
def service(self):
    if not self._service:
        self._service = self._client.service(id=self.service_id)
    return self._service
Retrieve the `Service` object to which this execution is associated.
def dataset_format_to_extension(ds_format):
    try:
        return DATASET_FORMATS[ds_format]
    except KeyError:
        raise ValueError(
            "dataset_format is expected to be one of %s. '%s' is not valid"
            % (", ".join(DATASET_FORMATS.keys()), ds_format)
        )
Get the preferred Dataset format extension

:param str ds_format: Format of the Dataset (expected to be one of `csv` or `xml`)
:rtype: str
def fai_from_bam(ref_file, bam_file, out_file, data):
    contigs = set([x.contig for x in idxstats(bam_file, data)])
    if not utils.file_uptodate(out_file, bam_file):
        with open(ref.fasta_idx(ref_file, data["config"])) as in_handle:
            with file_transaction(data, out_file) as tx_out_file:
                with open(tx_out_file, "w") as out_handle:
                    for line in (l for l in in_handle if l.strip()):
                        if line.split()[0] in contigs:
                            out_handle.write(line)
    return out_file
Create a fai index with only contigs in the input BAM file.
def package_manager_owns(self, dist):
    if dist.location.lower() == get_python_lib().lower():
        filename = os.path.join(dist.location, dist.egg_name() + ".egg-info")
    else:
        filename = dist.location
    status, output = getstatusoutput("/usr/bin/acmefile -q %s" % filename)
    if status == 0:
        return self.name
    else:
        return ""
Returns True if package manager 'owns' file
Returns False if package manager does not 'own' file

There is currently no way to determine if distutils or setuptools
installed a package. A future feature of setuptools will make a package
manifest which can be checked.

'filename' must be the full path to file
def generate_routes(config):
    routes = []

    for name, config in iteritems(config):
        pattern = r'^%s(?P<url>.*)$' % re.escape(config['prefix'].lstrip('/'))
        proxy = generate_proxy(
            prefix=config['prefix'],
            base_url=config['base_url'],
            verify_ssl=config.get('verify_ssl', True),
            middleware=config.get('middleware'),
            append_middleware=config.get('append_middleware'),
            cert=config.get('cert'),
            timeout=config.get('timeout'))
        proxy_view_function = proxy.as_view()
        proxy_view_function.csrf_exempt = config.get('csrf_exempt', True)
        routes.append(url(pattern, proxy_view_function, name=name))

    return routes
Generate a list of urls that map to generated proxy views.

generate_routes({
    'test_proxy': {
        'base_url': 'https://google.com/',
        'prefix': '/test_prefix/',
        'verify_ssl': False,
        'csrf_exempt': False,
        'middleware': ['djproxy.proxy_middleware.AddXFF'],
        'append_middleware': ['djproxy.proxy_middleware.AddXFF'],
        'timeout': 3.0,
        'cert': None
    }
})

Required configuration keys:
    * `base_url`
    * `prefix`

Optional configuration keys:
    * `verify_ssl`: defaults to `True`.
    * `csrf_exempt`: defaults to `True`.
    * `cert`: defaults to `None`.
    * `timeout`: defaults to `None`.
    * `middleware`: Defaults to `None`. Specifying `None` causes djproxy to
      use the default middleware set. If a list is passed, the default
      middleware list specified by the HttpProxy definition will be replaced
      with the provided list.
    * `append_middleware`: Defaults to `None`. `None` results in no changes
      to the default middleware set. If a list is specified, the list will be
      appended to the default middleware list specified in the HttpProxy
      definition or, if provided, the middleware key specified in the config
      dict.

Returns:
    [
        url(r'^test_prefix/', GeneratedProxy.as_view(), name='test_proxy'),
    ]
def AssertType(value, expected_type):
    if not isinstance(value, expected_type):
        message = "Expected type `%r`, but got value `%r` of type `%s`"
        message %= (expected_type, value, type(value))
        raise TypeError(message)
Ensures that given value has certain type.

Args:
    value: A value to assert the type for.
    expected_type: An expected type for the given value.

Raises:
    TypeError: If given value does not have the expected type.
def infer_len(node, context=None):
    call = arguments.CallSite.from_call(node)
    if call.keyword_arguments:
        raise UseInferenceDefault("TypeError: len() must take no keyword arguments")
    if len(call.positional_arguments) != 1:
        raise UseInferenceDefault(
            "TypeError: len() must take exactly one argument "
            "({len}) given".format(len=len(call.positional_arguments))
        )
    [argument_node] = call.positional_arguments
    try:
        return nodes.Const(helpers.object_len(argument_node, context=context))
    except (AstroidTypeError, InferenceError) as exc:
        raise UseInferenceDefault(str(exc)) from exc
Infer length calls

:param nodes.Call node: len call to infer
:param context.InferenceContext: node context
:rtype nodes.Const: a Const node with the inferred length, if possible
def count(self):
    return (
        self
        .mapPartitions(lambda p: [sum(1 for _ in p)])
        .reduce(operator.add)
    )
Count elements per RDD. Creates a new RDD stream where each RDD has a single entry that is the count of the elements. :rtype: DStream
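A plain-Python analogue of the map-partitions-then-reduce pattern used above; the partition contents are illustrative stand-ins for an RDD's partitions:

from functools import reduce
import operator

partitions = [[1, 2, 3], [4, 5], [6]]                     # stand-in for an RDD's partitions
per_partition = [sum(1 for _ in p) for p in partitions]   # the mapPartitions step
print(reduce(operator.add, per_partition))                # the reduce step -> 6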