code
stringlengths
51
2.34k
docstring
stringlengths
11
171
async def cancel_remaining(self):
    """Cancel every pending task and wait for each to finish."""
    self._closed = True
    pending = list(self._pending)
    for task in pending:
        task.cancel()
    # Await each task so cancellation has fully propagated before returning.
    for task in pending:
        with suppress(CancelledError):
            await task
Cancel all remaining tasks.
def add_tag(self, resource_type, resource_id, tag, **_params):
    """Attach *tag* to the identified resource via an HTTP PUT."""
    url = self.tag_path % (resource_type, resource_id, tag)
    return self.put(url)
Add a tag on the resource.
def scaledBy(self, scale):
    """Return a copy of this Selector with scale denominators scaled by *scale*."""
    scaled = deepcopy(self)
    for test in scaled.elements[0].tests:
        if type(test.value) not in (int, float):
            continue
        if test.property == 'scale-denominator':
            test.value /= scale
        elif test.property == 'zoom':
            # Zoom levels are logarithmic: doubling the scale adds one level.
            test.value += log(scale) / log(2)
    return scaled
Return a new Selector with scale denominators scaled by a number.
def allocate_buffers(self):
    """Create the ragged array that will be filled when we ask for items."""
    if self.ite_len is None:
        len(self)  # force lazy computation of the iterator length
    self.idx = LanguageModelPreLoader.CircularIndex(
        len(self.dataset.x.items), not self.backwards)
    # batch holds bptt+1 tokens per row; x and y are shifted views of it.
    self.batch = np.zeros((self.bs, self.bptt + 1), dtype=np.int64)
    self.batch_x = self.batch[:, 0:self.bptt]
    self.batch_y = self.batch[:, 1:self.bptt + 1]
    self.ro = np.zeros(self.bs, dtype=np.int64)
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; use int64
    # to match self.ro.
    self.ri = np.zeros(self.bs, dtype=np.int64)
Create the ragged array that will be filled when we ask for items.
def list_users(self):
    """List users, one per filesystem folder under the database root."""
    folders = sorted(glob('%s/*' % self.database))
    return [self.print_user(folder) for folder in folders]
List users, each associated with a filesystem folder
def genome_coverage(covs, s2b):
    """Calculate genome coverage by concatenating parsed scaffold coverages."""
    parsed = [parse_cov(cov, s2b) for cov in covs]
    return pd.concat(parsed)
calculate genome coverage from scaffold coverage
def close_spider(self, _spider):
    """Coerce the date columns to datetimes and persist the frame as a pickle."""
    # infer_datetime_format was deprecated in pandas 2.0 (format inference is
    # now the default), so it is no longer passed.
    for column in ('date_download', 'date_modify', 'date_publish'):
        self.df[column] = pd.to_datetime(self.df[column], errors='coerce')
    self.df.to_pickle(self.full_path)
    self.log.info("Wrote to Pandas to %s", self.full_path)
Write out to file
def open_subreddit_page(self, name):
    """Build and return a SubredditPage for *name*, or None on load failure."""
    from .subreddit_page import SubredditPage

    with self.term.loader('Loading subreddit'):
        page = SubredditPage(
            self.reddit, self.term, self.config, self.oauth, name)
    # The loader swallows exceptions; only hand back the page on success.
    if not self.term.loader.exception:
        return page
Open an instance of the subreddit page for the given subreddit name.
def wafer_sso_url(context, sso_method):
    """Return the correct login URL for the given SSO method."""
    request = context.request
    view = getattr(views, '%s_login' % sso_method)
    url = reverse(view)
    # Preserve the post-login redirect target if one was supplied.
    if 'next' in request.GET:
        url += '?' + urlencode({'next': request.GET['next']})
    return url
Return the correct URL to SSO with the given method.
def symmetric_elliot_function(signal, derivative=False):
    """Fast tanh approximation x / (1 + |x|), or its derivative."""
    s = 1.0
    denom = 1 + np.abs(signal * s)
    if derivative:
        return s / denom**2
    return (signal * s) / denom
A fast approximation of tanh
def _get_client():
    """Return a salt cloud client configured from the master's conf directory."""
    cloud_conf = os.path.join(os.path.dirname(__opts__['conf_file']), 'cloud')
    # Deep-copy the pillar data so the client cannot mutate shared state.
    return salt.cloud.CloudClient(
        cloud_conf, pillars=copy.deepcopy(__pillar__.get('cloud', {})))
Return a cloud client
def update_context(app, pagename, templatename, context, doctree):
    """Inject ``feedback_form_url`` into the page rendering context."""
    context['feedback_form_url'] = feedback_form_url(
        app.config.project, pagename)
Update the page rendering context to include ``feedback_form_url``.
def chr_rom(self):
    """Return the CHR-ROM slice of the raw ROM data."""
    try:
        return self.raw_data[self.chr_rom_start:self.chr_rom_stop]
    except IndexError:
        # NOTE(review): plain bytes/list slicing never raises IndexError;
        # presumably raw_data can be a custom buffer type — confirm.
        raise ValueError('failed to read CHR-ROM on ROM.')
Return the CHR ROM of the ROM file.
def str2bytes(x):
    """Convert an arbitrary value to bytes."""
    if type(x) is bytes:
        return x
    if type(x) is str:
        # Byte-per-character conversion (assumes ordinals < 256).
        return bytes(ord(ch) for ch in x)
    # Anything else: convert via its string representation.
    return str2bytes(str(x))
Convert input argument to bytes
def __analizar_evento(self, ret):
    """Check for, and extract, the informative event in the XML response."""
    evt = ret.get('evento')
    if evt:
        # Keep both the raw event and a formatted "code: description" string.
        self.Eventos = [evt]
        self.Evento = "%(codigo)s: %(descripcion)s" % evt
Comprueba y extrae el evento informativo si existe en la respuesta XML
def subtract_imagenet_mean_preprocess_batch(batch):
    """Subtract the ImageNet channel means pixel-wise and reorder to BGR."""
    batch = F.swapaxes(batch, 0, 1)
    r, g, b = F.split(batch, num_outputs=3, axis=0)
    # Per-channel ImageNet means.
    r = r - 123.680
    g = g - 116.779
    b = b - 103.939
    batch = F.concat(b, g, r, dim=0)
    return F.swapaxes(batch, 0, 1)
Subtract ImageNet mean pixel-wise from a BGR image.
def rerun(client, run, job):
    """Re-run existing workflow or tool using CWL runner."""
    from renku.models.provenance import ProcessRun
    # NOTE(review): 'process_commmit' looks like a typo for 'process_commit'
    # — confirm against the client API before renaming.
    activity = client.process_commmit()
    if not isinstance(activity, ProcessRun):
        click.secho('No tool was found.', fg='red', file=sys.stderr)
        return
    try:
        args = ['cwl-runner', activity.path]
        if job:
            # Persist the job description in the CWD so cwl-runner can read it.
            job_file = tempfile.NamedTemporaryFile(
                suffix='.yml', dir=os.getcwd(), delete=False
            )
            args.append(job_file.name)
            with job_file as fp:
                yaml.dump(yaml.safe_load(job), stream=fp, encoding='utf-8')
        if run:
            return call(args, cwd=os.getcwd())
    finally:
        # Always remove the temporary job file, even if the run failed.
        if job:
            os.unlink(job_file.name)
Re-run existing workflow or tool using CWL runner.
def outstanding(self):
    """Return whether this barrier still has pending work."""
    done_count = 0
    for item in self:
        # Fire-and-forget items count as done unless we wait for any item.
        if (not self.wait_any and item.fire_and_forget) or item.done:
            done_count += 1
    # In wait-any mode a single completion finishes the barrier.
    if self.wait_any and done_count > 0:
        return False
    return done_count != len(self)
Returns whether or not this barrier has pending work.
def _hline_bokeh_(self, col):
    """Return a horizontal line at the mean value of column *col*."""
    mean_value = self.df[col].mean()
    return hv.HLine(mean_value)
Returns a horizontal line from a column mean value
def mail_partial_json(self):
    """Return the parsed partial mail as a JSON string."""
    if self.mail_partial.get("date"):
        # Dates are datetime objects; serialize as ISO 8601.
        self._mail_partial["date"] = self.date.isoformat()
    return json.dumps(self.mail_partial, ensure_ascii=False, indent=2)
Return the JSON of mail parsed partial
def color_stream_st(istream=sys.stdin, save_palette=False, **kwargs):
    """Read filenames from *istream* and print each one's detected palette."""
    for line in istream:
        filename = line.strip()
        try:
            palette = extract_colors(filename, **kwargs)
        except Exception as e:
            # Report the failure and keep processing the remaining files.
            print(filename, e, file=sys.stderr)
            continue
        print_colors(filename, palette)
        if save_palette:
            save_palette_as_image(filename, palette)
Read filenames from the input stream and detect their palette.
def command_list(prog_name, prof_mgr, prof_name, prog_args):
    """Print the names of the profile's components, one per line."""
    parser = argparse.ArgumentParser(prog=prog_name)
    parser.parse_args(prog_args)  # validates that no stray args were passed
    prof_stub = prof_mgr.load(prof_name)
    buffered = io.StringIO()
    for comp_stub in prof_stub.component_list():
        name = comp_stub.name()
        if name is not None:
            buffered.write(name)
            buffered.write("\n")
    sys.stdout.write(buffered.getvalue())
Print the list of components.
def neutralize_variables(self, tax_benefit_system):
    """Neutralize input variables absent from the input data, keeping crucial ones."""
    protected_inputs = self.used_as_input_variables
    protected_names = self.non_neutralizable_variables
    weight_columns = self.weight_column_name_by_entity
    for name, variable in tax_benefit_system.variables.items():
        if variable.formulas:
            continue  # computed variables are never neutralized
        if protected_inputs and name in protected_inputs:
            continue
        if protected_names and name in protected_names:
            continue
        if weight_columns and name in weight_columns.values():
            continue
        tax_benefit_system.neutralize_variable(name)
Neutralize input variables that are not in the input dataframe, keeping some crucial variables
def save(self):
    """Save the cursor position so it can be restored later."""
    # Emit the terminal's save-cursor sequence, then remember we did so.
    self.write(self.term.save)
    self._saved = True
Saves current cursor position, so that it can be restored later
def close_comments(self, request, queryset):
    """Close the comments for the selected entries."""
    queryset.update(comment_enabled=False)
    message = _('Comments are now closed for selected entries.')
    self.message_user(request, message)
Close the comments for selected entries.
def requote_uri(uri):
    """Requote uri if it contains non-ascii chars, spaces etc."""
    import requests.utils
    if six.PY2:
        # Python 2 only: percent-encode raw high bytes and IDNA-encode the
        # hostname before handing the URI to requests.
        def url_encode_non_ascii(bytes):
            pattern = '[\x80-\xFF]'
            replace = lambda c: ('%%%02x' % ord(c.group(0))).upper()
            return re.sub(pattern, replace, bytes)
        parts = urlparse(uri)
        # Index 1 is the netloc; it gets IDNA encoding, everything else gets
        # percent-encoding of its UTF-8 bytes.
        uri = urlunparse(
            part.encode('idna') if index == 1 else url_encode_non_ascii(part.encode('utf-8'))
            for index, part in enumerate(parts))
    return requests.utils.requote_uri(uri)
Requote uri if it contains non-ascii chars, spaces etc.
def get(self):
    """Called by the protocol consumer to fetch the next item."""
    if self._current:
        # Resume the in-flight operation rather than starting a new one.
        return self._resume(self._current, False)
    return self._get(None)
Called by the protocol consumer
def _startRecording(self, filename):
    """Start recording the session to *filename* for debugging."""
    self.setOption('_log_file_name', filename)
    self.setOption('_log_input_only', True)
    self.setOption('_log', True)
Start recording the session to a file for debug purposes.
def check_colormap(cmap):
    """Validate that *cmap* is a recognised colorbrewer map name and return it."""
    names = set(['BrBG', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy', 'RdYlBu',
                 'RdYlGn', 'Spectral', 'Blues', 'BuGn', 'BuPu', 'GnBu',
                 'Greens', 'Greys', 'Oranges', 'OrRd', 'PuBu', 'PuBuGn',
                 'PuRd', 'Purples', 'RdPu', 'Reds', 'YlGn', 'YlGnBu',
                 'YlOrBr', 'YlOrRd', 'Accent', 'Dark2', 'Paired', 'Pastel1',
                 'Pastel2', 'Set1', 'Set2', 'Set3', 'Lightning'])
    if cmap not in names:
        raise Exception("Invalid cmap '%s', must be one of %s" % (cmap, names))
    return cmap
Check if cmap is one of the colorbrewer maps
def update(self, other):
    """Merge the classpaths and excludes of *other* into this ClasspathProducts."""
    if self._pants_workdir != other._pants_workdir:
        raise ValueError(
            'Other ClasspathProducts from a different pants workdir {}'.format(
                other._pants_workdir))
    # Copy both product maps across, target by target.
    for target, products in other._classpaths._products_by_target.items():
        self._classpaths.add_for_target(target, products)
    for target, products in other._excludes._products_by_target.items():
        self._excludes.add_for_target(target, products)
Adds the contents of other to this ClasspathProducts.
def _construct_derivatives(self, coefs, **kwargs):
    """Return a derivative object for each coefficient in *coefs*."""
    factory = self.basis_functions.derivatives_factory
    return [factory(coef, **kwargs) for coef in coefs]
Return a list of derivatives given a list of coefficients.
def train(self, net_sizes, epochs, batchsize):
    """Initialize the base trainer, fit it, and return its evaluation."""
    self.trainer = ClassificationTrainer(self.data, self.targets, net_sizes)
    self.trainer.learn(epochs, batchsize)
    return self.trainer.evaluate(batchsize)
Initialize the base trainer
def bbox_vert_aligned_right(box1, box2):
    """Return True when the right edges of both boxes are within 2 pts."""
    if box1 and box2:
        return abs(box1.right - box2.right) <= 2
    # Either box missing/falsy: cannot be aligned.
    return False
Returns true if the right boundary of both boxes is within 2 pts
def compute_treshold_interval(self):
    """Set the threshold interval to the rounded average of all module intervals."""
    intervals = [m.interval for m in self.modules if hasattr(m, "interval")]
    if intervals:
        self.treshold_interval = round(sum(intervals) / len(intervals))
Current method is to compute average from all intervals.
def SetCredentials(api_key, api_passwd):
    """Establish the API key and password associated with APIv1 commands."""
    global V1_API_KEY, V1_API_PASSWD, _V1_ENABLED
    # Enabling the v1 flag marks the credentials as usable.
    _V1_ENABLED = True
    V1_API_KEY = api_key
    V1_API_PASSWD = api_passwd
Establish API key and password associated with APIv1 commands.
def generate_config(ctx):
    """Generate a sample gitlint config file at a user-chosen path."""
    path = click.prompt('Please specify a location for the sample gitlint config file', default=DEFAULT_CONFIG_FILE)
    path = os.path.abspath(path)
    dir_name = os.path.dirname(path)
    # Refuse to write into a missing directory or over an existing file.
    if not os.path.exists(dir_name):
        click.echo(u"Error: Directory '{0}' does not exist.".format(dir_name), err=True)
        ctx.exit(USAGE_ERROR_CODE)
    elif os.path.exists(path):
        click.echo(u"Error: File \"{0}\" already exists.".format(path), err=True)
        ctx.exit(USAGE_ERROR_CODE)
    LintConfigGenerator.generate_config(path)
    click.echo(u"Successfully generated {0}".format(path))
    ctx.exit(0)
Generates a sample gitlint config file.
def fullqualname_py3(obj):
    """Return the fully qualified name of *obj* (Python 3)."""
    kind = type(obj).__name__
    if kind == 'builtin_function_or_method':
        return _fullqualname_builtin_py3(obj)
    if kind == 'function':
        return _fullqualname_function_py3(obj)
    if kind in ['member_descriptor', 'method_descriptor', 'wrapper_descriptor']:
        return obj.__objclass__.__module__ + '.' + obj.__qualname__
    if kind == 'method':
        return _fullqualname_method_py3(obj)
    if kind == 'method-wrapper':
        # Recurse on the bound object, then append the wrapper's own name.
        return fullqualname_py3(obj.__self__) + '.' + obj.__name__
    if kind == 'module':
        return obj.__name__
    if kind == 'property':
        return obj.fget.__module__ + '.' + obj.fget.__qualname__
    if inspect.isclass(obj):
        return obj.__module__ + '.' + obj.__qualname__
    # Anything else: fall back to the object's class.
    return obj.__class__.__module__ + '.' + obj.__class__.__qualname__
Fully qualified name for objects in Python 3.
def etag(self):
    """Generate an etag (MD5 hex digest) from the file contents."""
    CHUNKSIZE = 1024 * 64
    from hashlib import md5
    hasher = md5()
    # Read in binary mode: md5 operates on bytes, and text mode would both
    # decode and normalise newlines.  The original called the undefined name
    # `hash_update`; the hash object's own update() is what is needed.
    with open(self.path, 'rb') as fin:
        chunk = fin.read(CHUNKSIZE)
        while chunk:
            hasher.update(chunk)
            chunk = fin.read(CHUNKSIZE)
    return hasher.hexdigest()
Generates etag from file contents.
def _get_vqa_v2_image_feature_dataset(
        directory, feature_url, feature_filename="mscoco_feat.tar.gz"):
    """Download (if needed) and extract the VQA v2 feature set into *directory*."""
    feature_file = generator_utils.maybe_download_from_drive(
        directory, feature_filename, feature_url)
    # NOTE(review): extractall trusts member paths in the downloaded archive.
    with tarfile.open(feature_file, "r:gz") as feature_tar:
        feature_tar.extractall(directory)
Extract the VQA V2 feature data set to directory unless it's there.
async def _load_all(self):
    """Load all the appointments from persistent storage."""
    to_delete = []
    for iden, val in self._hivedict.items():
        try:
            appt = _Appt.unpack(val)
            if appt.iden != iden:
                raise s_exc.InconsistentStorage(mesg='iden inconsistency')
            self._addappt(iden, appt)
            # Keep the next index strictly above every stored appointment.
            self._next_indx = max(self._next_indx, appt.indx + 1)
        except (s_exc.InconsistentStorage, s_exc.BadStorageVersion, s_exc.BadTime, TypeError, KeyError, UnicodeDecodeError) as e:
            # Corrupt entries are logged and queued for removal instead of
            # aborting the whole load.
            logger.warning('Invalid appointment %r found in storage: %r. Removing.', iden, e)
            to_delete.append(iden)
            continue
    # Deletion is deferred: mutating the dict while iterating it is unsafe.
    for iden in to_delete:
        await self._hivedict.pop(iden)
    if self.appts:
        maxindx = max(appt.indx for appt in self.appts.values())
        self._next_indx = maxindx + 1
Load all the appointments from persistent storage
def on_same_strand(self):
    """Return True iff the alignment runs the same direction in ref and query."""
    ref_forward = self.ref_start < self.ref_end
    qry_forward = self.qry_start < self.qry_end
    return ref_forward == qry_forward
Returns true iff the direction of the alignment is the same in the reference and the query
def fix_paths(project_data, rel_path, extensions):
    """Normalise every path in the listed extension entries relative to *rel_path*."""
    def norm(path):
        return os.path.normpath(os.path.join(rel_path, path))

    for key in extensions:
        value = project_data[key]
        if type(value) is dict:
            for name, paths in value.items():
                value[name] = [norm(p) for p in paths]
        elif type(value) is list:
            project_data[key] = [norm(p) for p in value]
        else:
            # A single scalar path.
            project_data[key] = norm(value)
Fix paths for extension list
def cap(v, l):
    """Return str(v), truncated to its last *l* characters when too long."""
    text = str(v)
    if len(text) <= l:
        return text
    return text[-l:]
Shortens string if above certain length.
def _api_type(self, value):
    """Return the API type name for *value*; implicitly None when unsupported."""
    if isinstance(value, six.string_types):
        return 'string'
    if isinstance(value, six.integer_types):
        return 'integer'
    # Exact datetime only (date subclasses are not treated as 'date').
    if type(value) is datetime.datetime:
        return 'date'
Returns the API type of the given value based on its python type.
def _is_in_comment_type(token_type):
    """Return True if this kind of token may appear inside a comment."""
    comment_types = (TokenType.Comment, TokenType.Newline, TokenType.Whitespace,
                     TokenType.RST, TokenType.BeginRSTComment,
                     TokenType.BeginInlineRST, TokenType.EndInlineRST)
    return token_type in comment_types
Return true if this kind of token can be inside a comment.
def parse_rules(self):
    """Add a set of rules to the app, dividing between filter and other rule set."""
    try:
        rule_options = self.config.items('rules')
    except configparser.NoSectionError:
        raise LogRaptorConfigError("the app %r has no defined rules!" % self.name)
    rules = []
    for option, value in rule_options:
        pattern = value.replace('\n', '')
        if not self.args.filters:
            # No filters: substitute field placeholders and keep the rule as-is.
            pattern = string.Template(pattern).safe_substitute(self.fields)
            rules.append(AppRule(option, pattern, self.args))
            continue
        for filter_group in self.args.filters:
            _pattern, filter_keys = exact_sub(pattern, filter_group)
            _pattern = string.Template(_pattern).safe_substitute(self.fields)
            if len(filter_keys) >= len(filter_group):
                # Every filter in the group matched: record as a filter rule.
                rules.append(AppRule(option, _pattern, self.args, filter_keys))
            elif self._thread:
                # Partial match: only useful when thread matching is enabled.
                rules.append(AppRule(option, _pattern, self.args))
    return rules
Add a set of rules to the app, dividing between filter and other rule set
def bs(s: int) -> str:
    """Convert a non-negative int to its binary representation as a string."""
    if s <= 1:
        return str(s)
    # Recurse on the high bits, then append the lowest bit.
    return bs(s >> 1) + str(s & 1)
Converts an int to its bits representation as a string of 0's and 1's.
def make_hasher(algorithm_id):
    """Create a hashing object for the given signing algorithm (1=SHA1, 2=SHA384)."""
    if algorithm_id == 1:
        algorithm = hashes.SHA1()
    elif algorithm_id == 2:
        algorithm = hashes.SHA384()
    else:
        raise ValueError("Unsupported signing algorithm: %s" % algorithm_id)
    return hashes.Hash(algorithm, default_backend())
Create a hashing object for the given signing algorithm.
def extract_rpm(archive, compression, cmd, verbosity, interactive, outdir):
    """Extract a RPM archive by piping rpm2cpio output through cpio."""
    cpio = util.find_program("cpio")
    if not cpio:
        raise util.PatoolError("cpio(1) is required for rpm2cpio extraction; please install it")
    path = util.shell_quote(os.path.abspath(archive))
    cmdlist = [
        util.shell_quote(cmd), path, "|", util.shell_quote(cpio),
        '--extract', '--make-directories', '--preserve-modification-time',
        '--no-absolute-filenames', '--force-local', '--nonmatching',
        r'"*\.\.*"',
    ]
    if verbosity > 1:
        cmdlist.append('-v')
    # The pipe requires shell execution, run inside the output directory.
    return (cmdlist, {'cwd': outdir, 'shell': True})
Extract a RPM archive.
def show_event_analysis_dialog(self):
    """Refresh and display the event analysis dialog."""
    dialog = self.event_analysis_dialog
    dialog.update_types()
    dialog.update_groups()
    dialog.update_cycles()
    dialog.show()
Create the event analysis dialog.
def apply_boundary_conditions_to_cm(external_indices, cm):
    """Return a copy of *cm* with rows and columns of external nodes zeroed."""
    result = cm.copy()
    result[external_indices, :] = 0  # connections *from* external nodes
    result[:, external_indices] = 0  # connections *to* external nodes
    return result
Remove connections to or from external nodes.
def create_domain(self, service_id, version_number, name, comment=None):
    """Create a domain for a particular service and version."""
    body = self._formdata({
        "name": name,
        "comment": comment,
    }, FastlyDomain.FIELDS)
    url = "/service/%s/version/%d/domain" % (service_id, version_number)
    content = self._fetch(url, method="POST", body=body)
    return FastlyDomain(self, content)
Create a domain for a particular service and version.
def validate(self):
    """Run base validation, then require exactly one entity per row."""
    super().validate()
    nb_entities = len(self.entities)
    if nb_entities != self.rows:
        raise self.error(
            'Number of entities: %s != number of rows: %s'
            % (nb_entities, self.rows))
Base validation + entities = rows.
def _numeric_param_check_range(variable_name, variable_value, range_bottom, range_top):
    """Raise ToolkitError unless *variable_value* lies within the inclusive range."""
    if not (range_bottom <= variable_value <= range_top):
        raise ToolkitError("%s must be between %i and %i"
                           % (variable_name, range_bottom, range_top))
Checks if numeric parameter is within given range
def _setup_decompressor(self, response):
    """Pick a decompressor matching the response's Content-Encoding header."""
    encoding = response.fields.get('Content-Encoding', '').lower()
    if encoding == 'gzip':
        self._decompressor = wpull.decompression.GzipDecompressor()
    elif encoding == 'deflate':
        self._decompressor = wpull.decompression.DeflateDecompressor()
    else:
        # Unknown or absent encoding: pass the body through untouched.
        self._decompressor = None
Set up the content encoding decompressor.
def _simple_acronym_detection(s, i, words, *args):
    """Detect acronyms based on runs of upper-case letters."""
    # Collapse words[s:i] into a single acronym token.
    acronym = ''.join(words[s:i])
    # NOTE(review): xrange is Python 2 only — this snippet predates Python 3.
    for _ in xrange(s, i):
        del words[s]
    # ''.join(acronym) is a no-op on a str; kept for byte-identical behavior.
    words.insert(s, ''.join(acronym))
    return s
Detect acronyms based on runs of upper-case letters.
def change_parameters(self, params):
    """Push a flat parameter vector into the approximate distributions."""
    cursor = 0
    for dist in self.q:
        for approx_param in range(dist.param_no):
            dist.vi_change_param(approx_param, params[cursor])
            cursor += 1
Utility function for changing the approximate distribution parameters
def data_to_elem_base(self):
    """Convert governor data from system base to element base."""
    if not self.n or self._flags['sysbase'] is False:
        return
    # Rescale droop by the machine rating relative to the system MVA.
    self.R = mul(self.R, self.Sn) / self.system.mva
    super(GovernorBase, self).data_to_elem_base()
Custom system base unconversion function
def a_urls(html):
    """Yield normalized URLs found in the document's ``<a>`` tags."""
    soup = BeautifulSoup(html, 'lxml')
    for node in soup.find_all('a'):
        try:
            href = node['href']
        except KeyError:
            # Anchor without an href attribute.
            continue
        yield norm_url(href)
return normalized urls found in the 'a' tag
def add_entry(self, date, entry):
    """Add the given entry to the textual representation."""
    in_date = False
    insert_at = 0
    # Find the last line belonging to the target date's section.
    for (lineno, line) in enumerate(self.lines):
        if isinstance(line, DateLine) and line.date == date:
            in_date = True
            insert_at = lineno
            continue
        if in_date:
            if isinstance(line, Entry):
                # Keep advancing past existing entries of this date.
                insert_at = lineno
            elif isinstance(line, DateLine):
                # The next date section starts: stop searching.
                break
    self.lines.insert(insert_at + 1, entry)
    # When inserting right after a non-entry line (e.g. the date header),
    # add a separating blank line before the new entry.
    if not isinstance(self.lines[insert_at], Entry):
        self.lines.insert(insert_at + 1, TextLine(''))
Add the given entry to the textual representation.
def _patch_hover(self, element, data):
    """Replace edge start and end hover data with label_index data."""
    if not (self.inspection_policy == 'edges' and 'hover' in self.handles):
        return
    lidx = element.nodes.get_dimension(self.label_index)
    src, tgt = [dimension_sanitizer(kd.name) for kd in element.kdims[:2]]
    # 'start'/'end' columns are stored with a '_values' suffix.
    if src == 'start':
        src += '_values'
    if tgt == 'end':
        tgt += '_values'
    # Map node index (dimension 2) -> label value (dimension lidx).
    lookup = dict(zip(*(element.nodes.dimension_values(d) for d in (2, lidx))))
    src_vals = data['patches_1'][src]
    tgt_vals = data['patches_1'][tgt]
    # Fall back to the raw index when a node has no label.
    data['patches_1'][src] = [lookup.get(v, v) for v in src_vals]
    data['patches_1'][tgt] = [lookup.get(v, v) for v in tgt_vals]
Replace edge start and end hover data with label_index data.
def switch(self, device_id, obj_slot_id):
    """POST a device-context switch for the given device and slot."""
    payload = {"device-context": self._build_payload(device_id, obj_slot_id)}
    return self._post(self.url_prefix, payload)
Switching of device-context
def edit_file(self, filename, line):
    """Handle %edit magic petitions by opening text files in the editor."""
    # NOTE(review): *line* is accepted but not forwarded; the goto signal
    # always targets line 1 — confirm this is intentional.
    if encoding.is_text_file(filename):
        self.edit_goto.emit(filename, 1, '')
Handle %edit magic petitions.
def update(self):
    """Refresh each channel and sync its dict back into the project dictionary."""
    for channel in self.channels:
        channel.update()
    channel_dicts = self._project_dict["channels"]
    for i in range(len(channel_dicts)):
        for channel in self.channels:
            if channel.name == channel_dicts[i]["common.ALLTYPES_NAME"]:
                channel_dicts[i] = channel.as_dict()
Updates the dictionary of the project
def load(self, arguments):
    """Load feature values from ServerConnection arguments."""
    # Strip the leading target and the trailing trailer from the list.
    features = arguments[1:-1]
    for feature in features:
        self.load_feature(feature)
Load the values from a ServerConnection's arguments
def close(self):
    """Return this instance's socket to the connection pool (idempotent)."""
    if self.__closed:
        return
    self.__closed = True
    self.pool.return_socket(self.sock)
    # Drop references so the socket cannot be reused through this object.
    self.sock, self.pool = None, None
Return this instance's socket to the connection pool.
def tiff_header(read_buffer):
    """Interpret the raw UUID data as a TIFF header and return the parsed Exif IFD."""
    data = struct.unpack('BB', read_buffer[0:2])
    if data[0] == 73 and data[1] == 73:
        endian = '<'  # b'II' -> little endian
    elif data[0] == 77 and data[1] == 77:
        endian = '>'  # b'MM' -> big endian
    else:
        msg = ("The byte order indication in the TIFF header ({byte_order}) "
               "is invalid. It should be either {little_endian} or "
               "{big_endian}.")
        # Bug fix: report the actual byte-order bytes (offset 0:2); the
        # original showed read_buffer[6:8], which is the IFD offset field.
        msg = msg.format(byte_order=read_buffer[0:2],
                         little_endian=bytes([73, 73]),
                         big_endian=bytes([77, 77]))
        raise IOError(msg)
    _, offset = struct.unpack(endian + 'HI', read_buffer[2:8])
    exif = ExifImageIfd(endian, read_buffer, offset)
    return exif.processed_ifd
Interpret the uuid raw data as a tiff header.
def _modname(path):
    """Return a plausible module name derived from the file's basename."""
    filename, _ext = os.path.splitext(os.path.basename(path))
    return filename
Return a plausible module name for the path
def await_connection(host, port):
    """Wait for the mongo-orchestration server to accept connections."""
    for _attempt in range(CONNECT_ATTEMPTS):
        try:
            conn = socket.create_connection((host, port), CONNECT_TIMEOUT)
            conn.close()
            return True
        except (IOError, socket.error):
            # Not up yet; back off briefly and retry.
            time.sleep(1)
    return False
Wait for the mongo-orchestration server to accept connections.
def reset(self):
    """Reset the state of the temporal pooler."""
    n = self._numColumns
    self._poolingActivation = numpy.zeros((n), dtype="int32")
    self._poolingColumns = []
    # All duty cycles return to zero; boost factors return to one.
    self._overlapDutyCycles = numpy.zeros(n, dtype=realDType)
    self._activeDutyCycles = numpy.zeros(n, dtype=realDType)
    self._minOverlapDutyCycles = numpy.zeros(n, dtype=realDType)
    self._minActiveDutyCycles = numpy.zeros(n, dtype=realDType)
    self._boostFactors = numpy.ones(n, dtype=realDType)
Reset the state of the temporal pooler
def calculate_current_allocation(self):
    """Calculate each asset class's current allocation % of the total amount."""
    total = self.total_amount
    for asset_class in self.asset_classes:
        asset_class.curr_alloc = asset_class.curr_value * 100 / total
Calculates the current allocation % based on the value
def draw(self, **kwargs):
    """Render the training and cross-validation learning curves."""
    labels = ("Training Score", "Cross Validation Score")
    curves = (
        (self.train_scores_mean_, self.train_scores_std_),
        (self.test_scores_mean_, self.test_scores_std_),
    )
    colors = resolve_colors(n_colors=2)
    # Shaded one-std bands first, then the mean lines on top.
    for idx, (mean, std) in enumerate(curves):
        self.ax.fill_between(self.train_sizes_, mean - std, mean + std,
                             alpha=0.25, color=colors[idx])
    for idx, (mean, _) in enumerate(curves):
        self.ax.plot(self.train_sizes_, mean, 'o-',
                     color=colors[idx], label=labels[idx])
    return self.ax
Renders the training and test learning curves.
def _typedef_both(t, base=0, item=0, leng=None, refs=None,
                  kind=_kind_static, heap=False):
    """Add a new typedef for both data and code, save it, and return it."""
    typedef = _Typedef(base=_basicsize(t, base=base), item=_itemsize(t, item),
                       refs=refs, leng=leng, both=True, kind=kind, type=t)
    typedef.save(t, base=base, heap=heap)
    return typedef
Add new typedef for both data and code.
def claim(ctx, vestingid, account, amount):
    """Claim funds from the vesting balance."""
    vesting = Vesting(vestingid)
    # Default to everything currently claimable.
    if amount:
        claim_amount = Amount(float(amount), "BTS")
    else:
        claim_amount = vesting.claimable
    print_tx(
        ctx.bitshares.vesting_balance_withdraw(
            vesting["id"], amount=claim_amount, account=vesting["owner"]))
Claim funds from the vesting balance
def _make_paging_message(controls):
    """Turn a raw paging-controls dict into a Protobuf ClientPagingControls."""
    return client_list_control_pb2.ClientPagingControls(
        start=controls.get('start'),
        limit=controls.get('limit'))
Turns a raw paging controls dict into Protobuf ClientPagingControls.
def average_price(quantity_1, price_1, quantity_2, price_2):
    """Return the quantity-weighted average price of two asset states."""
    total_value = quantity_1 * price_1 + quantity_2 * price_2
    total_quantity = quantity_1 + quantity_2
    # NOTE(review): raises ZeroDivisionError when both quantities are zero.
    return total_value / total_quantity
Calculates the average price between two asset states.
def run_from_argv(self, argv):
    """Record the raw command line, then delegate to the parent implementation."""
    self.argv_string = ' '.join(argv)
    super(EmailNotificationCommand, self).run_from_argv(argv)
Overridden in order to access the command line arguments.
def _handle_substatements(self, stmt: Statement, sctx: SchemaContext) -> None:
    """Dispatch handler methods for the substatements of *stmt*."""
    for sub in stmt.substatements:
        if sub.prefix:
            # Resolve the prefix to its module name for a qualified key.
            module = sctx.schema_data.modules[sctx.text_mid].prefix_map[sub.prefix][0]
            key = module + ":" + sub.keyword
        else:
            key = sub.keyword
        handler = getattr(self, SchemaNode._stmt_callback.get(key, "_noop"))
        handler(sub, sctx)
Dispatch actions for substatements of `stmt`.
def load_data(fpath, **kwargs):
    """Generic loader that dispatches on the file extension."""
    ext = splitext(fpath)[1]
    if ext in ['.pickle', '.cPkl', '.pkl']:
        return load_cPkl(fpath, **kwargs)
    elif ext in ['.json']:
        return load_json(fpath, **kwargs)
    elif ext in ['.hdf5']:
        return load_hdf5(fpath, **kwargs)
    elif ext in ['.txt']:
        return load_text(fpath, **kwargs)
    elif HAS_NUMPY and ext in ['.npz', '.npy']:
        return load_numpy(fpath, **kwargs)
    else:
        # `assert False` is stripped under -O; raise an explicit error instead.
        raise ValueError('unknown ext=%r for fpath=%r' % (ext, fpath))
More generic interface to load data
def urlparse(uri):
    """Parse *uri* and percent-decode each of its six components."""
    parts = parse.urlparse(uri)
    # ParseResult iterates as (scheme, netloc, path, params, query, fragment).
    return tuple(parse.unquote(part) for part in parts)
Parse and decode the parts of a URI.
def comicDownloaded(self, comic, filename, text=None):
    """Write RSS entry for downloaded comic."""
    imageUrl = self.getUrlFromFilename(filename)
    size = None
    if self.allowdownscale:
        size = getDimensionForImage(filename, MaxImageSize)
    title = '%s - %s' % (comic.name, os.path.basename(filename))
    pageUrl = comic.referrer
    # Build the HTML description: image, optional caption, link to the comic.
    description = '<img src="%s"' % imageUrl
    if size:
        description += ' width="%d" height="%d"' % size
    description += '/>'
    if text:
        description += '<br/>%s' % text
    description += '<br/><a href="%s">View Comic Online</a>' % pageUrl
    args = (
        title,
        imageUrl,
        description,
        util.rfc822date(time.time())
    )
    if self.newfile:
        self.newfile = False
        self.rss.addItem(*args)
    else:
        # NOTE(review): append=False presumably inserts at the front of an
        # existing feed so newest items come first — confirm in RSS helper.
        self.rss.addItem(*args, append=False)
Write RSS entry for downloaded comic.
def screenshot_themes(self, *args):
    """Take a screenshot for every available theme."""
    from time import sleep
    for theme in THEMES:
        example.set_theme(theme)
        example.update()
        # Give the widget a moment to redraw before capturing.
        sleep(0.05)
        self.screenshot()
Take a screenshot for all themes available
def addLocalCacheService(self):
    """Add a CacheService to the instantiated HendrixService."""
    cache_service = self.getCacheService()
    cache_service.setName('cache_proxy')
    cache_service.setServiceParent(self.hendrix)
adds a CacheService to the instatiated HendrixService
def enable_asynchronous(self):
    """Verify the socket module has been monkey patched by gevent."""
    def is_monkey_patched():
        try:
            from gevent import monkey, socket
        except ImportError:
            return False
        # Newer gevent records patched modules in monkey.saved.
        if hasattr(monkey, "saved"):
            return "socket" in monkey.saved
        return gevent.socket.socket == socket.socket

    if not is_monkey_patched():
        raise Exception("To activate asynchonoucity, please monkey patch"
                        " the socket module with gevent")
    return True
Check if socket have been monkey patched by gevent
def separator(self):
    """Generate a separator row using the current column widths."""
    cells = {column: "-" * self.column_widths[column] for column in self.columns}
    return ColorRow(self, **cells)
Generate a separator row using current column widths.
def add_tokens_for_single(self, ignore=False):
    """Emit the tokens for the single signature, optionally marking it skipped."""
    args = self.single.args
    name = self.single.python_name
    self.reset_indentation(self.indent_type * self.single.indent)
    self.result.extend(self.tokens.make_single(name, args))
    if ignore:
        # Mark the signature as skipped and emit the skip tokens.
        self.single.skipped = True
        self.result.extend(self.tokens.test_skip)
    self.groups.finish_signature()
Add the tokens for the single signature
def _add_manifest_files(zip_file, dir_name, payload_info_list, tag_info_list):
    """Generate a manifest file per checksum algorithm and add each to the zip."""
    for checksum_algorithm in _get_checksum_algorithm_set(payload_info_list):
        manifest_tup = _gen_manifest_file_tup(payload_info_list, checksum_algorithm)
        _add_tag_file(zip_file, dir_name, tag_info_list, manifest_tup)
Generate the manifest files and add them to the zip.
def _sanity_check_construct_result_block(ir_blocks):
    """Assert ConstructResult is the last block and appears nowhere else."""
    last_block = ir_blocks[-1]
    if not isinstance(last_block, ConstructResult):
        raise AssertionError(u'The last block was not ConstructResult: {}'.format(ir_blocks))
    for earlier_block in ir_blocks[:-1]:
        if isinstance(earlier_block, ConstructResult):
            raise AssertionError(u'Found ConstructResult before the last block: '
                                 u'{}'.format(ir_blocks))
Assert that ConstructResult is always the last block, and only the last block.
def n_first_author_papers(self, refresh=True):
    """Return the number of papers on which this author is listed first."""
    abstracts = self.get_journal_abstracts(refresh=refresh)
    return sum(1 for ab in abstracts
               if ab.authors[0].scopusid == self.author_id)
Return number of papers with author as the first author.
def classes(self) -> Iterator[str]:
    """Yield the name of every class discovered in the path map."""
    # Iterate the mapping directly: .keys() was redundant.
    yield from (
        name[:-len('.class')]
        for name in self.path_map
        if name.endswith('.class')
    )
Yield the name of all classes discovered in the path map.
def read(self):
    """Return the contents of the file opened in read mode."""
    if self._mode == 'r':
        return self._read_response.text
    if self._mode == 'rb':
        return self._read_response.content
    raise IOError("File not opened in read mode.")
read the contents of the file that's been opened in read mode
def rate():
    """Return the rate limit: higher for authenticated users."""
    auth = request.authorization
    if auth and check_auth(auth.username, auth.password):
        return "600/minute"
    return "60/minute"
Set rate limits for authenticated and nonauthenticated users.
def add_alignment(self, align):
    """Compute alignment errors for *align* and fold them into the profile."""
    # Invalidate cached context-error summaries.
    self._target_context_errors = None
    self._query_context_errors = None
    alignment_errors = AlignmentErrors(align)
    self._alignment_errors.append(alignment_errors)
    self._general_errors.add_alignment_errors(alignment_errors)
Calculate alignment errors from the alignment and add it to the profile.
def log(msg, *args, **kwargs):
    """Print a log message, formatting it when arguments are given."""
    if args or kwargs:
        print(msg.format(*args, **kwargs))
    else:
        # No arguments: print verbatim (avoids format's brace interpretation).
        print(msg)
Print out a log message.
def configure_tty(self):
    """Disable output CR/LF mapping and local echo on the tty."""
    attr = termios.tcgetattr(self.fd)
    attr[1] &= ~termios.ONLCR  # don't translate \n to \r\n on output
    attr[3] &= ~termios.ECHO   # don't echo input back
    termios.tcsetattr(self.fd, termios.TCSANOW, attr)
    # Matching shell-side setup for the remote end.
    return b'unsetopt zle 2> /dev/null;stty -echo -onlcr -ctlecho;'
We don't want \n to be replaced with \r\n, and we disable the echo
def build_class(name, basenames=(), doc=None):
    """Create and initialize an astroid ClassDef node with the given bases."""
    node = nodes.ClassDef(name, doc)
    for base in basenames:
        base_node = nodes.Name()
        base_node.name = base
        base_node.parent = node
        node.bases.append(base_node)
    return node
create and initialize an astroid ClassDef node
def OnGridEditorClosed(self, event):
    """Event handler for the end of the output-type choice."""
    try:
        dialect, self.has_header = self.parent.csvwidgets.get_dialect()
    except TypeError:
        # No dialect could be determined; let the event propagate.
        event.Skip()
        return 0
    self.fill_cells(dialect, self.has_header, choices=False)
Event handler for end of output type choice
def stamp_title(kb_app: kb, sphinx_app: Sphinx, doctree: doctree):
    """Extract the RST title from the doctree into the matching resource."""
    resources = sphinx_app.env.resources
    confdir = sphinx_app.confdir
    source = PurePath(doctree.attributes['source'])
    # Derive the docname by stripping confdir and the .rst suffix.
    docname = str(source.relative_to(confdir)).split('.rst')[0]
    resource = resources.get(docname)
    if resource:
        resource.title = get_rst_title(doctree)
Walk the tree and extract the RST title into resource.title
def cluster_set_config_epoch(self, config_epoch):
    """Set the configuration epoch on a new node."""
    return wait_ok(
        self.execute(b'CLUSTER', b'SET-CONFIG-EPOCH', config_epoch))
Set the configuration epoch in a new node.
def apply(self, callback, context):
    """Wrap *callback* so bottle HTTPErrors become wrapped error responses."""
    def wrapper(*args, **kwargs):
        try:
            return callback(*args, **kwargs)
        except bottle.HTTPError as error:
            # Convert the bottle error into this plugin's error format.
            return self.error_wrapper.from_status(
                status_line=error.status_line, msg=error.body)
    return wrapper
Apply the HTTPError wrapper to the callback.