code
stringlengths
51
2.34k
docstring
stringlengths
11
171
def dict_to_object(source):
    """Return an InspectableClass instance carrying source's key-value pairs as attributes."""
    result = inspectable_class.InspectableClass()
    for key, value in source.items():
        setattr(result, key, value)
    return result
def on_right_align_toggled(self, chk):
    """Persist the horizontal alignment setting from the checkbox state (1 = right, 0 = left)."""
    active = chk.get_active()
    self.settings.general.set_int('window-halignment', int(bool(active)))
def topics(self):
    """Return the set of tag names of report nodes that carry attributes."""
    found = set()
    for node in self.root.iter():
        if node.attrib:
            found.add(node.tag)
    return found
def check_stripe_api_host(app_configs=None, **kwargs):
    """Emit djstripe.W002 when STRIPE_API_HOST is configured outside DEBUG mode."""
    from django.conf import settings

    if settings.DEBUG or not hasattr(settings, "STRIPE_API_HOST"):
        return []
    return [
        checks.Warning(
            "STRIPE_API_HOST should not be set in production! This is most likely unintended.",
            hint="Remove STRIPE_API_HOST from your Django settings.",
            id="djstripe.W002",
        )
    ]
def truncate_selection(self, position_from):
    """Clamp the current selection so it cannot extend before position_from.

    Used to unselect read-only parts of the shell, like the prompt.
    """
    position_from = self.get_position(position_from)
    cursor = self.textCursor()
    start, end = cursor.selectionStart(), cursor.selectionEnd()
    if start < end:
        # Forward selection: pull the start up to the boundary if needed.
        start = max([position_from, start])
    else:
        # Reversed (or empty) selection: clamp the end instead.
        end = max([position_from, end])
    self.set_selection(start, end)
def _create_callback(self, function, wrap_result): if not isinstance(function, Callable): return None def command_callback(): res = function(self, self._read_lines()) if wrap_result: res = self._wrap_iterator(res) return res return command_callback
Create MPD command related response callback.
def _serialize_data(self, my_dict): new_dict = {} for item in my_dict: if isinstance(my_dict[item], datetime): new_dict[item] = my_dict[item].strftime('%Y-%m-%d%H:%M:%S') else: new_dict[item] = str(my_dict[item]) return json.dumps(new_dict)
Serialize a Dictionary into JSON
def keyword_hookup(self, noteId, keywords):
    """Re-link a note to a list of keywords.

    Existing notekeyword rows for the note are removed first, then each
    keyword (a UTF-8 encoded byte string) is inserted -- creating keyword
    rows as needed -- and hooked up to the note. Commits at the end.
    """
    try:
        self.cur.execute("DELETE FROM notekeyword WHERE noteid=?", [noteId])
    except Exception:
        # Narrowed from a bare 'except:', which also swallows
        # KeyboardInterrupt/SystemExit.
        self.error("ERROR: cannot unhook previous keywords")
    for keyword in keywords:
        keyword = keyword.decode('utf-8')
        self.fyi(" inserting keyword:", keyword)
        keywordId = self.con.execute(
            "SELECT keywordId FROM keyword WHERE keyword = ?;",
            [keyword]).fetchone()
        try:
            if keywordId:
                self.fyi(" (existing keyword with id: %s)" % keywordId)
                keywordId = keywordId[0]
            else:
                self.fyi(" (new keyword)")
                self.cur.execute("INSERT INTO keyword(keyword) VALUES (?);",
                                 [keyword])
                keywordId = self.cur.lastrowid
            self.con.execute(
                "INSERT INTO notekeyword(noteId, keywordID) VALUES(?, ?)",
                [noteId, keywordId])
        except Exception:
            self.error("error hooking up keyword '%s'" % keyword)
    self.con.commit()
def full_like(a, **kwargs):
    """Create a filled array like `a`.

    Missing kwargs are derived from `a`; when `a` is an Array its own
    fill_value is used unless the caller supplies one.
    """
    _like_args(a, kwargs)
    if isinstance(a, Array):
        kwargs.setdefault('fill_value', a.fill_value)
    return full(**kwargs)
def _writeText(self, image, text, pos):
    """Write morphed text onto `image`, one character at a time.

    Each character is rendered onto its own transparent RGBA tile, distorted
    via self._rndLetterTransform, then pasted at an advancing x offset
    (CAPTCHA-style output).
    """
    offset = 0
    x, y = pos
    for c in text:
        # (width, height) of this single character in the current font.
        c_size = self.font.getsize(c)
        c_image = Image.new('RGBA', c_size, (0, 0, 0, 0))
        c_draw = ImageDraw.Draw(c_image)
        c_draw.text((0, 0), c, font=self.font, fill=(0, 0, 0, 255))
        c_image = self._rndLetterTransform(c_image)
        # Third argument is the paste mask: preserves the transparent background.
        image.paste(c_image, (x + offset, y), c_image)
        offset += c_size[0]
def list_files(self, project):
    """List the files of a project on the compute node.

    NOTE(review): returns ``res.json`` without calling it -- presumably a
    property on this HTTP response wrapper; confirm it is not a method.
    """
    path = "/projects/{}/files".format(project.id)
    res = yield from self.http_query("GET", path, timeout=120)
    return res.json
def _disconnect(cls):
    """Detach the knocker post_save notification handler from this model class."""
    uid = 'knocker_{0}'.format(cls.__name__)
    post_save.disconnect(notify_items, sender=cls, dispatch_uid=uid)
def _build_request(request): msg = bytes([request['cmd']]) if 'dest' in request: msg += bytes([request['dest']]) else: msg += b'\0' if 'sha' in request: msg += request['sha'] else: for dummy in range(64): msg += b'0' logging.debug("Request (%d): %s", len(msg), msg) return msg
Build message to transfer over the socket from a request.
def schema(self):
    """Return the DQL fragment for constructing this global index ('' while deleting)."""
    if self.status == "DELETING":
        return ""
    fragment = ["GLOBAL", self.index_type, "INDEX",
                "('%s', %s," % (self.name, self.hash_key.name)]
    if self.range_key:
        fragment.append("%s," % self.range_key.name)
    if self.includes:
        fragment.append("[%s]," % ", ".join("'%s'" % i for i in self.includes))
    fragment.append("THROUGHPUT (%d, %d))" % (self.read_throughput,
                                              self.write_throughput))
    return " ".join(fragment)
def import_modules(names, src, dst):
    """Import each named submodule of package src and bind it on module dst."""
    dst_module = sys.modules[dst]
    for name in names:
        setattr(dst_module, name,
                importlib.import_module('{}.{}'.format(src, name)))
def xpath(self, xpath, dom=None):
    """Shorthand for finding elements by XPath, defaulting to the whole page."""
    target = self.browser if dom is None else dom
    return expect(target.find_by_xpath, args=[xpath])
def get(cls, parent=None, id=None, data=None):
    """Build a resource object, inheriting route/key/config from parent.

    If data is given it is assigned directly; otherwise the object is
    fetched from the backend.

    NOTE(review): 'parent.key' and 'parent.config' are read unconditionally,
    so passing parent=None raises AttributeError despite the default --
    confirm whether callers ever omit parent.
    """
    if parent is not None:
        route = copy(parent.route)
    else:
        route = {}
    if id is not None and cls.ID_NAME is not None:
        route[cls.ID_NAME] = id
    obj = cls(key=parent.key, route=route, config=parent.config)
    if data:
        obj.data = data
    else:
        obj.fetch()
    return obj
def getCachedDataKey(engineVersionHash, key):
    """Retrieve the cached value stored under `key` for the given engine version hash."""
    cacheFile = CachedDataManager._cacheFileForHash(engineVersionHash)
    manager = JsonDataManager(cacheFile)
    return manager.getKey(key)
def guesser(types=GUESS_TYPES, strict=False):
    """Create a TypeGuesser for inferring a type from multiple values."""
    return TypeGuesser(types=types, strict=strict)
def set(self, key: bytes, value: bytes) -> Tuple[Hash32]:
    """Set key to value in the sparse Merkle trie.

    Walks the stored sibling branch from leaf back to root, rehashing and
    persisting each intermediate node, and returns all updated hashes in
    root->leaf order.
    """
    validate_is_bytes(key)
    validate_length(key, self._key_size)
    validate_is_bytes(value)
    path = to_int(key)
    node = value
    _, branch = self._get(key)
    proof_update = []  # collected leaf->root; reversed before returning
    target_bit = 1
    # branch is stored root->leaf, so iterate reversed to climb from the leaf.
    for sibling_node in reversed(branch):
        node_hash = keccak(node)
        proof_update.append(node_hash)
        self.db[node_hash] = node
        # The current bit of 'path' selects which side this node sits on.
        if (path & target_bit):
            node = sibling_node + node_hash
        else:
            node = node_hash + sibling_node
        target_bit <<= 1
    self.root_hash = keccak(node)
    self.db[self.root_hash] = node
    return tuple(reversed(proof_update))
def split_commandline(s, comments=False, posix=True):
    """Split a semicolon-separated command line into individual commands.

    Backslashes and quotes are pre-escaped so the shlex pass treats them
    literally instead of interpreting them.
    """
    escaped = s.replace('\\', '\\\\').replace('\'', '\\\'').replace('\"', '\\\"')
    lex = shlex.shlex(escaped, posix=posix)
    lex.whitespace_split = True
    lex.whitespace = ';'
    if not comments:
        lex.commenters = ''
    return list(lex)
def dump_dict(cfg, f, indent=0):
    """Save a dictionary of attributes to the config stream f.

    Each key must be a string; each entry is emitted via dump_value and
    terminated with ';' on its own line.
    """
    for key in cfg:
        if not isstr(key):
            raise ConfigSerializeError("Dict keys must be strings: %r" % (key,))
        dump_value(key, cfg[key], f, indent)
        f.write(u';\n')
def packet2chain(packet):
    """Return the dpkt protocol chain of a packet as colon-joined class names."""
    names = [type(packet).__name__]
    layer = packet.data
    # dpkt nests each parsed layer in .data until the raw bytes payload.
    while not isinstance(layer, bytes):
        names.append(type(layer).__name__)
        layer = layer.data
    return ':'.join(names)
def save_session(self, outfolder, override=None):
    """Save a PyMOL session file (.pse) for the current complex."""
    if override is not None:
        filename = override
    else:
        parts = "_".join([self.hetid, self.plcomplex.chain, self.plcomplex.position])
        filename = '%s_%s' % (self.protname.upper(), parts)
    cmd.save("/".join([outfolder, "%s.pse" % filename]))
def _parse_date_default_value(property_name, default_value_string):
    """Parse the textual default for a date property into a datetime.date."""
    parsed = time.strptime(default_value_string, ORIENTDB_DATE_FORMAT)
    return datetime.date(parsed.tm_year, parsed.tm_mon, parsed.tm_mday)
def repackage_hidden(h):
    """Detach hidden state tensors from their autograd history (recursing into tuples)."""
    if isinstance(h, torch.Tensor):
        return h.detach()
    return tuple(repackage_hidden(piece) for piece in h)
def from_config(cls, config, prefix="postmark_", is_uppercase=False):
    """Instantiate PostmarkClient from a dict-like config using prefixed keys."""
    kwargs = {}
    for arg in get_args(cls):
        candidate = prefix + arg
        candidate = candidate.upper() if is_uppercase else candidate.lower()
        if candidate in config:
            kwargs[arg] = config[candidate]
    return cls(**kwargs)
def which(executable):
    """Search a fixed set of system directories for an executable; return its path or None."""
    search_dirs = (
        '/usr/local/bin',
        '/bin',
        '/usr/bin',
        '/usr/local/sbin',
        '/usr/sbin',
        '/sbin',
    )
    for directory in search_dirs:
        candidate = os.path.join(directory, executable)
        if os.path.exists(candidate) and os.path.isfile(candidate):
            return candidate
def find_packages(root_directory: str = '.') -> t.List[str]:
    """Find packages to pack, excluding tests when building a distribution."""
    building_dist = 'bdist_wheel' in sys.argv or 'bdist' in sys.argv
    exclude = ['test*', 'test.*'] if building_dist else []
    return setuptools.find_packages(root_directory, exclude=exclude)
def pitremove(np, dem, filleddem, workingdir=None, mpiexedir=None, exedir=None,
              log_file=None, runtime_file=None, hostfile=None):
    """Run TauDEM 'pitremove' (flooding approach) on a DEM.

    np: MPI process count; dem: input DEM path; filleddem: output pit-filled
    DEM path. The remaining arguments locate executables / working dirs and
    capture log output.
    """
    fname = TauDEM.func_name('pitremove')
    return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
                      {'-z': dem}, workingdir, None,
                      {'-fel': filleddem},
                      {'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
                      {'logfile': log_file, 'runtimefile': runtime_file})
def edit_project_preferences(self):
    """Open the preferences dialog for the active Spyder project."""
    from spyder.plugins.projects.confpage import ProjectPreferences

    if self.project_active:
        # The active project is kept at the head of the project list.
        dlg = ProjectPreferences(self, self.project_list[0])
        dlg.show()
        dlg.exec_()
def make_random(cls):
    """Return a Card with a random rank and suit, bypassing __init__."""
    card = object.__new__(cls)
    card.rank = Rank.make_random()
    card.suit = Suit.make_random()
    return card
def tops(opts):
    """Load and return the configured master_tops modules.

    Returns {} when 'master_tops' is absent from opts; otherwise builds a
    LazyLoader restricted to the configured tops and exposes only each
    module's 'top' function via FilterDictWrapper.
    """
    if 'master_tops' not in opts:
        return {}
    whitelist = list(opts['master_tops'].keys())
    ret = LazyLoader(
        _module_dirs(opts, 'tops', 'top'),
        opts,
        tag='top',
        whitelist=whitelist,
    )
    return FilterDictWrapper(ret, '.top')
def return_obj(cols, df, return_cols=False):
    """Wrap cols/df in a DataFrameHolder and return either the holder or the frame."""
    holder = DataFrameHolder(cols=cols, df=df)
    return holder.return_self(return_cols=return_cols)
def list_dataset_uris(cls, base_uri, config_path):
    """Return the list of dataset URIs found directly under base_uri on iRODS.

    Each child directory whose storage broker reports admin metadata is
    treated as a dataset; its URI is rebuilt from the directory's UUID
    component.
    """
    parsed_uri = generous_parse_uri(base_uri)
    irods_path = parsed_uri.path
    uri_list = []
    logger.info("irods_path: '{}'".format(irods_path))
    for dir_path in _ls_abspaths(irods_path):
        logger.info("dir path: '{}'".format(dir_path))
        base, uuid = os.path.split(dir_path)
        base_uri = "irods:{}".format(base)
        uri = cls.generate_uri(
            name=None,
            uuid=uuid,
            base_uri=base_uri
        )
        storage_broker = cls(uri, config_path)
        # Only directories that carry admin metadata are real datasets.
        if storage_broker.has_admin_metadata():
            uri_list.append(uri)
    return uri_list
def cli(debug, cache, incremental):
    """Entry point for the memorious crawler framework CLI."""
    settings.HTTP_CACHE = cache
    settings.INCREMENTAL = incremental
    settings.DEBUG = debug
    level = logging.DEBUG if settings.DEBUG else logging.INFO
    logging.basicConfig(level=level)
    init_memorious()
def address(self):
    """The public address shared with others to receive funds (derived lazily)."""
    if self._address is None:
        self._address = multisig_to_address(self.public_keys, self.m,
                                            version=self.version)
    return self._address
def relax_isometry(self):
    """Main loop for Riemannian relaxation.

    Each iteration recomputes the dual Riemannian metric and loss, records
    tracing state, then takes one gradient/optimization step. A final
    evaluation and trace snapshot are saved to the backup directory.

    NOTE(review): the final print_report(ii) reuses the loop variable, so
    niter == 0 would raise NameError -- confirm niter >= 1 is guaranteed.
    """
    for ii in range(self.relaxation_kwds['niter']):
        self.H = self.compute_dual_rmetric()
        self.loss = self.rieman_loss()
        self.trace_var.update(ii,self.H,self.Y,self.eta,self.loss)
        self.trace_var.print_report(ii)
        self.trace_var.save_backup(ii)
        self.compute_gradient()
        # The first iteration may need optimizer-state initialization.
        self.make_optimization_step(first_iter=(ii == 0))
    self.H = self.compute_dual_rmetric()
    # -1 marks the post-loop (final) evaluation in the trace.
    self.trace_var.update(-1,self.H,self.Y,self.eta,self.loss)
    self.trace_var.print_report(ii)
    tracevar_path = os.path.join(self.trace_var.backup_dir, 'results.pyc')
    TracingVariable.save(self.trace_var,tracevar_path)
def read_csv(text, sep="\t"):
    """Create a pandas DataFrame from delimited text.

    Bug fix: 'sep' was accepted but ignored (the separator was hard-coded
    to a tab); it is now forwarded to pandas.
    """
    import pandas as pd
    return pd.read_csv(StringIO(text), sep=sep)
def next_frame_base():
    """Common HParams shared by next_frame video models."""
    hparams = common_hparams.basic_params1()
    extra = (
        ("video_modality_loss_cutoff", 0.01),
        ("preprocess_resize_frames", None),
        ("shuffle_buffer_size", 128),
        ("tiny_mode", False),
        ("small_mode", False),
        ("stochastic_model", False),
        ("internal_loss", True),
        ("action_injection", "multi_additive"),
        ("scheduled_sampling_mode", "prediction_only"),
        ("scheduled_sampling_decay_steps", 10000),
        ("scheduled_sampling_max_prob", 1.0),
        ("scheduled_sampling_k", 900.0),
    )
    for name, default in extra:
        hparams.add_hparam(name, default)
    return hparams
def _create_axes(filenames, file_dict): try: f = iter(f for tup in file_dict.itervalues() for f in tup if f is not None).next() except StopIteration as e: raise (ValueError("No FITS files were found. " "Searched filenames: '{f}'." .format( f=filenames.values())), None, sys.exc_info()[2]) axes = FitsAxes(f[0].header) for i, u in enumerate(axes.cunit): if u == 'DEG': axes.cunit[i] = 'RAD' axes.set_axis_scale(i, np.pi/180.0) return axes
Create a FitsAxes object
def _addAccountRights(sidObject, user_right):
    """Helper to grant a single account right to the user identified by sidObject.

    Returns True on success, False when an exception occurs.
    NOTE(review): when sidObject is falsy the function falls through and
    returns None -- confirm callers treat that as failure.
    """
    try:
        if sidObject:
            _polHandle = win32security.LsaOpenPolicy(None, win32security.POLICY_ALL_ACCESS)
            user_rights_list = [user_right]
            _ret = win32security.LsaAddAccountRights(_polHandle, sidObject, user_rights_list)
            return True
    except Exception as e:
        log.exception('Error attempting to add account right, exception was %s', e)
        return False
def _extend_data(self, datapoint, new_data): if new_data: try: self.data[datapoint].extend(new_data) except KeyError: self.data[datapoint] = new_data
extend or assign new data to datapoint
def get(self, key, failobj=None, exact=0):
    """Look up key, expanding abbreviations unless exact; raises if key is ambiguous."""
    if not exact:
        key = self.getfullkey(key, new=1)
    return self.data.get(key, failobj)
def _read_config_file(args):
    """Load and decrypt the YAML config file; return (stages, config).

    Side effect: stores the stage table in the module-level STATE dict
    before decrypting the 'config' section for the requested stage.
    """
    stage = args.stage
    with open(args.config, 'rt') as f:
        config = yaml.safe_load(f.read())
    STATE['stages'] = config['stages']
    config['config'] = _decrypt_item(config['config'], stage=stage, key='', render=True)
    return config['stages'], config['config']
def _simplify_feature_value(self, name, value):
    """Return simplified and more pythonic values for ISUPPORT-style features."""
    if name == 'prefix':
        # value looks like '(ov)@+': mode letters in parens, then prefix chars.
        channel_modes, channel_chars = value.split(')')
        channel_modes = channel_modes[1:]
        # Reversed so the least-privileged mode comes first in the mapping.
        value = OrderedDict(list(zip(channel_modes, channel_chars))[::-1])
        return value
    elif name == 'chanmodes':
        value = value.split(',')
        return value
    elif name == 'targmax':
        # 'CMD1:n,CMD2:' -> {cmd: numeric-or-None limit}
        max_available = {}
        for sort in value.split(','):
            command, limit = sort.split(':')
            command = command.casefold()
            max_available[command] = limit_to_number(limit)
        return max_available
    elif name == 'chanlimit':
        # '#&:10' -> one limit entry per channel prefix character.
        limit_available = {}
        for sort in value.split(','):
            chan_types, limit = sort.split(':')
            for prefix in chan_types:
                limit_available[prefix] = limit_to_number(limit)
        return limit_available
    elif name in _limits:
        value = limit_to_number(value)
        return value
    else:
        return value
def main():
    """Demonstrate downloading a package into a fresh sandbox."""
    sandbox = create_sandbox()
    url = ('https://pypi.python.org/packages/source/c/checkmyreqs/'
           'checkmyreqs-0.1.6.tar.gz')
    directory = download_package_to_sandbox(sandbox, url)
    print(directory)
    destroy_sandbox(sandbox)
def namePop(ctxt):
    """Pop the top element name from the parser context's name stack."""
    ctxt__o = None if ctxt is None else ctxt._o
    return libxml2mod.namePop(ctxt__o)
def _handle_command(self, connection, sender, target, command, payload): try: handler = getattr(self, "cmd_{0}".format(command)) except AttributeError: self.safe_send(connection, target, "Unknown command: %s", command) else: try: logging.info("! Handling command: %s", command) handler(connection, sender, target, payload) except Exception as ex: logging.exception("Error calling command handler: %s", ex)
Handles a command, if any
def zk_walk(self, root_path, branch_path):
    """Recursively yield persistent znode paths below root_path/branch_path.

    Ephemeral znodes are skipped since there's no point in copying those.
    Yielded paths are relative to root_path.
    """
    full_path = os.path.join(root_path, branch_path) if branch_path else root_path
    try:
        children = self.client.get_children(full_path)
    except NoNodeError:
        # Node vanished between listing and visiting; treat as childless.
        children = set()
    except NoAuthError:
        raise AuthError("read children", full_path)
    for child in children:
        child_path = os.path.join(branch_path, child) if branch_path else child
        try:
            stat = self.client.exists(os.path.join(root_path, child_path))
        except NoAuthError:
            raise AuthError("read", child)
        # ephemeralOwner != 0 marks an ephemeral znode; skip those.
        if stat is None or stat.ephemeralOwner != 0:
            continue
        yield child_path
        for new_path in self.zk_walk(root_path, child_path):
            yield new_path
def _IOC(cls, dir, op, structure=None): control = cls(dir, op, structure) def do(dev, **args): return control(dev, **args) return do
Encode an ioctl id.
def _add_label_edges(self):
    """Add ring-closure edges between atoms sharing the same atom_label digit."""
    labels = self.ast.select('atom_label')
    if not labels:
        return
    # Gather each labelled atom node, keyed by its ring-bond digit.
    label_digits = defaultdict(list)
    for label in labels:
        digits = list(label.tail[0])
        for digit in digits:
            label_digits[digit].append(label.parent())
    # Each digit is expected to appear on exactly two atoms.
    for label, (atom1, atom2) in label_digits.items():
        atom1_idx = self._atom_indices[id(atom1)]
        atom2_idx = self._atom_indices[id(atom2)]
        self.add_edge(atom1_idx, atom2_idx)
def remove_user(config, group, username):
    """Remove the specified user from the specified LDAP group, printing errors.

    NOTE(review): 'except ldap3.NO_SUCH_ATTRIBUTE' catches whatever that
    name is bound to -- confirm it is actually an exception class in this
    ldap3 version.
    """
    client = Client()
    client.prepare_connection()
    group_api = API(client)
    try:
        group_api.remove_user(group, username)
    except ldap_tools.exceptions.NoGroupsFound:
        print("Group ({}) not found".format(group))
    except ldap_tools.exceptions.TooManyResults:
        print("Query for group ({}) returned multiple results.".format(
            group))
    except ldap3.NO_SUCH_ATTRIBUTE:
        print("{} does not exist in {}".format(username, group))
def check_shutdown_flag(self):
    """Stop the tornado IOLoop if a shutdown has been requested."""
    if not self.shutdown_requested:
        return
    tornado.ioloop.IOLoop.instance().stop()
    print("web server stopped.")
def puts(s='', newline=True, stream=STDOUT):
    """Wrap _puts so output is skipped inside the Werkzeug reloader process.

    Falls back to an encoded write when stdout's encoding cannot represent s.
    """
    if not is_werkzeug_process():
        try:
            return _puts(s, newline, stream)
        except UnicodeEncodeError:
            return _puts(s.encode(sys.stdout.encoding), newline, stream)
def bread(stream):
    """Bdecode a file-like object (anything with .read) or a path to an object."""
    if hasattr(stream, "read"):
        return bdecode(stream.read())
    # Idiom: a context manager replaces the manual try/finally close.
    with open(stream, "rb") as handle:
        return bdecode(handle.read())
def download(self, url):
    """Download a sound file and return its raw bytes; raises on HTTP errors.

    Uses the class-level requests session ('__class__' resolves via the
    implicit closure available inside methods).
    """
    logging.getLogger().debug("Downloading '%s'..." % (url))
    response = __class__.session.get(url, headers={"User-Agent": "Mozilla/5.0"},
                                     timeout=3.1)
    response.raise_for_status()
    return response.content
def align(self, alignment=None):
    """Advance the reader to the next alignment boundary (pointer size by default)."""
    if alignment is None:
        # Default to the dump's native pointer size.
        if self.reader.sysinfo.ProcessorArchitecture == PROCESSOR_ARCHITECTURE.AMD64:
            alignment = 8
        else:
            alignment = 4
    remainder = self.current_position % alignment
    if remainder:
        # Seek forward relative to the current position (whence=1).
        self.seek(alignment - remainder, 1)
def _normalize_sv_coverage_gatk(group_id, inputs, backgrounds, work_dir, back_files, out_files):
    """Normalize CNV coverage using a panel of normals with GATK's de-noise approach.

    Prefers an explicitly configured background CNV reference; otherwise a
    panel of normals is built from the background samples (or none is used).
    Updates and returns the back_files/out_files maps keyed by sample name.
    """
    input_backs = set(filter(lambda x: x is not None,
                             [dd.get_background_cnv_reference(d, "gatk-cnv") for d in inputs]))
    if input_backs:
        assert len(input_backs) == 1, "Multiple backgrounds in group: %s" % list(input_backs)
        pon = list(input_backs)[0]
    elif backgrounds:
        pon = gatkcnv.create_panel_of_normals(backgrounds, group_id, work_dir)
    else:
        pon = None
    for data in inputs:
        # Per-sample working directory for the binned coverage outputs.
        work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "structural",
                                                   dd.get_sample_name(data), "bins"))
        denoise_file = gatkcnv.denoise(data, pon, work_dir)
        out_files[dd.get_sample_name(data)] = denoise_file
        back_files[dd.get_sample_name(data)] = pon
    return back_files, out_files
def check_page(fn): "Decorator to protect drawing methods" @wraps(fn) def wrapper(self, *args, **kwargs): if not self.page and not kwargs.get('split_only'): self.error("No page open, you need to call add_page() first") else: return fn(self, *args, **kwargs) return wrapper
Decorator to protect drawing methods
def durables(self):
    """Return a dict of every key under the keyspace with its decoded value.

    Reads go through connection.retry; each get installs the
    __increment_last_updated watch so later changes are noticed.
    """
    results = dict()
    for child in self.connection.retry(self.connection.get_children, self.keyspace):
        value, _ = self.connection.retry(
            self.connection.get,
            self.__path_of(child),
            watch=self.__increment_last_updated
        )
        results[child] = self.encoding.decode(value)
    return results
def funname(file):
    """Return variable name(s) derived from file name(s): basename without extension."""
    def stem(path):
        return os.path.splitext(os.path.basename(path))[0]

    if isinstance(file, str):
        return stem(file)
    return [stem(f) for f in file]
def search_subreddit(self, name=None):
    """Open a prompt to search the given (or current) subreddit."""
    name = name or self.content.name
    query = self.term.prompt_input('Search {0}: '.format(name))
    if not query:
        return
    with self.term.loader('Searching'):
        self.content = SubredditContent.from_name(
            self.reddit, name, self.term.loader, query=query)
    # Only reset navigation when the search actually succeeded.
    if not self.term.loader.exception:
        self.nav = Navigator(self.content.get)
def _write_vcf_breakend(brend, out_handle): out_handle.write("{0}\n".format("\t".join(str(x) for x in [brend.chrom, brend.pos + 1, brend.id, brend.ref, brend.alt, ".", "PASS", brend.info])))
Write out a single VCF line with breakpoint information.
def _compute_a21_factor(self, C, imt, z1pt0, vs30):
    """Compute and return the a21 factor, equation 18, page 80.

    a21 defaults to e2; where the corrected term would be negative it is
    replaced by -numerator/denominator, and it is zeroed for sites with
    vs30 >= 1000 m/s.
    """
    e2 = self._compute_e2_factor(imt, vs30)
    a21 = e2.copy()
    vs30_star, v1 = self._compute_vs30_star_factor(imt, vs30)
    median_z1pt0 = self._compute_median_z1pt0(vs30)
    numerator = ((C['a10'] + C['b'] * self.CONSTS['n']) *
                 np.log(vs30_star / np.min([v1, 1000])))
    denominator = np.log((z1pt0 + self.CONSTS['c2']) /
                         (median_z1pt0 + self.CONSTS['c2']))
    # Clamp: where numerator + e2 * denominator would go negative.
    idx = numerator + e2 * denominator < 0
    a21[idx] = - numerator[idx] / denominator[idx]
    # Hard rock sites get no basin-depth adjustment.
    idx = vs30 >= 1000
    a21[idx] = 0.0
    return a21
def stats_evaluation(stats):
    """Compute a 0-10 pylint-style score from ``stats``, or None without statements."""
    statement = stats.get('statement')
    if not statement or statement <= 0:
        return None
    weighted = float(5 * stats.get('error', 0)
                     + stats.get('warning', 0)
                     + stats.get('refactor', 0)
                     + stats.get('convention', 0))
    return 10.0 - (weighted / statement * 10)
def on_for_seconds(self, steering, speed, seconds, brake=True, block=True):
    """Rotate the motors according to ``steering`` for ``seconds``.

    Steering/speed are converted into per-wheel native-unit speeds and the
    call is delegated to MoveTank.on_for_seconds.
    """
    (left_speed, right_speed) = self.get_speed_steering(steering, speed)
    MoveTank.on_for_seconds(self, SpeedNativeUnits(left_speed),
                            SpeedNativeUnits(right_speed), seconds, brake, block)
def resolve(self, authorization: http.Header):
    """Determine the request's user via HTTP Basic Authentication.

    Returns None when the header is missing or not a Basic scheme.
    NOTE(review): a malformed header (no space, bad base64, missing ':')
    raises here rather than returning None -- confirm that is desired.
    """
    if authorization is None:
        return None
    scheme, token = authorization.split()
    if scheme.lower() != 'basic':
        return None
    username, password = base64.b64decode(token).decode('utf-8').split(':')
    user = authenticate(username=username, password=password)
    return user
def split_pattern(self):
    """Build the pattern used to split the input file from the split order."""
    return ''.join('_{}%{}'.format(part.capitalize(), part)
                   for part in self.split_order)
def harvest(lancet, config_section):
    """Construct and return a configured Harvest timer client.

    Credentials and the project/task ID-getter callables come from the
    lancet configuration; the client is registered for cleanup on close.
    """
    url, username, password = lancet.get_credentials(
        config_section, credentials_checker
    )
    project_id_getter = lancet.get_instance_from_config(
        "timer", "project_id_getter", lancet
    )
    task_id_getter = lancet.get_instance_from_config(
        "timer", "task_id_getter", lancet
    )
    client = HarvestPlatform(
        server=url,
        basic_auth=(username, password),
        project_id_getter=project_id_getter,
        task_id_getter=task_id_getter,
    )
    lancet.call_on_close(client.close)
    return client
def create_app(settings):
    """Create and configure a new Flask application from a settings module.

    Copies plain settings attributes into app.config, wires the
    Funnel/Mobility extensions, registers each installed app's blueprint,
    error handlers, and a per-request teardown that closes the db session.

    NOTE(review): app.installed_apps is only set when INSTALLED_APPS exists
    in config, yet it is iterated unconditionally below -- confirm settings
    always define INSTALLED_APPS.
    """
    app = Flask(__name__)
    # Only copy plain values: skip private names, modules and functions.
    for name in dir(settings):
        value = getattr(settings, name)
        if not (name.startswith('_') or isinstance(value, ModuleType)
                or isinstance(value, FunctionType)):
            app.config[name] = value
    if 'INSTALLED_APPS' in app.config:
        app.installed_apps = app.config.get('INSTALLED_APPS', [])
    Funnel(app)
    Mobility(app)
    for app_path in app.installed_apps:
        app.register_blueprint(
            getattr(__import__('{0}.views'.format(app_path),
                               fromlist=['blueprint']), 'blueprint'))
    register_error_handlers(app)

    @app.context_processor
    def context_processor():
        return dict(config=app.config)

    @app.teardown_request
    def teardown_request(exception=None):
        if hasattr(app, 'db_session'):
            app.db_session.close()

    return app
def _overlapping(files):
    """Return True if any two files in the list cover intersecting segments."""
    seen = set()
    for path in files:
        segment = file_segment(path)
        if any(segment.intersects(other) for other in seen):
            return True
        seen.add(segment)
    return False
def write_to_files_np(features, tokenizer, max_seq_length, max_predictions_per_seq, output_files):
    """Write pre-tokenized TrainingInstance features to a single .npz file.

    Ragged per-example fields are stored as object arrays; labels and valid
    lengths as int32. Exactly one output file is supported.
    """
    next_sentence_labels = []
    valid_lengths = []
    assert len(output_files) == 1, 'numpy format only support single output file'
    output_file = output_files[0]
    # Unpack the pre-built feature tuple (overwrites the placeholders above).
    (input_ids, segment_ids, masked_lm_positions, masked_lm_ids,
     masked_lm_weights, next_sentence_labels, valid_lengths) = features
    total_written = len(next_sentence_labels)
    outputs = collections.OrderedDict()
    outputs['input_ids'] = np.array(input_ids, dtype=object)
    outputs['segment_ids'] = np.array(segment_ids, dtype=object)
    outputs['masked_lm_positions'] = np.array(masked_lm_positions, dtype=object)
    outputs['masked_lm_ids'] = np.array(masked_lm_ids, dtype=object)
    outputs['masked_lm_weights'] = np.array(masked_lm_weights, dtype=object)
    outputs['next_sentence_labels'] = np.array(next_sentence_labels, dtype='int32')
    outputs['valid_lengths'] = np.array(valid_lengths, dtype='int32')
    np.savez_compressed(output_file, **outputs)
    logging.info('Wrote %d total instances', total_written)
def do_gather(flist):
    """Gather all the HDUs from a list of FITS files into one HDUList.

    The first file contributes all of its HDUs; subsequent files skip their
    first nskip HDUs (3, or 4 when the first extension is a SKYMAP).
    """
    hlist = []
    nskip = 3
    for fname in flist:
        fin = fits.open(fname)
        if len(hlist) == 0:
            # First file: inspect layout and take everything.
            if fin[1].name == 'SKYMAP':
                nskip = 4
            start = 0
        else:
            start = nskip
        for h in fin[start:]:
            hlist.append(h)
    hdulistout = fits.HDUList(hlist)
    return hdulistout
def updateKronCovs(covs, Acovs, N, P):
    """Ensure covs and Acovs are matching lists, with identity defaults.

    When both are None, defaults to a single column of ones (N x 1) and a
    P x P identity. Single (non-list) inputs are wrapped in lists.
    Note: the exact `type(x) != list` checks intentionally exclude list
    subclasses; the typo 'missmatch' in the error message is preserved.
    """
    if (covs is None) and (Acovs is None):
        covs = [SP.ones([N, 1])]
        Acovs = [SP.eye(P)]
    if Acovs is None or covs is None:
        raise Exception("Either Acovs or covs is None, while the other isn't")
    if (type(Acovs) != list) and (type(covs) != list):
        Acovs = [Acovs]
        covs = [covs]
    if (type(covs) != list) or (type(Acovs) != list) or (len(covs) != len(Acovs)):
        raise Exception("Either Acovs or covs is not a list or they missmatch in length")
    return covs, Acovs
def numberOfXTilesAtZoom(self, zoom):
    "Returns the number of tiles over x at a given zoom level"
    minRow, minCol, maxRow, maxCol = self.getExtentAddress(zoom)
    return maxCol - minCol + 1
def parse_numeric(self):
    """Tokenize a Fortran numerical value from the character stream.

    Accepts an optional leading '-', at most one decimal point, and an
    e/E/d/D exponent with optional sign. Consumes input via
    self.update_chars and returns the accumulated literal text.
    """
    word = ''
    frac = False
    if self.char == '-':
        word += self.char
        self.update_chars()
    while self.char.isdigit() or (self.char == '.' and not frac):
        # Only one decimal point is permitted in the mantissa.
        if self.char == '.':
            frac = True
        word += self.char
        self.update_chars()
    # Optional exponent part (Fortran allows d/D as well as e/E).
    if self.char in 'eEdD':
        word += self.char
        self.update_chars()
        if self.char in '+-':
            word += self.char
            self.update_chars()
        while self.char.isdigit():
            word += self.char
            self.update_chars()
    return word
def report_view(self, request, key, period):
    """Process a reporting admin action and render the chosen report.

    Validates permissions, the report key, and the period before delegating
    to the reporter; unexpected failures are logged and rendered as a 500.
    """
    if not self.has_change_permission(request, None):
        raise PermissionDenied
    reporters = self.get_reporters()
    try:
        reporter = reporters[key]
    except KeyError:
        return self.render_report_error(request, _('Report not found'), 404)
    allowed_periods = [k for (k, v) in self.get_period_options()]
    # 'A' (all time) is normalized to an empty period.
    if period == 'A':
        period = ''
    if period and period not in allowed_periods:
        return self.render_report_error(request, _('Invalid report type'), 400)
    try:
        return reporter.process(request,
                                self.get_period_queryset(request, period),
                                period)
    except Exception:
        # Narrowed from a bare 'except:' which also trapped SystemExit and
        # KeyboardInterrupt.
        logger.exception('Tracking Reports could not generate the report due to an internal error')
        return self.render_report_error(request,
                                        _('An unexpected error has occurred'),
                                        500)
def _get_stddevs(self, C, stddev_types, mag, num_sites): stddevs = [] for _ in stddev_types: if mag < 7.16: sigma = C['c11'] + C['c12'] * mag elif mag >= 7.16: sigma = C['c13'] stddevs.append(np.zeros(num_sites) + sigma) return stddevs
Return total standard deviation as for equation 35, page 1021.
def _get_k8s_model_dict(model_type, model): model = copy.deepcopy(model) if isinstance(model, model_type): return model.to_dict() elif isinstance(model, dict): return _map_dict_keys_to_model_attributes(model_type, model) else: raise AttributeError("Expected object of type '{}' (or 'dict') but got '{}'.".format(model_type.__name__, type(model).__name__))
Returns a dictionary representation of a provided model type
def load_tables(self, query, meta):
    """Load the resource tables required to execute `query` into the db.

    Raises QueryError with configuration guidance when AWS credentials
    cannot be located.
    """
    try:
        for table in meta.tables:
            self.load_table(table)
    except NoCredentialsError:
        help_link = 'http://boto3.readthedocs.io/en/latest/guide/configuration.html'
        raise QueryError('Unable to locate AWS credential. '
                         'Please see {0} on how to configure AWS credential.'.format(help_link))
def mirror_stdout_stderr(self):
    """Mirror STDOUT/STDERR to the run's streaming log (used by _init_jupyter).

    Each stream is teed to a TextStreamPusher writing OUTPUT_FNAME with
    timestamps; stderr lines additionally get an 'ERROR' prefix.
    """
    fs_api = self._api.get_file_stream_api()
    io_wrap.SimpleTee(sys.stdout, streaming_log.TextStreamPusher(
        fs_api, OUTPUT_FNAME, prepend_timestamp=True))
    io_wrap.SimpleTee(sys.stderr, streaming_log.TextStreamPusher(
        fs_api, OUTPUT_FNAME, prepend_timestamp=True, line_prepend='ERROR'))
def rating_score(obj, user):
    """Return the score `user` gave `obj`, None if unrated, False if not applicable."""
    if not user.is_authenticated() or not hasattr(obj, '_ratings_field'):
        return False
    descriptor = getattr(obj, obj._ratings_field)
    try:
        return descriptor.get(user=user).score
    except descriptor.model.DoesNotExist:
        return None
def load():
    """Import every available plugin module; return the names that loaded.

    Plugins that fail to import are remembered in FAILED_PLUGINS so they are
    not retried; a failure is only a warning when the plugin is configured.
    """
    plugins = []
    for filename in os.listdir(PLUGINS_PATH):
        if not filename.endswith(".py") or filename.startswith("_"):
            continue
        if not os.path.isfile(os.path.join(PLUGINS_PATH, filename)):
            continue
        plugin = filename[:-3]
        if plugin in FAILED_PLUGINS:
            continue
        try:
            __import__(PLUGINS.__name__, {}, {}, [plugin])
            plugins.append(plugin)
            log.debug("Successfully imported {0} plugin".format(plugin))
        except (ImportError, SyntaxError) as error:
            message = "Failed to import {0} plugin ({1})".format(plugin, error)
            # Only warn when the user actually configured this plugin.
            if Config().sections(kind=plugin):
                log.warn(message)
            else:
                log.debug(message)
            FAILED_PLUGINS.append(plugin)
    return plugins
def valid_batch(self):
    """Return a single batch containing every validation case."""
    paired = list(zip(*self.corpus.get_valid_fns()))
    return self.load_batch(paired)
def css1(self, css_path, dom=None):
    """Return an expectation yielding the first element matched by a CSS selector."""
    if dom is None:
        dom = self.browser

    def _first(path, scope):
        return self.css(path, scope)[0]

    return expect(_first, args=[css_path, dom])
def create_legacy_graph_tasks():
    """Create the rule tasks needed to recursively parse the legacy build graph."""
    return [
        transitive_hydrated_targets,
        transitive_hydrated_target,
        hydrated_targets,
        hydrate_target,
        find_owners,
        hydrate_sources,
        hydrate_bundles,
        RootRule(OwnersRequest),
    ]
def initialize(self, timeouts):
    """Bind or connect the nanomsg socket to its address, then apply timeouts."""
    # Deliberately an identity check: only the literal True means 'bind'.
    action = self.socket.bind if self.bind is True else self.socket.connect
    action(self.address)
    self._set_timeouts(timeouts)
def cleanup(self):
    """Check for dependency cycles among still-pending nodes.

    Raises UserError describing any genuine cycles -- or any pending node
    with no cycle that never reached NODE_EXECUTED, which indicates an
    internal error.
    """
    if not self.pending_children:
        return
    # Pair each pending node with whatever cycle (if any) it participates in.
    nclist = [(n, find_cycle([n], set())) for n in self.pending_children]
    genuine_cycles = [
        node for node, cycle in nclist
        if cycle or node.get_state() != NODE_EXECUTED
    ]
    if not genuine_cycles:
        # All pending nodes actually executed fine -- nothing to report.
        return
    desc = 'Found dependency cycle(s):\n'
    for node, cycle in nclist:
        if cycle:
            desc = desc + " " + " -> ".join(map(str, cycle)) + "\n"
        else:
            desc = desc + \
                " Internal Error: no cycle found for node %s (%s) in state %s\n" % \
                (node, repr(node), StateString[node.get_state()])
    raise SCons.Errors.UserError(desc)
def _get_next_positional(self):
    """Get the next positional action of the active parser, if one exists.

    NOTE(review): 'active_parser == last_positional' compares a parser
    against a positional action -- it can only match when the parser itself
    was recorded in visited_positionals; confirm that is the intent.
    """
    active_parser = self.active_parsers[-1]
    last_positional = self.visited_positionals[-1]
    all_positionals = active_parser._get_positional_actions()
    if not all_positionals:
        return None
    if active_parser == last_positional:
        return all_positionals[0]
    # Locate the last visited positional, then return its successor.
    i = 0
    for i in range(len(all_positionals)):
        if all_positionals[i] == last_positional:
            break
    if i + 1 < len(all_positionals):
        return all_positionals[i + 1]
    return None
def connect(self):
    """Initiate the socket.io channel used to receive streams and hook error events."""
    self.socketIO = SocketIO(
        host=self.iosocket_server,
        port=80,
        resource=self.iosocket_resource,
        proxies=self.proxies,
        headers=self.headers,
        transports=["websocket"],
        Namespace=AtlasNamespace,
    )
    self.socketIO.on(self.EVENT_NAME_ERROR, self.handle_error)
def failed_hosts(self):
    """Hosts whose task execution failed, mapped to their results."""
    failed = {}
    for host, result in self.items():
        if result.failed:
            failed[host] = result
    return failed
def label_subplot(ax=None, x=0.5, y=-0.25, text="(a)", **kwargs):
    """Place a subplot label (e.g. '(a)') at axes-relative coordinates.

    Defaults to the current axes; extra kwargs are forwarded to ax.text.
    """
    if ax is None:
        ax = plt.gca()
    # transAxes: (x, y) are in axes coordinates, not data coordinates.
    ax.text(x=x, y=y, s=text, transform=ax.transAxes,
            horizontalalignment="center", verticalalignment="top", **kwargs)
def tryDynMod(name):
    """Dynamically import and return a python module, raising NoSuchDyn if missing."""
    try:
        module = importlib.import_module(name)
    except ModuleNotFoundError:
        raise s_exc.NoSuchDyn(name=name)
    return module
def elem_add(self, idx=None, name=None, **kwargs):
    """Overload of elem_add for a JIT class: load the real class, then delegate.

    NOTE(review): when jit_load() leaves self.loaded False this silently
    returns None -- confirm that is the intended failure mode.
    """
    self.jit_load()
    if self.loaded:
        return self.system.__dict__[self.name].elem_add(
            idx, name, **kwargs)
def json_to_response(self, action=None, json_status=None, success_url=None,
                     json_data=None, **response_kwargs):
    """Build a valid JSON response with the next action to be followed by the JS.

    For redirect actions the target URL is included as 'action_url'.
    """
    data = {
        "status": self.get_status(json_status),
        "action": self.get_action(action),
        "extra_data": self.get_json_data(json_data or {})
    }
    if self.action == AjaxResponseAction.REDIRECT:
        data["action_url"] = success_url or self.get_success_url()
    return JsonResponse(data, **response_kwargs)
def _next_port(self, port): port = 5000 + (port + 1) % 1000 if port == self.port: raise DatacatsError('Too many instances running') return port
Return another port from the 5000-5999 range
def reset(self):
    """Process everything all over again.

    Clears the on-disk index and re-registers this listener on every source
    so all items are delivered afresh.
    """
    self.indexCount = 0
    indexDir = self.store.newDirectory(self.indexDirectory)
    if indexDir.exists():
        indexDir.remove()
    for src in self.getSources():
        # Re-adding after removal restarts delivery from the beginning.
        src.removeReliableListener(self)
        src.addReliableListener(self, style=iaxiom.REMOTE)
def on_helpButton(self, event, page=None):
    """Show the named HTML help page in a frame."""
    root = find_pmag_dir.get_pmag_dir()
    help_page = os.path.join(root, 'dialogs', 'help_files', page)
    # Fall back to the flat layout used by installed distributions.
    if not os.path.exists(help_page):
        help_page = os.path.join(root, 'help_files', page)
    html_frame = pw.HtmlFrame(self, page=help_page)
    html_frame.Show()
def send_confirm_password_email(person):
    """Send an email to `person` with a login link allowing them to confirm their password."""
    url = '%s/profile/login/%s/' % (
        settings.REGISTRATION_BASE_URL, person.username)
    context = CONTEXT.copy()
    context.update({
        'url': url,
        'receiver': person,
    })
    to_email = person.email
    subject, body = render_email('confirm_password', context)
    send_mail(subject, body, settings.ACCOUNTS_EMAIL, [to_email])