code
stringlengths
51
2.34k
docstring
stringlengths
11
171
def to_json(df, values):
    """Format output for the json response.

    Parameters
    ----------
    df : pandas.DataFrame
        Frame whose first row holds the values to report.
    values : dict
        Maps column name -> display label for each column to include.

    Returns
    -------
    dict
        ``{"data": [{"label": ..., "value": "0.xx"}, ...]}`` where each
        value is the column's share of the first-row total, formatted to
        two decimal places.
    """
    records = []
    if df.empty:
        return {"data": []}
    total = float(np.sum([df[c].iloc[0] for c in values]))
    for c in values:
        # Guard against a zero total (previously a ZeroDivisionError);
        # report a 0.00 share for every column in that case.
        ratio = df[c].iloc[0] / total if total else 0.0
        records.append({
            "label": values[c],
            "value": "%.2f" % np.around(ratio, decimals=2),
        })
    return {"data": records}
Format output for the json response.
def cal_model_performance(obsl, siml):
    """Calculate and print model performance indexes.

    Computes NSE, R-square, RMSE, PBIAS and RSR between the observed
    series ``obsl`` and the simulated series ``siml`` via ``MathClass``
    and prints them on one line.
    """
    metrics = {
        'NSE': MathClass.nashcoef(obsl, siml),
        'R-square': MathClass.rsquare(obsl, siml),
        'RMSE': MathClass.rmse(obsl, siml),
        'PBIAS': MathClass.pbias(obsl, siml),
        'RSR': MathClass.rsr(obsl, siml),
    }
    print('NSE: %.2f, R-square: %.2f, PBIAS: %.2f%%, RMSE: %.2f, RSR: %.2f'
          % (metrics['NSE'], metrics['R-square'], metrics['PBIAS'],
             metrics['RMSE'], metrics['RSR']))
Calculate model performance indexes.
def dlp_job_path(cls, project, dlp_job):
    """Return a fully-qualified dlp_job string.

    Expands the ``projects/{project}/dlpJobs/{dlp_job}`` resource
    template with the supplied identifiers.
    """
    template = "projects/{project}/dlpJobs/{dlp_job}"
    return google.api_core.path_template.expand(
        template,
        project=project,
        dlp_job=dlp_job,
    )
Return a fully-qualified dlp_job string.
def download_threads(self, url): output_file = os.path.join(self.output_path, '{}.tfa'.format(os.path.split(url)[-1])) size = 0 try: stats = os.stat(output_file) size = stats.st_size except FileNotFoundError: pass if not os.path.isfile(output_file) or size <= 100: session = OAuth1Session(self.consumer_key, self.consumer_secret, access_token=self.session_token, access_token_secret=self.session_secret) r = session.get(url + '/alleles_fasta') if r.status_code == 200 or r.status_code == 201: if re.search('json', r.headers['content-type'], flags=0): decoded = r.json() else: decoded = r.text with open(output_file, 'w') as allele: allele.write(decoded)
Download the allele files
def run(self): log.debug("Starting Kafka producer I/O thread.") while self._running: try: self.run_once() except Exception: log.exception("Uncaught error in kafka producer I/O thread") log.debug("Beginning shutdown of Kafka producer I/O thread, sending" " remaining records.") while (not self._force_close and (self._accumulator.has_unsent() or self._client.in_flight_request_count() > 0)): try: self.run_once() except Exception: log.exception("Uncaught error in kafka producer I/O thread") if self._force_close: self._accumulator.abort_incomplete_batches() try: self._client.close() except Exception: log.exception("Failed to close network client") log.debug("Shutdown of Kafka producer I/O thread has completed.")
The main run loop for the sender thread.
def history(verbose, range): alembic_command.history( config=get_config(), rev_range=range, verbose=verbose )
List revision changesets chronologically
def from_string(cls, s):
    """Return a `Status` instance from its string representation.

    Raises:
        ValueError: if ``s`` does not match any known status string.
    """
    matching = [num for num, text in cls._STATUS2STR.items() if text == s]
    if not matching:
        raise ValueError("Wrong string %s" % s)
    return cls(matching[0])
Return a `Status` instance from its string representation.
def create_router(self, name, tenant_id, subnet_lst): try: body = {'router': {'name': name, 'tenant_id': tenant_id, 'admin_state_up': True}} router = self.neutronclient.create_router(body=body) rout_dict = router.get('router') rout_id = rout_dict.get('id') except Exception as exc: LOG.error("Failed to create router with name %(name)s" " Exc %(exc)s", {'name': name, 'exc': str(exc)}) return None ret = self.add_intf_router(rout_id, tenant_id, subnet_lst) if not ret: try: ret = self.neutronclient.delete_router(rout_id) except Exception as exc: LOG.error("Failed to delete router %(name)s, Exc %(exc)s", {'name': name, 'exc': str(exc)}) return None return rout_id
Create an OpenStack router and add the interfaces.
def _CreateSudoersGroup(self): if not self._GetGroup(self.google_sudoers_group): try: command = self.groupadd_cmd.format(group=self.google_sudoers_group) subprocess.check_call(command.split(' ')) except subprocess.CalledProcessError as e: self.logger.warning('Could not create the sudoers group. %s.', str(e)) if not os.path.exists(self.google_sudoers_file): try: with open(self.google_sudoers_file, 'w') as group: message = '%{0} ALL=(ALL:ALL) NOPASSWD:ALL'.format( self.google_sudoers_group) group.write(message) except IOError as e: self.logger.error( 'Could not write sudoers file. %s. %s', self.google_sudoers_file, str(e)) return file_utils.SetPermissions( self.google_sudoers_file, mode=0o440, uid=0, gid=0)
Create a Linux group for Google added sudo user accounts.
def _get_sql(filename):
    """Returns the contents of the sql file from the given ``filename``.

    The file is looked up relative to the module-level ``SQL_DIR``.
    """
    path = os.path.join(SQL_DIR, filename)
    with open(path, 'r') as sql_file:
        return sql_file.read()
Returns the contents of the sql file from the given ``filename``.
def endSubscription(self, subscriber):
    """Unregister a live subscription.

    Drops the subscriber's request id from both internal lookup
    tables; ids that are already absent are ignored.
    """
    req_id = subscriber.reqId
    self._reqId2Contract.pop(req_id, None)
    self.reqId2Subscriber.pop(req_id, None)
Unregister a live subscription.
def sphinx_extension(app, exception): "Wrapped up as a Sphinx Extension" if not app.builder.name in ("html", "dirhtml"): return if not app.config.sphinx_to_github: if app.config.sphinx_to_github_verbose: print("Sphinx-to-github: Disabled, doing nothing.") return if exception: if app.config.sphinx_to_github_verbose: print("Sphinx-to-github: Exception raised in main build, doing nothing.") return dir_helper = DirHelper( os.path.isdir, os.listdir, os.walk, shutil.rmtree ) file_helper = FileSystemHelper( open, os.path.join, shutil.move, os.path.exists ) operations_factory = OperationsFactory() handler_factory = HandlerFactory() layout_factory = LayoutFactory( operations_factory, handler_factory, file_helper, dir_helper, app.config.sphinx_to_github_verbose, sys.stdout, force=True ) layout = layout_factory.create_layout(app.outdir) layout.process()
Wrapped up as a Sphinx Extension
def getPreprocessorDefinitions(self, engineRoot, delimiter=' '): return delimiter.join(self.resolveRoot(self.definitions, engineRoot))
Returns the list of preprocessor definitions for this library, joined using the specified delimiter
def check_who_read(self, messages): for m in messages: readers = [] for p in m.thread.participation_set.all(): if p.date_last_check is None: pass elif p.date_last_check > m.sent_at: readers.append(p.participant.id) setattr(m, "readers", readers) return messages
Check who read each message.
def WaitForSnapshotCompleted(snapshot):
    """Blocks until snapshot is complete.

    Polls ``snapshot.update()``, printing a progress dot each pass, and
    returns once ``snapshot.status`` becomes ``'completed'``; sleeps 5
    seconds between polls.
    """
    # BUG FIX: the Python 2 print statement is a syntax error on
    # Python 3; use the print() function, consistent with the other
    # (py3) snippets in this file.
    print('Waiting for snapshot %s to be completed...' % snapshot)
    while True:
        snapshot.update()
        sys.stdout.write('.')
        sys.stdout.flush()
        if snapshot.status == 'completed':
            break
        time.sleep(5)
    return
Blocks until snapshot is complete.
def validate(self, data, schema, **kwargs): if not isinstance(schema, dict): schema = {'$ref': schema} return validate( data, schema, resolver=self.ref_resolver_cls.from_schema(schema), types=self.app.config.get('RECORDS_VALIDATION_TYPES', {}), **kwargs )
Validate data using schema with ``JSONResolver``.
def _checkDragDropEvent(self, ev): mimedata = ev.mimeData() if mimedata.hasUrls(): urls = [str(url.toLocalFile()) for url in mimedata.urls() if url.toLocalFile()] else: urls = [] if urls: ev.acceptProposedAction() return urls else: ev.ignore() return None
Checks if event contains a file URL, accepts if it does, ignores if it doesn't
def setup_multiifo_combine_statmap(workflow, final_bg_file_list, out_dir, tags): if tags is None: tags = [] make_analysis_dir(out_dir) logging.info('Setting up multiifo combine statmap') cstat_exe = PyCBCMultiifoCombineStatmap(workflow.cp, 'combine_statmap', ifos=workflow.ifos, tags=tags, out_dir=out_dir) ifolist = ' '.join(workflow.ifos) cluster_window = float(workflow.cp.get_opt_tags('combine_statmap', 'cluster-window', tags)) combine_statmap_node = cstat_exe.create_node(final_bg_file_list, ifolist, cluster_window, tags) workflow.add_node(combine_statmap_node) return combine_statmap_node.output_file
Combine the multiifo statmap files into one background file
def render(self, context, instance, placeholder): if instance and instance.template: self.render_template = instance.template return super(PluginTemplateMixin,self).render(context,instance,placeholder)
Permits setting of the template in the plugin instance configuration
def clean_workspace(self): if os.path.isdir(self._temp_workspace): shutil.rmtree(self._temp_workspace)
Clean up the temporary workspace if one exists
def coverage_lineplot (self): data = list() data_labels = list() if len(self.rna_seqc_norm_high_cov) > 0: data.append(self.rna_seqc_norm_high_cov) data_labels.append({'name': 'High Expressed'}) if len(self.rna_seqc_norm_medium_cov) > 0: data.append(self.rna_seqc_norm_medium_cov) data_labels.append({'name': 'Medium Expressed'}) if len(self.rna_seqc_norm_low_cov) > 0: data.append(self.rna_seqc_norm_low_cov) data_labels.append({'name': 'Low Expressed'}) pconfig = { 'id': 'rna_seqc_mean_coverage_plot', 'title': 'RNA-SeQC: Gene Body Coverage', 'ylab': '% Coverage', 'xlab': "Gene Body Percentile (5' -> 3')", 'xmin': 0, 'xmax': 100, 'tt_label': "<strong>{point.x}% from 5'</strong>: {point.y:.2f}", 'data_labels': data_labels } if len(data) > 0: self.add_section ( name = 'Gene Body Coverage', anchor = 'rseqc-rna_seqc_mean_coverage', helptext = 'The metrics are calculated across the transcripts with tiered expression levels.', plot = linegraph.plot(data, pconfig) )
Make HTML for coverage line plots
def nearest(self, idx):
    """Return datetime of record whose datetime is nearest idx.

    Falls back to whichever neighbour exists when ``idx`` lies before
    the first or after the last record; an exact tie goes to the
    earlier record.
    """
    above = self.after(idx)
    below = self.before(idx)
    if above is None:
        return below
    if below is None:
        return above
    return above if abs(above - idx) < abs(below - idx) else below
Return datetime of record whose datetime is nearest idx.
def Construct(self): self.gdml_parser.Read(self.filename) self.world = self.gdml_parser.GetWorldVolume() self.log.info("Materials:") self.log.info(G4.G4Material.GetMaterialTable()) return self.world
Construct a cuboid from a GDML file without sensitive detector
def ignored_regions(source):
    """Return ignored regions like strings and comments in `source`.

    Each region is a ``(start, end)`` offset pair taken from the
    module-level ``_str`` pattern's matches.
    """
    return [match.span() for match in _str.finditer(source)]
Return ignored regions like strings and comments in `source`
def timestamp(self): if self.conf.time_source == CTIME: return os.path.getctime(self.path) return email.utils.mktime_tz(email.utils.parsedate_tz( self.message.get('Date')))
Compute the normalized canonical timestamp of the mail.
def initialize_dag(self, targets: Optional[List[str]] = [], nested: bool = False) -> SoS_DAG: self.reset_dict() dag = SoS_DAG(name=self.md5) targets = sos_targets(targets) self.add_forward_workflow(dag, self.workflow.sections) if self.resolve_dangling_targets(dag, targets) == 0: if targets: raise UnknownTarget(f'No step to generate target {targets}.') dag.build() if targets: dag = dag.subgraph_from(targets) cycle = dag.circular_dependencies() if cycle: raise RuntimeError( f'Circular dependency detected {cycle}. It is likely a later step produces input of a previous step.' ) dag.save(env.config['output_dag']) return dag
Create a DAG by analyzing sections statically.
def make_gym_env(env_id, num_env=2, seed=123, wrapper_kwargs=None, start_index=0): if wrapper_kwargs is None: wrapper_kwargs = {} def make_env(rank): def _thunk(): env = gym.make(env_id) env.seed(seed + rank) return env return _thunk set_global_seeds(seed) return SubprocVecEnv([make_env(i + start_index) for i in range(num_env)])
Create a wrapped, SubprocVecEnv for Gym Environments.
def cast_column(self, keys, func):
    """Like map column but applies values inplace.

    ``keys`` may be a single key or an iterable of keys; every value in
    each named column is replaced by ``func(value)``.
    """
    import utool as ut
    for column in ut.ensure_iterable(keys):
        self[column] = [func(value) for value in self[column]]
like map column but applies values inplace
def deleted_records(endpoint): @utils.for_each_value def _deleted_records(self, key, value): deleted_recid = maybe_int(value.get('a')) if deleted_recid: return get_record_ref(deleted_recid, endpoint) return _deleted_records
Populate the ``deleted_records`` key.
def delay_or_fail(self, *args, **kwargs):
    """Wrap async_or_fail with a convenience signature like delay.

    All positional and keyword arguments are forwarded as the ``args``
    and ``kwargs`` payloads of ``async_or_fail``.
    """
    payload = {'args': args, 'kwargs': kwargs}
    return self.async_or_fail(**payload)
Wrap async_or_fail with a convenience signature like delay.
def _classify_load_constant(self, regs_init, regs_fini, mem_fini, written_regs, read_regs): matches = [] for dst_reg, dst_val in regs_fini.items(): if dst_reg not in written_regs: continue if dst_val == regs_init[dst_reg]: continue dst_val_ir = ReilImmediateOperand(dst_val, self._arch_regs_size[dst_reg]) dst_reg_ir = ReilRegisterOperand(dst_reg, self._arch_regs_size[dst_reg]) matches.append({ "src": [dst_val_ir], "dst": [dst_reg_ir] }) return matches
Classify load-constant gadgets.
def del_module(self, module): rev = util.get_latest_revision(module) del self.modules[(module.arg, rev)]
Remove a module from the context
def fix_e731(self, result): (line_index, _, target) = get_index_offset_contents(result, self.source) match = LAMBDA_REGEX.search(target) if match: end = match.end() self.source[line_index] = '{}def {}({}): return {}'.format( target[:match.start(0)], match.group(1), match.group(2), target[end:].lstrip())
Fix do not assign a lambda expression check.
def pypi( click_ctx, requirements, index=None, python_version=3, exclude_packages=None, output=None, subgraph_check_api=None, no_transitive=True, no_pretty=False, ): requirements = [requirement.strip() for requirement in requirements.split("\\n") if requirement] if not requirements: _LOG.error("No requirements specified, exiting") sys.exit(1) if not subgraph_check_api: _LOG.info( "No subgraph check API provided, no queries will be done for dependency subgraphs that should be avoided" ) result = resolve_python( requirements, index_urls=index.split(",") if index else ("https://pypi.org/simple",), python_version=int(python_version), transitive=not no_transitive, exclude_packages=set(map(str.strip, (exclude_packages or "").split(","))), subgraph_check_api=subgraph_check_api, ) print_command_result( click_ctx, result, analyzer=analyzer_name, analyzer_version=analyzer_version, output=output or "-", pretty=not no_pretty, )
Manipulate with dependency requirements using PyPI.
def asDictionary(self):
    """Returns the feature as a dictionary.

    Normalizes both wrapped (``{'feature': {...}}``) and flat layouts
    to a ``{'geometry': ..., 'attributes': ...}`` dict.  Geometry is
    only included when the feature has one (``self._geom`` is set).
    """
    feat_dict = {}
    if self._geom is not None:
        if 'feature' in self._dict:
            feat_dict['geometry'] = self._dict['feature']['geometry']
        elif 'geometry' in self._dict:
            feat_dict['geometry'] = self._dict['geometry']
    if 'feature' in self._dict:
        feat_dict['attributes'] = self._dict['feature']['attributes']
    else:
        feat_dict['attributes'] = self._dict['attributes']
    # BUG FIX: the normalized dict was built and then discarded while
    # the raw ``self._dict`` was returned; return the normalized form.
    return feat_dict
returns the feature as a dictionary
def greedy_trails(subg, odds, verbose): if verbose: print('\tCreating edge map') edges = defaultdict(list) for x,y in subg.edges(): edges[x].append(y) edges[y].append(x) if verbose: print('\tSelecting trails') trails = [] for x in subg.nodes(): if verbose > 2: print('\t\tNode {0}'.format(x)) while len(edges[x]) > 0: y = edges[x][0] trail = [(x,y)] edges[x].remove(y) edges[y].remove(x) while len(edges[y]) > 0: x = y y = edges[y][0] trail.append((x,y)) edges[x].remove(y) edges[y].remove(x) trails.append(trail) return trails
Greedily select trails by making the longest you can until the end
def _clone(self): instance = super(Bungiesearch, self)._clone() instance._raw_results_only = self._raw_results_only return instance
Must clone additional fields to those cloned by elasticsearch-dsl-py.
def mutate(self, p_mutate):
    """Check each element for mutation, swapping "0" for "1" and vice-versa.

    Each bit of ``self.dna`` is independently flipped with probability
    ``p_mutate``; the mutated string is written back to ``self.dna``.
    """
    self.dna = ''.join(
        ('1' if bit == '0' else '0') if random.random() < p_mutate else bit
        for bit in self.dna
    )
Check each element for mutation, swapping "0" for "1" and vice-versa.
def _get_config(self, unit, filename): file_contents = unit.file_contents(filename) config = configparser.ConfigParser(allow_no_value=True) config.readfp(io.StringIO(file_contents)) return config
Get a ConfigParser object for parsing a unit's config file.
def _version_newer_than(self, vers): v = self.version(self.executable()) vers_num = v[:v.index('-')] if not vers_num[0].isdigit(): return False v1 = list(map(int, vers_num.split('.'))) v2 = list(map(int, vers.split('.'))) assert len(v1) == 3 assert len(v2) == 3 if v1[0] > v2[0]: return True elif v1[0] == v2[0]: if v1[1] == v2[1]: return v1[2] >= v2[2] elif v1[1] > v2[1]: return True return False
Determine whether the version is greater than some given version
def to_gpu(*args):
    """Upload numpy arrays to GPU and return them.

    With a single argument the uploaded array is returned directly;
    with several, a generator of uploaded arrays is returned (lazy, as
    in the original behaviour).
    """
    if len(args) == 1:
        return cp.asarray(args[0])
    return (cp.asarray(arr) for arr in args)
Upload numpy arrays to GPU and return them
def del_pickled_ontology(filename): pickledfile = ONTOSPY_LOCAL_CACHE + "/" + filename + ".pickle" if os.path.isfile(pickledfile) and not GLOBAL_DISABLE_CACHE: os.remove(pickledfile) return True else: return None
try to remove a cached ontology
def xgroup_setid(self, stream, group_name, latest_id='$'): fut = self.execute(b'XGROUP', b'SETID', stream, group_name, latest_id) return wait_ok(fut)
Set the latest ID for a consumer group
def save_png_with_metadata(fig, filename, fig_kwds, kwds): from PIL import Image, PngImagePlugin fig.savefig(filename, **fig_kwds) im = Image.open(filename) meta = PngImagePlugin.PngInfo() for key in kwds: meta.add_text(str(key), str(kwds[key])) im.save(filename, "png", pnginfo=meta)
Save a matplotlib figure to a png with metadata
def stream_length(self): if self._stream_length is None: try: current_position = self.source_stream.tell() self.source_stream.seek(0, 2) self._stream_length = self.source_stream.tell() self.source_stream.seek(current_position, 0) except Exception as error: raise NotSupportedError(error) return self._stream_length
Returns the length of the source stream, determining it if not already known.
def histogram_info(self) -> dict: return { 'support_atoms': self.support_atoms, 'atom_delta': self.atom_delta, 'vmin': self.vmin, 'vmax': self.vmax, 'num_atoms': self.atoms }
Return extra information about histogram
def create_entity_class(self): entity = Entity() entity.PartitionKey = 'pk{}'.format(str(uuid.uuid4()).replace('-', '')) entity.RowKey = 'rk{}'.format(str(uuid.uuid4()).replace('-', '')) entity.age = 39 entity.large = 933311100 entity.sex = 'male' entity.married = True entity.ratio = 3.1 entity.birthday = datetime(1970, 10, 4) entity.binary = EntityProperty(EdmType.BINARY, b'xyz') entity.other = EntityProperty(EdmType.INT32, 20) entity.clsid = EntityProperty(EdmType.GUID, 'c9da6455-213d-42c9-9a79-3e9149a57833') return entity
Creates a class-based entity with fixed values, using all of the supported data types.
def objref(obj): ref = _objrefs.get(obj) if ref is None: clsname = obj.__class__.__name__.split('.')[-1] seqno = _lastids.setdefault(clsname, 1) ref = '{}-{}'.format(clsname, seqno) _objrefs[obj] = ref _lastids[clsname] += 1 return ref
Return a string that uniquely and compactly identifies an object.
def hostname_text(self): if self._hostname_text is None: self.chain.connection.log("Collecting hostname information") self._hostname_text = self.driver.get_hostname_text() if self._hostname_text: self.chain.connection.log("Hostname info collected") else: self.chain.connection.log("Hostname info not collected") return self._hostname_text
Return hostname text and collect if not collected.
def _extract_sender( message: Message, resent_dates: List[Union[str, Header]] = None ) -> str: if resent_dates: sender_header = "Resent-Sender" from_header = "Resent-From" else: sender_header = "Sender" from_header = "From" if sender_header in message: sender = message[sender_header] else: sender = message[from_header] return str(sender) if sender else ""
Extract the sender from the message object given.
def auth_request(self, url, headers, body): return self.req.post(url, headers, body=body)
Perform auth request for token.
def print_yaml(o): print(yaml.dump(o, default_flow_style=False, indent=4, encoding='utf-8'))
Pretty print an object as YAML.
def loss(loss_value): total_loss = tf.Variable(0.0, False) loss_count = tf.Variable(0, False) total_loss_update = tf.assign_add(total_loss, loss_value) loss_count_update = tf.assign_add(loss_count, 1) loss_op = total_loss / tf.cast(loss_count, tf.float32) return [total_loss_update, loss_count_update], loss_op
Calculates aggregated mean loss.
def _detect(env): QTDIR = None if not QTDIR: QTDIR = env.get('QTDIR',None) if not QTDIR: QTDIR = os.environ.get('QTDIR',None) if not QTDIR: moc = env.WhereIs('moc') if moc: QTDIR = os.path.dirname(os.path.dirname(moc)) SCons.Warnings.warn( QtdirNotFound, "Could not detect qt, using moc executable as a hint (QTDIR=%s)" % QTDIR) else: QTDIR = None SCons.Warnings.warn( QtdirNotFound, "Could not detect qt, using empty QTDIR") return QTDIR
Not really safe, but fast method to detect the QT library
def n_hot(ids, c):
    """One hot encoding by index.

    Returns a float32 vector of length ``c`` whose entries are all 0,
    except for the positions listed in ``ids``, which are set to 1.
    """
    encoded = np.zeros(c, dtype=np.float32)
    encoded[ids] = 1
    return encoded
One hot encoding by index. Returns an array of length c, where all entries are 0, except for the indices in ids, which are 1.
def draw_separators(self): total = 1 self._timeline.create_line((0, 1, self.pixel_width, 1)) for index, (category, label) in enumerate(self._category_labels.items()): height = label.winfo_reqheight() self._rows[category] = (total, total + height) total += height self._timeline.create_line((0, total, self.pixel_width, total)) pixel_height = total self._timeline.config(height=pixel_height)
Draw the lines separating the categories on the Canvas
def deploy_webconf(): deployed = [] log_dir = '/'.join([deployment_root(),'log']) if webserver_list(): if env.verbosity: print env.host,"DEPLOYING webconf:" if not exists(log_dir): run('ln -s /var/log log') if 'apache2' in get_packages(): deployed += _deploy_webconf('/etc/apache2/sites-available','django-apache-template.txt') deployed += _deploy_webconf('/etc/nginx/sites-available','nginx-template.txt') elif 'gunicorn' in get_packages(): deployed += _deploy_webconf('/etc/nginx/sites-available','nginx-gunicorn-template.txt') if not exists('/var/www/nginx-default'): sudo('mkdir /var/www/nginx-default') upload_template('woven/maintenance.html','/var/www/nginx-default/maintenance.html',use_sudo=True) sudo('chmod ugo+r /var/www/nginx-default/maintenance.html') else: print env.host, return deployed
Deploy nginx and other wsgi server site configurations to the host
def on_retry(self, exc, task_id, args, kwargs, einfo): super(LoggedTask, self).on_retry(exc, task_id, args, kwargs, einfo) log.warning('[{}] retried due to {}'.format(task_id, getattr(einfo, 'traceback', None)))
Capture the exception that caused the task to be retried, if any.
def create_update_symlink(self, link_destination, remote_path): try: self.sftp.remove(remote_path) except IOError: pass finally: try: self.sftp.symlink(link_destination, remote_path) except OSError as e: self.logger.error("error while symlinking {} to {}: {}".format( remote_path, link_destination, e))
Create a new link pointing to link_destination in remote_path position.
def contribute_to_class(self, cls, name, virtual_only=False): super(RegexField, self).contribute_to_class(cls, name, virtual_only) setattr(cls, name, CastOnAssignDescriptor(self))
Cast to the correct value on every assignment.
def active_plan_summary(self): return self.active().values("plan").order_by().annotate(count=models.Count("plan"))
Return active Subscriptions with plan counts annotated.
def feature_analysis(fname="feature_analysis.png"): _, axes = plt.subplots(ncols=2, figsize=(18,6)) data = load_occupancy(split=False) oz = RadViz(ax=axes[0], classes=["unoccupied", "occupied"]) oz.fit(data.X, data.y) oz.finalize() data = load_concrete(split=False) oz = Rank2D(ax=axes[1]) oz.fit_transform(data.X, data.y) oz.finalize() path = os.path.join(FIGURES, fname) plt.tight_layout() plt.savefig(path)
Create figures for feature analysis
def list_tables(refresh=False, cache_file=None): if not cache_file: cache_file = os.path.join(str(Path.home()), ".bcdata") if refresh or check_cache(cache_file): wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0") bcdata_objects = [i.strip("pub:") for i in list(wfs.contents)] with open(cache_file, "w") as outfile: json.dump(sorted(bcdata_objects), outfile) else: with open(cache_file, "r") as infile: bcdata_objects = json.load(infile) return bcdata_objects
Return a list of all datasets available via WFS
def resources_assigned(self) -> List[Resource]: resources_str = DB.get_hash_value(self.key, 'resources_assigned') resources_assigned = [] for resource in ast.literal_eval(resources_str): resources_assigned.append(Resource(resource)) return resources_assigned
Return list of resources assigned to the PB.
def _create_websession(self): from socket import AF_INET from aiohttp import ClientTimeout, TCPConnector _LOGGER.debug('Creating web session') conn = TCPConnector( family=AF_INET, limit_per_host=5, enable_cleanup_closed=True, ) session_timeout = ClientTimeout(connect=10) self._websession = ClientSession(connector=conn, timeout=session_timeout) self._supplied_websession = False
Create a web session.
def Logger(name, **kargs): path_dirs = PathDirs(**kargs) logging.captureWarnings(True) logger = logging.getLogger(name) logger.setLevel(logging.INFO) handler = logging.handlers.WatchedFileHandler(os.path.join( path_dirs.meta_dir, 'vent.log')) handler.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s - %(name)s:%(lineno)-4d - ' '%(levelname)s - %(message)s') handler.setFormatter(formatter) if not len(logger.handlers): logger.addHandler(handler) return logger
Create and return logger
def import_it(cls): if not cls in cls._FEATURES: try: cls._FEATURES[cls] = cls._import_it() except ImportError: raise cls.Error(cls._import_error_message(), cls.Error.UNSATISFIED_IMPORT_REQ) return cls._FEATURES[cls]
Performs the import only once.
def copy_file(filename): print("Updating file %s" % filename) out_dir = os.path.abspath(DIRECTORY) tags = filename[:-4].split("-") tags[-2] = tags[-2].replace("m", "") new_name = "-".join(tags) + ".whl" wheel_flag = "-".join(tags[2:]) with InWheelCtx(os.path.join(DIRECTORY, filename)) as ctx: info_fname = os.path.join(_dist_info_dir(ctx.path), 'WHEEL') infos = pkginfo.read_pkg_info(info_fname) print("Changing Tag %s to %s" % (infos["Tag"], wheel_flag)) del infos['Tag'] infos.add_header('Tag', wheel_flag) pkginfo.write_pkg_info(info_fname, infos) ctx.out_wheel = os.path.join(out_dir, new_name) print("Saving new wheel into %s" % ctx.out_wheel)
Copy the file and put the correct tag
def format_call(self, api_version, api_call): api_call = api_call.lstrip('/') api_call = api_call.rstrip('?') logger.debug('api_call post strip =\n%s' % api_call) if (api_version == 2 and api_call[-1] != '/'): logger.debug('Adding "/" to api_call.') api_call += '/' if api_call in self.api_methods_with_trailing_slash[api_version]: logger.debug('Adding "/" to api_call.') api_call += '/' return api_call
Return properly formatted QualysGuard API call according to api_version etiquette.
def _get_dL2L(self, imt_per): if imt_per < 0.18: dL2L = -0.06 elif 0.18 <= imt_per < 0.35: dL2L = self._interp_function(0.12, -0.06, 0.35, 0.18, imt_per) elif 0.35 <= imt_per <= 10: dL2L = self._interp_function(0.65, 0.12, 10, 0.35, imt_per) else: dL2L = 0 return dL2L
Table 3 and equation 19 of 2013 report.
def _channel_exists_and_not_settled( self, participant1: Address, participant2: Address, block_identifier: BlockSpecification, channel_identifier: ChannelID = None, ) -> bool: try: channel_state = self._get_channel_state( participant1=participant1, participant2=participant2, block_identifier=block_identifier, channel_identifier=channel_identifier, ) except RaidenRecoverableError: return False exists_and_not_settled = ( channel_state > ChannelState.NONEXISTENT and channel_state < ChannelState.SETTLED ) return exists_and_not_settled
Returns if the channel exists and is in a non-settled state
def mail_json(self): if self.mail.get("date"): self._mail["date"] = self.date.isoformat() return json.dumps(self.mail, ensure_ascii=False, indent=2)
Return the parsed mail serialized as JSON.
def GetParserFromFilename(self, path): handler_name = path.split("://")[0] for parser_cls in itervalues(GRRConfigParser.classes): if parser_cls.name == handler_name: return parser_cls extension = os.path.splitext(path)[1] if extension in [".yaml", ".yml"]: return YamlParser return ConfigFileParser
Returns the appropriate parser class from the filename.
def issuperset(self, other): self._binary_sanity_check(other) return set.issuperset(self, other)
Report whether this RangeSet contains another set.
def kvlclient(self): if self._kvlclient is None: self._kvlclient = kvlayer.client() return self._kvlclient
Return a thread local ``kvlayer`` client.
def create_untl_xml_subelement(parent, element, prefix=''):
    """Create a UNTL XML subelement.

    Appends a child of ``parent`` named ``prefix + element.tag``,
    carrying the element's content as text and its qualifier (when
    set) as a ``qualifier`` attribute.  Each UNTL child becomes a
    nested subelement holding its own content.

    Returns the created subelement.
    """
    subelement = SubElement(parent, prefix + element.tag)
    if element.content is not None:
        subelement.text = element.content
    if element.qualifier is not None:
        subelement.attrib["qualifier"] = element.qualifier
    # BUG FIX: ``element.children > 0`` compared a list to an int,
    # which raises TypeError on Python 3; test the list's truthiness.
    # The old else branch re-assigned the same text and is dropped.
    if element.children:
        for child in element.children:
            SubElement(subelement, prefix + child.tag).text = child.content
    return subelement
Create a UNTL XML subelement.
def save_db(self): with self.db_mutex: if not isinstance(self.db, dict) and not isinstance(self.db, list): return False try: with open(self.json_db_path, "w") as fp: json.dump(self.db, fp, indent=4) except Exception as e: _logger.debug("*** Write JSON DB to file error.") raise e else: self.sync() return True
" Save json db to file system.
def s2time(secs, show_secs=True, show_fracs=True):
    """Converts seconds to a clock-style time string.

    Formats ``secs`` as ``HH:MM`` (hours wrap at 24), optionally
    appending ``:SS`` seconds and ``.CC`` centiseconds.  Returns
    ``"--:--:--.--"`` when ``secs`` cannot be converted to a float.
    """
    try:
        secs = float(secs)
    except (TypeError, ValueError):
        # BUG FIX: a bare ``except`` also swallowed KeyboardInterrupt
        # and SystemExit; only conversion failures should yield the
        # placeholder string.
        return "--:--:--.--"
    wholesecs = int(secs)
    centisecs = int((secs - wholesecs) * 100)
    hh = int(wholesecs / 3600)
    hd = int(hh % 24)  # displayed hours wrap around a day
    mm = int((wholesecs / 60) - (hh * 60))
    ss = int(wholesecs - (hh * 3600) - (mm * 60))
    r = "{:02d}:{:02d}".format(hd, mm)
    if show_secs:
        r += ":{:02d}".format(ss)
    if show_fracs:
        r += ".{:02d}".format(centisecs)
    return r
Converts seconds to time
def _get_hydrated_path(field): if isinstance(field, str) and hasattr(field, 'file_name'): return field if isinstance(field, dict) and 'file' in field: hydrated_path = field['file'] if not hasattr(hydrated_path, 'file_name'): raise TypeError("Filter argument must be a valid file-type field.") return hydrated_path
Return HydratedPath object for file-type field.
def selector(C, style): clas = C.classname(style.name) if style.type == 'paragraph': outlineLvl = int((style.properties.get('outlineLvl') or {}).get('val') or 8) + 1 if outlineLvl < 9: tag = 'h%d' % outlineLvl else: tag = 'p' elif style.type == 'character': tag = 'span' elif style.type == 'table': tag = 'table' elif style.type == 'numbering': tag = 'ol' return "%s.%s" % (tag, clas)
return the selector for the given stylemap style
def sam_readline(sock, partial = None): response = b'' exception = None while True: try: c = sock.recv(1) if not c: raise EOFError('SAM connection died. Partial response %r %r' % (partial, response)) elif c == b'\n': break else: response += c except (BlockingIOError, pysocket.timeout) as e: if partial is None: raise e else: exception = e break if partial is None: return response.decode('ascii') else: return (partial + response.decode('ascii'), exception)
read a line from a sam control socket
def split(src, chunksize=MINWEIGHT): for i, block in enumerate(block_splitter(src.iter_ruptures(), chunksize, key=operator.attrgetter('mag'))): rup = block[0] source_id = '%s:%d' % (src.source_id, i) amfd = mfd.ArbitraryMFD([rup.mag], [rup.mag_occ_rate]) rcs = RuptureCollectionSource( source_id, src.name, src.tectonic_region_type, amfd, block) yield rcs
Split a complex fault source in chunks
def find_all_runs(self, session=None): with self._session(session) as session: return session.query(TaskRecord).all()
Return all tasks that have been updated.
def sanitize_win_path(winpath):
    """Remove illegal path characters for windows.

    Each of ``<>:|?*`` is replaced with an underscore.  Unicode and
    byte-string inputs are handled through the appropriate translation
    table for the running Python version.
    """
    illegal = '<>:|?*'
    if isinstance(winpath, six.text_type):
        table = {ord(ch): '_' for ch in illegal}
        winpath = winpath.translate(table)
    elif isinstance(winpath, six.string_types):
        replacement = '_' * len(illegal)
        if six.PY3:
            table = ''.maketrans(illegal, replacement)
        else:
            table = string.maketrans(illegal, replacement)
        winpath = winpath.translate(table)
    return winpath
Remove illegal path characters for windows
def _set_conn(self): if self._tls: ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER) try: conn = ldap.initialize(self._url) conn.set_option(ldap.OPT_NETWORK_TIMEOUT, self._timeout) conn.simple_bind_s(self._binddn, self._bindpw) except Exception as e: if hasattr(e, 'message') and 'desc' in e.message: msg = e.message['desc'] else: msg = e.args[0]['desc'] log.critical(msg) raise log.debug('%s connection established' % ('LDAPS' if self._tls else 'LDAP')) self._conn = conn
Establish connection to the server
def _compress_obj(obj, level): return zlib.compress(pickle.dumps(obj, protocol=2), level)
Compress object to bytes.
def divideHosts(self, hosts, qty):
    """Distribute exactly *qty* worker processes among *hosts*.

    *hosts* is a list of ``(hostname, worker_count)`` tuples which is
    mutated in place and returned.  If total capacity is below *qty*,
    extra workers are added round-robin; if above, hosts are trimmed
    from the tail.  Raises Exception when the external hostname is a
    loopback reference with remote hosts and no tunnel configured.
    """
    capacity = sum(workers for _, workers in hosts)
    if qty > capacity:
        # Hand out one extra worker per host, round-robin, until we match.
        i = 0
        while qty > capacity:
            name, workers = hosts[i]
            hosts[i] = (name, workers + 1)
            i = (i + 1) % len(hosts)
            capacity += 1
    elif qty < capacity:
        # Drop (or shrink) hosts from the tail until capacity matches.
        while qty < capacity:
            capacity -= hosts[-1][1]
            if qty > capacity:
                # Last host keeps only the remainder.
                hosts[-1] = (hosts[-1][0], qty - capacity)
                capacity += hosts[-1][1]
            else:
                del hosts[-1]
    if self.externalHostname in utils.loopbackReferences and \
            len(hosts) > 1 and \
            not self.tunnel:
        raise Exception("\n"
                        "Could not find route from external worker to the "
                        "broker: Unresolvable hostname or IP address.\n "
                        "Please specify your externally routable hostname "
                        "or IP using the --external-hostname parameter or "
                        "use the --tunnel flag.")
    return hosts
Divide processes among hosts.
def coroutine(func):
    """Decorator implementing the coroutine pattern.

    Wraps a generator function so the generator it returns is advanced
    to its first ``yield`` ("primed"), making it immediately ready to
    receive values via ``send()``.

    The wrapped function's metadata (``__name__``, ``__doc__``) is
    preserved via ``functools.wraps`` — the original implementation
    exposed every decorated coroutine as ``__start``.
    """
    from functools import wraps  # local import: keeps the block self-contained

    @wraps(func)
    def _start(*args, **kwargs):
        gen = func(*args, **kwargs)
        next(gen)  # prime: run up to the first yield so send() works
        return gen
    return _start
Basic decorator to implement the coroutine pattern.
def house_exists(self, complex: str, house: str) -> bool:
    """Shortcut to check whether a house exists in our database.

    Delegates to ``check_house`` and maps its not-found exception to
    ``False`` instead of raising.
    """
    try:
        self.check_house(complex, house)
        return True
    except exceptions.RumetrHouseNotFound:
        return False
Shortcut to check if house exists in our database.
def user_is_submission_owner(self, submission):
    """Return True when the logged-in user owns *submission*, else False.

    Raises Exception when no user is logged in.
    """
    if not self._user_manager.session_logged_in():
        raise Exception("A user must be logged in to verify if he owns a jobid")
    current_user = self._user_manager.session_username()
    return current_user in submission["username"]
Returns True if the current user is the owner of this jobid, False otherwise.
def ConsultarLiquidacionesPorContrato(self, nro_contrato=None, cuit_comprador=None,
                                      cuit_vendedor=None, cuit_corredor=None,
                                      cod_grano=None, **kwargs):
    """Fetch the COE codes of settlements (liquidaciones) linked to a contract.

    Queries the web service with the contract number, the buyer/seller/
    broker CUITs and the grain code, stores the sorted related COEs in
    ``self.DatosLiquidacion`` when present, and returns True.
    (Original: "Obtener los COE de liquidaciones relacionadas a un contrato".)
    """
    # Call the remote service with the session credentials plus the filters.
    ret = self.client.liquidacionPorContratoConsultar(
        auth={
            'token': self.Token, 'sign': self.Sign,
            'cuit': self.Cuit, },
        nroContrato=nro_contrato,
        cuitComprador=cuit_comprador,
        cuitVendedor=cuit_vendedor,
        cuitCorredor=cuit_corredor,
        codGrano=cod_grano,
    )
    ret = ret['liqPorContratoCons']
    # Record any service-level errors reported in the response.
    self.__analizar_errores(ret)
    if 'coeRelacionados' in ret:
        self.DatosLiquidacion = sorted(ret['coeRelacionados'])
        self.LeerDatosLiquidacion()
    return True
Obtener los COE de liquidaciones relacionadas a un contrato
def delete(ctx, resource, id):
    """Delete the given device model or instance via the REST API.

    Builds the URL ``<api_url>/<resource>/<id>``, issues a DELETE, and
    echoes the response body.  Raises on any non-200 status.
    """
    # NOTE: parameter name `id` shadows the builtin but is part of the
    # CLI-facing interface, so it stays.
    session, api_url, project_id = build_client_from_context(ctx)
    target = '/'.join((api_url, resource, id))
    response = session.delete(target)
    if response.status_code != 200:
        raise failed_request_exception('failed to delete resource', response)
    click.echo(response.text)
Delete given device model or instance.
def draw(self, surf):
    """Blit every widget and recursively draw sub-containers onto *surf*.

    Does nothing while the container is hidden.
    """
    if not self.shown:
        return
    for widget in self.widgets:
        surf.blit(widget.image, self.convert_rect(widget.rect))
    for child in self.containers:
        child.draw(surf)
Draw all widgets and sub-containers to @surf.
def _parse_title(line_iter, cur_line, conf):
    """Parse one "title" stanza of a GRUB v1 config.

    Appends a list of ``(cmd, opt)`` entries — starting with
    ``('title_name', <name>)`` taken from *cur_line* — to
    ``conf['title']``, consuming lines until the next ``"title "`` line,
    which is returned as the new current line.
    """
    entries = [('title_name', cur_line.split('title', 1)[1].strip())]
    conf['title'].append(entries)
    while True:
        line = next(line_iter)
        if line.startswith("title "):
            # Next stanza begins; hand the line back to the caller.
            return line
        cmd, opt = _parse_cmd(line)
        entries.append((cmd, opt))
Parse "title" in grub v1 config
def runlist_remove(name, **kwargs):
    """Remove the named runlist from the storage service."""
    ctx = Context(**kwargs)
    action_kwargs = {
        'storage': ctx.repo.create_secure_service('storage'),
        'name': name,
    }
    ctx.execute_action('runlist:remove', **action_kwargs)
Remove runlist from the storage.
def categories(self):
    """List of the page's categories, with the "Category:" prefix stripped.

    The result is cached on ``self._categories`` after the first query.
    """
    if not getattr(self, '_categories', False):
        links = self.__continued_query({
            'prop': 'categories',
            'cllimit': 'max'
        })
        self._categories = [
            re.sub(r'^Category:', '', link['title']) for link in links
        ]
    return self._categories
List of categories of a page.
def causally_significant_nodes(cm):
    """Return indices of nodes with at least one input AND one output.

    *cm* is a square connectivity matrix; column sums give each node's
    inputs, row sums its outputs.
    """
    has_input = cm.sum(0) > 0
    has_output = cm.sum(1) > 0
    return tuple(np.nonzero(has_input & has_output)[0])
Return indices of nodes that have both inputs and outputs.
def register_db(cls, dbname):
    """Return a class decorator that registers a DB class under *dbname*.

    The decorated class gets ``name = dbname`` and is stored in
    ``cls._dbs`` keyed by that name.
    """
    def decorator(subclass):
        subclass.name = dbname
        cls._dbs[dbname] = subclass
        return subclass
    return decorator
Decorator factory that registers a database class in the registry of known dbs.
def project(self, lng_lat):
    """Project WGS84 ``(lng, lat)`` degrees to spherical-Mercator meters.

    Latitude is clamped to ``±MAX_LATITUDE`` before projection; returns
    an ``(x, y)`` tuple in meters.
    """
    lng, lat = lng_lat
    lat = min(max(lat, -MAX_LATITUDE), MAX_LATITUDE)
    x = lng * DEG_TO_RAD
    y = lat * DEG_TO_RAD
    y = log(tan((pi / 4) + (y / 2)))
    return (x * EARTH_RADIUS, y * EARTH_RADIUS)
Returns the projected coordinates in meters from WGS84 (lng, lat) degrees.
def ExtendAnomalies(self, other):
    """Merge anomalies from each non-None CheckResult in *other* into self."""
    for result in other:
        if result is None:
            continue
        self.anomaly.Extend(list(result.anomaly))
Merge anomalies from another CheckResult.