code
stringlengths
51
2.34k
docstring
stringlengths
11
171
def _image_of_size(image_size): return np.random.uniform(0, 256, [image_size, image_size, 3]).astype(np.uint8)
Generate a square RGB test image of the given side length.
def frontiers_style():
    """Configure matplotlib rcParams for Frontiers-journal figure specs."""
    inchpercm = 2.54
    frontierswidth = 8.5  # single-column figure width in cm
    textsize = 5
    titlesize = 7
    plt.rcdefaults()  # reset first so repeated calls start from a clean slate
    plt.rcParams.update({
        'figure.figsize': [frontierswidth / inchpercm, frontierswidth / inchpercm],
        'figure.dpi': 160,
        'xtick.labelsize': textsize,
        'ytick.labelsize': textsize,
        'font.size': textsize,
        'axes.labelsize': textsize,
        'axes.titlesize': titlesize,
        'axes.linewidth': 0.75,
        'lines.linewidth': 0.75,
        'legend.fontsize': textsize,
    })
    return None
Figure styles for frontiers
def _download_raw(self, url=None):
    """Download *url* (defaults to ``self.url``) and return the body as UTF-8 text.

    Uses ``self.HEADERS_PLAIN`` as request headers.
    """
    if url is None:
        url = self.url
    req = request.Request(url, headers=self.HEADERS_PLAIN)
    return request.urlopen(req).read().decode("utf8")
Download content from URL directly.
def _write(self, session, openFile, replaceParamFile): for line in self.lines: openFile.write(line.contents)
Write the Replace-Value file's lines to the given open file.
def reset_context(**options):
    """Reset the module-level TelluricContext options, replacing them with *options*."""
    local_context._options = {}
    local_context._options.update(options)
    log.debug("New TelluricContext context %r created", local_context._options)
Reset context to default.
def from_slice(cls, coord):
    """Return a value-fetching callable built from a slice of coordinate strings.

    Dispatches on which slice endpoints are present: both start and stop
    -> DoubleSlice; start only -> Start* variants; stop only -> Stop*
    variants; neither -> the default instance. Slice steps are rejected.
    """
    if coord.step is not None:
        raise NotImplementedError('no slice step support')
    elif coord.start is not None and coord.stop is not None:
        return DoubleSlice.from_slice(coord)
    elif coord.start is not None:
        # _parse yields (xcol, xrow, col, row); xcol/xrow non-None means a full cell ref.
        xcol, xrow, col, row = cls._parse(coord.start)
        if xcol is not None:
            return StartCell(cls._cint(xcol), cls._rint(xrow))
        elif col is not None:
            return StartCol(cls._cint(col))
        return StartRow(cls._rint(row))
    elif coord.stop is not None:
        xcol, xrow, col, row = cls._parse(coord.stop)
        if xcol is not None:
            # +1 converts the inclusive stop reference into an exclusive bound.
            return StopCell(cls._cint(xcol) + 1, cls._rint(xrow) + 1)
        elif col is not None:
            return StopCol(cls._cint(col) + 1)
        return StopRow(cls._rint(row) + 1)
    return cls()
Return a value fetching callable given a slice of coordinate strings.
def check_port(helper, port):
    """Exit through *helper* with UNKNOWN state unless *port* parses as an integer."""
    try:
        int(port)
    except ValueError:
        helper.exit(
            summary="Port (-p) must be a integer value.",
            exit_code=unknown,
            perfdata='',
        )
check if the port parameter is really a port or "scan"
def CheckRepeatLogic(filename, linenumber, clean_lines, errors):
    """Report expressions repeated inside logic commands (else, endif, ...).

    Emits a 'readability/logic' error through *errors* for the first logic
    command found on the line whose argument repeats the expression.
    """
    line = clean_lines.lines[linenumber]
    for cmd in _logic_commands:
        if re.search(r'\b%s\b'%cmd, line.lower()):
            m = _RE_LOGIC_CHECK.search(line)
            if m:
                errors(filename, linenumber, 'readability/logic',
                       'Expression repeated inside %s; '
                       'better to use only %s()'%(cmd, m.group(1)))
            break
Check for logic inside else, endif etc
def process(self, document):
    """Collect versions of the tools the given pipeline document requires.

    Spline and Bash versions are always recorded; Docker, Packer and
    Ansible are probed only when the serialized document references the
    matching task types.
    """
    content = json.dumps(document)
    versions = {}
    versions.update({'Spline': Version(VERSION)})
    versions.update(self.get_version("Bash", self.BASH_VERSION))
    # NOTE(review): the probes below call VersionsCheck.get_version while Bash
    # uses self.get_version -- confirm the asymmetry is intentional.
    if content.find('"docker(container)":') >= 0 or content.find('"docker(image)":') >= 0:
        versions.update(VersionsCheck.get_version("Docker", self.DOCKER_VERSION))
    if content.find('"packer":') >= 0:
        versions.update(VersionsCheck.get_version("Packer", self.PACKER_VERSION))
    if content.find('"ansible(simple)":') >= 0:
        versions.update(VersionsCheck.get_version('Ansible', self.ANSIBLE_VERSION))
    return versions
Logging versions of required tools.
def AddShadow(self, fileset):
    """Parse the /etc/shadow entry of *fileset* into the shadow store, if present."""
    shadow = fileset.get("/etc/shadow")
    if shadow:
        self._ParseFile(shadow, self.ParseShadowEntry)
    else:
        logging.debug("No /etc/shadow file.")
Add the shadow entries to the shadow store.
def make_plots(self, prefix, mcube_map=None, **kwargs):
    """Make diagnostic plots for the current ROI model.

    *prefix* prefixes output file names; *mcube_map* defaults to the
    current model counts map. Extra kwargs are forwarded to the plotter.
    """
    if mcube_map is None:
        mcube_map = self.model_counts_map()
    plotter = plotting.AnalysisPlotter(self.config['plotting'],
                                       fileio=self.config['fileio'],
                                       logging=self.config['logging'])
    plotter.run(self, mcube_map, prefix=prefix, **kwargs)
Make diagnostic plots using the current ROI model.
def latex_to_img(tex):
    """Render LaTeX source *tex* via latex+dvipng and return it as a pygame image.

    NOTE(review): paths use backslashes and installer-specific latex flags,
    so this appears Windows/MiKTeX-only -- confirm before reusing elsewhere.
    """
    with tempfile.TemporaryDirectory() as tmpdirname:
        with open(tmpdirname + r'\tex.tex', 'w') as f:
            f.write(tex)
        os.system(r"latex {0}\tex.tex -halt-on-error -interaction=batchmode -disable-installer -aux-directory={0} "
                  r"-output-directory={0}".format(tmpdirname))
        os.system(r"dvipng -T tight -z 9 --truecolor -o {0}\tex.png {0}\tex.dvi".format(tmpdirname))
        image = pygame.image.load(tmpdirname + r'\tex.png')
        return image
Return a pygame image from a latex template.
def cli_main():
    """CLI entry point: parse arguments, run MQ2, and return a process exit code."""
    if '--debug' in sys.argv:
        LOG.setLevel(logging.DEBUG)
    elif '--verbose' in sys.argv:
        LOG.setLevel(logging.INFO)
    args = _get_arguments()
    try:
        plugin, folder = get_plugin_and_folder(inputzip=args.inputzip,
                                               inputdir=args.inputdir,
                                               inputfile=args.inputfile)
        LOG.debug('Plugin: %s -- Folder: %s' % (plugin.name, folder))
        run_mq2(plugin, folder, lod_threshold=args.lod, session=args.session)
    except MQ2Exception as err:
        # Known domain errors are reported to the user, not re-raised.
        print(err)
        return 1
    return 0
Main function when running from CLI.
def save(self, filename, content):
    """Write *content* to *filename*.

    If *content* is iterable (the default case: a list of lines) the items
    are joined with newlines; otherwise its string representation is
    written. Fixed: removed a leftover debug print that polluted stdout.
    """
    with open(filename, "w") as f:
        if hasattr(content, '__iter__'):
            f.write('\n'.join([row for row in content]))
        else:
            f.write(str(content))
By default, saves a file from a list of lines; non-iterable content is written as its string representation.
def exec_cmd(self, command, **kwargs):
    """Validate *command* and its parameters, then dispatch to ``_exec_cmd``.

    Wrapper hook intended to be overridden by inheriting classes.
    """
    self._is_allowed_command(command)
    self._check_command_parameters(**kwargs)
    return self._exec_cmd(command, **kwargs)
Wrapper method that can be changed in the inheriting classes.
def entropy(string):
    """Return the Shannon entropy (bits per character) of *string*."""
    total = float(len(string))
    frequencies = Counter(string)
    return -sum((n / total) * math.log(n / total, 2)
                for n in frequencies.values())
Compute entropy on the string
def assemble(self):
    """Gather all distributed blocks (fetched via ray) into one local ndarray."""
    first_block = ray.get(self.objectids[(0, ) * self.ndim])
    dtype = first_block.dtype  # all blocks share the first block's dtype
    result = np.zeros(self.shape, dtype=dtype)
    for index in np.ndindex(*self.num_blocks):
        lower = DistArray.compute_block_lower(index, self.shape)
        upper = DistArray.compute_block_upper(index, self.shape)
        result[[slice(l, u) for (l, u) in zip(lower, upper)]] = ray.get(
            self.objectids[index])
    return result
Assemble an array from a distributed array of object IDs.
def isRunning(self, waitTime=0):
    """Poll for up to *waitTime* seconds for a window matching our title.

    Returns True once a PID is known; caches the discovered PID on
    ``self._pid`` as a side effect.
    """
    waitUntil = time.time() + waitTime
    while True:
        if self.getPID() > 0:
            return True
        else:
            # Title is escaped so it is matched literally, not as a regex.
            self._pid = PlatformManager.getWindowPID(PlatformManager.getWindowByTitle(re.escape(self._title)))
        if time.time() > waitUntil:
            break
        else:
            time.sleep(self._defaultScanRate)
    return self.getPID() > 0
If PID isn't set yet, checks if there is a window with the specified title.
def capture_pywarnings(handler):
    """Route Python system warnings (the ``py.warnings`` logger) through *handler*.

    Does nothing when a handler of the same class is already attached, so
    repeated calls never duplicate output.
    """
    warn_logger = logging.getLogger('py.warnings')
    already_attached = any(isinstance(existing, handler.__class__)
                           for existing in warn_logger.handlers)
    if already_attached:
        return
    warn_logger.addHandler(handler)
    warn_logger.setLevel(logging.WARNING)
Log python system warnings.
def updateTraceCount(self):
    """Refresh the trace-count label from the track view's model."""
    self.ui.ntracesLbl.setNum(self.ui.trackview.model().traceCount())
Updates the trace count label with the data from the model
def store(self, store_item):
    """Validate and upsert a tweet/user dict into the collection.

    Requires 'type' (str) and 'timestamp' (float) keys; raises TypeError
    or AttributeError when the payload does not conform.
    """
    required_keys = {"type": str, "timestamp": float}
    if not isinstance(store_item, dict):
        raise TypeError("The stored item should be a dict")
    for k, v in required_keys.items():
        if k not in store_item:
            raise AttributeError("{} is not available. Please add it.".format(k))
        if not isinstance(store_item[k], v):
            raise TypeError("{} is not a {}. Please change it. ".format(k, v))
    self._arctic_lib.check_quota()
    # Upsert keyed on the full document, so identical items are stored once.
    self._collection.update(store_item, store_item, upsert=True)
Store for tweets and user information. Must have all required information and types
def column_keymap(self):
    """Return ``(keymap_dict, keystates)`` for column mode.

    Multi-key combos contribute every proper prefix to *keystates*; the
    'pri' action additionally expands into one binding per lowercase
    priority letter.
    """
    keystates = set()
    shortcuts = self.cp.items('column_keymap')
    keymap_dict = dict(shortcuts)
    for combo, action in shortcuts:
        # Split the combo into individual keys or <Key> tokens.
        combo_as_list = re.split('(<[A-Z].+?>|.)', combo)[1::2]
        if len(combo_as_list) > 1:
            keystates |= set(accumulate(combo_as_list[:-1]))
        if action in ['pri', 'postpone', 'postpone_s']:
            keystates.add(combo)
        if action == 'pri':
            for c in ascii_lowercase:
                # NOTE(review): the literal '{}' placeholder is kept in the
                # command string -- confirm downstream formats it.
                keymap_dict[combo + c] = 'cmd pri {} ' + c
    return (keymap_dict, keystates)
Returns keymap and keystates used in column mode
def synchronize (lock, func, log_duration_secs=0):
    """Return a wrapper that runs *func* while holding *lock*.

    When acquiring the lock takes longer than *log_duration_secs* (> 0),
    a warning is printed to stderr.
    """
    def newfunc (*args, **kwargs):
        t = time.time()
        with lock:
            # Measures the time spent waiting to acquire the lock.
            duration = time.time() - t
            if duration > log_duration_secs > 0:
                print("WARN:", func.__name__, "locking took %0.2f seconds" % duration, file=sys.stderr)
            return func(*args, **kwargs)
    return update_func_meta(newfunc, func)
Return synchronized function acquiring the given lock.
def _basic_field_data(field, obj):
    """Return ``obj.field`` as a ``{type: VAL, value: ...}`` dict."""
    raw_value = field.value_from_object(obj)
    return {Field.TYPE: FieldType.VAL, Field.VALUE: raw_value}
Returns ``obj.field`` data as a dict
def _generate_detailed_table(data):
    """Generate the HTML table detailing Picard SAM/BAM validation findings.

    Lazily adds a column per observed warning/error code and un-hides the
    aggregate count columns once any such code appears in *data*.
    """
    headers = _get_general_stats_headers()
    for problems in data.values():
        for problem in problems:
            if problem not in headers and problem in WARNING_DESCRIPTIONS:
                headers['WARNING_count']['hidden'] = False
                headers[problem] = {
                    'description': WARNING_DESCRIPTIONS[problem],
                    'namespace': 'WARNING',
                    'scale': headers['WARNING_count']['scale'],
                    'format': '{:.0f}',
                    'shared_key': 'warnings',
                    'hidden': True,
                }
            if problem not in headers and problem in ERROR_DESCRIPTIONS:
                headers['ERROR_count']['hidden'] = False
                headers[problem] = {
                    'description': ERROR_DESCRIPTIONS[problem],
                    'namespace': 'ERROR',
                    'scale': headers['ERROR_count']['scale'],
                    'format': '{:.0f}',
                    'shared_key': 'errors',
                    'hidden': True,
                }
    table_config = {
        'table_title': 'Picard: SAM/BAM File Validation',
    }
    return table.plot(data=data, headers=headers, pconfig=table_config)
Generates and returns the HTML table that overviews the details found.
def move_towards(self, other, beta, alpha):
    """Move this firefly toward *other*: attraction scaled by *beta*, random
    jitter scaled by *alpha*, then clamp the position to the bounds."""
    self.position += beta * (other.position - self.position)
    jitter = np.random.uniform(-0.5, 0.5, len(self.position))
    self.position += alpha * jitter
    upper = [b[1] for b in self.bounds]
    lower = [b[0] for b in self.bounds]
    self.position = np.minimum(self.position, upper)
    self.position = np.maximum(self.position, lower)
Move firefly towards another given beta and alpha values
def create(cls, parent=None, **kwargs):
    """Create a child resource under *parent* via POST and return its wrapper.

    Raises when *parent* is missing. Rate-limit pacing is applied around
    the request; non-truthy status codes are delegated to the exception
    handler.
    """
    if parent is None:
        raise Exception("Parent class is required")
    route = copy(parent.route)
    if cls.ID_NAME is not None:
        route[cls.ID_NAME] = ""
    obj = cls(key=parent.key, route=route, config=parent.config)
    start = datetime.now()
    response = requests.post(obj._url(), auth=(obj.key, ""), data=kwargs)
    cls._delay_for_ratelimits(start)
    if response.status_code not in cls.TRUTHY_CODES:
        return cls._handle_request_exception(response)
    data = response.json()
    # Prefer the generic "id" key, falling back to the resource-specific one.
    obj.route[obj.ID_NAME] = data.get("id", data.get(obj.ID_NAME))
    obj.data = data
    return obj
Create an object and return it
def omit_loglevel(self, msg) -> bool:
    """Return True when *msg*'s status is below the configured minimum log level."""
    return self.loglevels and (
        self.loglevels[0] > fontbakery.checkrunner.Status(msg)
    )
Determine if message is below log level.
def _clean_ctx(self):
    """Free the OpenSSL digest context (if any) and reset the digest state."""
    try:
        if self.ctx is not None:
            libcrypto.EVP_MD_CTX_free(self.ctx)
        del self.ctx
    except AttributeError:
        # ctx was never created; nothing to free.
        pass
    self.digest_out = None
    self.digest_finalized = False
Clears and deallocates context
def save_partial(self, data=None, allow_protected_fields=False, **kwargs):
    """Persist only the currently set fields via a ``$set`` update.

    Requires ``_id`` to be present; protected fields are rejected unless
    *allow_protected_fields* is True. The in-memory document is refreshed
    to mirror the database change afterwards.
    """
    if "dotnotation" in kwargs:
        del kwargs["dotnotation"]
    if data is None:
        data = dotdict(self)
    if "_id" not in data:
        raise KeyError("_id must be set in order to do a save_partial()")
    del data["_id"]
    if len(data) == 0:
        return
    if not allow_protected_fields:
        self.mongokat_collection._check_protected_fields(data)
    apply_on = dotdict(self)
    self._initialized_with_doc = False
    self.mongokat_collection.update_one({"_id": self["_id"]}, {"$set": data}, **kwargs)
    # Mirror the database change onto this in-memory document.
    for k, v in data.items():
        apply_on[k] = v
    self.update(dict(apply_on))
Saves just the currently set fields in the database.
def parse(self):
    """Parse ``self.method`` and return its unique, non-empty decorator strings."""
    self._parse(self.method)
    unique = {deco for deco in self.decos if deco}
    return list(unique)
Return the list of string of all the decorators found
def keys(self):
    "Returns a list of ConfigMap keys across all typed maps."
    typed_maps = (self._pb.IntMap, self._pb.StringMap,
                  self._pb.FloatMap, self._pb.BoolMap)
    all_keys = []
    for typed_map in typed_maps:
        all_keys.extend(typed_map.keys())
    return all_keys
Returns a list of ConfigMap keys.
def make_list_threads_message(self, py_db, seq):
    """Return a NetCommand listing live (non-pydevd) threads as XML.

    Any failure is converted into an error message command rather than
    propagated.
    """
    try:
        threads = get_non_pydevd_threads()
        cmd_text = ["<xml>"]
        append = cmd_text.append  # local alias for the hot loop
        for thread in threads:
            if is_thread_alive(thread):
                append(self._thread_to_xml(thread))
        append("</xml>")
        return NetCommand(CMD_RETURN, seq, ''.join(cmd_text))
    except:
        return self.make_error_message(seq, get_exception_traceback_str())
returns thread listing as XML
def _set_time(self, m): m._timestamp = self.timestamp if len(m._fieldnames) > 0 and self.clock is not None: self.clock.set_message_timestamp(m)
set time for a message
def check_errors(self, data):
    """Raise on an error payload; return False when *data* reports no failure.

    Raises PyBossaServerNoKeysetPagination when the failure mentions
    ``last_id`` (keyset pagination unsupported), Error for any other
    failed status. Fixed: uses ``isinstance`` instead of the fragile
    ``type(data) == dict`` comparison, and drops the redundant ``.keys()``.
    """
    if (isinstance(data, dict) and 'status' in data
            and data['status'] == 'failed'):
        if data.get('exception_msg') and 'last_id' in data.get('exception_msg'):
            raise PyBossaServerNoKeysetPagination
        else:
            raise Error(data)
    return False
Check for errors on data payload.
def validate(self, value) :
    """Run every registered validator against *value*; return True if none raise."""
    for validator in self.validators :
        validator.validate(value)
    return True
Checks the validity of 'value' against the list of validators.
def getLocalIPaddress():
    """Return this machine's LAN-visible IP address, or None if undeterminable.

    "Connecting" a UDP socket sends no packets but makes the OS pick the
    outbound interface, whose address we read back. Fixed: the socket is
    now always closed (the original leaked it).
    """
    s = None
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(('google.com', 0))
        return s.getsockname()[0]
    except Exception:
        return None
    finally:
        if s is not None:
            s.close()
visible to other machines on LAN
def _includes_base_class(self, iter_classes, base_class): return any( issubclass(auth_class, base_class) for auth_class in iter_classes, )
Returns whether any class in iter_class is a subclass of the given base_class.
def none(self):
    """Return an EmptyQuerySet bound to the same model and connection."""
    return EmptyQuerySet(model=self.model, using=self._using, connection=self._connection)
Returns an empty QuerySet.
def profile_path(cls, project, tenant, profile):
    """Return a fully-qualified profile resource path string."""
    return google.api_core.path_template.expand(
        "projects/{project}/tenants/{tenant}/profiles/{profile}",
        project=project,
        tenant=tenant,
        profile=profile,
    )
Return a fully-qualified profile string.
def results(self):
    """Return (word, similarity) tuples sorted by Levenshtein ratio, best first.

    Raises NoResultException when the haystack is empty.
    """
    d = dict()
    words = [word.strip() for word in self.haystack]
    if not words:
        raise NoResultException('No similar word found.')
    for w in words:
        d[w] = Levenshtein.ratio(self.needle, w)
    return sorted(d.items(), key=operator.itemgetter(1), reverse=True)
Returns a list of tuple, ordered by similarity.
def _get_id2children(id2children, item_id, item_obj): if item_id in id2children: return id2children[item_id] child_ids = set() for child_obj in item_obj.children: child_id = child_obj.item_id child_ids.add(child_id) child_ids |= _get_id2children(id2children, child_id, child_obj) id2children[item_id] = child_ids return child_ids
Add the child item IDs for one item object and their children.
def _scale_tdb_minus_tt(self, mjd, eop):
    """Return TDB - TT in seconds for the given Modified Julian Date.

    *eop* is unused here but kept for interface parity with sibling
    scale methods. The constants follow the conventional trigonometric
    approximation of the TDB-TT difference.
    """
    jd = mjd + Date.JD_MJD
    jj = Date._julian_century(jd)
    m = radians(357.5277233 + 35999.05034 * jj)
    delta_lambda = radians(246.11 + 0.90251792 * (jd - 2451545.))
    return 0.001657 * sin(m) + 0.000022 * sin(delta_lambda)
Definition of the Barycentric Dynamic Time scale relatively to Terrestrial Time
def delete_color_scheme_stack(self, scheme_name):
    """Remove the stacked widget (and ordering entry) for *scheme_name*."""
    self.set_scheme(scheme_name)  # make it current so currentWidget() is the target
    widget = self.stack.currentWidget()
    self.stack.removeWidget(widget)
    index = self.order.index(scheme_name)
    self.order.pop(index)
Remove stack widget by 'scheme_name'.
def validate_appname_or_none(option, value):
    """Validate the ``appname`` option: None, or a string of at most 128 UTF-8 bytes."""
    if value is None:
        return value
    validate_string(option, value)
    # The limit applies to the encoded byte length, not the character count.
    encoded_length = len(value.encode('utf-8'))
    if encoded_length > 128:
        raise ValueError("%s must be <= 128 bytes" % (option,))
    return value
Validate the appname option.
def launch(thing,title=False):
    """Render *thing* to an HTML report in the temp directory and open it in a browser."""
    html=htmlFromThing(thing,title=title)
    if not html:
        print("no HTML was generated.")
        return
    # Timestamped name keeps successive reports from overwriting each other.
    fname="%s/%s.html"%(tempfile.gettempdir(),str(time.time()))
    with open(fname,'w') as f:
        f.write(html)
    webbrowser.open(fname)
analyze a thing, create a nice HTML document, and launch it.
def __read_byte_align(decl, attrs):
    """Set *decl*'s byte alignment from an XML alignment attribute given in bits.

    Fixed: ``/`` is true division under Python 3 and produced a float;
    an alignment is an integer byte count, so floor division is used.
    """
    align = attrs.get(XML_AN_ALIGN, 0)
    decl.byte_align = int(align) // 8
Using duck typing to set the alignment
def setup_addon_register(self, harpoon):
    """Create and populate the addon Register from the harpoon configuration."""
    self.addon_getter = AddonGetter()
    self.addon_getter.add_namespace("harpoon.crosshairs", Result.FieldSpec(), Addon.FieldSpec())
    register = Register(self.addon_getter, self)
    if "addons" in harpoon:
        addons = harpoon["addons"]
        if type(addons) in (MergedOptions, dict) or getattr(addons, "is_dict", False):
            # Normalise {namespace: [addon, ...]} before registering pairs.
            spec = sb.dictof(sb.string_spec(), sb.listof(sb.string_spec()))
            meta = Meta(harpoon, []).at("addons")
            for namespace, adns in spec.normalise(meta, addons).items():
                register.add_pairs(*[(namespace, adn) for adn in adns])
    register.recursive_import_known()
    register.recursive_resolve_imported()
    return register
Setup our addon register
def _output_validators(self): if self._walk_for_type('Boolean'): print("from .validators import boolean") if self._walk_for_type('Integer'): print("from .validators import integer") vlist = self.override.get_validator_list() for override in vlist: if override.startswith('common/'): override = override.lstrip('common/') filename = "validators" else: filename = "%s_validators" % self.filename print("from .%s import %s" % (filename, override))
Output common validator types based on usage.
def execute(self, command):
    """Send *command* to the DDE server as an XTYP_EXECUTE transaction."""
    pData = c_char_p(command)
    cbData = DWORD(len(command) + 1)  # +1 for the trailing NUL byte
    hDdeData = DDE.ClientTransaction(pData, cbData, self._hConv, HSZ(), CF_TEXT, XTYP_EXECUTE, TIMEOUT_ASYNC, LPDWORD())
    if not hDdeData:
        raise DDEError("Unable to send command", self._idInst)
    DDE.FreeDataHandle(hDdeData)
Execute a DDE command.
def count_(self):
    """Return the row count of the main dataframe, or None when it cannot be computed."""
    try:
        return len(self.df.index)
    except Exception as exc:
        # Errors are routed through the instance's error channel, not raised.
        self.err(exc, "Can not count data")
        return None
Returns the number of rows of the main dataframe
def instantiate_from_config(cfg):
    """Register a DataType for every entry in *cfg*, keyed by its "type" field.

    Fixed: the duplicate-type error message interpolated the whole config
    dict (``%s % h``) instead of the offending type name.
    """
    for entry in cfg:
        type_name = entry.get("type")
        if type_name in data_types:
            raise KeyError("Data type '%s' already exists" % type_name)
        data_types[type_name] = DataType(entry)
Instantiate data types from config
def make_sftp(self):
    """Open and return an SFTP client on this SSH client's transport.

    NOTE(review): the channel returned by ``open_session()`` is discarded,
    and ``SFTPClient.from_transport`` opens its own channel -- confirm the
    extra session is actually required.
    """
    transport = self.client.get_transport()
    transport.open_session()
    return paramiko.SFTPClient.from_transport(transport)
Make SFTP client from open transport
def nodes(self, tree):
    """Return the tree nodes relevant to this spec's frequency."""
    freq = self.frequency
    if freq == 'per_session':
        # Flatten every subject's sessions into one list.
        return [sess for subject in tree.subjects for sess in subject.sessions]
    if freq == 'per_subject':
        return tree.subjects
    if freq == 'per_visit':
        return tree.visits
    if freq == 'per_study':
        return [tree]
    assert False, "Unrecognised frequency '{}'".format(freq)
Returns the relevant nodes for the spec's frequency
def section_path_lengths(neurites, neurite_type=NeuriteType.all):
    """Map each section of the neurites to its path length.

    The path length of a section is the sum of the lengths of all sections
    on its upstream path (itself included, via ``iupstream``).
    """
    dist = {}
    neurite_filter = is_type(neurite_type)
    for s in iter_sections(neurites, neurite_filter=neurite_filter):
        dist[s] = s.length
    def pl2(node):
        # Sum the pre-computed lengths along the upstream chain.
        return sum(dist[n] for n in node.iupstream())
    return map_sections(pl2, neurites, neurite_type=neurite_type)
Path lengths of a collection of neurites
def check_rst(code, ignore):
    """Yield errors found when checking *code* as nested reStructuredText."""
    yield from check(code, filename='<string>', ignore=ignore)
Yield errors in nested RST code.
def clog(color):
    """Same as ``log`` but centralizes (and right-strips) the message first."""
    logger = log(color)
    def centered(msg):
        return logger(centralize(msg).rstrip())
    return centered
Same to ``log``, but this one centralizes the message first.
def deprecated(operation=None):
    """Mark an operation as deprecated.

    Works both bare (``@deprecated``) and parameterized (``@deprecated()``).
    """
    def mark(op):
        op.deprecated = True
        return op
    if operation is None:
        return mark
    return mark(operation)
Mark an operation deprecated.
def distance(self,value):
    """Set the distance; *value* must be a ``Quantity`` convertible to parsecs.

    Star magnitudes in every band are shifted by the change in distance
    modulus.
    """
    self.stars['distance'] = value.to('pc').value
    old_distmod = self.stars['distmod'].copy()
    new_distmod = distancemodulus(self.stars['distance'])
    for m in self.bands:
        self.stars[m] += new_distmod - old_distmod
    self.stars['distmod'] = new_distmod
    logging.warning('Setting the distance manually may have screwed up your constraints. Re-apply constraints as necessary.')
New distance value must be a ``Quantity`` object
def _is_from_chemical_system(chemical_system, struct): chemsys = list(set([sp.symbol for sp in struct.composition])) if len(chemsys) != len(chemical_system): return False for el in chemsys: if not el in chemical_system: return False return True
checks if the structure object is from the given chemical system
def as_dict(self):
    """Return a JSON-serializable dict representation of this phonon DOS."""
    d = {"@module": self.__class__.__module__,
         "@class": self.__class__.__name__,
         "structure": self.structure.as_dict(),
         "frequencies": list(self.frequencies),
         "densities": list(self.densities),
         "pdos": []}
    if len(self.pdos) > 0:
        # One projected-DOS list per site, in structure order.
        for at in self.structure:
            d["pdos"].append(list(self.pdos[at]))
    return d
Json-serializable dict representation of CompletePhononDos.
def find_by_ids(ids, connection=None, page_size=100, page_number=0,
                sort_by=DEFAULT_SORT_BY, sort_order=DEFAULT_SORT_ORDER):
    """List playlists with the given IDs as a paged ItemResultSet."""
    ids = ','.join([str(i) for i in ids])  # API expects a comma-separated string
    return pybrightcove.connection.ItemResultSet('find_playlists_by_ids',
                                                 Playlist, connection,
                                                 page_size, page_number,
                                                 sort_by, sort_order,
                                                 playlist_ids=ids)
List playlists by specific IDs.
def _check_samples_nodups(fnames):
    """Raise ValueError if any sample name appears in more than one input VCF."""
    seen = defaultdict(int)
    for fname in fnames:
        for sample in get_samples(fname):
            seen[sample] += 1
    dups = [sample for sample, n in seen.items() if n > 1]
    if dups:
        raise ValueError("Duplicate samples found in inputs %s: %s" % (dups, fnames))
Ensure a set of input VCFs do not have duplicate samples.
def UpdateSNMPObjs():
    """Refresh the SNMP object by shelling out to ``sleep 5; date`` and storing its output."""
    global threadingString
    LogMsg("Beginning data update.")
    data = ""
    LogMsg("Calling external command \"sleep 5; date\".")
    # LANG=C keeps the date output format stable regardless of locale.
    proc = subprocess.Popen(
        "sleep 5; date", shell=True, env={ "LANG": "C" },
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT
    )
    output = proc.communicate()[0].splitlines()[0]
    rc = proc.poll()
    if rc != 0:
        LogMsg("An error occured executing the command: {0}".format(output))
        return
    msg = "Updating \"threadingString\" object with data \"{0}\"."
    LogMsg(msg.format(output))
    threadingString.update(output)
    LogMsg("Data update done, exiting thread.")
Function that does the actual data update.
def division_series_logs():
    """Download and return Retrosheet Division Series game logs as a DataFrame."""
    file_name = 'GLDV.TXT'
    z = get_zip_file(division_series_url)
    data = pd.read_csv(z.open(file_name), header=None, sep=',', quotechar='"')
    data.columns = gamelog_columns
    return data
Pull Retrosheet Division Series Game Logs
def _create_sample_file(data, out_dir): sample_file = os.path.join(out_dir, "sample_file.txt") with open(sample_file, 'w') as outh: for sample in data: outh.write(sample[0]["clean_fastq"] + "\n") return sample_file
from data list all the fastq files in a file
def format_stack_frame_json(self):
    """Serialize this StackFrame into its JSON (dict) representation.

    String fields pass through ``get_truncatable_str``; numeric fields are
    copied verbatim.
    """
    stack_frame_json = {}
    stack_frame_json['function_name'] = get_truncatable_str(
        self.func_name)
    stack_frame_json['original_function_name'] = get_truncatable_str(
        self.original_func_name)
    stack_frame_json['file_name'] = get_truncatable_str(self.file_name)
    stack_frame_json['line_number'] = self.line_num
    stack_frame_json['column_number'] = self.col_num
    stack_frame_json['load_module'] = {
        'module': get_truncatable_str(self.load_module),
        'build_id': get_truncatable_str(self.build_id),
    }
    stack_frame_json['source_version'] = get_truncatable_str(
        self.source_version)
    return stack_frame_json
Convert StackFrame object to json format.
def remove_elements_with_source(source, field):
    """Return *field* frozen, minus elements whose ``source`` equals *source* (case-insensitively)."""
    kept = []
    for element in field:
        if element.get('source', '').lower() != source:
            kept.append(element)
    return freeze(kept)
Remove all elements matching ``source`` in ``field``.
def all(self, msg, *args, **kwargs):
    """Log *msg* at every known log level.

    NOTE(review): *args* is forwarded unexpanded (as a tuple) -- confirm
    ``multi`` expects the tuple rather than ``*args``.
    """
    self.multi(ALL, msg, args, **kwargs)
Log a message at every known log level
def load_json(self, path):
    """Load and deserialize a JSON file resolved through the user profile."""
    resolved = self.profile_path(path, must_exist=True)
    with open(resolved, encoding='utf-8') as handle:
        return json.load(handle)
Load a JSON file from the user profile.
def fermion_avg(efermi, norm_hopping, func):
    """Calculate, for every slave, the average over the desired observable.

    *func* may be the name 'ekin' or 'ocupation' (mapped to the zero-T
    Bethe helpers) or any callable taking ``(efermi, hopping)``.
    """
    if func == 'ekin':
        func = bethe_ekin_zeroT
    elif func == 'ocupation':
        func = bethe_filling_zeroT
    pairs = zip(efermi, norm_hopping)
    return np.asarray([func(ef, tz) for ef, tz in pairs])
Calculates, for every slave, its average over the desired observable.
def have_active_commit(self):
    """Return True when an 'active_commit' state file exists under the base path."""
    commit_state = sfs.file_or_default(sfs.cpjoin(self.base_path, 'active_commit'), None)
    if commit_state != None:
        return True
    return False
Checks if there is an active commit owned by the specified user
def login(self):
    """Log into Reddit and store the session modhash on the client."""
    data = {'user': self.options['username'],
            'passwd': self.options['password'],
            'api_type': 'json'}
    response = self.client.post('http://www.reddit.com/api/login', data=data)
    self.client.modhash = response.json()['json']['data']['modhash']
Logs into Reddit in order to display a personalised front page.
def add_actions_to_context_menu(self, menu):
    """Add the standard console actions (inspect, clear, reset, quit) to *menu*."""
    inspect_action = create_action(self, _("Inspect current object"),
                                   QKeySequence(get_shortcut('console', 'inspect current object')),
                                   icon=ima.icon('MessageBoxInformation'),
                                   triggered=self.inspect_object)
    clear_line_action = create_action(self, _("Clear line or block"),
                                      QKeySequence(get_shortcut(
                                          'console', 'clear line')),
                                      triggered=self.clear_line)
    reset_namespace_action = create_action(self, _("Remove all variables"),
                                           QKeySequence(get_shortcut(
                                               'ipython_console', 'reset namespace')),
                                           icon=ima.icon('editdelete'),
                                           triggered=self.reset_namespace)
    clear_console_action = create_action(self, _("Clear console"),
                                         QKeySequence(get_shortcut('console', 'clear shell')),
                                         triggered=self.clear_console)
    quit_action = create_action(self, _("&Quit"), icon=ima.icon('exit'),
                                triggered=self.exit_callback)
    # None entries render as menu separators.
    add_actions(menu, (None, inspect_action, clear_line_action,
                       clear_console_action, reset_namespace_action,
                       None, quit_action))
    return menu
Add actions to IPython widget context menu
def parse_kv_args(self, args):
    """Parse key=value style arguments onto this instance.

    Integer options accept any base-prefixed literal (0x.., 0o.., decimal);
    unknown leftovers raise AnsibleError.
    """
    for name in ("start", "end", "count", "stride"):
        try:
            raw = args.pop(name, None)
            if raw is None:
                continue
            setattr(self, name, int(raw, 0))  # base 0: auto-detect prefix
        except ValueError:
            raise AnsibleError(
                "can't parse arg %s=%r as integer"
                % (name, raw)
            )
    if 'format' in args:
        self.format = args.pop("format")
    if args:
        raise AnsibleError(
            "unrecognized arguments to with_sequence: %r"
            % args.keys()
        )
parse key-value style arguments
def _get_fault_dip_term(self, C, rup): if rup.mag < 4.5: return C["c19"] * rup.dip elif rup.mag > 5.5: return 0.0 else: return C["c19"] * (5.5 - rup.mag) * rup.dip
Returns the fault dip term, defined in equation 24
def zip(value=data, mu=mu, psi=psi):
    """Zero-inflated Poisson log-likelihood of *value* given rate *mu* and mixture *psi*.

    NOTE(review): this shadows the builtin ``zip`` and captures its default
    arguments from module-level globals at definition time.
    """
    like = 0.0
    for x in value:
        if not x:
            # Zero counts may be structural zeros or Poisson zeros.
            like += np.log((1. - psi) + psi * np.exp(-mu))
        else:
            like += np.log(psi) + poisson_like(x, mu)
    return like
Zero-inflated Poisson likelihood
def next(self):
    """Return the next batch of results, refilling the stride buffer when drained.

    Raises StopIteration when a refilled buffer yields an empty batch.
    """
    try:
        batch = self._stride_buffer.pop()
    except (IndexError, AttributeError):
        # Buffer exhausted or never created: refill and retry once.
        self._rebuffer()
        batch = self._stride_buffer.pop()
    if not batch:
        raise StopIteration
    return batch
Returns the next sequence of results, given stride and n.
def update(self, id, **dict):
    """PUT the given field values to the item identified by *id*.

    NOTE(review): the ``dict`` parameter shadows the builtin of the same name.
    """
    if not self._item_path:
        raise AttributeError('update is not available for %s' % self._item_name)
    target = (self._update_path or self._item_path) % id
    payload = json.dumps({self._item_type:dict})
    self._redmine.put(target, payload)
    return None
Update a given item with the passed data.
def funcschema(default=None, *args, **kwargs):
    """Decorator transforming a function into a FunctionSchema.

    Supports both bare use and parameterized use (deferring until the
    decorated function is supplied).
    """
    if default is None:
        def deferred(fn):
            return funcschema(default=fn, *args, **kwargs)
        return deferred
    return FunctionSchema(default=default, *args, **kwargs)
Decorator to use in order to transform a function into a schema.
def check_required_arguments(ribo_file, transcriptome_fasta, transcript_name=None):
    """Validate the BAM/FASTA inputs shared by riboplot and ribocount.

    Checks BAM validity, creates a BAM index when missing, validates the
    transcriptome FASTA, resolves *transcript_name* (defaulting to the
    first transcript) and verifies it exists among the BAM references.
    """
    try:
        is_bam_valid(ribo_file)
    except ValueError:
        log.error('The given RiboSeq BAM file is not valid')
        raise
    if not bam_has_index(ribo_file):
        log.info('Creating an index for the BAM file...')
        create_bam_index(ribo_file)
    if not bam_has_index(ribo_file):
        # Index creation silently failed -- likely an unsorted/invalid BAM.
        msg = ('Could not create an index for this BAM file. Is this a valid BAM file '
               'and/or is the BAM file sorted by chromosomal coordinates?')
        log.error(msg)
        raise BamFileError(msg)
    fasta_valid = False
    try:
        fasta_valid = is_fasta_valid(transcriptome_fasta)
    except IOError:
        log.error('Transcriptome FASTA file is not valid')
        raise
    if fasta_valid:
        if transcript_name:
            try:
                get_fasta_records(transcriptome_fasta, [transcript_name])
            except IOError:
                log.error('Could not get FASTA sequence of "{}" from transcriptome FASTA file'.format(transcript_name))
                raise
        else:
            transcript_name = get_first_transcript_name(transcriptome_fasta)
        with pysam.AlignmentFile(ribo_file, 'rb') as bam_file:
            if transcript_name not in bam_file.references:
                msg = 'Transcript "{}" does not exist in BAM file'.format(transcript_name)
                log.error(msg)
                raise ArgumentError(msg)
Check required arguments of both riboplot and ribocount.
def initialize_switch_endpoints(self):
    """Create EAPI clients for every configured switch and verify dynamic-ACL support."""
    self._switches = {}
    self._port_group_info = {}
    self._validate_config()
    for s in cfg.CONF.ml2_arista.switch_info:
        switch_ip, switch_user, switch_pass = s.split(":")
        # A literal '' in the config denotes an empty password.
        if switch_pass == "''":
            switch_pass = ''
        self._switches[switch_ip] = api.EAPIClient(
            switch_ip, switch_user, switch_pass, verify=False,
            timeout=cfg.CONF.ml2_arista.conn_timeout)
    self._check_dynamic_acl_support()
Initialize endpoints for switch communication
def generate_child_leaf_nodes(self):
    """Return a generator over every leaf node in this node's subtree (depth-first)."""
    def _walk(node):
        if not node.has_children():
            yield node
            return
        for child_node in node.generate_child_nodes():
            yield from _walk(child_node)
    return _walk(self)
Generate leaf nodes of this node.
def _load_stream(self): meta = self._request.META try: content_length = int( meta.get('CONTENT_LENGTH', meta.get('HTTP_CONTENT_LENGTH', 0)) ) except (ValueError, TypeError): content_length = 0 if content_length == 0: self._stream = None elif hasattr(self._request, 'read'): self._stream = self._request else: self._stream = six.BytesIO(self.raw_post_data)
Return the content body of the request, as a stream.
def _fastfood_build(args):
    """Handler for ``fastfood build``: build the cookbook and report what changed.

    Returns the (written_files, cookbook) pair from the build.
    """
    written_files, cookbook = food.build_cookbook(
        args.config_file, args.template_pack, args.cookbooks, args.force)
    if len(written_files) > 0:
        print("%s: %s files written" % (cookbook, len(written_files)))
    else:
        print("%s up to date" % cookbook)
    return written_files, cookbook
Run on `fastfood build`.
def validate_read(self, kwargs):
    """Validate read kwargs; fixed Sparse tables do not support start/stop selection."""
    kwargs = super().validate_read(kwargs)
    if 'start' in kwargs or 'stop' in kwargs:
        raise NotImplementedError("start and/or stop are not supported "
                                  "in fixed Sparse reading")
    return kwargs
we don't support start, stop kwds in Sparse
def __zip_file(self):
    """Return a binary file object for the FA zip: local file when configured, else downloaded."""
    if self.zip_path:
        self.__print('Opening local zipfile: %s' % self.zip_path)
        return open(self.zip_path, 'rb')
    url = self.__release_url
    self.__print('Downloading from URL: %s' % url)
    response = urlopen(url)
    # Buffer the download in memory so callers get a seekable file object.
    return io.BytesIO(response.read())
Get a file object of the FA zip file.
def log_uniform_prior(value, umin=0, umax=None):
    """Evaluate a log-uniform prior (density proportional to 1/value) on [umin, umax].

    NOTE(review): in-range values return the density ``1/value`` while
    out-of-range values return the log-density ``-inf`` -- confirm callers
    expect this mix.
    """
    out_of_range = -np.inf
    if value <= 0 or value < umin:
        return out_of_range
    if umax is not None and value > umax:
        return out_of_range
    return 1 / value
Log-uniform prior distribution.
def make_posthook(self):
    """Run the configured post hook inside the project directory.

    Fixed: removed leftover debugging -- two ``print(id(...))`` calls and
    an ``ipdb.set_trace()`` breakpoint that halted execution.
    """
    if self.posthook:
        os.chdir(self.project_name)
        self.posthook()
Run the post hook into the project directory.
def policy(self, args):
    """Set the accept/reject port policy from a policy word and a port-spec string.

    ``args[0]`` is 'accept' or 'reject'; ``args[1]`` is a comma-separated
    list of ports or lo-hi ranges.
    """
    word = args[0]
    if word == 'reject':
        self.accepted_ports = None
        self.rejected_ports = []
        target = self.rejected_ports
    elif word == 'accept':
        self.accepted_ports = []
        self.rejected_ports = None
        target = self.accepted_ports
    else:
        raise RuntimeError("Don't understand policy word \"%s\"" % word)
    for spec in args[1].split(','):
        if '-' in spec:
            low, high = spec.split('-')
            target.append(PortRange(int(low), int(high)))
        else:
            target.append(int(spec))
setter for the policy descriptor
async def jsk_vc_stop(self, ctx: commands.Context):
    """Stop the guild's currently playing audio source and confirm in chat."""
    voice = ctx.guild.voice_client
    voice.stop()
    await ctx.send(f"Stopped playing audio in {voice.channel.name}.")
Stops running an audio source, if there is one.
def visit_statements(self, nodes):
    """Generate the primal and adjoint statement lists for a series of nodes.

    Primals keep source order; adjoints accumulate in reverse so the
    adjoint program runs backwards through the statements. Falsy entries
    are dropped from both lists.
    """
    primals = []
    adjoints = collections.deque()
    for node in nodes:
        primal, adjoint = self.visit(node)
        primal_list = primal if isinstance(primal, list) else [primal]
        adjoint_list = adjoint if isinstance(adjoint, list) else [adjoint]
        primals.extend(p for p in primal_list if p)
        adjoints.extendleft(a for a in reversed(adjoint_list) if a)
    return primals, list(adjoints)
Generate the adjoint of a series of statements.
def bin_priority(op,left,right):
    """Rebalance a binary expression node to honor operator precedence.

    The LR grammar parses without precedence, so this rotates the tree
    after the fact: whenever a child's operator binds less tightly
    (compared via ``<``), the child is hoisted and this operator pushed
    down recursively.
    """
    if isinstance(left,BinX) and left.op < op:
        # Left child binds weaker: hoist it, re-prioritizing our op with its right part.
        return bin_priority(left.op,left.left,bin_priority(op,left.right,right))
    elif isinstance(left,UnX) and left.op < op:
        return un_priority(left.op,BinX(op,left.val,right))
    elif isinstance(right,BinX) and right.op < op:
        # Mirror case for the right child.
        return bin_priority(right.op,bin_priority(op,left,right.left),right.right)
    else:
        return BinX(op,left,right)
I don't know how to handle order of operations in the LR grammar, so here it is
def rest_del(self, url, params=None, session=None, verify=True, cert=None):
    """Perform a DELETE request to *url* via *session*; return (body text, status code)."""
    response = session.delete(url, params=params, verify=verify, cert=cert)
    return response.text, response.status_code
Perform a DELETE request to url with requests.session
def user_exists(username, token_manager=None, app_url=defaults.APP_URL):
    """Return whether a user with *username* exists.

    404 -> False, 200 -> True; any other status raises JutException.
    """
    headers = token_manager.get_access_token_headers()
    auth_url = environment.get_auth_url(app_url=app_url)
    url = "%s/api/v1/accounts?username=%s" % (auth_url, username)
    response = requests.get(url, headers=headers)
    if response.status_code == 404:
        return False
    elif response.status_code == 200:
        return True
    else:
        raise JutException('Error %s: %s' % (response.status_code, response.text))
check if the user exists with the specified username
def _sort_schema(schema): if isinstance(schema, dict): for k, v in sorted(schema.items()): if isinstance(v, dict): yield k, OrderedDict(_sort_schema(v)) elif isinstance(v, list): yield k, list(_sort_schema(v)) else: yield k, v elif isinstance(schema, list): for v in schema: if isinstance(v, dict): yield OrderedDict(_sort_schema(v)) elif isinstance(v, list): yield list(_sort_schema(v)) else: yield v else: yield d
Recursively sorts a JSON schema by dict key.
def pip(self, cmd):
    """Execute *cmd* through the virtual environment's pip binary."""
    pip_bin = self.cmd_path('pip')
    full_command = '{0} {1}'.format(pip_bin, cmd)
    return self._execute(full_command)
Execute some pip function using the virtual environment pip.
def user_default_serializer(self, obj):
    """Convert a User to its cached dict representation (falsy input passes through as None)."""
    if not obj:
        return None
    self.user_default_add_related_pks(obj)
    fields = [
        ('id', obj.id),
        ('username', obj.username),
        self.field_to_json('DateTime', 'date_joined', obj.date_joined),
        self.field_to_json('PKList', 'votes', model=Choice, pks=obj._votes_pks),
    ]
    return dict(fields)
Convert a User to a cached instance representation.
def readable_size(C, bytes, suffix='B', decimals=1, sep='\u00a0'):
    """Given a number of bytes, return the size formatted in readable units.

    Returns None for None input. The base unit (index 0) is shown without
    decimals; larger units use *decimals* places.
    """
    if bytes is None:
        return
    size = float(bytes)
    for unit in C.SIZE_UNITS:
        is_last_unit = unit == C.SIZE_UNITS[-1]
        if abs(size) < 1024 or is_last_unit:
            shown_decimals = C.SIZE_UNITS.index(unit) > 0 and decimals or 0
            return "{size:.{decimals}f}{sep}{unit}{suffix}".format(
                size=size,
                unit=unit,
                suffix=suffix,
                sep=sep,
                decimals=shown_decimals,
            )
        size /= 1024
given a number of bytes, return the file size in readable units
def log_path(scraper):
    """Determine the JSON log file name for *scraper* under its data path."""
    log_name = '%s.jsonlog' % scraper.name
    return os.path.join(scraper.config.data_path, log_name)
Determine the file name for the JSON log.