Dataset columns: code (string, lengths 75 to 104k); docstring (string, lengths 1 to 46.9k)
def get_app_data(app_id):
    """Get app data (including name and app_data)."""
    try:
        conn = get_conn()
        c = conn.cursor()
        # NOTE: interpolating app_id into the SQL string is injection-prone;
        # a parameterized query would be safer here.
        c.execute("SELECT id,name,app_data FROM app WHERE id='{0}' ".format(app_id))
        result = c.fetchone()
        conn.close()
        if result:
            appname = result[1]
            app_data = utils.str2dict(base64.b64decode(result[2]))
            return (appname, app_data)
        else:
            return (None, None)
    except Exception as e:
        raise RuntimeError('get app data failed! %s' % e)
Get app data (including name and app_data).
def find_parents(self):
    """Take a tree and set the parents according to the children

    Takes a tree structure which lists the children of each vertex and
    computes the parents for each vertex and places them in."""
    for i in range(len(self.vertices)):
        self.vertices[i].parents = []
    for i in range(len(self.vertices)):
        for child in self.vertices[i].children:
            if i not in self.vertices[child].parents:
                self.vertices[child].parents.append(i)
Take a tree and set the parents according to the children Takes a tree structure which lists the children of each vertex and computes the parents for each vertex and places them in.
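A minimal usage sketch, with hypothetical Vertex and Tree classes invented for illustration (find_parents above only needs a vertices list whose items carry children/parents):

class Vertex:
    def __init__(self, children=None):
        self.children = children or []  # indices of child vertices
        self.parents = []

class Tree:
    def __init__(self, vertices):
        self.vertices = vertices

Tree.find_parents = find_parents  # attach the function above as a method

tree = Tree([Vertex([1, 2]), Vertex([2]), Vertex()])  # 0 -> {1, 2}, 1 -> {2}
tree.find_parents()
print(tree.vertices[2].parents)  # [0, 1]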
def _configuration(self, kwargs, config_item):
    """Combine configuration-related keyworded arguments into
    notification_configuration.
    """
    if 'notification_configuration' not in config_item:
        if 'notification_type' not in kwargs:
            return
        nc = kwargs['notification_configuration'] = {}
        for field in Resource.configuration[kwargs['notification_type']]:
            if field not in config_item:
                raise exc.TowerCLIError('Required config field %s not'
                                        ' provided.' % field)
            else:
                nc[field] = config_item[field]
    else:
        kwargs['notification_configuration'] = \
            config_item['notification_configuration']
Combine configuration-related keyworded arguments into notification_configuration.
def read_env(cls, env_file=None, **overrides):
    """Read a .env file into os.environ.

    If not given a path to a dotenv path, does filthy magic stack
    backtracking to find manage.py and then find the dotenv.

    http://www.wellfireinteractive.com/blog/easier-12-factor-django/
    https://gist.github.com/bennylope/2999704
    """
    if env_file is None:
        frame = sys._getframe()
        env_file = os.path.join(os.path.dirname(frame.f_back.f_code.co_filename), '.env')
        if not os.path.exists(env_file):
            warnings.warn(
                "%s doesn't exist - if you're not configuring your "
                "environment separately, create one." % env_file)
            return

    try:
        with open(env_file) if isinstance(env_file, basestring) else env_file as f:
            content = f.read()
    except IOError:
        warnings.warn(
            "Error reading %s - if you're not configuring your "
            "environment separately, check this." % env_file)
        return

    logger.debug('Read environment variables from: {0}'.format(env_file))
    for line in content.splitlines():
        m1 = re.match(r'\A(?:export )?([A-Za-z_0-9]+)=(.*)\Z', line)
        if m1:
            key, val = m1.group(1), m1.group(2)
            m2 = re.match(r"\A'(.*)'\Z", val)
            if m2:
                val = m2.group(1)
            m3 = re.match(r'\A"(.*)"\Z', val)
            if m3:
                val = re.sub(r'\\(.)', r'\1', m3.group(1))
            cls.ENVIRON.setdefault(key, str(val))

    # set defaults
    for key, value in overrides.items():
        cls.ENVIRON.setdefault(key, value)
Read a .env file into os.environ. If not given a path to a dotenv path, does filthy magic stack backtracking to find manage.py and then find the dotenv. http://www.wellfireinteractive.com/blog/easier-12-factor-django/ https://gist.github.com/bennylope/2999704
def run(self):
    """Run the process.

    Iterate the GLib main loop and process the task queue.
    """
    loop = GLib.MainLoop()
    context = loop.get_context()
    while True:
        time.sleep(0.1)
        if context.pending():
            context.iteration()
        self._manager[ATTR_POSITION] = self._position()
        try:
            method, args = self._task_queue.get(False)
            getattr(self, method)(**args)
        except queue.Empty:
            pass
        if self.state != STATE_IDLE:
            continue
        try:
            uri = self._media_queue.get(False)
            self.media(uri)
        except queue.Empty:
            pass
Run the process. Iterate the GLib main loop and process the task queue.
def init_app(self, app):
    '''Initialize this Flask extension for given app.'''
    self.app = app
    if not hasattr(app, 'extensions'):
        app.extensions = {}
    app.extensions['plugin_manager'] = self
    self.reload()
Initialize this Flask extension for given app.
def python(source):
    r"""
    >>> python('def add(a, b): return a + b').add(40, 2)
    42
    """
    obj = type('', (object,), {})()
    _exec(source, obj.__dict__, obj.__dict__)
    return obj
r""" >>> python('def add(a, b): return a + b').add(40, 2) 42
def grains_dict(self):
    """Allowing to lookup grain by either label or duration

    For backward compatibility"""
    d = {grain.duration: grain for grain in self.grains()}
    d.update({grain.label: grain for grain in self.grains()})
    return d
Allowing to lookup grain by either label or duration For backward compatibility
def update_metric_tags(self, metric_type, metric_id, **tags):
    """Replace the metric_id's tags with given **tags

    :param metric_type: MetricType to be matched (required)
    :param metric_id: Exact string matching metric id
    :param tags: Updated key/value tag values of the metric
    """
    self._put(self._get_metrics_tags_url(
        self._get_metrics_single_url(metric_type, metric_id)),
        tags, parse_json=False)
Replace the metric_id's tags with given **tags :param metric_type: MetricType to be matched (required) :param metric_id: Exact string matching metric id :param tags: Updated key/value tag values of the metric
def _setup_output_file(self, output_filename, args, write_header=True):
    """Open and prepare output file."""
    # write command line into outputFile
    # (without environment variables, they are documented by benchexec)
    try:
        output_file = open(output_filename, 'w')  # override existing file
    except IOError as e:
        sys.exit(e)
    if write_header:
        output_file.write(' '.join(map(util.escape_string_shell,
                                       self._build_cmdline(args)))
                          + '\n\n\n' + '-' * 80 + '\n\n\n')
        output_file.flush()
    return output_file
Open and prepare output file.
def run_config_diagnostics(config_path=CONFIG_PATH):
    """Run diagnostics on the configuration file.

    Args:
        config_path (str): Path to the configuration file.

    Returns:
        str, Set[str], dict(str, Set[str]): The path to the configuration
        file, a set of missing sections and a dict that maps each section
        to the entries that have either missing or empty options.
    """
    config = read_config(config_path)
    missing_sections = set()
    malformed_entries = defaultdict(set)
    for section, expected_section_keys in SECTION_KEYS.items():
        section_content = config.get(section)
        if not section_content:
            missing_sections.add(section)
        else:
            for option in expected_section_keys:
                option_value = section_content.get(option)
                if not option_value:
                    malformed_entries[section].add(option)
    return config_path, missing_sections, malformed_entries
Run diagnostics on the configuration file. Args: config_path (str): Path to the configuration file. Returns: str, Set[str], dict(str, Set[str]): The path to the configuration file, a set of missing sections and a dict that maps each section to the entries that have either missing or empty options.
def handle_document_error(self, item_session: ItemSession) -> Actions:
    '''Callback for when the document only describes a server error.

    Returns:
        A value from :class:`.hook.Actions`.
    '''
    self._waiter.increment()
    self._statistics.errors[ServerError] += 1
    action = self.handle_response(item_session)
    if action == Actions.NORMAL:
        item_session.set_status(Status.error)
    return action
Callback for when the document only describes a server error. Returns: A value from :class:`.hook.Actions`.
def handle_result(self, idents, parent, raw_msg, success=True):
    """handle a real task result, either success or failure"""
    # first, relay result to client
    engine = idents[0]
    client = idents[1]
    # swap_ids for ROUTER-ROUTER mirror
    raw_msg[:2] = [client, engine]
    # print (map(str, raw_msg[:4]))
    self.client_stream.send_multipart(raw_msg, copy=False)
    # now, update our data structures
    msg_id = parent['msg_id']
    self.pending[engine].pop(msg_id)
    if success:
        self.completed[engine].add(msg_id)
        self.all_completed.add(msg_id)
    else:
        self.failed[engine].add(msg_id)
        self.all_failed.add(msg_id)
    self.all_done.add(msg_id)
    self.destinations[msg_id] = engine
    self.update_graph(msg_id, success)
handle a real task result, either success or failure
def plot_histogram(self, filename=None):
    """Plot a histogram of data values."""
    header, data = self.read_next_data_block()
    data = data.view('float32')
    plt.figure("Histogram")
    plt.hist(data.flatten(), 65, facecolor='#cc0000')
    if filename:
        plt.savefig(filename)
    plt.show()
Plot a histogram of data values
def _fill_syns(self, new_syns, rpacketlists_per_worker):
    """rpacketlists_per_worker is a list of packetlists as returned by
    _run_chunk"""
    # TODO: move to BaseBackendByDataset or BaseBackend?
    logger.debug("rank:{}/{} {}._fill_syns".format(
        mpi.myrank, mpi.nprocs, self.__class__.__name__))

    for packetlists in rpacketlists_per_worker:
        # single worker
        for packetlist in packetlists:
            # single time/dataset
            for packet in packetlist:
                # single parameter
                new_syns.set_value(**packet)

    return new_syns
rpacketlists_per_worker is a list of packetlists as returned by _run_chunk
def inline(text, data=None):
    """
    Creates a new inline button.

    If `data` is omitted, the given `text` will be used as `data`.
    In any case `data` should be either ``bytes`` or ``str``.

    Note that the given `data` must be less or equal to 64 bytes.
    If more than 64 bytes are passed as data, ``ValueError`` is raised.
    """
    if not data:
        data = text.encode('utf-8')
    elif not isinstance(data, (bytes, bytearray, memoryview)):
        data = str(data).encode('utf-8')

    if len(data) > 64:
        raise ValueError('Too many bytes for the data')

    return types.KeyboardButtonCallback(text, data)
Creates a new inline button. If `data` is omitted, the given `text` will be used as `data`. In any case `data` should be either ``bytes`` or ``str``. Note that the given `data` must be less or equal to 64 bytes. If more than 64 bytes are passed as data, ``ValueError`` is raised.
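A short usage sketch of the helper above (types.KeyboardButtonCallback is whatever the surrounding module imports as types):

btn = inline('Confirm')                    # data defaults to b'Confirm'
btn = inline('Confirm', data='order:42')   # str data is encoded to bytes

try:
    inline('x', data=b'a' * 65)  # 65 bytes: exceeds the 64-byte limit
except ValueError as e:
    print(e)  # Too many bytes for the data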
def error(self, fail=True, action=''):
    """SHOULD BE PRIVATE METHOD"""
    e = 'There was an unknown error communicating with the device.'
    if action:
        e = 'While %s: %s' % (action, e)
    log.error(e)
    if fail:
        raise IOError(e)
SHOULD BE PRIVATE METHOD
def best_diff(img1, img2, opts):
    """Find the best alignment of two images that minimizes the differences.

    Returns (diff, alignments) where ``diff`` is a difference map, and
    ``alignments`` is a tuple ((x1, y1), (x2, y2)).

    See ``diff()`` for the description of the alignment numbers.
    """
    w1, h1 = img1.size
    w2, h2 = img2.size
    w, h = min(w1, w2), min(h1, h2)
    best = None
    best_value = 255 * w * h + 1
    xr = abs(w1 - w2) + 1
    yr = abs(h1 - h2) + 1
    p = Progress(xr * yr, timeout=opts.timeout)
    for x in range(xr):
        if w1 > w2:
            x1, x2 = x, 0
        else:
            x1, x2 = 0, x
        for y in range(yr):
            if h1 > h2:
                y1, y2 = y, 0
            else:
                y1, y2 = 0, y
            p.next()
            this = diff(img1, img2, (x1, y1), (x2, y2))
            this_value = diff_badness(this)
            if this_value < best_value:
                best = this
                best_value = this_value
                best_pos = (x1, y1), (x2, y2)
    return best, best_pos
Find the best alignment of two images that minimizes the differences. Returns (diff, alignments) where ``diff`` is a difference map, and ``alignments`` is a tuple ((x1, y1), (x2, y2)). See ``diff()`` for the description of the alignment numbers.
def exciter(self, Xexc, Pexc, Vexc):
    """Exciter model.

    Based on Exciter.m from MatDyn by Stijn Cole, developed at Katholieke
    Universiteit Leuven. See U{http://www.esat.kuleuven.be/electa/teaching/matdyn/}
    for more information.
    """
    exciters = self.exciters
    F = zeros(Xexc.shape)

    typ1 = [e.generator._i for e in exciters if e.model == CONST_EXCITATION]
    typ2 = [e.generator._i for e in exciters if e.model == IEEE_DC1A]

    # Exciter type 1: constant excitation
    F[typ1, :] = 0.0

    # Exciter type 2: IEEE DC1A
    Efd = Xexc[typ2, 0]
    Uf = Xexc[typ2, 1]
    Ur = Xexc[typ2, 2]

    Ka = Pexc[typ2, 0]
    Ta = Pexc[typ2, 1]
    Ke = Pexc[typ2, 2]
    Te = Pexc[typ2, 3]
    Kf = Pexc[typ2, 4]
    Tf = Pexc[typ2, 5]
    Aex = Pexc[typ2, 6]
    Bex = Pexc[typ2, 7]
    Ur_min = Pexc[typ2, 8]
    Ur_max = Pexc[typ2, 9]
    Uref = Pexc[typ2, 10]
    Uref2 = Pexc[typ2, 11]

    U = Vexc[typ2, 1]

    Ux = Aex * exp(Bex * Efd)
    dUr = 1 / Ta * (Ka * (Uref - U + Uref2 - Uf) - Ur)
    dUf = 1 / Tf * (Kf / Te * (Ur - Ux - Ke * Efd) - Uf)

    if sum(flatnonzero(Ur > Ur_max)) >= 1:
        Ur2 = Ur_max
    elif sum(flatnonzero(Ur < Ur_min)) >= 1:  # fixed: was Ur < Ur_max
        Ur2 = Ur_min
    else:
        Ur2 = Ur
    dEfd = 1 / Te * (Ur2 - Ux - Ke * Efd)

    F[typ2, :] = c_[dEfd, dUf, dUr]

    # Exciter type 3:

    # Exciter type 4:

    return F
Exciter model. Based on Exciter.m from MatDyn by Stijn Cole, developed at Katholieke Universiteit Leuven. See U{http://www.esat.kuleuven.be/electa/teaching/matdyn/} for more information.
def build_network(self, network=None, *args, **kwargs):
    """Core method to construct PyPSA Network object."""
    # TODO: build_network takes care of divergences in database design and
    # future PyPSA changes from PyPSA's v0.6 on. This concept should be
    # replaced, when the oedb has a revision system in place, because
    # sometime this will break!!!

    if network is None:
        network = pypsa.Network()

    network.set_snapshots(self.timeindex)

    timevarying_override = False

    if pypsa.__version__ == '0.11.0':
        old_to_new_name = {
            'Generator': {'p_min_pu_fixed': 'p_min_pu',
                          'p_max_pu_fixed': 'p_max_pu',
                          'source': 'carrier',
                          'dispatch': 'former_dispatch'},
            'Bus': {'current_type': 'carrier'},
            'Transformer': {'trafo_id': 'transformer_id'},
            'Storage': {'p_min_pu_fixed': 'p_min_pu',
                        'p_max_pu_fixed': 'p_max_pu',
                        'soc_cyclic': 'cyclic_state_of_charge',
                        'soc_initial': 'state_of_charge_initial',
                        'source': 'carrier'}}
        timevarying_override = True
    else:
        old_to_new_name = {
            'Storage': {'soc_cyclic': 'cyclic_state_of_charge',
                        'soc_initial': 'state_of_charge_initial'}}

    for comp, comp_t_dict in self.config.items():
        # TODO: This is confusing, should be fixed in db
        pypsa_comp_name = 'StorageUnit' if comp == 'Storage' else comp
        df = self.fetch_by_relname(comp)
        if comp in old_to_new_name:
            tmp = old_to_new_name[comp]
            df.rename(columns=tmp, inplace=True)
        network.import_components_from_dataframe(df, pypsa_comp_name)

        if comp_t_dict:
            for comp_t, columns in comp_t_dict.items():
                for col in columns:
                    df_series = self.series_fetch_by_relname(comp_t, col)
                    # TODO: VMagPuSet is not implemented.
                    if timevarying_override and comp == 'Generator' \
                            and not df_series.empty:
                        idx = df[df.former_dispatch == 'flexible'].index
                        idx = [i for i in idx if i in df_series.columns]
                        df_series.drop(idx, axis=1, inplace=True)
                    try:
                        pypsa.io.import_series_from_dataframe(
                            network, df_series, pypsa_comp_name, col)
                    except (ValueError, AttributeError):
                        print("Series %s of component %s could not be "
                              "imported" % (col, pypsa_comp_name))

    # populate carrier attribute in PyPSA network
    network.import_components_from_dataframe(
        self.fetch_by_relname(carr_ormclass), 'Carrier')

    self.network = network

    return network
Core method to construct PyPSA Network object.
def random_box(molecules, total=None, proportions=None, size=[1., 1., 1.],
               maxtries=100):
    '''Create a System made of a series of random molecules.

    Parameters:
        molecules: list of template molecules to insert
        total: total number of molecules to place
        proportions: relative proportion of each molecule type
            (defaults to equal proportions)
    '''
    # Setup proportions to be right
    if proportions is None:
        proportions = np.ones(len(molecules)) / len(molecules)
    else:
        proportions = np.array(proportions)

    size = np.array(size)
    tree = CoverTree(metric="periodic", metric_args={'cell_lengths': size})

    type_array = []
    result = []
    vdw_radii = []
    max_vdw = max(vdw_radius(np.concatenate([m.type_array for m in molecules])))
    first = True
    for l, n in enumerate((proportions * total).astype(int)):
        # We try to insert each molecule
        for i in range(n):
            # Attempts
            for k in range(maxtries):
                template = molecules[l].copy()
                reference = np.random.uniform(0, 1, 3) * size
                r_array = template.r_array + reference

                # Find all collision candidates
                pts_list, distances_list = tree.query_ball_many(
                    r_array, vdw_radius(template.type_array) + max_vdw)

                # Check if there is any collision
                ok = True
                # renamed loop variable (was i, shadowing the outer loop)
                for j, (dist, pts) in enumerate(zip(distances_list, pts_list)):
                    if len(dist) == 0:
                        break
                    found_vdw = np.array([vdw_radii[p] for p in pts])
                    ok &= all(dist > found_vdw + vdw_radius(template.type_array[j]))

                if ok:
                    tree.insert_many(r_array)
                    template.r_array = r_array
                    result.append(template)
                    vdw_radii.extend(vdw_radius(template.type_array))
                    break

            if not ok:
                raise Exception("Trials exceeded")

    system = System(result)
    system.box_vectors[0, 0] = size[0]
    system.box_vectors[1, 1] = size[1]
    system.box_vectors[2, 2] = size[2]
    return system
Create a System made of a series of random molecules. Parameters: molecules: list of template molecules to insert. total: total number of molecules to place. proportions: relative proportion of each molecule type (defaults to equal proportions).
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
    """Look for any references, without object type

    This always searches in "refspecific" mode
    """
    prefix = node.get('dn:prefix')
    results = []

    match = self.find_obj(env, prefix, target, None, 1)
    if match is not None:
        (name, obj) = match
        results.append(('dn:' + self.role_for_objtype(obj[1]),
                        make_refnode(builder, fromdocname, obj[0], name,
                                     contnode, name)))
    return results
Look for any references, without object type This always searches in "refspecific" mode
def PwDecrypt(self, password):
    """Unobfuscate a RADIUS password. RADIUS hides passwords in packets by
    using an algorithm based on the MD5 hash of the packet authenticator
    and RADIUS secret. This function reverses the obfuscation process.

    :param password: obfuscated form of password
    :type password: binary string
    :return: plaintext password
    :rtype: unicode string
    """
    buf = password
    pw = six.b('')

    last = self.authenticator
    while buf:
        hash = md5_constructor(self.secret + last).digest()
        if six.PY3:
            for i in range(16):
                pw += bytes((hash[i] ^ buf[i],))
        else:
            for i in range(16):
                pw += chr(ord(hash[i]) ^ ord(buf[i]))

        (last, buf) = (buf[:16], buf[16:])

    while pw.endswith(six.b('\x00')):
        pw = pw[:-1]

    return pw.decode('utf-8')
Unobfuscate a RADIUS password. RADIUS hides passwords in packets by using an algorithm based on the MD5 hash of the packet authenticator and RADIUS secret. This function reverses the obfuscation process. :param password: obfuscated form of password :type password: binary string :return: plaintext password :rtype: unicode string
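For context, here is a standalone sketch of the forward direction of this RFC 2865 obfuscation (the operation PwDecrypt reverses), using only hashlib; the function name and variables are illustrative, not part of the library:

import hashlib

def radius_obfuscate(plaintext, secret, authenticator):
    """XOR each 16-byte block with MD5(secret + previous block)."""
    buf = plaintext.encode('utf-8')
    buf += b'\x00' * (-len(buf) % 16)  # null-pad to a 16-byte boundary
    out, last = b'', authenticator
    while buf:
        digest = hashlib.md5(secret + last).digest()
        block = bytes(d ^ b for d, b in zip(digest, buf[:16]))
        out += block
        # the ciphertext block feeds the next MD5, mirroring PwDecrypt's
        # (last, buf) = (buf[:16], buf[16:]) step
        last, buf = block, buf[16:]
    return out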
def create(cls, name, ne_ref=None, operator='exclusion', sub_expression=None,
           comment=None):
    """Create the expression

    :param str name: name of expression
    :param list ne_ref: network element references for expression
    :param str operator: 'exclusion' (negation), 'union', 'intersection'
        (default: exclusion)
    :param dict sub_expression: sub expression used
    :param str comment: optional comment
    :raises CreateElementFailed: element creation failed with reason
    :return: instance with meta
    :rtype: Expression
    """
    sub_expression = [] if sub_expression is None else [sub_expression]
    json = {'name': name,
            'operator': operator,
            'ne_ref': ne_ref,
            'sub_expression': sub_expression,
            'comment': comment}

    return ElementCreator(cls, json)
Create the expression :param str name: name of expression :param list ne_ref: network element references for expression :param str operator: 'exclusion' (negation), 'union', 'intersection' (default: exclusion) :param dict sub_expression: sub expression used :param str comment: optional comment :raises CreateElementFailed: element creation failed with reason :return: instance with meta :rtype: Expression
def get_corpus(self):
    """Get the corpus.

    Return:
        corpus -- the corpus, as a str
    """
    # Forward pass
    corpus = []
    cd = 0
    tag = None
    for i in range(0, self.init_corpus[0][0]):
        init_unit = self.unit_raw[self.init_corpus[0][0] - i]
        cdm = CDM(init_unit)
        alpha = cdm.get_alpha()
        if cd <= self.cd_min and cdm.NC != 0:
            tag = True
        if cd > self.cd_max or cdm.NC == 0:
            tag = False
        if cd in range(self.cd_min + 1, self.cd_max) and cdm.NC != 0:
            if alpha > 0:
                tag = True
            else:
                tag = False
        if cdm.NC == 0:
            cd += 1
        else:
            cd = 0
        if tag is True:
            corpus.append(init_unit)
        elif tag is False:
            if alpha < 0 or cd > self.cd_max:
                break
            else:
                continue
    corpus = list(reversed(corpus))
    try:
        self.index = self.init_corpus[0][0] - i + 1
    except UnboundLocalError:
        log('err', 'forward pass finished, but index lookup failed')
        self.index = self.init_corpus[0][0]

    # Backward pass
    cd = 0
    tag = None
    for i in range(1, len(self.unit_raw) - self.init_corpus[0][0]):
        init_unit = self.unit_raw[self.init_corpus[0][0] + i]
        cdm = CDM(init_unit)
        alpha = cdm.get_alpha()
        if cd <= self.cd_min and cdm.NC != 0:
            tag = True
        if cd > self.cd_max or cdm.NC == 0:
            tag = False
        if cd in range(self.cd_min + 1, self.cd_max) and cdm.NC != 0:
            if alpha > 0:
                tag = True
            else:
                tag = False
        if cdm.NC == 0:
            cd += 1
        else:
            cd = 0
        if tag is True:
            corpus.append(init_unit)
        elif tag is False:
            if alpha < 0 or cd > self.cd_max:
                break
            else:
                continue
    log('debug', '\ncorpus extracted successfully: [{}]\n'.format(corpus))
    return ''.join(corpus)
Get the corpus. Return: corpus -- the corpus, str type
def get_working_login(self, database, username=None, password=None):
    """authenticate to the specified database starting with specified
    username/password (if present), try to return a successful login
    within 3 attempts
    """
    login_user = None

    # this will authenticate and update login user
    self.get_db(database, username=username, password=password,
                never_auth_with_admin=True)

    login_user = self.get_login_user(database)

    if login_user:
        username = login_user["username"]
        password = (login_user["password"]
                    if "password" in login_user else None)

    return username, password
authenticate to the specified database starting with specified username/password (if present), try to return a successful login within 3 attempts
def reboot_autopilot(self, hold_in_bootloader=False):
    '''reboot the autopilot'''
    if self.mavlink10():
        if hold_in_bootloader:
            param1 = 3
        else:
            param1 = 1
        self.mav.command_long_send(self.target_system, self.target_component,
                                   mavlink.MAV_CMD_PREFLIGHT_REBOOT_SHUTDOWN,
                                   0,
                                   param1, 0, 0, 0, 0, 0, 0)
        # send an old style reboot immediately afterwards in case it is an
        # older firmware that doesn't understand the new convention
        self.mav.command_long_send(self.target_system, self.target_component,
                                   mavlink.MAV_CMD_PREFLIGHT_REBOOT_SHUTDOWN,
                                   0, 1, 0, 0, 0, 0, 0, 0)
reboot the autopilot
def create_session(docker_image=None,
                   docker_rm=None,
                   echo=False,
                   loglevel='WARNING',
                   nocolor=False,
                   session_type='bash',
                   vagrant_session_name=None,
                   vagrant_image='ubuntu/xenial64',
                   vagrant_gui=False,
                   vagrant_memory='1024',
                   vagrant_num_machines='1',
                   vagrant_provider='virtualbox',
                   vagrant_root_folder=None,
                   vagrant_swapsize='2G',
                   vagrant_version='1.8.6',
                   vagrant_virt_method='virtualbox',
                   vagrant_cpu='1',
                   video=-1,
                   walkthrough=False):
    """Creates a distinct ShutIt session. Sessions can be of type:

    bash    - a bash shell is spawned and
    vagrant - a Vagrantfile is created and 'vagrant up'ped
    """
    assert session_type in ('bash', 'docker', 'vagrant'), shutit_util.print_debug()
    shutit_global_object = shutit_global.shutit_global_object
    if video != -1 and video > 0:
        walkthrough = True
    if session_type in ('bash', 'docker'):
        return shutit_global_object.create_session(session_type,
                                                   docker_image=docker_image,
                                                   rm=docker_rm,
                                                   echo=echo,
                                                   walkthrough=walkthrough,
                                                   walkthrough_wait=video,
                                                   nocolor=nocolor,
                                                   loglevel=loglevel)
    elif session_type == 'vagrant':
        if vagrant_session_name is None:
            vagrant_session_name = 'shutit' + shutit_util.random_id()
        if isinstance(vagrant_num_machines, int):
            vagrant_num_machines = str(vagrant_num_machines)
        assert isinstance(vagrant_num_machines, str)
        assert isinstance(int(vagrant_num_machines), int)
        if vagrant_root_folder is None:
            vagrant_root_folder = shutit_global.shutit_global_object.owd
        return create_session_vagrant(vagrant_session_name,
                                      vagrant_num_machines,
                                      vagrant_image,
                                      vagrant_provider,
                                      vagrant_gui,
                                      vagrant_memory,
                                      vagrant_swapsize,
                                      echo,
                                      walkthrough,
                                      nocolor,
                                      video,
                                      vagrant_version,
                                      vagrant_virt_method,
                                      vagrant_root_folder,
                                      vagrant_cpu,
                                      loglevel)
Creates a distinct ShutIt session. Sessions can be of type: bash - a bash shell is spawned and vagrant - a Vagrantfile is created and 'vagrant up'ped
def _ip_int_from_string(self, ip_str):
    """Turn an IPv6 ip_str into an integer.

    Args:
        ip_str: A string, the IPv6 ip_str.

    Returns:
        A long, the IPv6 ip_str.

    Raises:
        AddressValueError: if ip_str isn't a valid IPv6 Address.
    """
    parts = ip_str.split(':')

    # An IPv6 address needs at least 2 colons (3 parts).
    if len(parts) < 3:
        raise AddressValueError(ip_str)

    # If the address has an IPv4-style suffix, convert it to hexadecimal.
    if '.' in parts[-1]:
        ipv4_int = IPv4Address(parts.pop())._ip
        parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
        parts.append('%x' % (ipv4_int & 0xFFFF))

    # An IPv6 address can't have more than 8 colons (9 parts).
    if len(parts) > self._HEXTET_COUNT + 1:
        raise AddressValueError(ip_str)

    # Disregarding the endpoints, find '::' with nothing in between.
    # This indicates that a run of zeroes has been skipped.
    try:
        skip_index, = (
            [i for i in xrange(1, len(parts) - 1) if not parts[i]] or
            [None])
    except ValueError:
        # Can't have more than one '::'
        raise AddressValueError(ip_str)

    # parts_hi is the number of parts to copy from above/before the '::'
    # parts_lo is the number of parts to copy from below/after the '::'
    if skip_index is not None:
        # If we found a '::', then check if it also covers the endpoints.
        parts_hi = skip_index
        parts_lo = len(parts) - skip_index - 1
        if not parts[0]:
            parts_hi -= 1
            if parts_hi:
                raise AddressValueError(ip_str)  # ^: requires ^::
        if not parts[-1]:
            parts_lo -= 1
            if parts_lo:
                raise AddressValueError(ip_str)  # :$ requires ::$
        parts_skipped = self._HEXTET_COUNT - (parts_hi + parts_lo)
        if parts_skipped < 1:
            raise AddressValueError(ip_str)
    else:
        # Otherwise, allocate the entire address to parts_hi.  The endpoints
        # could still be empty, but _parse_hextet() will check for that.
        if len(parts) != self._HEXTET_COUNT:
            raise AddressValueError(ip_str)
        parts_hi = len(parts)
        parts_lo = 0
        parts_skipped = 0

    try:
        # Now, parse the hextets into a 128-bit integer.
        ip_int = 0L
        for i in xrange(parts_hi):
            ip_int <<= 16
            ip_int |= self._parse_hextet(parts[i])
        ip_int <<= 16 * parts_skipped
        for i in xrange(-parts_lo, 0):
            ip_int <<= 16
            ip_int |= self._parse_hextet(parts[i])
        return ip_int
    except ValueError:
        raise AddressValueError(ip_str)
Turn an IPv6 ip_str into an integer. Args: ip_str: A string, the IPv6 ip_str. Returns: A long, the IPv6 ip_str. Raises: AddressValueError: if ip_str isn't a valid IPv6 Address.
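The same parsing rules can be sanity-checked against the Python 3 standard library's ipaddress module (the snippet above is Python 2-era code from the ipaddr lineage):

import ipaddress

# '::' expands to the missing zero hextets; an IPv4-style suffix
# fills the low 32 bits.
print(hex(int(ipaddress.IPv6Address('2001:db8::1'))))
# 0x20010db8000000000000000000000001
print(hex(int(ipaddress.IPv6Address('::ffff:192.0.2.1'))))
# 0xffffc0000201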
def write_graph(self, filename):
    """Write raw graph data which can be post-processed using graphviz."""
    with open(filename, 'w') as f:
        f.write(self._get_graphviz_data())
Write raw graph data which can be post-processed using graphviz.
def to_timedelta(value, strict=True):
    """
    converts duration string to timedelta

    strict=True (by default) raises StrictnessError if either hours,
    minutes or seconds in duration string exceed allowed values
    """
    if isinstance(value, int):
        return timedelta(seconds=value)  # assuming it's seconds
    elif isinstance(value, timedelta):
        return value
    elif isinstance(value, str):
        hours, minutes, seconds = _parse(value, strict)
    elif isinstance(value, tuple):
        check_tuple(value, strict)
        hours, minutes, seconds = value
    else:
        raise TypeError(
            'Value %s (type %s) not supported' % (
                value, type(value).__name__
            )
        )
    return timedelta(hours=hours, minutes=minutes, seconds=seconds)
converts duration string to timedelta strict=True (by default) raises StrictnessError if either hours, minutes or seconds in duration string exceed allowed values
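A usage sketch; the exact duration-string format is defined by the _parse helper, which is not shown, so the string example assumes an 'HH:MM:SS'-style layout:

from datetime import timedelta

to_timedelta(90)                    # timedelta(seconds=90)
to_timedelta(timedelta(minutes=5))  # returned unchanged
to_timedelta((1, 30, 0))            # timedelta(hours=1, minutes=30)
to_timedelta('01:30:00')            # assumed format; parsed by _parse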
def acceptNavigationRequest(self, url, navigation_type, isMainFrame):
    """Overloaded method to handle links ourselves"""
    if navigation_type == QWebEnginePage.NavigationTypeLinkClicked:
        self.linkClicked.emit(url)
        return False
    return True
Overloaded method to handle links ourselves
def serialize(self, o):
    '''Returns a safe serializable object that can be serialized into JSON.

    @param o Python object to serialize
    '''
    if isinstance(o, (list, tuple)):
        return [self.serialize(i) for i in o]
    if isinstance(o, dict):
        return {k: self.serialize(v) for k, v in o.items()}
    if isinstance(o, datetime):
        return o.isoformat()
    if isinstance(o, Result):
        return self.serialize(o.serialize())
    return o
Returns a safe serializable object that can be serialized into JSON. @param o Python object to serialize
def timeout(timeout_time, default):
    '''Decorate a method so it is required to execute in a given time
    period, or return a default value.
    '''
    def timeout_function(f):
        def f2(*args):
            def timeout_handler(signum, frame):
                raise MethodTimer.DecoratorTimeout()

            old_handler = signal.signal(signal.SIGALRM, timeout_handler)
            # trigger alarm in timeout_time seconds
            signal.alarm(timeout_time)
            try:
                retval = f(*args)
            except MethodTimer.DecoratorTimeout:
                return default
            finally:
                signal.signal(signal.SIGALRM, old_handler)
            signal.alarm(0)
            return retval
        return f2
    return timeout_function
Decorate a method so it is required to execute in a given time period, or return a default value.
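A usage sketch; this relies on signal.SIGALRM, so it is Unix-only, and assumes MethodTimer.DecoratorTimeout is the sentinel exception referenced above:

import time

@timeout(2, default='timed out')
def slow_call():
    time.sleep(5)
    return 'done'

print(slow_call())  # 'timed out' after roughly 2 seconds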
def load_secret(self, secret):
    """Ask YubiHSM to load a pre-existing YubiKey secret.

    The data is stored internally in the YubiHSM in temporary memory -
    this operation would typically be followed by one or more
    L{generate_aead} commands to actually retrieve the generated secret
    (in encrypted form).

    @param secret: YubiKey secret to load
    @type secret: L{pyhsm.aead_cmd.YHSM_YubiKeySecret} or string

    @returns: Number of bytes in YubiHSM internal buffer after load
    @rtype: integer

    @see: L{pyhsm.buffer_cmd.YHSM_Cmd_Buffer_Load}
    """
    if isinstance(secret, pyhsm.aead_cmd.YHSM_YubiKeySecret):
        secret = secret.pack()
    return pyhsm.buffer_cmd.YHSM_Cmd_Buffer_Load(self.stick, secret).execute()
Ask YubiHSM to load a pre-existing YubiKey secret. The data is stored internally in the YubiHSM in temporary memory - this operation would typically be followed by one or more L{generate_aead} commands to actually retrieve the generated secret (in encrypted form). @param secret: YubiKey secret to load @type secret: L{pyhsm.aead_cmd.YHSM_YubiKeySecret} or string @returns: Number of bytes in YubiHSM internal buffer after load @rtype: integer @see: L{pyhsm.buffer_cmd.YHSM_Cmd_Buffer_Load}
def output_file_job(job, filename, file_id, output_dir, s3_key_path=None):
    """Uploads a file from the FileStore to an output directory on the
    local filesystem or S3.

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str filename: basename for file
    :param str file_id: FileStoreID
    :param str output_dir: Amazon S3 URL or local path
    :param str s3_key_path: (OPTIONAL) Path to 32-byte key to be used for
        SSE-C encryption
    :return:
    """
    job.fileStore.logToMaster('Writing {} to {}'.format(filename, output_dir))
    work_dir = job.fileStore.getLocalTempDir()
    filepath = job.fileStore.readGlobalFile(
        file_id, os.path.join(work_dir, filename))
    if urlparse(output_dir).scheme == 's3':
        s3am_upload(job=job, fpath=os.path.join(work_dir, filepath),
                    s3_dir=output_dir, s3_key_path=s3_key_path)
    elif os.path.exists(os.path.join(output_dir, filename)):
        job.fileStore.logToMaster("File already exists: {}".format(filename))
    else:
        mkdir_p(output_dir)
        copy_files([filepath], output_dir)
Uploads a file from the FileStore to an output directory on the local filesystem or S3. :param JobFunctionWrappingJob job: passed automatically by Toil :param str filename: basename for file :param str file_id: FileStoreID :param str output_dir: Amazon S3 URL or local path :param str s3_key_path: (OPTIONAL) Path to 32-byte key to be used for SSE-C encryption :return:
def get_fit_failed_candidate_model(model_type, formula):
    """Return a Candidate model that indicates the fitting routine failed.

    Parameters
    ----------
    model_type : :any:`str`
        Model type (e.g., ``'cdd_hdd'``).
    formula : :any:`str`
        The candidate model formula.

    Returns
    -------
    candidate_model : :any:`eemeter.CalTRACKUsagePerDayCandidateModel`
        Candidate model instance with status ``'ERROR'``, and warning with
        traceback.
    """
    warnings = [
        EEMeterWarning(
            qualified_name="eemeter.caltrack_daily.{}.model_results".format(
                model_type),
            description=(
                "Error encountered in statsmodels.formula.api.ols method. "
                "(Empty data?)"
            ),
            data={"traceback": traceback.format_exc()},
        )
    ]
    return CalTRACKUsagePerDayCandidateModel(
        model_type=model_type, formula=formula, status="ERROR",
        warnings=warnings
    )
Return a Candidate model that indicates the fitting routine failed. Parameters ---------- model_type : :any:`str` Model type (e.g., ``'cdd_hdd'``). formula : :any:`str` The candidate model formula. Returns ------- candidate_model : :any:`eemeter.CalTRACKUsagePerDayCandidateModel` Candidate model instance with status ``'ERROR'``, and warning with traceback.
def fetch_all_records(self):
    r"""Returns a generator that yields all of the DNS records for the
    domain

    :rtype: generator of `DomainRecord`\ s
    :raises DOAPIError: if the API endpoint replies with an error
    """
    api = self.doapi_manager
    return map(self._record, api.paginate(self.record_url, 'domain_records'))
r""" Returns a generator that yields all of the DNS records for the domain :rtype: generator of `DomainRecord`\ s :raises DOAPIError: if the API endpoint replies with an error
def _initialize(self, funs_to_tally, length):
    """Create a group named ``chain#`` to store all data for this chain."""
    chain = self.nchains
    self._chains[chain] = self._h5file.create_group(
        '/', 'chain%d' % chain, 'chain #%d' % chain)

    for name, fun in six.iteritems(funs_to_tally):
        arr = np.asarray(fun())
        assert arr.dtype != np.dtype('object')
        array = self._h5file.createEArray(
            self._chains[chain], name,
            tables.Atom.from_dtype(arr.dtype), (0,) + arr.shape,
            filters=self.filter)
        self._arrays[chain, name] = array
        self._traces[name] = Trace(name, getfunc=fun, db=self)
        self._traces[name]._initialize(self.chains, length)

    self.trace_names.append(list(funs_to_tally.keys()))
Create a group named ``chain#`` to store all data for this chain.
def set_type(self, value):
    """Setter for type attribute"""
    if value not in self.types_available:
        log = "Sources field 'type' should be in one of %s" % (
            self.types_available
        )
        raise MalFormattedSource(log)
    self._type = value
Setter for type attribute
def isdir(self, path):
    """Return `True` if directory at `path` exists, False otherwise."""
    try:
        self.remote_context.check_output(["test", "-d", path])
    except subprocess.CalledProcessError as e:
        if e.returncode == 1:
            return False
        else:
            raise
    return True
Return `True` if directory at `path` exists, False otherwise.
def get_best_local_timezone():
    """Compares local timezone offset to pytz's timezone db, to determine
    a matching timezone name to use when TIME_ZONE is not set.
    """
    zone_name = tzlocal.get_localzone().zone
    if zone_name in pytz.all_timezones:
        return zone_name
    if time.daylight:
        local_offset = time.altzone
        localtz = time.tzname[1]
    else:
        local_offset = time.timezone
        localtz = time.tzname[0]
    local_offset = datetime.timedelta(seconds=-local_offset)
    for zone_name in pytz.all_timezones:
        timezone = pytz.timezone(zone_name)
        if not hasattr(timezone, '_tzinfos'):
            continue
        for utcoffset, daylight, tzname in timezone._tzinfos:
            if utcoffset == local_offset and tzname == localtz:
                return zone_name
Compares local timezone offset to pytz's timezone db, to determine a matching timezone name to use when TIME_ZONE is not set.
def data_objet_class(data_mode='value', time_mode='framewise'):
    """Factory function for Analyzer result"""
    classes_table = {('value', 'global'): GlobalValueObject,
                     ('value', 'event'): EventValueObject,
                     ('value', 'segment'): SegmentValueObject,
                     ('value', 'framewise'): FrameValueObject,
                     ('label', 'global'): GlobalLabelObject,
                     ('label', 'event'): EventLabelObject,
                     ('label', 'segment'): SegmentLabelObject,
                     ('label', 'framewise'): FrameLabelObject}
    try:
        return classes_table[(data_mode, time_mode)]
    except KeyError:
        raise ValueError('Wrong arguments')
Factory function for Analyzer result
def unitary(self, obj, qubits, label=None):
    """Apply a unitary gate to q."""
    if isinstance(qubits, QuantumRegister):
        qubits = qubits[:]
    return self.append(UnitaryGate(obj, label=label), qubits, [])
Apply a unitary gate to q.
def _handle_dbproc_call(self, parts, parameters_metadata):
    """Handle reply messages from STORED PROCEDURE statements"""
    for part in parts:
        if part.kind == part_kinds.ROWSAFFECTED:
            self.rowcount = part.values[0]
        elif part.kind == part_kinds.TRANSACTIONFLAGS:
            pass
        elif part.kind == part_kinds.STATEMENTCONTEXT:
            pass
        elif part.kind == part_kinds.OUTPUTPARAMETERS:
            self._buffer = part.unpack_rows(parameters_metadata,
                                            self.connection)
            self._received_last_resultset_part = True
            self._executed = True
        elif part.kind == part_kinds.RESULTSETMETADATA:
            self.description, self._column_types = \
                self._handle_result_metadata(part)
        elif part.kind == part_kinds.RESULTSETID:
            self._resultset_id = part.value
        elif part.kind == part_kinds.RESULTSET:
            self._buffer = part.unpack_rows(self._column_types,
                                            self.connection)
            self._received_last_resultset_part = part.attribute & 1
            self._executed = True
        else:
            raise InterfaceError(
                "Stored procedure call, unexpected part kind %d." % part.kind)
    self._executed = True
Handle reply messages from STORED PROCEDURE statements
def get_scales(self, aesthetic):
    """
    Return the scale for the aesthetic or None if there isn't one.

    These are the scales specified by the user e.g
    `ggplot() + scale_x_continuous()`
    or those added by default during the plot building process
    """
    bool_lst = self.find(aesthetic)
    try:
        idx = bool_lst.index(True)
        return self[idx]
    except ValueError:
        return None
Return the scale for the aesthetic or None if there isn't one. These are the scales specified by the user e.g `ggplot() + scale_x_continuous()` or those added by default during the plot building process
def javadoc_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """Role for linking to external Javadoc"""
    has_explicit_title, title, target = split_explicit_title(text)
    title = utils.unescape(title)
    target = utils.unescape(target)

    if not has_explicit_title:
        target = target.lstrip('~')
        if title[0] == '~':
            title = title[1:].rpartition('.')[2]
    app = inliner.document.settings.env.app
    ref = get_javadoc_ref(app, rawtext, target)
    if not ref:
        raise ValueError("no Javadoc source found for %s in javadoc_url_map"
                         % (target,))
    ref.append(nodes.Text(title, title))
    return [ref], []
Role for linking to external Javadoc
def loadRule(rule_json_object):
    """Method to load the rules - when adding a new rule it must be added
    to the if statement within this method.
    """
    name = rule_json_object['name']
    rule_type = rule_json_object['rule_type']

    validation_regex = None
    required = False
    removehtml = False
    include_end_regex = False  # Default to false for backward compatibility
    strip_end_regex = None
    sub_rules = []
    begin_stripe_id = None
    end_stripe_id = None
    begin_shift = 0
    end_shift = 0

    if 'sub_rules' in rule_json_object:
        sub_rules = rule_json_object['sub_rules']
    if 'validation_regex' in rule_json_object:
        validation_regex = rule_json_object['validation_regex']
    if 'required' in rule_json_object:
        required = rule_json_object['required']
    if 'removehtml' in rule_json_object:
        removehtml = rule_json_object['removehtml']
    if 'include_end_regex' in rule_json_object:
        include_end_regex = rule_json_object['include_end_regex']
    if 'strip_end_regex' in rule_json_object:
        strip_end_regex = rule_json_object['strip_end_regex']
    if 'begin_stripe_id' in rule_json_object:
        begin_stripe_id = rule_json_object['begin_stripe_id']
    if 'end_stripe_id' in rule_json_object:
        end_stripe_id = rule_json_object['end_stripe_id']
    if 'begin_shift' in rule_json_object:
        begin_shift = rule_json_object['begin_shift']
    if 'end_shift' in rule_json_object:
        end_shift = rule_json_object['end_shift']

    rule = {}
    """ This is where we add our new type """
    if rule_type == ITEM_RULE or rule_type == 'RegexRule':
        begin_regex = rule_json_object['begin_regex']
        end_regex = rule_json_object['end_regex']
        rule = ItemRule(name, begin_regex, end_regex, include_end_regex,
                        strip_end_regex, validation_regex, required,
                        removehtml, sub_rules, begin_stripe_id,
                        end_stripe_id, begin_shift, end_shift)
    if rule_type == ITERATION_RULE or rule_type == 'RegexIterationRule':
        begin_regex = rule_json_object['begin_regex']
        end_regex = rule_json_object['end_regex']
        iter_begin_regex = rule_json_object['iter_begin_regex']
        iter_end_regex = rule_json_object['iter_end_regex']
        no_first_begin_iter_rule = False
        if 'no_first_begin_iter_rule' in rule_json_object:
            no_first_begin_iter_rule = rule_json_object['no_first_begin_iter_rule']
        no_last_end_iter_rule = False
        if 'no_last_end_iter_rule' in rule_json_object:
            no_last_end_iter_rule = rule_json_object['no_last_end_iter_rule']
        rule = IterationRule(name, begin_regex, end_regex, iter_begin_regex,
                             iter_end_regex, include_end_regex,
                             strip_end_regex, no_first_begin_iter_rule,
                             no_last_end_iter_rule, validation_regex,
                             required, removehtml, sub_rules,
                             begin_shift=begin_shift, end_shift=end_shift)

    if 'id' in rule_json_object:
        rule.id = rule_json_object['id']

    return rule
Method to load the rules - when adding a new rule it must be added to the if statement within this method.
def best_model(seq2hmm):
    """determine the best model: archaea, bacteria, eukarya (best score)"""
    for seq in seq2hmm:
        best = []
        for model in seq2hmm[seq]:
            best.append([model, sorted([i[-1] for i in seq2hmm[seq][model]],
                                       reverse=True)[0]])
        best_model = sorted(best, key=itemgetter(1), reverse=True)[0][0]
        seq2hmm[seq] = [best_model] + [seq2hmm[seq][best_model]]
    return seq2hmm
determine the best model: archaea, bacteria, eukarya (best score)
def get_used_entities(self, use_specs):
    """Returns the entities which are imported by a use statement. These
    are contained in dicts.
    """
    if len(use_specs.strip()) == 0:
        return (self.pub_procs, self.pub_absints, self.pub_types,
                self.pub_vars)

    only = bool(self.ONLY_RE.match(use_specs))
    use_specs = self.ONLY_RE.sub('', use_specs)
    ulist = self.SPLIT_RE.split(use_specs)
    ulist[-1] = ulist[-1].strip()
    uspecs = {}
    for item in ulist:
        match = self.RENAME_RE.search(item)
        if match:
            uspecs[match.group(1).lower()] = match.group(2)
        else:
            uspecs[item.lower()] = item

    ret_procs = {}
    ret_absints = {}
    ret_types = {}
    ret_vars = {}

    for name, obj in self.pub_procs.items():
        name = name.lower()
        if only:
            if name in uspecs:
                ret_procs[name] = obj
        else:
            ret_procs[name] = obj
    for name, obj in self.pub_absints.items():
        name = name.lower()
        if only:
            if name in uspecs:
                ret_absints[name] = obj
        else:
            ret_absints[name] = obj
    for name, obj in self.pub_types.items():
        name = name.lower()
        if only:
            if name in uspecs:
                ret_types[name] = obj
        else:
            ret_types[name] = obj
    for name, obj in self.pub_vars.items():
        name = name.lower()
        if only:
            if name in uspecs:
                ret_vars[name] = obj
        else:
            ret_vars[name] = obj

    return (ret_procs, ret_absints, ret_types, ret_vars)
Returns the entities which are imported by a use statement. These are contained in dicts.
def reset_object(self, driver_wrapper=None):
    """Reset each page element object

    :param driver_wrapper: driver wrapper instance
    """
    from toolium.pageelements.page_elements import PageElements
    if driver_wrapper:
        self.driver_wrapper = driver_wrapper
    self._web_element = None
    for element in self._get_page_elements():
        element.reset_object(driver_wrapper)
        if isinstance(element, (PageElement, PageElements)):
            # If element is not a page object, update element parent
            element.parent = self
Reset each page element object :param driver_wrapper: driver wrapper instance
def plot_h(data, cols, wspace=.1, plot_kw=None, **kwargs):
    """
    Plot horizontally

    Args:
        data: DataFrame of data
        cols: columns to be plotted
        wspace: spacing between plots
        plot_kw: kwargs for each plot
        **kwargs: kwargs for the whole plot

    Returns:
        axes for plots

    Examples:
        >>> import pandas as pd
        >>> import numpy as np
        >>>
        >>> idx = range(5)
        >>> data = pd.DataFrame(dict(a=np.exp(idx), b=idx), index=idx)
        >>> # plot_h(data=data, cols=['a', 'b'], wspace=.2,
        >>> #        plot_kw=[dict(style='.-'), dict()])
    """
    import matplotlib.pyplot as plt

    if plot_kw is None:
        plot_kw = [dict()] * len(cols)
    _, axes = plt.subplots(nrows=1, ncols=len(cols), **kwargs)
    plt.subplots_adjust(wspace=wspace)
    for n, col in enumerate(cols):
        data.loc[:, col].plot(ax=axes[n], **plot_kw[n])

    return axes
Plot horizontally Args: data: DataFrame of data cols: columns to be plotted wspace: spacing between plots plot_kw: kwargs for each plot **kwargs: kwargs for the whole plot Returns: axes for plots Examples: >>> import pandas as pd >>> import numpy as np >>> >>> idx = range(5) >>> data = pd.DataFrame(dict(a=np.exp(idx), b=idx), index=idx) >>> # plot_h(data=data, cols=['a', 'b'], wspace=.2, plot_kw=[dict(style='.-'), dict()])
def _compute_nfp_uniform(l, u, cum_counts, sizes):
    """Computes the expected number of false positives caused by using
    u to approximate set sizes in the interval [l, u], assuming uniform
    distribution of set sizes within the interval.

    Args:
        l: the lower bound on set sizes.
        u: the upper bound on set sizes.
        cum_counts: the complete cumulative distribution of set sizes.
        sizes: the complete domain of set sizes.

    Return (float): the expected number of false positives.
    """
    if l > u:
        raise ValueError("l must be less or equal to u")
    if l == 0:
        n = cum_counts[u]
    else:
        n = cum_counts[u] - cum_counts[l - 1]
    return n * float(sizes[u] - sizes[l]) / float(2 * sizes[u])
Computes the expected number of false positives caused by using u to approximate set sizes in the interval [l, u], assuming uniform distribution of set sizes within the interval. Args: l: the lower bound on set sizes. u: the upper bound on set sizes. cum_counts: the complete cumulative distribution of set sizes. sizes: the complete domain of set sizes. Return (float): the expected number of false positives.
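A small worked example with invented numbers: suppose sizes = [10, 20, 30] and cum_counts = [4, 9, 14], and we approximate the interval [l, u] = [0, 2] by its upper bound:

sizes = [10, 20, 30]
cum_counts = [4, 9, 14]

# n = cum_counts[2] = 14 sets fall in the interval; the expected
# false-positive count is n * (sizes[u] - sizes[l]) / (2 * sizes[u])
# = 14 * (30 - 10) / 60.
print(_compute_nfp_uniform(0, 2, cum_counts, sizes))  # 4.666...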
def disallow(nodes):
    """Decorator to disallow certain nodes from parsing. Raises a
    NotImplementedError instead.

    Returns
    -------
    disallowed : callable
    """
    def disallowed(cls):
        cls.unsupported_nodes = ()
        for node in nodes:
            new_method = _node_not_implemented(node, cls)
            name = 'visit_{node}'.format(node=node)
            cls.unsupported_nodes += (name,)
            setattr(cls, name, new_method)
        return cls
    return disallowed
Decorator to disallow certain nodes from parsing. Raises a NotImplementedError instead. Returns ------- disallowed : callable
def is_email_simple(value):
    """Return True if value looks like an email address."""
    # An @ must be in the middle of the value.
    if '@' not in value or value.startswith('@') or value.endswith('@'):
        return False
    try:
        p1, p2 = value.split('@')
    except ValueError:
        # value contains more than one @.
        return False
    # Dot must be in p2 (e.g. example.com)
    if '.' not in p2 or p2.startswith('.'):
        return False
    return True
Return True if value looks like an email address.
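A few illustrative calls, following directly from the checks above:

print(is_email_simple('user@example.com'))  # True
print(is_email_simple('@example.com'))      # False: leading @
print(is_email_simple('a@b@c.com'))         # False: more than one @
print(is_email_simple('user@.com'))         # False: domain starts with a dot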
def right_join_where(self, table, one, operator, two):
    """
    Add a "right join where" clause to the query

    :param table: The table to join with, can also be a JoinClause instance
    :type table: str or JoinClause

    :param one: The first column of the join condition
    :type one: str

    :param operator: The operator of the join condition
    :type operator: str

    :param two: The second column of the join condition
    :type two: str

    :return: The current QueryBuilder instance
    :rtype: QueryBuilder
    """
    return self.join_where(table, one, operator, two, "right")
Add a "right join where" clause to the query :param table: The table to join with, can also be a JoinClause instance :type table: str or JoinClause :param one: The first column of the join condition :type one: str :param operator: The operator of the join condition :type operator: str :param two: The second column of the join condition :type two: str :return: The current QueryBuilder instance :rtype: QueryBuilder
def get_rt_data(self, code):
    """Get real-time (intraday minute) data for the specified stock.

    :param code: stock code, e.g. HK.00700, US.APPL
    :return: (ret, data)

        ret == RET_OK: data is a pd.DataFrame with the columns below
        ret != RET_OK: data is an error string

        ===========  =====  ==================================================
        field        type   description
        ===========  =====  ==================================================
        code         str    stock code
        time         str    time (yyyy-MM-dd HH:mm:ss) (US stocks default to
                            US Eastern time; HK and A-share stocks default to
                            Beijing time)
        is_blank     bool   data status; False for normal data, True for
                            synthesized placeholder data
        opened_mins  int    minutes since midnight
        cur_price    float  current price
        last_close   float  previous close price
        avg_price    float  average price
        volume       float  trade volume
        turnover     float  turnover amount
        ===========  =====  ==================================================
    """
    if code is None or is_str(code) is False:
        error_str = ERROR_STR_PREFIX + "the type of param in code is wrong"
        return RET_ERROR, error_str

    query_processor = self._get_sync_query_processor(
        RtDataQuery.pack_req, RtDataQuery.unpack_rsp)

    kargs = {
        "code": code,
        "conn_id": self.get_sync_conn_id()
    }

    ret_code, msg, rt_data_list = query_processor(**kargs)
    if ret_code == RET_ERROR:
        return ret_code, msg

    for x in rt_data_list:
        x['code'] = code

    col_list = [
        'code', 'time', 'is_blank', 'opened_mins', 'cur_price',
        'last_close', 'avg_price', 'volume', 'turnover'
    ]
    rt_data_table = pd.DataFrame(rt_data_list, columns=col_list)

    return RET_OK, rt_data_table
Get real-time (intraday minute) data for the specified stock. :param code: stock code, e.g. HK.00700, US.APPL :return: (ret, data) ret == RET_OK: data is a pd.DataFrame with these columns; ret != RET_OK: data is an error string. Columns: code (str, stock code); time (str, yyyy-MM-dd HH:mm:ss; US stocks default to US Eastern time, HK and A-shares to Beijing time); is_blank (bool, False for normal data, True for synthesized placeholder data); opened_mins (int, minutes since midnight); cur_price (float, current price); last_close (float, previous close); avg_price (float, average price); volume (float, trade volume); turnover (float, turnover amount).
def scale_cb(self, setting, value):
    """Handle callback related to image scaling."""
    zoomlevel = self.zoom.calc_level(value)
    self.t_.set(zoomlevel=zoomlevel)
    self.redraw(whence=0)
Handle callback related to image scaling.
def lnprior(x):
    """Return the log prior given parameter vector `x`."""
    per, t0, b = x
    if b < -1 or b > 1:
        return -np.inf
    elif per < 7 or per > 10:
        return -np.inf
    elif t0 < 1978 or t0 > 1979:
        return -np.inf
    else:
        return 0.
Return the log prior given parameter vector `x`.
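A quick sanity check of this box prior (parameter order is per, t0, b): inside the bounds the log prior is 0, outside it is -inf:

import numpy as np

print(lnprior([8.5, 1978.5, 0.0]))  # 0.0   - all parameters in range
print(lnprior([8.5, 1978.5, 1.5]))  # -inf  - b is outside [-1, 1]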
def getNodeRefs(self):
    '''
    Return a list of (prop, (form, valu)) refs out for the node.
    '''
    retn = []

    for name, valu in self.props.items():
        pobj = self.form.props.get(name)

        if isinstance(pobj.type, s_types.Ndef):
            retn.append((name, valu))
            continue

        if self.snap.model.forms.get(pobj.type.name) is None:
            continue

        ndef = (pobj.type.name, valu)
        if ndef == self.ndef:
            continue

        retn.append((name, ndef))

    return retn
Return a list of (prop, (form, valu)) refs out for the node.
def addSubsumableToGroups(self, proteinIds, groupIds):
    """Add one or multiple subsumable proteins to one or multiple protein
    groups.

    :param proteinIds: a proteinId or a list of proteinIds, a proteinId
        must be a string.
    :param groupIds: a groupId or a list of groupIds, a groupId must be a
        string.
    """
    for groupId in AUX.toList(groupIds):
        self.groups[groupId].addSubsumableProteins(proteinIds)
        self._addProteinIdsToGroupMapping(proteinIds, groupId)
Add one or multiple subsumable proteins to one or multiple protein groups. :param proteinIds: a proteinId or a list of proteinIds, a proteinId must be a string. :param groupIds: a groupId or a list of groupIds, a groupId must be a string.
def _pop_api_call(self, method, _url, kwargs):
    """This will initialize an api_call or pop one that has already been
    initialized with the endpoint parameters.

    :param method: str of the HTTP method ['GET', 'POST', 'PUT', 'DELETE']
    :param _url: str of the sub url of the api call (ex. g/device/list)
    :param kwargs: dict of additional arguments
    :return: ApiCall
    """
    call_queue = self._call_queue.setdefault(self._get_thread_id(), [])
    if not call_queue:
        # this will add the api_call to the call_queue
        self._pre_process_call(name='%s.%s' % (_url, method),
                               endpoint_params=kwargs)
    return call_queue.pop()
This will initialize an api_call or pop one that has already been initialized with the endpoint parameters :param method: str of the HTTP method ['GET','POST','PUT','DELETE'] :param _url: str of the sub url of the api call (ex. g/device/list) :param kwargs: dict of additional arguments :return: ApiCall
def parent_ids(self):
    """Returns an array of parent Biosample IDs. If the current Biosample
    has a part_of relationship, the Biosample referenced there will be
    returned. Otherwise, if the current Biosample was generated from a
    pool of Biosamples (pooled_from_biosample_ids), then those will be
    returned. Otherwise, the result will be an empty array.
    """
    action = os.path.join(self.record_url, "parent_ids")
    res = requests.get(url=action, headers=HEADERS, verify=False)
    res.raise_for_status()
    return res.json()["biosamples"]
Returns an array of parent Biosample IDs. If the current Biosample has a part_of relationship, the Biosample referenced there will be returned. Otherwise, if the current Biosample was generated from a pool of Biosamples (pooled_from_biosample_ids), then those will be returned. Otherwise, the result will be an empty array.
def to_bytes(s, encoding="utf-8"):
    """Converts the string to a bytes type, if not already.

    :s: the string to convert to bytes
    :returns: `str` on Python2 and `bytes` on Python3.
    """
    if isinstance(s, six.binary_type):
        return s
    else:
        return six.text_type(s).encode(encoding)
Converts the string to a bytes type, if not already. :s: the string to convert to bytes :returns: `str` on Python2 and `bytes` on Python3.
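Behavior sketch on Python 3, where six.binary_type is bytes and six.text_type is str:

print(to_bytes('héllo'))           # b'h\xc3\xa9llo' - str encoded as UTF-8
print(to_bytes(b'already-bytes'))  # returned unchanged
print(to_bytes(42))                # b'42' - non-strings go through str() first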
def make_stats(data, perfile, fsamplehits, fbarhits, fmisses, fdbars):
    """Write stats and stores to Assembly object."""
    ## out file
    outhandle = os.path.join(data.dirs.fastqs, 's1_demultiplex_stats.txt')
    outfile = open(outhandle, 'w')

    ## write the header for file stats ------------------------------------
    outfile.write('{:<35} {:>13}{:>13}{:>13}\n'.
                  format("raw_file", "total_reads", "cut_found", "bar_matched"))

    ## write the file stats
    r1names = sorted(perfile)
    for fname in r1names:
        dat = perfile[fname]
        #dat = [perfile[fname][i] for i in ["ftotal", "fcutfound", "fmatched"]]
        outfile.write('{:<35} {:>13}{:>13}{:>13}\n'.
                      format(fname, dat[0], dat[1], dat[2]))
        ## repeat for pairfile
        if 'pair' in data.paramsdict["datatype"]:
            fname = fname.replace("_R1_", "_R2_")
            outfile.write('{:<35} {:>13}{:>13}{:>13}\n'.
                          format(fname, dat[0], dat[1], dat[2]))

    ## spacer, how many records for each sample --------------------------
    outfile.write('\n{:<35} {:>13}\n'.format("sample_name", "total_reads"))

    ## names alphabetical. Write to file. Will save again below to Samples.
    snames = set()
    for sname in data.barcodes:
        if "-technical-replicate-" in sname:
            sname = sname.rsplit("-technical-replicate", 1)[0]
        snames.add(sname)

    for sname in sorted(list(snames)):
        outfile.write("{:<35} {:>13}\n".format(sname, fsamplehits[sname]))

    ## spacer, which barcodes were found -----------------------------------
    outfile.write('\n{:<35} {:>13} {:>13} {:>13}\n'.
                  format("sample_name", "true_bar", "obs_bar", "N_records"))

    ## write sample results
    for sname in sorted(data.barcodes):
        if "-technical-replicate-" in sname:
            fname = sname.rsplit("-technical-replicate", 1)[0]
        else:
            fname = sname

        ## write perfect hit
        hit = data.barcodes[sname]
        offhitstring = ""

        ## write off-n hits; sort list of off-n hits
        if fname in fdbars:
            offkeys = list(fdbars.get(fname))
            for offhit in offkeys[::-1]:
                ## exclude perfect hit
                if offhit not in data.barcodes.values():
                    offhitstring += '{:<35} {:>13} {:>13} {:>13}\n'.format(
                        sname, hit, offhit, fbarhits[offhit]/2)
                    #sumoffhits += fbarhits[offhit]

        ## write string to file
        ## (old variant: format(sname, hit, hit, fsamplehits[fname]-sumoffhits))
        outfile.write('{:<35} {:>13} {:>13} {:>13}\n'.format(
            sname, hit, hit, fbarhits[hit]/2))
        outfile.write(offhitstring)

    ## write misses
    misskeys = list(fmisses.keys())
    misskeys.sort(key=fmisses.get)
    for key in misskeys[::-1]:
        outfile.write('{:<35} {:>13}{:>13}{:>13}\n'.
                      format("no_match", "_", key, fmisses[key]))
    outfile.close()

    ## Link Sample with this data file to the Assembly object
    for sname in snames:
        ## make the sample
        sample = Sample()
        sample.name = sname

        ## allow multiple barcodes if it's a replicate.
        barcodes = []
        for n in xrange(500):
            fname = sname + "-technical-replicate-{}".format(n)
            fbar = data.barcodes.get(fname)
            if fbar:
                barcodes.append(fbar)
        if barcodes:
            sample.barcode = barcodes
        else:
            sample.barcode = data.barcodes[sname]

        ## file names
        if 'pair' in data.paramsdict["datatype"]:
            sample.files.fastqs = [(os.path.join(data.dirs.fastqs,
                                                 sname + "_R1_.fastq.gz"),
                                    os.path.join(data.dirs.fastqs,
                                                 sname + "_R2_.fastq.gz"))]
        else:
            sample.files.fastqs = [(os.path.join(data.dirs.fastqs,
                                                 sname + "_R1_.fastq.gz"), "")]

        ## fill in the summary stats
        sample.stats["reads_raw"] = int(fsamplehits[sname])
        ## fill in the full df stats value
        sample.stats_dfs.s1["reads_raw"] = int(fsamplehits[sname])

        ## Only link Sample if it has data
        if sample.stats["reads_raw"]:
            sample.stats.state = 1
            data.samples[sample.name] = sample
        else:
            print("Excluded sample: no data found for", sname)

    ## initiate s1 key for data object
    data.stats_dfs.s1 = data._build_stat("s1")
    data.stats_files.s1 = outhandle
Write stats and stores to Assembly object.
def delay(self, dl=0):
    """Delay for ``dl`` seconds."""
    if dl is None:
        time.sleep(self.dl)
    elif dl < 0:
        sys.stderr.write(
            "delay cannot be less than zero; this takes no effect.\n")
    else:
        time.sleep(dl)
Delay for ``dl`` seconds.
def set(self, *components): """ Set the possible components of the block :param components: components to append Optionables or Composables """ self.reset() if len(components) == 1: self.append(components[0]) else: for comp in components: self.append(comp)
Set the possible components of the block :param components: components to append Optionables or Composables
def _read_metrics(repo, metrics, branch):
    """Read the content of each metric file and format it.

    Args:
        metrics (list): List of metric tuples
        branch (str): Branch to look up for metrics.

    Returns:
        A dict mapping keys with metrics path name and content.
        For example:

        {'metric.csv': ("value_mse deviation_mse data_set\n"
                        "0.421601 0.173461 train\n"
                        "0.67528 0.289545 testing\n"
                        "0.671502 0.297848 validation\n")}
    """
    res = {}
    for out, typ, xpath in metrics:
        assert out.scheme == "local"
        if not typ:
            typ = os.path.splitext(out.path.lower())[1].replace(".", "")
        if out.use_cache:
            open_fun = open
            path = repo.cache.local.get(out.checksum)
        else:
            open_fun = repo.tree.open
            path = out.path
        try:
            with open_fun(path) as fd:
                metric = _read_metric(
                    fd,
                    typ=typ,
                    xpath=xpath,
                    rel_path=out.rel_path,
                    branch=branch,
                )
        except IOError as e:
            if e.errno == errno.ENOENT:
                logger.warning(
                    NO_METRICS_FILE_AT_REFERENCE_WARNING.format(
                        out.rel_path, branch
                    )
                )
                metric = None
            else:
                raise

        if not metric:
            continue

        res[out.rel_path] = metric

    return res
Read the content of each metric file and format it.

Args:
    metrics (list): List of metric tuples
    branch (str): Branch to look up for metrics.

Returns:
    A dict mapping keys with metrics path name and content.
    For example:

    {'metric.csv': ("value_mse deviation_mse data_set\n"
                    "0.421601 0.173461 train\n"
                    "0.67528 0.289545 testing\n"
                    "0.671502 0.297848 validation\n")}
def tags_getrelated(tag):
    """Gets the related tags for a given tag."""
    method = 'flickr.tags.getRelated'
    data = _doget(method, auth=False, tag=tag)
    if isinstance(data.rsp.tags.tag, list):
        return [tag.text for tag in data.rsp.tags.tag]
    else:
        return [data.rsp.tags.tag.text]
Gets the related tags for a given tag.
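A note on the pattern above: the parsed response may carry a single tag node or a list of them, and the function normalizes both shapes into a list of strings. A minimal standalone sketch of that normalization, using a stand-in class instead of the real flickr response objects:

def node_texts(node_or_nodes):
    # wrap a lone node in a list so both shapes are handled uniformly
    nodes = node_or_nodes if isinstance(node_or_nodes, list) else [node_or_nodes]
    return [n.text for n in nodes]

class FakeTag(object):
    def __init__(self, text):
        self.text = text

print(node_texts(FakeTag('sea')))                    # ['sea']
print(node_texts([FakeTag('sea'), FakeTag('sun')]))  # ['sea', 'sun']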
def AddArguments(cls, argument_group): """Adds command line arguments to an argument group. This function takes an argument parser or an argument group object and adds to it all the command line arguments this helper supports. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group. """ argument_group.add_argument( '--slice', metavar='DATE', dest='slice', type=str, default='', action='store', help=( 'Create a time slice around a certain date. This parameter, if ' 'defined will display all events that happened X minutes before ' 'and after the defined date. X is controlled by the parameter ' '--slice_size but defaults to 5 minutes.')) argument_group.add_argument( '--slice_size', '--slice-size', dest='slice_size', type=int, default=5, action='store', help=( 'Defines the slice size. In the case of a regular time slice it ' 'defines the number of minutes the slice size should be. In the ' 'case of the --slicer it determines the number of events before ' 'and after a filter match has been made that will be included in ' 'the result set. The default value is 5. See --slice or --slicer ' 'for more details about this option.')) argument_group.add_argument( '--slicer', dest='slicer', action='store_true', default=False, help=( 'Create a time slice around every filter match. This parameter, ' 'if defined will save all X events before and after a filter ' 'match has been made. X is defined by the --slice_size ' 'parameter.')) argument_group.add_argument( 'filter', nargs='?', action='store', metavar='FILTER', default=None, type=str, help=( 'A filter that can be used to filter the dataset before it ' 'is written into storage. More information about the filters ' 'and how to use them can be found here: {0:s}').format( cls._DOCUMENTATION_URL))
Adds command line arguments to an argument group. This function takes an argument parser or an argument group object and adds to it all the command line arguments this helper supports. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group.
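Since the helper only needs an argparse group or parser, its behavior is easy to probe with a standalone re-creation of the same argument definitions; the date and filter values below are made up for illustration:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--slice', metavar='DATE', dest='slice', type=str, default='')
parser.add_argument('--slice_size', '--slice-size', dest='slice_size', type=int, default=5)
parser.add_argument('--slicer', dest='slicer', action='store_true', default=False)
parser.add_argument('filter', nargs='?', default=None)

opts = parser.parse_args(['--slice', '2012-10-01 09:00:00', '--slice_size', '10', 'myfilter'])
print(opts.slice, opts.slice_size, opts.slicer, opts.filter)
# 2012-10-01 09:00:00 10 False myfilter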
def transform(self, X, y=None, **params): """ Transforms *X* from phase-space to Fourier-space, returning the design matrix produced by :func:`Fourier.design_matrix` for input to a regressor. **Parameters** X : array-like, shape = [n_samples, 1] Column vector of phases. y : None, optional Unused argument for conformity (default None). **Returns** design_matrix : array-like, shape = [n_samples, 2*degree+1] Fourier design matrix produced by :func:`Fourier.design_matrix`. """ data = numpy.dstack((numpy.array(X).T[0], range(len(X))))[0] phase, order = data[data[:,0].argsort()].T design_matrix = self.design_matrix(phase, self.degree) return design_matrix[order.argsort()]
Transforms *X* from phase-space to Fourier-space, returning the design matrix produced by :func:`Fourier.design_matrix` for input to a regressor. **Parameters** X : array-like, shape = [n_samples, 1] Column vector of phases. y : None, optional Unused argument for conformity (default None). **Returns** design_matrix : array-like, shape = [n_samples, 2*degree+1] Fourier design matrix produced by :func:`Fourier.design_matrix`.
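The exact column layout of :func:`Fourier.design_matrix` is not shown here; a common convention, consistent with the documented output shape [n_samples, 2*degree+1], is one constant column plus a sine and cosine column per harmonic. A sketch under that assumption:

import numpy

def fourier_design_matrix(phase, degree):
    # columns: [1, sin(2*pi*k*phi), cos(2*pi*k*phi)] for k = 1..degree
    cols = [numpy.ones_like(phase)]
    for k in range(1, degree + 1):
        cols.append(numpy.sin(2 * numpy.pi * k * phase))
        cols.append(numpy.cos(2 * numpy.pi * k * phase))
    return numpy.column_stack(cols)

phi = numpy.array([0.0, 0.25, 0.5])
print(fourier_design_matrix(phi, degree=2).shape)  # (3, 5) == (n_samples, 2*degree + 1)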
def precision_score(df, col_true=None, col_pred='prediction_result', pos_label=1, average=None):
    r"""
    Compute precision of a predicted DataFrame. Precision is defined as
    :math:`\frac{TP}{TP + FP}`

    :Parameters:
        - **df** - predicted data frame
        - **col_true** - column name of true label
        - **col_pred** - column name of predicted label, 'prediction_result' by default.
        - **pos_label** - denote the desired class label when ``average`` == `binary`
        - **average** - denote the method to compute average.

    :Returns: Precision score

    :Return type: float or numpy.array[float]

    The parameter ``average`` controls the behavior of the function.

    - When ``average`` == None (by default), precision of every class is given as a list.
    - When ``average`` == 'binary', precision of the class specified in ``pos_label`` is given.
    - When ``average`` == 'micro', STP / (STP + SFP) is given, where STP and SFP are summations of TP and FP for every class.
    - When ``average`` == 'macro', the average precision of all the classes is given.
    - When ``average`` == 'weighted', the average precision of all the classes weighted by the support of every true class is given.

    :Example:

    Assume we have a table named 'predicted' as follows:

    ======== ===================
    label    prediction_result
    ======== ===================
    0        0
    1        2
    2        1
    0        0
    1        0
    2        1
    ======== ===================

    Different options of the ``average`` parameter output different values:

    .. code-block:: python

        >>> precision_score(predicted, 'label', average=None)
        array([ 0.66..., 0. , 0. ])
        >>> precision_score(predicted, 'label', average='macro')
        0.22
        >>> precision_score(predicted, 'label', average='micro')
        0.33
        >>> precision_score(predicted, 'label', average='weighted')
        0.22
    """
    if not col_pred:
        col_pred = get_field_name_by_role(df, FieldRole.PREDICTED_CLASS)
    mat, label_list = _run_cm_node(df, col_true, col_pred)
    class_dict = dict((label, idx) for idx, label in enumerate(label_list))

    tps = np.diag(mat)
    pred_count = np.sum(mat, axis=0)
    if average is None:
        return tps * 1.0 / pred_count
    elif average == 'binary':
        class_idx = class_dict[pos_label]
        return tps[class_idx] * 1.0 / pred_count[class_idx]
    elif average == 'micro':
        return np.sum(tps) * 1.0 / np.sum(pred_count)
    elif average == 'macro':
        return np.mean(tps * 1.0 / pred_count)
    elif average == 'weighted':
        support = np.sum(mat, axis=1)
        return np.sum(tps * 1.0 / pred_count * support) / np.sum(support)
r""" Compute precision of a predicted DataFrame. Precision is defined as :math:`\frac{TP}{TP + TN}` :Parameters: - **df** - predicted data frame - **col_true** - column name of true label - **col_pred** - column name of predicted label, 'prediction_result' by default. - **pos_label** - denote the desired class label when ``average`` == `binary` - **average** - denote the method to compute average. :Returns: Precision score :Return type: float or numpy.array[float] The parameter ``average`` controls the behavior of the function. - When ``average`` == None (by default), precision of every class is given as a list. - When ``average`` == 'binary', precision of class specified in ``pos_label`` is given. - When ``average`` == 'micro', STP / (STP + STN) is given, where STP and STN are summations of TP and TN for every class. - When ``average`` == 'macro', average precision of all the class is given. - When ``average`` == `weighted`, average precision of all the class weighted by support of every true classes is given. :Example: Assume we have a table named 'predicted' as follows: ======== =================== label prediction_result ======== =================== 0 0 1 2 2 1 0 0 1 0 2 1 ======== =================== Different options of ``average`` parameter outputs different values: .. code-block:: python >>> precision_score(predicted, 'label', average=None) array([ 0.66..., 0. , 0. ]) >>> precision_score(predicted, 'label', average='macro') 0.22 >>> precision_score(predicted, 'label', average='micro') 0.33 >>> precision_score(predicted, 'label', average='weighted') 0.22
async def trigger(self, event, kwargs): """ Enqueue an event for processing """ await self._queue.put((event, kwargs)) self._resume_processing.set()
Enqueue an event for processing
def active_joined_organisations(doc): """View for getting organisations associated with a user""" if doc.get('type') == 'user' and doc.get('state') != 'deactivated': for org_id, state in doc.get('organisations', {}).items(): if state['state'] == 'deactivated': continue org = {'_id': org_id} yield [doc['_id'], None], org try: yield [doc['_id'], state['state']], org except KeyError: pass
View for getting organisations associated with a user
def get_proteome_counts_impute_missing(prots_filtered_feathers, outpath, length_filter_pid=None,
                                       copynum_scale=False, copynum_df=None,
                                       force_rerun=False):
    """Get counts, using the mean feature vector to fill in missing proteins for a strain"""
    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outpath):
        big_strain_counts_df = pd.DataFrame()
        first = True
        for feather in prots_filtered_feathers:
            loaded = load_feather(protein_feather=feather, length_filter_pid=length_filter_pid,
                                  copynum_scale=copynum_scale,
                                  copynum_df=copynum_df)
            if first:
                big_strain_counts_df = pd.DataFrame(index=_all_counts, columns=loaded.columns)
                first = False
            new_columns = list(set(loaded.columns.tolist()).difference(big_strain_counts_df.columns))
            if new_columns:
                for col in new_columns:
                    big_strain_counts_df[col] = big_strain_counts_df.mean(axis=1)
            not_in_loaded = list(set(big_strain_counts_df.columns).difference(loaded.columns.tolist()))
            if not_in_loaded:
                for col in not_in_loaded:
                    big_strain_counts_df[col] = big_strain_counts_df[col] + loaded.mean(axis=1)
            big_strain_counts_df = big_strain_counts_df.add(loaded, fill_value=0)

        if len(big_strain_counts_df) > 0:
            big_strain_counts_df.astype(float).reset_index().to_feather(outpath)
        return big_strain_counts_df
    else:
        return pd.read_feather(outpath).set_index('index')
Get counts, using the mean feature vector to fill in missing proteins for a strain
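The function leans on two pandas idioms: DataFrame.add with fill_value=0, which unions the indices and treats absent entries as zero, and filling a new column with the row-wise mean. In isolation (toy data, not the real feature matrices):

import pandas as pd

a = pd.Series({'gene1': 2.0, 'gene2': 1.0})
b = pd.Series({'gene1': 3.0, 'gene3': 4.0})
print(a.add(b, fill_value=0).to_dict())
# {'gene1': 5.0, 'gene2': 1.0, 'gene3': 4.0}

df = pd.DataFrame({'strain1': [1.0, 3.0], 'strain2': [3.0, 5.0]}, index=['f1', 'f2'])
df['strain3'] = df.mean(axis=1)  # impute an unseen strain with the row means
print(df['strain3'].tolist())    # [2.0, 4.0]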
def get_time_remaining_estimate(self): """ Returns time remaining estimate according to GetSystemPowerStatus().BatteryLifeTime """ power_status = SYSTEM_POWER_STATUS() if not GetSystemPowerStatus(pointer(power_status)): raise WinError() if POWER_TYPE_MAP[power_status.ACLineStatus] == common.POWER_TYPE_AC: return common.TIME_REMAINING_UNLIMITED elif power_status.BatteryLifeTime == -1: return common.TIME_REMAINING_UNKNOWN else: return float(power_status.BatteryLifeTime) / 60.0
Returns time remaining estimate according to GetSystemPowerStatus().BatteryLifeTime
def _init_glyph(self, plot, mapping, properties): """ Returns a Bokeh glyph object. """ box = Span(level=properties.get('level', 'glyph'), **mapping) plot.renderers.append(box) return None, box
Returns a Bokeh glyph object.
def convert(self, *args, **kwargs):
    """ Run the conversion: process strings and metadata, then save the result. """
    self.strings()
    self.metadata()

    # save file
    self.result.save(self.output())
Run the conversion: process strings and metadata, then save the result.
def _prepare_value(val, maxlen=50, notype=False): """ Stringify value `val`, ensuring that it is not too long. """ if val is None or val is True or val is False: return str(val) sval = repr(val) sval = sval.replace("\n", " ").replace("\t", " ").replace("`", "'") if len(sval) > maxlen: sval = sval[:maxlen - 4] + "..." + sval[-1] if notype: return sval else: tval = checker_for_type(type(val)).name() return "%s of type %s" % (sval, tval)
Stringify value `val`, ensuring that it is not too long.
def _create_group_assignment(self, mesh_axes): """Create group assignment for XLA cross replica ops (physical pnums).""" partitioning = {} for logical_pnum in xrange(self.size): group = mtf.pnum_to_group(self.shape, mesh_axes, logical_pnum) if group not in partitioning: partitioning[group] = [] partitioning[group].append(self.l2p(logical_pnum)) group_assignment = [] for group, physical_pnums in partitioning.items(): group_assignment.append(physical_pnums) return group_assignment
Create group assignment for XLA cross replica ops (physical pnums).
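Stripped of the mesh-tensorflow specifics (mtf.pnum_to_group and the logical-to-physical map self.l2p), the method above is a plain partition-then-collect over processor numbers; a toy version with parity as the grouping function (relies on Python 3.7+ dict insertion order):

def group_assignment(num_pnums, group_of):
    # bucket each pnum by its group key, then collect the buckets
    partitioning = {}
    for pnum in range(num_pnums):
        partitioning.setdefault(group_of(pnum), []).append(pnum)
    return list(partitioning.values())

print(group_assignment(8, lambda p: p % 2))
# [[0, 2, 4, 6], [1, 3, 5, 7]]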
def show(self, baseAppInstance):
    """Show the widget as the root window"""
    self.from_dict_to_fields(self.configDict)
    super(ProjectConfigurationDialog, self).show(baseAppInstance)
Show the widget as the root window
def share_column_widths(self, tables, shared_limit=None):
    """
    Have this table sync its column widths with the given tables.
    Note, this will need to be called on the other tables to be fully
    synced.

    :param tables: list of SeabornTables to share column widths
    :param shared_limit: int if diff is greater than this then ignore it.
    :return: None
    """
    for table in tables:
        record = (table, shared_limit)
        if record not in self.shared_tables and table is not self:
            self.shared_tables.append(record)
Have this table sync its column widths with the given tables.
Note, this will need to be called on the other tables to be fully synced.

:param tables: list of SeabornTables to share column widths
:param shared_limit: int if diff is greater than this then ignore it.
:return: None
def get_location_observation(lat, lng, token): """Lookup observations by geo coordinates.""" req = requests.get( API_ENDPOINT_GEO % (lat, lng), params={ 'token': token }) if req.status_code == 200 and req.json()["status"] == "ok": return parse_observation_response(req.json()["data"]) return {}
Lookup observations by geo coordinates.
def get_absolute_url_with_date(self):
    """URL based on the entry's date & slug."""
    pub_date = self.published_on
    if pub_date and settings.USE_TZ:
        # If TZ is enabled, convert all of these dates from UTC to whatever
        # the project's timezone is set as. Ideally, we'd pull this from
        # some user settings, but the *canonical* publish time is that of
        # the author (assuming author == owner of this project).
        pub_date = make_naive(pub_date, pytz.utc)  # Make naive
        pub_date = pytz.timezone(settings.TIME_ZONE).localize(pub_date)

    if pub_date:
        args = [
            pub_date.strftime("%Y"),
            pub_date.strftime("%m"),
            pub_date.strftime("%d"),
            self.slug
        ]
    else:
        args = [self.slug]
    return reverse('blargg:entry_detail', args=args)
URL based on the entry's date & slug.
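For reference, one direct way to get local date parts from a UTC-aware datetime is astimezone; a standalone sketch where "US/Eastern" is a stand-in for settings.TIME_ZONE, not the project's actual value (note the date can shift across the conversion, which is why the URL uses the localized date):

from datetime import datetime
import pytz

utc_dt = pytz.utc.localize(datetime(2020, 1, 1, 2, 30))  # aware UTC instant
local = utc_dt.astimezone(pytz.timezone("US/Eastern"))   # same instant, local wall clock
print(local.strftime("%Y/%m/%d"))                        # 2019/12/31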
def queryResponse(self, queryEngine, query=None, vendorSpecific=None, **kwargs): """CNRead.query(session, queryEngine, query) → OctetStream https://releases.dataone.org/online/api- documentation-v2.0.1/apis/CN_APIs.html#CNRead.query. Args: queryEngine: query: vendorSpecific: **kwargs: Returns: """ return self.GET( ['query', queryEngine, query], headers=vendorSpecific, query=kwargs )
CNRead.query(session, queryEngine, query) → OctetStream https://releases.dataone.org/online/api- documentation-v2.0.1/apis/CN_APIs.html#CNRead.query. Args: queryEngine: query: vendorSpecific: **kwargs: Returns:
def draw_cloud_tree(self, axes=None, html=False, fixed_order=True, **kwargs):
    """
    Draw a series of trees overlapping each other in coordinate space.
    The order of tip_labels is fixed in cloud trees so that trees with
    discordant relationships can be seen in conflict. To change the tip
    order use the 'fixed_order' argument in toytree.mtree() when creating
    the MultiTree object.

    Parameters:
        axes (toyplot.Cartesian): toyplot Cartesian axes object.
        html (bool): whether to return the drawing as html (default=PNG).
        edge_styles: (list): option to enter a list of edge dictionaries.
        **kwargs (dict): styling options should be input as a dictionary.
    """
    # return nothing if treelist is empty
    if not self.treelist:
        print("Treelist is empty")
        return None, None

    # return nothing if trees do not share the same tips
    if not self.all_tips_shared:
        print("All trees in treelist do not share the same tips")
        return None, None

    # make a copy of the treelist so we don't modify the original
    if not fixed_order:
        raise Exception(
            "fixed_order must be either True or a list with the tip order")

    # set fixed order on a copy of the tree list
    if isinstance(fixed_order, (list, tuple)):
        pass
    elif fixed_order is True:
        fixed_order = self.treelist[0].get_tip_labels()
    else:
        raise Exception(
            "fixed_order argument must be True or a list with the tip order")
    treelist = [
        ToyTree(i, fixed_order=fixed_order) for i in self.copy().treelist
    ]

    # give advice if user tries to enter tip_labels
    if kwargs.get("tip_labels"):
        print(TIP_LABELS_ADVICE)

    # set autorender format to png so we don't bog down notebooks
    try:
        changed_autoformat = False
        if not html:
            toyplot.config.autoformat = "png"
            changed_autoformat = True

        # dict of global cloud tree style
        mstyle = STYLES['m']

        # if trees in treelist already have some then we don't quash...
        mstyle.update(
            {i: j for (i, j) in kwargs.items()
             if (j is not None) & (i != "tip_labels")}
        )
        for tree in treelist:
            tree.style.update(mstyle)

        # Send a copy of MultiTree to init Drawing object.
        draw = CloudTree(treelist, **kwargs)

        # and create drawing
        if kwargs.get("debug"):
            return draw

        # allow user axes, and kwargs for width, height
        canvas, axes = draw.update(axes)
        return canvas, axes

    finally:
        if changed_autoformat:
            toyplot.config.autoformat = "html"
Draw a series of trees overlapping each other in coordinate space. The order of tip_labels is fixed in cloud trees so that trees with discordant relationships can be seen in conflict. To change the tip order use the 'fixed_order' argument in toytree.mtree() when creating the MultiTree object. Parameters: axes (toyplot.Cartesian): toyplot Cartesian axes object. html (bool): whether to return the drawing as html (default=PNG). edge_styles: (list): option to enter a list of edge dictionaries. **kwargs (dict): styling options should be input as a dictionary.
def warn_startup_with_shell_off(platform, gdb_args):
    """Return True if the user may need to turn 'startup-with-shell' off.

    If the macOS version is 16 (Sierra) or higher, the user may need to set
    shell off due to the OS's security requirements.
    http://stackoverflow.com/questions/39702871/gdb-kind-of-doesnt-work-on-macos-sierra
    """
    darwin_match = re.match(r"darwin-(\d+)\..*", platform)
    on_darwin = darwin_match is not None and int(darwin_match.groups()[0]) >= 16
    if on_darwin:
        shell_is_off = "startup-with-shell off" in gdb_args
        return not shell_is_off
    return False
Return True if the user may need to turn 'startup-with-shell' off. If the macOS version is 16 (Sierra) or higher, the user may need to set shell off due to the OS's security requirements. http://stackoverflow.com/questions/39702871/gdb-kind-of-doesnt-work-on-macos-sierra
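Apart from the `re` import, the function is self-contained, so its behavior is easy to probe with hypothetical inputs:

print(warn_startup_with_shell_off("darwin-16.7.0", []))   # True: Sierra or later, flag absent
print(warn_startup_with_shell_off("darwin-16.7.0",
                                  ["startup-with-shell off"]))  # False: flag already set
print(warn_startup_with_shell_off("darwin-15.6.0", []))   # False: pre-Sierra
print(warn_startup_with_shell_off("linux", []))           # False: not macOS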
def lines_table(html_doc, tofloat=True):
    """return a list of [(lines, table), .....]

    lines = all the significant lines before the table. These are lines
    between this table and the previous table or 'hr' tag

    table = rows -> [[cell1, cell2, ..], [cell1, cell2, ..], ..]

    The lines act as a description for what is in the table
    """
    soup = BeautifulSoup(html_doc, "html.parser")
    linestables = []
    elements = soup.p.next_elements  # start after the first para
    for element in elements:
        tabletup = []
        if not _has_name(element):
            continue
        if element.name == 'table':
            # hit the first table
            beforetable = []
            prev_elements = element.previous_elements
            # walk back and get the lines
            for prev_element in prev_elements:
                if not _has_name(prev_element):
                    continue
                if prev_element.name not in ('br', None):  # no lines here
                    if prev_element.name in ('table', 'hr', 'tr', 'td'):
                        # just hit the previous table. You got all the lines
                        break
                    if prev_element.parent.name == "p":
                        # if the parent is "p", you will get its text anyway from the parent
                        pass
                    else:
                        if prev_element.get_text():
                            # skip blank lines
                            beforetable.append(prev_element.get_text())
            beforetable.reverse()
            tabletup.append(beforetable)
            function_selector = {True: table2val_matrix, False: table2matrix}
            function = function_selector[tofloat]
            tabletup.append(function(element))
        if tabletup:
            linestables.append(tabletup)
    return linestables
return a list of [(lines, table), .....] lines = all the significant lines before the table. These are lines between this table and the previous table or 'hr' tag table = rows -> [[cell1, cell2, ..], [cell1, cell2, ..], ..] The lines act as a description for what is in the table
def getInstance(self, aLocation, axisOnly=False, getFactors=False): """ Calculate the delta at aLocation. * aLocation: a Location object, expected to be in bent space * axisOnly: * True: calculate an instance only with the on-axis masters. * False: calculate an instance with on-axis and off-axis masters. * getFactors: * True: return a list of the calculated factors. """ self._collectAxisPoints() factors = self.getFactors(aLocation, axisOnly) total = None for f, item, name in factors: if total is None: total = f * item continue total += f * item if total is None: total = 0 * self._neutral if getFactors: return total, factors return total
Calculate the delta at aLocation. * aLocation: a Location object, expected to be in bent space * axisOnly: * True: calculate an instance only with the on-axis masters. * False: calculate an instance with on-axis and off-axis masters. * getFactors: * True: return a list of the calculated factors.
def stop_threadsafe(self): """Stop this task from another thread and wait for it to finish. This method must not be called from within the BackgroundEventLoop but will inject self.stop() into the event loop and block until it returns. Raises: TimeoutExpiredError: If the task does not stop in the given timeout specified in __init__() """ if self.stopped: return try: self._loop.run_coroutine(self.stop()) except asyncio.TimeoutError: raise TimeoutExpiredError("Timeout stopping task {} with {} subtasks".format(self.name, len(self.subtasks)))
Stop this task from another thread and wait for it to finish. This method must not be called from within the BackgroundEventLoop but will inject self.stop() into the event loop and block until it returns. Raises: TimeoutExpiredError: If the task does not stop in the given timeout specified in __init__()
def render_customizations(self):
    """
    Customize template for site user specified customizations
    """
    disable_plugins = self.pt.customize_conf.get('disable_plugins', [])
    if not disable_plugins:
        logger.debug('No site-user specified plugins to disable')
    else:
        for plugin in disable_plugins:
            try:
                self.pt.remove_plugin(plugin['plugin_type'], plugin['plugin_name'],
                                      'disabled at user request')
            except KeyError:
                # Malformed config
                logger.info('Invalid custom configuration found for disable_plugins')

    enable_plugins = self.pt.customize_conf.get('enable_plugins', [])
    if not enable_plugins:
        logger.debug('No site-user specified plugins to enable')
    else:
        for plugin in enable_plugins:
            try:
                msg = 'enabled at user request'
                self.pt.add_plugin(plugin['plugin_type'], plugin['plugin_name'],
                                   plugin['plugin_args'], msg)
            except KeyError:
                # Malformed config
                logger.info('Invalid custom configuration found for enable_plugins')
Customize template for site user specified customizations
def skewvT(self,R,romberg=False,nsigma=None,phi=0.):
    """
    NAME:

       skewvT

    PURPOSE:

       calculate skew in vT at R by marginalizing over velocity

    INPUT:

       R - radius at which to calculate the skew (can be Quantity)

    OPTIONAL INPUT:

       nsigma - number of sigma to integrate the velocities over

    KEYWORDS:

       romberg - if True, use a romberg integrator (default: False)

    OUTPUT:

       skewvT

    HISTORY:

       2011-12-07 - Written - Bovy (NYU)
    """
    surfmass= self.surfacemass(R,romberg=romberg,nsigma=nsigma,
                               use_physical=False)
    vt= self._vmomentsurfacemass(R,0,1,romberg=romberg,nsigma=nsigma)\
        /surfmass
    vt2= self._vmomentsurfacemass(R,0,2,romberg=romberg,nsigma=nsigma)\
        /surfmass
    vt3= self._vmomentsurfacemass(R,0,3,romberg=romberg,nsigma=nsigma)\
        /surfmass
    s2= vt2-vt**2.
    return (vt3-3.*vt*vt2+2.*vt**3.)*s2**(-1.5)
NAME:

   skewvT

PURPOSE:

   calculate skew in vT at R by marginalizing over velocity

INPUT:

   R - radius at which to calculate the skew (can be Quantity)

OPTIONAL INPUT:

   nsigma - number of sigma to integrate the velocities over

KEYWORDS:

   romberg - if True, use a romberg integrator (default: False)

OUTPUT:

   skewvT

HISTORY:

   2011-12-07 - Written - Bovy (NYU)
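The returned quantity is the standard population skewness written in raw moments: mu_3 / sigma^3 with mu_3 = <v^3> - 3<v><v^2> + 2<v>^3. A numpy check of that identity against scipy.stats.skew on an arbitrary skewed sample:

import numpy as np
from scipy import stats

v = np.random.default_rng(0).gamma(2.0, size=100000)

m1, m2, m3 = v.mean(), (v**2).mean(), (v**3).mean()
s2 = m2 - m1**2
skew_raw = (m3 - 3.*m1*m2 + 2.*m1**3) * s2**(-1.5)

print(np.isclose(skew_raw, stats.skew(v)))  # True: same population skewness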
def format_list(self, at_char, user, list_name): '''Return formatted HTML for a list.''' return '<a href="https://twitter.com/%s/lists/%s">%s%s/%s</a>' \ % (user, list_name, at_char, user, list_name)
Return formatted HTML for a list.
def add_logging(parser, log_format=LOG_FORMAT, log_level=LOG_LEVEL, color=True): """Configures the `argparse.ArgumentParser` with arguments to configure logging. This adds arguments: * ``-v`` to increase the log level * ``-q`` to decrease the log level * ``--color`` to enable color logging when available * ``--no-color`` to disable color logging The root logger is configured with the given format and log level. ANSI color codes are supported in the logging format string. If color is enabled and stderr is a tty, the codes will be passed through. Otherwise the logging formatter will strip them out. The logging format supports these additional format variables for coloration: %(levelcolor)s If stderr is a terminal, an ANSI color code appropriate for the level of the logged record. %(resetcolor)s If stderr is a terminal, an ANSI color reset code. """ parser.set_defaults(log_level=log_level) parser.add_argument('-v', dest='log_level', action=_LogLevelAddAction, const=1, help='use more verbose logging (stackable)') parser.add_argument('-q', dest='log_level', action=_LogLevelAddAction, const=-1, help='use less verbose logging (stackable)') root_logger = logging.getLogger() root_logger.setLevel(log_level) handler = logging.StreamHandler() # using sys.stderr if hasattr(sys.stderr, 'isatty') and sys.stderr.isatty(): class ColorAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, True) handler.setFormatter(_ColorLogFormatter(log_format)) class NoColorAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, False) handler.setFormatter(_NoColorLogFormatter(log_format)) parser.add_argument('--color', dest='color', action=ColorAction, nargs=0, help='use color in log (when available)') parser.add_argument('--no-color', dest='color', action=NoColorAction, nargs=0, help='use no color in log') if color: formatter_class = _ColorLogFormatter else: formatter_class = _NoColorLogFormatter else: # Make the options available, but they don't do anything. parser.add_argument('--color', dest='color', action='store_true', help='use color in log (when available)') parser.add_argument('--no-color', dest='color', action='store_false', help='use no color in log') formatter_class = _NoColorLogFormatter handler.setFormatter(formatter_class(log_format)) root_logger.addHandler(handler)
Configures the `argparse.ArgumentParser` with arguments to configure logging. This adds arguments: * ``-v`` to increase the log level * ``-q`` to decrease the log level * ``--color`` to enable color logging when available * ``--no-color`` to disable color logging The root logger is configured with the given format and log level. ANSI color codes are supported in the logging format string. If color is enabled and stderr is a tty, the codes will be passed through. Otherwise the logging formatter will strip them out. The logging format supports these additional format variables for coloration: %(levelcolor)s If stderr is a terminal, an ANSI color code appropriate for the level of the logged record. %(resetcolor)s If stderr is a terminal, an ANSI color reset code.
def validate_dict(in_dict, **kwargs): """ Returns Boolean of whether given dict conforms to type specifications given in kwargs. """ if not isinstance(in_dict, dict): raise ValueError('requires a dictionary') for key, value in kwargs.iteritems(): if key == 'required': for required_key in value: if required_key not in in_dict: return False elif key not in in_dict: continue elif value == bool: in_dict[key] = (True if str(in_dict[key]).lower() == 'true' else False) else: if (isinstance(in_dict[key], list) and len(in_dict[key]) == 1 and value != list): in_dict[key] = in_dict[key][0] try: if key in in_dict: in_dict[key] = value(in_dict[key]) except ValueError: return False return True
Returns Boolean of whether given dict conforms to type specifications given in kwargs.
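A hypothetical call showing the in-place coercion and the bool special case (note the body uses kwargs.iteritems(), so as written the function targets Python 2):

payload = {'count': '3', 'verbose': 'True', 'name': 'demo'}
print(validate_dict(payload, count=int, verbose=bool, name=str, required=['count']))
# True
print(payload)
# {'count': 3, 'verbose': True, 'name': 'demo'}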
def grid_at_redshift_from_image_plane_grid_and_redshift(self, image_plane_grid, redshift):
    """For an input grid of (y,x) arc-second image-plane coordinates, ray-trace the coordinates to any redshift in \
    the strong lens configuration.

    This is performed using multi-plane ray-tracing and the existing redshifts and planes of the tracer. However, \
    any redshift can be input even if a plane does not exist there, including redshifts before the first plane \
    of the lensing system.

    Parameters
    ----------
    image_plane_grid : ndarray or grids.RegularGrid
        The image-plane grid which is traced to the redshift.
    redshift : float
        The redshift the image-plane grid is traced to.
    """

    # TODO : We need to come up with a better abstraction for multi-plane lensing 0_0

    image_plane_grid_stack = grids.GridStack(regular=image_plane_grid, sub=np.array([[0.0, 0.0]]),
                                             blurring=np.array([[0.0, 0.0]]))

    tracer = TracerMultiPlanes(galaxies=self.galaxies, image_plane_grid_stack=image_plane_grid_stack,
                               border=None, cosmology=self.cosmology)

    for plane_index in range(0, len(self.plane_redshifts)):

        new_grid_stack = image_plane_grid_stack

        if redshift <= tracer.plane_redshifts[plane_index]:

            # If redshift is between two planes, we need to map over all previous planes coordinates / deflections.

            if plane_index > 0:
                for previous_plane_index in range(plane_index):
                    scaling_factor = cosmology_util.scaling_factor_between_redshifts_from_redshifts_and_cosmology(
                        redshift_0=tracer.plane_redshifts[previous_plane_index], redshift_1=redshift,
                        redshift_final=tracer.plane_redshifts[-1], cosmology=tracer.cosmology)

                    scaled_deflection_stack = lens_util.scaled_deflection_stack_from_plane_and_scaling_factor(
                        plane=tracer.planes[previous_plane_index], scaling_factor=scaling_factor)

                    new_grid_stack = \
                        lens_util.grid_stack_from_deflection_stack(grid_stack=new_grid_stack,
                                                                   deflection_stack=scaled_deflection_stack)

            # If redshift is before the first plane, no change to image plane coordinates.

            elif plane_index == 0:
                return new_grid_stack.regular

            return new_grid_stack.regular
For an input grid of (y,x) arc-second image-plane coordinates, ray-trace the coordinates to any redshift in \
the strong lens configuration.

This is performed using multi-plane ray-tracing and the existing redshifts and planes of the tracer. However, \
any redshift can be input even if a plane does not exist there, including redshifts before the first plane \
of the lensing system.

Parameters
----------
image_plane_grid : ndarray or grids.RegularGrid
    The image-plane grid which is traced to the redshift.
redshift : float
    The redshift the image-plane grid is traced to.
def get_group(self, group_id): """Get a single group. :param str group_id: group ID :returns: group :rtype: :class:`marathon.models.group.MarathonGroup` """ response = self._do_request( 'GET', '/v2/groups/{group_id}'.format(group_id=group_id)) return self._parse_response(response, MarathonGroup)
Get a single group. :param str group_id: group ID :returns: group :rtype: :class:`marathon.models.group.MarathonGroup`
def _execute_command(self, command, sql):
    """
    :raise InterfaceError: If the connection is closed.
    :raise ValueError: If no username was specified.
    """
    if not self._sock:
        raise err.InterfaceError("(0, '')")

    # If the last query was unbuffered, make sure it finishes before
    # sending new commands
    if self._result is not None:
        if self._result.unbuffered_active:
            warnings.warn("Previous unbuffered result was left incomplete")
            self._result._finish_unbuffered_query()
        while self._result.has_next:
            self.next_result()
        self._result = None

    if isinstance(sql, text_type):
        sql = sql.encode(self.encoding)

    packet_size = min(MAX_PACKET_LEN, len(sql) + 1)  # +1 is for command

    # tiny optimization: build first packet manually instead of
    # calling self.write_packet()
    prelude = struct.pack('<iB', packet_size, command)
    packet = prelude + sql[:packet_size-1]
    self._write_bytes(packet)
    if DEBUG: dump_packet(packet)
    self._next_seq_id = 1

    if packet_size < MAX_PACKET_LEN:
        return

    sql = sql[packet_size-1:]
    while True:
        packet_size = min(MAX_PACKET_LEN, len(sql))
        self.write_packet(sql[:packet_size])
        sql = sql[packet_size:]
        if not sql and packet_size < MAX_PACKET_LEN:
            break
:raise InterfaceError: If the connection is closed. :raise ValueError: If no username was specified.
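The "tiny optimization" packs the 4-byte MySQL packet header (a 3-byte little-endian payload length plus a sequence byte that is always 0 for the first packet) together with the command byte in one struct call; the byte layout is easy to inspect (COM_QUERY below is the standard command value 3):

import struct

COM_QUERY = 3
payload_len = 5  # command byte + 4 bytes of SQL, for example

prelude = struct.pack('<iB', payload_len, COM_QUERY)
print(prelude.hex())  # '0500000003'
# bytes 0-2: payload length (5) as a 3-byte little-endian integer
# byte 3:    sequence id 0 (the int's high byte, always 0 since length < 2**24)
# byte 4:    the command byte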
def operator(self): """Supported Filter Operators + EQ - Equal To + NE - Not Equal To + GT - Greater Than + GE - Greater Than or Equal To + LT - Less Than + LE - Less Than or Equal To + SW - Starts With + IN - In String or Array + NI - Not in String or Array """ return { 'EQ': operator.eq, 'NE': operator.ne, 'GT': operator.gt, 'GE': operator.ge, 'LT': operator.lt, 'LE': operator.le, 'SW': self._starts_with, 'IN': self._in, 'NI': self._ni, # not in }
Supported Filter Operators + EQ - Equal To + NE - Not Equal To + GT - Greater Than + GE - Greater Than or Equal To + LT - Less Than + LE - Less Than or Equal To + SW - Starts With + IN - In String or Array + NI - Not in String or Array
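The mapping is a plain dispatch table, so evaluating a filter is a dictionary lookup followed by a call; with the stdlib entries alone:

import operator

ops = {'EQ': operator.eq, 'NE': operator.ne, 'GT': operator.gt,
       'GE': operator.ge, 'LT': operator.lt, 'LE': operator.le}
print(ops['GE'](5, 3))      # True
print(ops['EQ']('a', 'b'))  # False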
def median_slitlets_rectified( input_image, mode=0, minimum_slitlet_width_mm=EMIR_MINIMUM_SLITLET_WIDTH_MM, maximum_slitlet_width_mm=EMIR_MAXIMUM_SLITLET_WIDTH_MM, debugplot=0 ): """Compute median spectrum for each slitlet. Parameters ---------- input_image : HDUList object Input 2D image. mode : int Indicate desired result: 0 : image with the same size as the input image, with the median spectrum of each slitlet spanning all the spectra of the corresponding slitlet 1 : image with 55 spectra, containing the median spectra of each slitlet 2 : single collapsed median spectrum, using exclusively the useful slitlets from the input image minimum_slitlet_width_mm : float Minimum slitlet width (mm) for a valid slitlet. maximum_slitlet_width_mm : float Maximum slitlet width (mm) for a valid slitlet. debugplot : int Determines whether intermediate computations and/or plots are displayed. The valid codes are defined in numina.array.display.pause_debugplot. Returns ------- image_median : HDUList object Output image. """ image_header = input_image[0].header image2d = input_image[0].data # check image dimensions naxis2_expected = EMIR_NBARS * EMIR_NPIXPERSLIT_RECTIFIED naxis2, naxis1 = image2d.shape if naxis2 != naxis2_expected: raise ValueError("NAXIS2={0} should be {1}".format( naxis2, naxis2_expected )) # check that the FITS file has been obtained with EMIR instrument = image_header['instrume'] if instrument != 'EMIR': raise ValueError("INSTRUME keyword is not 'EMIR'!") # initialize output image if mode == 0: image2d_median = np.zeros((naxis2, naxis1)) else: image2d_median = np.zeros((EMIR_NBARS, naxis1)) # main loop for i in range(EMIR_NBARS): ns1 = i * EMIR_NPIXPERSLIT_RECTIFIED + 1 ns2 = ns1 + EMIR_NPIXPERSLIT_RECTIFIED - 1 sp_median = np.median(image2d[(ns1-1):ns2, :], axis=0) if mode == 0: image2d_median[(ns1-1):ns2, :] = np.tile( sp_median, (EMIR_NPIXPERSLIT_RECTIFIED, 1) ) else: image2d_median[i] = np.copy(sp_median) if mode == 2: # get CSU configuration from FITS header csu_config = CsuConfiguration.define_from_header(image_header) # define wavelength calibration parameters crpix1 = image_header['crpix1'] crval1 = image_header['crval1'] cdelt1 = image_header['cdelt1'] # segregate slitlets list_useful_slitlets = csu_config.widths_in_range_mm( minwidth=minimum_slitlet_width_mm, maxwidth=maximum_slitlet_width_mm ) list_not_useful_slitlets = [i for i in list(range(1, EMIR_NBARS + 1)) if i not in list_useful_slitlets] if abs(debugplot) != 0: print('>>> list_useful_slitlets....:', list_useful_slitlets) print('>>> list_not_useful_slitlets:', list_not_useful_slitlets) # define mask from array data mask2d, borders = define_mask_borders(image2d_median, sought_value=0) if abs(debugplot) % 10 != 0: ximshow(mask2d.astype(int), z1z2=(-.2, 1.2), crpix1=crpix1, crval1=crval1, cdelt1=cdelt1, debugplot=debugplot) # update mask with unused slitlets for islitlet in list_not_useful_slitlets: mask2d[islitlet - 1, :] = np.array([True] * naxis1) if abs(debugplot) % 10 != 0: ximshow(mask2d.astype(int), z1z2=(-.2, 1.2), crpix1=crpix1, crval1=crval1, cdelt1=cdelt1, debugplot=debugplot) # useful image pixels image2d_masked = image2d_median * (1 - mask2d.astype(int)) if abs(debugplot) % 10 != 0: ximshow(image2d_masked, crpix1=crpix1, crval1=crval1, cdelt1=cdelt1, debugplot=debugplot) # masked image image2d_masked = np.ma.masked_array(image2d_median, mask=mask2d) # median spectrum image1d_median = np.ma.median(image2d_masked, axis=0).data image_median = fits.PrimaryHDU(data=image1d_median, header=image_header) else: 
image_median = fits.PrimaryHDU(data=image2d_median, header=image_header) return fits.HDUList([image_median])
Compute median spectrum for each slitlet. Parameters ---------- input_image : HDUList object Input 2D image. mode : int Indicate desired result: 0 : image with the same size as the input image, with the median spectrum of each slitlet spanning all the spectra of the corresponding slitlet 1 : image with 55 spectra, containing the median spectra of each slitlet 2 : single collapsed median spectrum, using exclusively the useful slitlets from the input image minimum_slitlet_width_mm : float Minimum slitlet width (mm) for a valid slitlet. maximum_slitlet_width_mm : float Maximum slitlet width (mm) for a valid slitlet. debugplot : int Determines whether intermediate computations and/or plots are displayed. The valid codes are defined in numina.array.display.pause_debugplot. Returns ------- image_median : HDUList object Output image.
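The per-slitlet median and the mode-0 tiling reduce to block-wise numpy operations; a toy-sized sketch with 3 "slitlets" of 4 rows each, standing in for EMIR_NBARS blocks of EMIR_NPIXPERSLIT_RECTIFIED rows:

import numpy as np

nbars, npix, naxis1 = 3, 4, 6
image2d = np.arange(nbars * npix * naxis1, dtype=float).reshape(nbars * npix, naxis1)

medians = np.median(image2d.reshape(nbars, npix, naxis1), axis=1)  # mode 1: one spectrum per slitlet
mode0 = np.repeat(medians, npix, axis=0)                           # mode 0: tile each median over its block
print(medians.shape, mode0.shape)  # (3, 6) (12, 6)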