code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _get_name_map(saltenv='base'):
    """Return a reverse map of full pkg names to the names recognized by winrepo.

    On Python 3 the repo's map is returned directly; on Python 2 it is
    copied key-by-key into a fresh dict.
    """
    name_map = get_repo_data(saltenv).get('name_map', {})
    if not six.PY2:
        return name_map
    u_name_map = {}
    for key in name_map:
        u_name_map[key] = name_map[key]
    return u_name_map
Return a reverse map of full pkg names to the names recognized by winrepo.
def contribute_to_class(self, cls, name):
    """Contribute the state to a Model.

    Attaches a StateFieldProperty to wrap the attribute, preserving any
    property already present on the class.
    """
    super(StateField, self).contribute_to_class(cls, name)
    existing = getattr(cls, self.name, None)
    setattr(cls, self.name, StateFieldProperty(self, existing))
Contribute the state to a Model. Attaches a StateFieldProperty to wrap the attribute.
def total_accessibility(in_rsa, path=True):
    """Parse an rsa file for the total surface accessibility data.

    Parameters
    ----------
    in_rsa : str
        Path to a naccess rsa file, or its contents (see *path*).
    path : bool
        If True, *in_rsa* is a file path; otherwise it is the file text.

    Returns
    -------
    tuple of float
        (all_atoms, side_chains, main_chain, non_polar, polar) totals,
        read from the last line of the file.
    """
    if path:
        with open(in_rsa, 'r') as inf:
            rsa = inf.read()
    else:
        rsa = in_rsa[:]
    totals = [float(x) for x in rsa.splitlines()[-1].split()[1:]]
    all_atoms, side_chains, main_chain, non_polar, polar = totals
    return all_atoms, side_chains, main_chain, non_polar, polar
Parses rsa file for the total surface accessibility data. Parameters ---------- in_rsa : str Path to naccess rsa file. path : bool Indicates if in_rsa is a path or a string. Returns ------- dssp_residues : 5-tuple(float) Total accessibility values for: [0] all atoms [1] all side-chain atoms [2] all main-chain atoms [3] all non-polar atoms [4] all polar atoms
def submit(ctx_name, parent_id, name, url, func, *args, **kwargs):
    """Submit a job through a context.

    *ctx_name* may be a Context instance or the name of a registered
    context; unknown names fall back to the default context.  Returns
    whatever ``_submit`` returns (job ID, parent ID, AsyncResult).
    """
    if isinstance(ctx_name, Context):
        ctx = ctx_name
    else:
        ctx = ctxs.get(ctx_name, ctxs[ctx_default])
    return _submit(ctx, parent_id, name, url, func, *args, **kwargs)
Submit through a context Parameters ---------- ctx_name : str The name of the context to submit through parent_id : str The ID of the group that the job is a part of. name : str The name of the job url : str The handler that can take the results (e.g., /beta_diversity/) func : function The function to execute. Any returns from this function will be serialized and deposited into Redis using the uuid for a key. This function should raise if the method fails. args : tuple or None Any args for ``func`` kwargs : dict or None Any kwargs for ``func`` Returns ------- tuple, (str, str, AsyncResult) The job ID, parent ID and the IPython's AsyncResult object of the job
def get_connection_cls(cls):
    """Return the connection class, resolving and caching it on first use.

    :rtype: :class:`type`
    """
    if cls.__connection_cls is None:
        # from_settings() returns (class, extra); cache only the class.
        cls.__connection_cls, _ = cls.from_settings()
    return cls.__connection_cls
Return connection class. :rtype: :class:`type`
def _find_relation_factory(module):
    """Attempt to find a RelationFactory subclass in *module*.

    RelationFactory and RelationBase themselves are ignored so they may be
    imported as base classes.  Base classes of other candidates are pruned;
    returns None (with a warning) when nothing is found and raises
    RuntimeError when more than one candidate remains.
    """
    if not module:
        return None
    members = (getattr(module, attr) for attr in dir(module))
    candidates = [obj for obj in members
                  if (obj is not RelationFactory and
                      obj is not RelationBase and
                      isclass(obj) and
                      issubclass(obj, RelationFactory))]
    # Keep only the most-derived candidates: drop any class that has a
    # subclass among the other candidates.
    candidates = [base for base in candidates
                  if not any(issubclass(other, base)
                             for other in candidates if base is not other)]
    if not candidates:
        hookenv.log('No RelationFactory found in {}'.format(module.__name__),
                    hookenv.WARNING)
        return None
    if len(candidates) > 1:
        raise RuntimeError('Too many RelationFactory found in {}'
                           ''.format(module.__name__))
    return candidates[0]
Attempt to find a RelationFactory subclass in the module. Note: RelationFactory and RelationBase are ignored so they may be imported to be used as base classes without fear.
def DbGetPropertyHist(self, argin):
    """Retrieve object property history.

    :param argin: [object name, property name]
    :type: tango.DevVarStringArray
    :return: flattened history records (name, date, value count, values...)
    :rtype: tango.DevVarStringArray
    """
    self._log.debug("In DbGetPropertyHist()")
    object_name, prop_name = argin[0], argin[1]
    return self.db.get_property_hist(object_name, prop_name)
Retrieve object property history :param argin: Str[0] = Object name Str[2] = Property name :type: tango.DevVarStringArray :return: Str[0] = Property name Str[1] = date Str[2] = Property value number (array case) Str[3] = Property value 1 Str[n] = Property value n :rtype: tango.DevVarStringArray
def trace_set_format(self, fmt):
    """Set the format for the trace buffer to use.

    Args:
      self (JLink): the ``JLink`` instance.
      fmt (int): one of the ``JLinkTraceFormat`` attributes.

    Returns:
      ``None``

    Raises:
      JLinkException: if the DLL call reports failure.
    """
    command = enums.JLinkTraceCommand.SET_FORMAT
    payload = ctypes.c_uint32(fmt)
    status = self._dll.JLINKARM_TRACE_Control(command, ctypes.byref(payload))
    if status == 1:
        raise errors.JLinkException('Failed to set trace format.')
    return None
Sets the format for the trace buffer to use. Args: self (JLink): the ``JLink`` instance. fmt (int): format for the trace buffer; this is one of the attributes of ``JLinkTraceFormat``. Returns: ``None``
def clear_errors():
    """Clear the error register of all Herkulex servos by broadcasting a
    RAM write to the status-error register."""
    packet = [
        0x0B,               # packet size
        BROADCAST_ID,       # address every servo on the bus
        RAM_WRITE_REQ,
        STATUS_ERROR_RAM,
        BYTE2,
        0x00,
        0x00,
    ]
    send_data(packet)
Clears the errors register of all Herkulex servos Args: none
def OnGetItemText(self, item, col): try: column = self.columns[col] value = column.get(self.sorted[item]) except IndexError, err: return None else: if value is None: return u'' if column.percentPossible and self.percentageView and self.total: value = value / float(self.total) * 100.00 if column.format: try: return column.format % (value,) except Exception, err: log.warn('Column %s could not format %r value: %r', column.name, type(value), value ) value = column.get(self.sorted[item] ) if isinstance(value,(unicode,str)): return value return unicode(value) else: if isinstance(value,(unicode,str)): return value return unicode(value)
Retrieve text for the item and column respectively
def _insert_html_configs(c, *, project_name, short_project_name):
    """Insert HTML theme configurations into the Sphinx config dict *c*."""
    c['templates_path'] = [
        '_templates', lsst_sphinx_bootstrap_theme.get_html_templates_path()]
    c['html_theme'] = 'lsst_sphinx_bootstrap_theme'
    c['html_theme_path'] = [lsst_sphinx_bootstrap_theme.get_html_theme_path()]
    c['html_theme_options'] = {'logotext': short_project_name}
    c['html_title'] = project_name
    c['html_short_title'] = short_project_name
    c['html_logo'] = None
    c['html_favicon'] = None
    # Only declare a static path when the directory actually exists.
    c['html_static_path'] = ['_static'] if os.path.isdir('_static') else []
    c['html_last_updated_fmt'] = '%b %d, %Y'
    c['html_use_smartypants'] = True
    c['html_domain_indices'] = False
    c['html_use_index'] = False
    c['html_split_index'] = False
    c['html_show_sourcelink'] = True
    c['html_show_sphinx'] = True
    c['html_show_copyright'] = True
    c['html_file_suffix'] = '.html'
    c['html_search_language'] = 'en'
    return c
Insert HTML theme configurations.
def memory(self):
    """The maximum number of bytes of memory the job will require to run.

    Falls back to the config default; raises AttributeError when neither
    an explicit value nor a config is available.
    """
    if self._memory is not None:
        return self._memory
    if self._config is not None:
        return self._config.defaultMemory
    raise AttributeError("Default value for 'memory' cannot be determined")
The maximum number of bytes of memory the job will require to run.
async def cli(self):
    """Enter commands in a simple CLI, looping until 'exit' is typed."""
    print('Enter commands and press enter')
    print('Type help for help and exit to quit')
    while True:
        command = await _read_input(self.loop, 'pyatv> ')
        if command.lower() == 'exit':
            break
        if command == 'cli':
            print('Command not availble here')
            continue
        await _handle_device_command(
            self.args, command, self.atv, self.loop)
Enter commands in a simple CLI.
def Mx(mt, x):
    """Return Mx: the sum of the commutation column ``mt.Cx`` from index
    *x* to the end of the table."""
    return sum(mt.Cx[j] for j in range(x, len(mt.Cx)))
Return the Mx
def LMLgrad(self, params=None):
    """Evaluate the gradient of the log marginal likelihood for the given
    hyperparameters.  Returns ``{'covar': gradient_vector}``."""
    if params is not None:
        self.setParams(params)
    cache = self._update_cache()
    W = cache['W']
    grad = SP.zeros(self.covar.n_params)
    for idx in range(self.covar.n_params):
        Kd = self.covar.Kgrad_param(idx)
        grad[idx] = 0.5 * (W * Kd).sum()
    return {'covar': grad}
evaluates the gradient of the log marginal likelihood for the given hyperparameters
def save_hdf(self, filename, path=''):
    """Save both orbit populations to an .h5 file, under the ``<path>/long``
    and ``<path>/short`` groups respectively."""
    self.orbpop_long.save_hdf(filename, '{}/long'.format(path))
    self.orbpop_short.save_hdf(filename, '{}/short'.format(path))
Save to .h5 file.
def jtag_send(self, tms, tdi, num_bits):
    """Send data via JTAG on the rising clock edge.

    Args:
      tms (int): TAP state-transition bits.
      tdi (int): data bits shifted in via TDI.
      num_bits (int): number of meaningful bits, in ``[1, 32]``.

    Returns:
      ``None``

    Raises:
      ValueError: if ``num_bits < 1`` or ``num_bits > 32``.
    """
    if not (util.is_natural(num_bits) and 1 <= num_bits <= 32):
        raise ValueError('Number of bits must be >= 1 and <= 32.')
    self._dll.JLINKARM_StoreBits(tms, tdi, num_bits)
    return None
Sends data via JTAG. Sends data via JTAG on the rising clock edge, TCK. At on each rising clock edge, on bit is transferred in from TDI and out to TDO. The clock uses the TMS to step through the standard JTAG state machine. Args: self (JLink): the ``JLink`` instance tms (int): used to determine the state transitions for the Test Access Port (TAP) controller from its current state tdi (int): input data to be transferred in from TDI to TDO num_bits (int): a number in the range ``[1, 32]`` inclusively specifying the number of meaningful bits in the ``tms`` and ``tdi`` parameters for the purpose of extracting state and data information Returns: ``None`` Raises: ValueError: if ``num_bits < 1`` or ``num_bits > 32``. See Also: `JTAG Technical Overview <https://www.xjtag.com/about-jtag/jtag-a-technical-overview>`_.
def _setup_firefox(self, capabilities):
    """Set up a local Firefox webdriver.

    :param capabilities: capabilities object
    :returns: a new local Firefox driver
    """
    if capabilities.get("marionette"):
        gecko_driver = self.config.get('Driver', 'gecko_driver_path')
        self.logger.debug("Gecko driver path given in properties: %s",
                          gecko_driver)
    else:
        gecko_driver = None
    firefox_binary = self.config.get_optional('Firefox', 'binary')
    firefox_options = Options()
    if self.config.getboolean_optional('Driver', 'headless'):
        self.logger.debug("Running Firefox in headless mode")
        firefox_options.add_argument('-headless')
    self._add_firefox_arguments(firefox_options)
    if firefox_binary:
        firefox_options.binary = firefox_binary
    log_path = os.path.join(DriverWrappersPool.output_directory,
                            'geckodriver.log')
    try:
        # Newer Selenium accepts log_path; older versions raise TypeError.
        return webdriver.Firefox(firefox_profile=self._create_firefox_profile(),
                                 capabilities=capabilities,
                                 executable_path=gecko_driver,
                                 firefox_options=firefox_options,
                                 log_path=log_path)
    except TypeError:
        return webdriver.Firefox(firefox_profile=self._create_firefox_profile(),
                                 capabilities=capabilities,
                                 executable_path=gecko_driver,
                                 firefox_options=firefox_options)
Setup Firefox webdriver :param capabilities: capabilities object :returns: a new local Firefox driver
def _find_protocol_error(tb, proto_name): tb_info = traceback.extract_tb(tb) for frame in reversed(tb_info): if frame.filename == proto_name: return frame else: raise KeyError
Return the FrameInfo for the lowest frame in the traceback from the protocol.
async def connect(url, *, apikey=None, insecure=False):
    """Connect to a remote MAAS instance with `apikey`.

    Returns a new :class:`Profile` which has NOT been saved.
    """
    url = urlparse(api_url(url))
    if url.username is not None:
        raise ConnectError(
            "Cannot provide user-name explicitly in URL (%r) when connecting; "
            "use login instead." % url.username)
    if url.password is not None:
        # NOTE(review): this interpolates url.username rather than the
        # password — possibly deliberate (avoids echoing a secret); confirm.
        raise ConnectError(
            "Cannot provide password explicitly in URL (%r) when connecting; "
            "use login instead." % url.username)
    credentials = None if apikey is None else Credentials.parse(apikey)
    description = await fetch_api_description(url, insecure)
    return Profile(
        name=url.netloc, url=url.geturl(),
        credentials=credentials, description=description)
Connect to a remote MAAS instance with `apikey`. Returns a new :class:`Profile` which has NOT been saved. To connect AND save a new profile:: profile = connect(url, apikey=apikey) profile = profile.replace(name="mad-hatter") with profiles.ProfileStore.open() as config: config.save(profile) # Optionally, set it as the default. config.default = profile.name
def write_int32(self, value, little_endian=True):
    """Pack *value* as a signed 32-bit integer and write it to the stream.

    Args:
        value: integer to write.
        little_endian (bool): endianness; little endian by default.

    Returns:
        int: the number of bytes written.
    """
    endian = "<" if little_endian else ">"
    return self.pack('%si' % endian, value)
Pack the value as a signed integer and write 4 bytes to the stream. Args: value: little_endian (bool): specify the endianness. (Default) Little endian. Returns: int: the number of bytes written.
def ikev2scan(ip, **kwargs):
    """Send an IKEv2 SA proposal to *ip* and wait for answers (scapy sr)."""
    probe = (IP(dst=ip) /
             UDP() /
             IKEv2(init_SPI=RandString(8), exch_type=34) /
             IKEv2_payload_SA(prop=IKEv2_payload_Proposal()))
    return sr(probe, **kwargs)
Send a IKEv2 SA to an IP and wait for answers.
def resume(self, container_id=None, sudo=None):
    """Resume a stopped OciImage container, if it exists.

    Equivalent command line example:
       singularity oci resume <container_ID>

    Parameters
    ==========
    container_id: the id to resume.
    sudo: add sudo to the command (needed when the container was created
          by root).

    Returns
    =======
    return_code: indicates whether the container was resumed.
    """
    return self._state_command(container_id, command='resume', sudo=sudo)
resume a stopped OciImage container, if it exists Equivalent command line example: singularity oci resume <container_ID> Parameters ========== container_id: the id to stop. sudo: Add sudo to the command. If the container was created by root, you need sudo to interact and get its state. Returns ======= return_code: the return code to indicate if the container was resumed.
def get_meta_graph_copy(self, tags=None):
    """Return a copy of the MetaGraph with the identical set of tags."""
    source = self.get_meta_graph(tags)
    duplicate = tf_v1.MetaGraphDef()
    duplicate.CopyFrom(source)
    return duplicate
Returns a copy of a MetaGraph with the identical set of tags.
def decompress_file(filepath):
    """Decompress a file with a gz, bz2 or z extension.

    Writes the decompressed content to the same path minus the compression
    extension, then removes the compressed original.

    Args:
        filepath (str): Path to file.
    """
    from monty.io import zopen
    parts = filepath.split(".")
    if parts[-1].upper() in ["BZ2", "GZ", "Z"]:
        target = ".".join(parts[:-1])
        with open(target, 'wb') as f_out, zopen(filepath, 'rb') as f_in:
            f_out.writelines(f_in)
        os.remove(filepath)
Decompresses a file with the correct extension. Automatically detects gz, bz2 or z extension. Args: filepath (str): Path to file. compression (str): A compression mode. Valid options are "gz" or "bz2". Defaults to "gz".
def len2dlc(length):
    """Calculate the CAN-FD DLC from a data length.

    :param int length: Length in number of bytes (0-64)
    :returns: DLC (0-15)
    :rtype: int
    """
    if length <= 8:
        # Lengths up to 8 map directly to the classic-CAN DLC.
        return length
    for dlc, capacity in enumerate(CAN_FD_DLC):
        if capacity >= length:
            return dlc
    return 15
Calculate the DLC from data length. :param int length: Length in number of bytes (0-64) :returns: DLC (0-15) :rtype: int
def _traverse_report(data): if 'items' not in data: return {} out = {} for item in data['items']: skip = (item['severity'] == 'NonDisplay' or item['itemKey'] == 'categoryDesc' or item['value'] in [None, 'Null', 'N/A', 'NULL']) if skip: continue value = 'Ok' if item['value'] == '0.0' else item['value'] out[item['itemKey']] = value out.update(_traverse_report(item)) return out
Recursively traverse vehicle health report.
def temperature(self):
    """Get the temperature in degrees Celsius.

    Reads a big-endian 16-bit word over I2C and interprets it as signed
    8.8 fixed point.
    """
    raw = self.i2c_read(2)
    (word,) = struct.unpack('>H', raw)
    # Reinterpret the unsigned word as two's complement.
    if word >= 32768:
        word -= 65536
    return word / 256.0
Get the temperature in degrees Celsius.
async def open_wallet_search(wallet_handle: int, type_: str, query_json: str, options_json: str) -> int:
    """Search for wallet records.

    :param wallet_handle: wallet handle (created by open_wallet).
    :param type_: allows to separate different record types collections.
    :param query_json: MongoDB-style (WQL) query over wallet record tags.
    :param options_json: retrieval options JSON (retrieveRecords,
        retrieveTotalCount, retrieveType, retrieveValue, retrieveTags).
    :return: search_handle usable with fetch_wallet_search_next_records.
    """
    logger = logging.getLogger(__name__)
    logger.debug("open_wallet_search: >>> wallet_handle: %r, type_: %r, query_json: %r, options_json: %r",
                 wallet_handle, type_, query_json, options_json)
    if not hasattr(open_wallet_search, "cb"):
        logger.debug("open_wallet_search: Creating callback")
        open_wallet_search.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_int32))
    res = await do_call('indy_open_wallet_search',
                        c_int32(wallet_handle),
                        c_char_p(type_.encode('utf-8')),
                        c_char_p(query_json.encode('utf-8')),
                        c_char_p(options_json.encode('utf-8')),
                        open_wallet_search.cb)
    logger.debug("open_wallet_search: <<< res: %r", res)
    return res
Search for wallet records :param wallet_handle: wallet handler (created by open_wallet). :param type_: allows to separate different record types collections :param query_json: MongoDB style query to wallet record tags: { "tagName": "tagValue", $or: { "tagName2": { $regex: 'pattern' }, "tagName3": { $gte: '123' }, }, } :param options_json: //TODO: FIXME: Think about replacing by bitmask { retrieveRecords: (optional, true by default) If false only "counts" will be calculated, retrieveTotalCount: (optional, false by default) Calculate total count, retrieveType: (optional, false by default) Retrieve record type, retrieveValue: (optional, true by default) Retrieve record value, retrieveTags: (optional, true by default) Retrieve record tags, } :return: search_handle: Wallet search handle that can be used later to fetch records by small batches (with fetch_wallet_search_next_records)
def parse(path):
    """Parse a WordPress export XML file and return a formatted dict with
    blog, authors, categories, tags and posts.

    Example: wpparser.parse("./blog.wordpress.2014-09-26.xml")
    """
    channel = ET.parse(path).getroot().find("./channel")
    return {
        "blog": _parse_blog(channel),
        "authors": _parse_authors(channel),
        "categories": _parse_categories(channel),
        "tags": _parse_tags(channel),
        "posts": _parse_posts(channel),
    }
Parses xml and returns a formatted dict. Example: wpparser.parse("./blog.wordpress.2014-09-26.xml") Will return: { "blog": { "tagline": "Tagline", "site_url": "http://marteinn.se/blog", "blog_url": "http://marteinn.se/blog", "language": "en-US", "title": "Marteinn / Blog" }, "authors: [{ "login": "admin", "last_name": None, "display_name": "admin", "email": "martin@marteinn.se", "first_name": None} ], "categories": [{ "parent": None, "term_id": "3", "name": "Action Script", "nicename": "action-script", "children": [{ "parent": "action-script", "term_id": "20", "name": "Flash related", "nicename": "flash-related", "children": [] }] }], "tags": [{"term_id": "36", "slug": "bash", "name": "Bash"}], "posts": [{ "creator": "admin", "excerpt": None, "post_date_gmt": "2014-09-22 20:10:40", "post_date": "2014-09-22 21:10:40", "post_type": "post", "menu_order": "0", "guid": "http://marteinn.se/blog/?p=828", "title": "Post Title", "comments": [{ "date_gmt": "2014-09-24 23:08:31", "parent": "0", "date": "2014-09-25 00:08:31", "id": "85929", "user_id": "0", "author": u"Author", "author_email": None, "author_ip": "111.111.111.111", "approved": "1", "content": u"Comment title", "author_url": "http://example.com", "type": "pingback" }], "content": "Text", "post_parent": "0", "post_password": None, "status": "publish", "description": None, "tags": ["tag"], "ping_status": "open", "post_id": "828", "link": "http://www.marteinn.se/blog/slug/", "pub_date": "Mon, 22 Sep 2014 20:10:40 +0000", "categories": ["category"], "is_sticky": "0", "post_name": "slug" }] }
def parse_url_rules(urls_fp):
    """Build URL rules from the given file-like object.

    Each non-blank line is treated as a regex; returns a list of
    {'str': pattern, 're': compiled_pattern} dicts.
    """
    rules = []
    for raw_line in urls_fp:
        pattern = raw_line.strip()
        if pattern:
            rules.append({'str': pattern, 're': re.compile(pattern)})
    return rules
URL rules from given fp
def sample_counters(mc, system_info):
    """Sample every router counter in the machine, keyed by (x, y)."""
    return {(x, y): mc.get_router_diagnostics(x, y)
            for (x, y) in system_info}
Sample every router counter in the machine.
def Set(self, name, initial=None):
    """The set datatype.

    :param name: The name of the set.
    :keyword initial: Initial members of the set.

    See :class:`redish.types.Set`.
    """
    return types.Set(name, self.api, initial)
The set datatype. :param name: The name of the set. :keyword initial: Initial members of the set. See :class:`redish.types.Set`.
def check_wide_data_for_blank_choices(choice_col, wide_data):
    """Check `wide_data` for null values in the choice column and raise a
    helpful ValueError if any are found.

    Parameters
    ----------
    choice_col : str.
        Column of `wide_data` recording each observation's choice.
    wide_data : pandas dataframe.
        One row per observation; should contain `choice_col`.

    Returns
    -------
    None.
    """
    if wide_data[choice_col].isnull().any():
        raise ValueError("One or more of the values in wide_data[choice_col] is null."
                         " Remove null values in the choice column or fill them in.")
    return None
Checks `wide_data` for null values in the choice column, and raises a helpful ValueError if null values are found. Parameters ---------- choice_col : str. Denotes the column in `wide_data` that is used to record each observation's choice. wide_data : pandas dataframe. Contains one row for each observation. Should contain `choice_col`. Returns ------- None.
def safe_mkdir(directory, clean=False):
    """Safely create a directory.

    Ensures *directory* is present; a no-op if it already exists.  When
    *clean* is True the directory is removed first, so it ends up empty.
    """
    if clean:
        safe_rmtree(directory)
    try:
        os.makedirs(directory)
    except OSError as e:
        # Already existing is fine; re-raise anything else.
        if e.errno != errno.EEXIST:
            raise
Safely create a directory. Ensures a directory is present. If it's not there, it is created. If it is, it's a no-op. If clean is True, ensures the directory is empty.
def loadJSON(self, jdata):
    """Initializes the information for this class from the given JSON data
    blob.

    :param jdata: <dict>
    """
    self.__name = jdata['name']
    self.__field = jdata['field']
    # NOTE(review): `or` keeps the current default whenever the JSON value
    # is falsy ('', 0, False), not only when the key is missing — confirm
    # this is intended.
    self.__display = jdata.get('display') or self.__display
    self.__flags = jdata.get('flags') or self.__flags
    self.__defaultOrder = jdata.get('defaultOrder') or self.__defaultOrder
    self.__default = jdata.get('default') or self.__default
Initializes the information for this class from the given JSON data blob. :param jdata: <dict>
def define_page_breakpoint(self, dwProcessId, address, pages=1,
                           condition=True, action=None):
    """Create a disabled page breakpoint at the given address.

    @type dwProcessId: int
    @param dwProcessId: Process global ID.
    @type address: int
    @param address: Memory address of the first page to watch.
    @type pages: int
    @param pages: Number of pages to watch.
    @type condition: function
    @param condition: (Optional) Condition callback function.
    @type action: function
    @param action: (Optional) Action callback function.
    @rtype: L{PageBreakpoint}
    @return: The page breakpoint object.
    """
    process = self.system.get_process(dwProcessId)
    bp = PageBreakpoint(address, pages, condition, action)
    begin = bp.get_address()
    end = begin + bp.get_size()
    pageSize = MemoryAddresses.pageSize
    # First pass: verify no page in the range already has a breakpoint.
    address = begin
    while address < end:
        key = (dwProcessId, address)
        if key in self.__pageBP:
            msg = "Already exists (PID %d) : %r"
            msg = msg % (dwProcessId, self.__pageBP[key])
            raise KeyError(msg)
        address = address + pageSize
    # Second pass: register the breakpoint on every page in the range.
    address = begin
    while address < end:
        key = (dwProcessId, address)
        self.__pageBP[key] = bp
        address = address + pageSize
    return bp
Creates a disabled page breakpoint at the given address. @see: L{has_page_breakpoint}, L{get_page_breakpoint}, L{enable_page_breakpoint}, L{enable_one_shot_page_breakpoint}, L{disable_page_breakpoint}, L{erase_page_breakpoint} @type dwProcessId: int @param dwProcessId: Process global ID. @type address: int @param address: Memory address of the first page to watch. @type pages: int @param pages: Number of pages to watch. @type condition: function @param condition: (Optional) Condition callback function. The callback signature is:: def condition_callback(event): return True # returns True or False Where B{event} is an L{Event} object, and the return value is a boolean (C{True} to dispatch the event, C{False} otherwise). @type action: function @param action: (Optional) Action callback function. If specified, the event is handled by this callback instead of being dispatched normally. The callback signature is:: def action_callback(event): pass # no return value Where B{event} is an L{Event} object, and the return value is a boolean (C{True} to dispatch the event, C{False} otherwise). @rtype: L{PageBreakpoint} @return: The page breakpoint object.
def deleteRole(self, roleID):
    """Delete a role by ID via a POST to <url>/<roleID>/delete."""
    return self._post(url=self._url + "/%s/delete" % roleID,
                      param_dict={"f": "json"},
                      proxy_url=self._proxy_url,
                      proxy_port=self._proxy_port)
deletes a role by ID
def run_latex_report(base, report_dir, section_info):
    """Generate a pdf report with plots using latex (pdflatex run inside
    *report_dir*)."""
    out_name = "%s_recal_plots.tex" % base
    out_file = os.path.join(report_dir, out_name)
    with open(out_file, "w") as out_handle:
        out_handle.write(Template(out_template).render(sections=section_info))
    start_dir = os.getcwd()
    try:
        # pdflatex resolves relative asset paths from the report directory.
        os.chdir(report_dir)
        subprocess.Popen(["pdflatex", out_name]).wait()
    finally:
        os.chdir(start_dir)
Generate a pdf report with plots using latex.
def add_path(prev: Optional[ResponsePath], key: Union[str, int]) -> ResponsePath:
    """Add a key to a response path.

    Given a ResponsePath and a key, return a new ResponsePath containing
    the new key.
    """
    return ResponsePath(prev, key)
Add a key to a response path. Given a ResponsePath and a key, return a new ResponsePath containing the new key.
def path_for_doc(self, doc_id):
    """Return the full filesystem path for *doc_id*, as computed by the
    configured ``path_for_doc_fn`` over this repo."""
    return self.path_for_doc_fn(self.repo, doc_id)
Returns doc_dir and doc_filepath for doc_id.
def snapshot_share(self, share_name, metadata=None, quota=None, timeout=None):
    """Create a snapshot of an existing share under the specified account.

    :param str share_name: The name of the share to create a snapshot of.
    :param metadata: A dict with name/value pairs to associate with the
        share as metadata.
    :param int quota: Maximum size of the share, in gigabytes (1-5120).
    :param int timeout: Timeout expressed in seconds.
    :return: snapshot properties
    :rtype: azure.storage.file.models.Share
    """
    _validate_not_none('share_name', share_name)
    request = HTTPRequest()
    request.method = 'PUT'
    request.host_locations = self._get_host_locations()
    request.path = _get_path(share_name)
    request.query = {
        'restype': 'share',
        'comp': 'snapshot',
        'timeout': _int_to_str(timeout),
    }
    request.headers = {'x-ms-share-quota': _int_to_str(quota)}
    _add_metadata_headers(metadata, request)
    return self._perform_request(request, _parse_snapshot_share, [share_name])
Creates a snapshot of an existing share under the specified account. :param str share_name: The name of the share to create a snapshot of. :param metadata: A dict with name_value pairs to associate with the share as metadata. Example:{'Category':'test'} :type metadata: a dict of str to str: :param int quota: Specifies the maximum size of the share, in gigabytes. Must be greater than 0, and less than or equal to 5TB (5120). :param int timeout: The timeout parameter is expressed in seconds. :return: snapshot properties :rtype: azure.storage.file.models.Share
def track_child(self, child, logical_block_size, allow_duplicate=False):
    """Track an existing child of this directory record.

    Parameters:
     child - The child directory record object to add.
     logical_block_size - The size of a logical block for this volume
                          descriptor.
     allow_duplicate - Whether to allow duplicate names.
    Returns:
     Nothing.
    """
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('Directory Record not yet initialized')
    self._add_child(child, logical_block_size, allow_duplicate, False)
A method to track an existing child of this directory record. Parameters: child - The child directory record object to add. logical_block_size - The size of a logical block for this volume descriptor. allow_duplicate - Whether to allow duplicate names, as there are situations where duplicate children are allowed. Returns: Nothing.
def _load_table(self, name):
    """Reflect the given table from the database, caching the Table object
    for subsequent lookups."""
    cached = self._tables.get(name, None)
    if cached is not None:
        return cached
    if not self.engine.has_table(name):
        raise BindingException('Table does not exist: %r' % name, table=name)
    table = Table(name, self.meta, autoload=True)
    self._tables[name] = table
    return table
Reflect a given table from the database.
def symbols_to_prob(symbols):
    """Return a Counter mapping each symbol to its probability.

    input:
    -----
    symbols: iterable of hashable items; one-shot iterators (e.g. a zip
             of iterables) are supported.
    """
    counts = Counter(symbols)
    # Bug fix: the previous `len(list(symbols))` re-consumed the input,
    # which is empty once Counter has exhausted a one-shot iterator,
    # producing a ZeroDivisionError.  Total the counts instead.
    total = float(sum(counts.values()))
    for symbol in counts:
        counts[symbol] /= total
    return counts
Return a dict mapping symbols to probability. input: ----- symbols: iterable of hashable items works well if symbols is a zip of iterables
def _handle_poll(self, relpath, params):
    """Handle poll requests for raw file contents.

    Replies with a JSON map of poll id -> file content read from the
    requested offset; entries with missing paths/files are omitted.
    """
    request = json.loads(params.get('q')[0])
    ret = {}
    for poll in request:
        poll_id = poll.get('id', None)
        path = poll.get('path', None)
        pos = poll.get('pos', 0)
        if not path:
            continue
        abspath = os.path.normpath(os.path.join(self._root, path))
        if not os.path.isfile(abspath):
            continue
        with open(abspath, 'rb') as infile:
            if pos:
                infile.seek(pos)
            ret[poll_id] = infile.read().decode("utf-8")
    self._send_content(json.dumps(ret).encode("utf-8"), 'application/json')
Handle poll requests for raw file contents.
def validate_quantity(self, value):
    """Validate that *value* is of the `Quantity` type, recording a
    validation error otherwise."""
    if not isinstance(value, pq.quantity.Quantity):
        self._error('%s' % value, "Must be a Python quantity.")
Validate that the value is of the `Quantity` type.
def system_update_keyspace(self, ks_def):
    """Update properties of a keyspace; the returned Deferred fires with
    the new schema id.

    Parameters:
     - ks_def
    """
    self._seqid += 1
    deferred = self._reqs[self._seqid] = defer.Deferred()
    self.send_system_update_keyspace(ks_def)
    return deferred
updates properties of a keyspace. returns the new schema id. Parameters: - ks_def
def get_service_status(self, name):
    """Return {'name', 'status', 'output'} for the given service name;
    status is 'missing' when the service query returns nothing."""
    svc = self._query_service(name)
    if svc is None:
        return {'name': name,
                'status': 'missing',
                'output': ''
                }
    return {'name': name,
            'status': self.parse_query(svc['output']),
            'output': svc['output']
            }
Returns the status for the given service name along with the output of the query command
def stat_smt_query(func: Callable):
    """Decorator measuring count and cumulative time of SMT query checks;
    a no-op while statistics collection is disabled."""
    stat_store = SolverStatistics()

    def function_wrapper(*args, **kwargs):
        if not stat_store.enabled:
            return func(*args, **kwargs)
        stat_store.query_count += 1
        started = time()
        result = func(*args, **kwargs)
        # Only successful calls contribute to solver_time.
        stat_store.solver_time += time() - started
        return result

    return function_wrapper
Measures statistics for annotated smt query check function
def git_ls_files(*cmd_args):
    """Run ``git ls-files`` in the top-level project directory.

    Arguments go directly to the execution call.

    :return: set of file names (bytes lines)
    :rtype: :class:`set`
    """
    output = subprocess.check_output(['git', 'ls-files'] + list(cmd_args))
    return set(output.splitlines())
Run ``git ls-files`` in the top-level project directory. Arguments go directly to execution call. :return: set of file names :rtype: :class:`set`
def info_gain(current_impurity, true_branch, false_branch, criterion):
    """Information Gain: the uncertainty of the starting node minus the
    weighted impurity of the two child nodes ('gini' or entropy)."""
    impurity = gini_impurity if criterion == "gini" else entropy
    n_true = len(true_branch)
    weight = float(n_true) / (n_true + len(false_branch))
    return (current_impurity
            - weight * impurity(true_branch)
            - (1 - weight) * impurity(false_branch))
Information Gain. The uncertainty of the starting node, minus the weighted impurity of two child nodes.
def _error(self, request, status, headers={}, prefix_template_path=False, **kwargs): return self._render( request = request, template = str(status), status = status, context = { 'error': kwargs }, headers = headers, prefix_template_path = prefix_template_path )
Convenience method to render an error response. The template is inferred from the status code. :param request: A django.http.HttpRequest instance. :param status: An integer describing the HTTP status code to respond with. :param headers: A dictionary describing HTTP headers. :param prefix_template_path: A boolean describing whether to prefix the template with the view's template path. :param kwargs: Any additional keyword arguments to inject. These are wrapped under ``error`` for convenience. For implementation details, see ``render``
def end_index(self):
    """Return the 1-based index of the last item on this page."""
    paginator = self.paginator
    if self.number == paginator.num_pages:
        # Last page ends at the total item count.
        return paginator.count
    return (self.number - 1) * paginator.per_page + paginator.first_page
Return the 1-based index of the last item on this page.
def to_xarray(input):
    """Convert climlab input to xarray format.

    A climlab.Field becomes an xarray.DataArray; a dictionary of Fields
    (e.g. process.state) becomes an xarray.Dataset.
    """
    from climlab.domain.field import Field
    if isinstance(input, Field):
        return Field_to_xarray(input)
    if isinstance(input, dict):
        return state_to_xarray(input)
    raise TypeError('input must be Field object or dictionary of Field objects')
Convert climlab input to xarray format. If input is a climlab.Field object, return xarray.DataArray If input is a dictionary (e.g. process.state or process.diagnostics), return xarray.Dataset object with all spatial axes, including 'bounds' axes indicating cell boundaries in each spatial dimension. Any items in the dictionary that are not instances of climlab.Field are ignored.
def getsourcefile(object):
    """Return the Python source file an object was defined in, if it exists.

    (Legacy Python 2 implementation using the `string` and `imp` modules.)
    """
    filename = getfile(object)
    # Map compiled files back to their .py source.
    if string.lower(filename[-4:]) in ['.pyc', '.pyo']:
        filename = filename[:-4] + '.py'
    for suffix, mode, kind in imp.get_suffixes():
        # Binary (extension-module) suffixes have no Python source file.
        if 'b' in mode and string.lower(filename[-len(suffix):]) == suffix:
            return None
    if os.path.exists(filename):
        return filename
Return the Python source file an object was defined in, if it exists.
def facets_normal(self):
    """Return the normal of each facet.

    The normal is taken from the largest-area face in each facet; the
    corresponding face origins are stored in the cache.

    Returns
    ---------
    normals: (len(self.facets), 3) float
        A unit normal vector for each facet
    """
    if len(self.facets) == 0:
        return np.array([])
    area_faces = self.area_faces
    # Representative face per facet: the one with the largest area.
    index = np.array([faces[area_faces[faces].argmax()]
                      for faces in self.facets])
    normals = self.face_normals[index]
    self._cache['facets_origin'] = self.vertices[self.faces[:, 0][index]]
    return normals
Return the normal of each facet Returns --------- normals: (len(self.facets), 3) float A unit normal vector for each facet
def GetListSelect(selectList, title="Select", msg=""):
    """Show a Tk listbox built from *selectList* and return
    (selected_string, selected_index).

    title: window name; msg: label shown above the list.
    """
    root = tkinter.Tk()
    root.title(title)
    tkinter.Label(root, text=msg).pack()
    listbox = tkinter.Listbox(root)
    for entry in selectList:
        listbox.insert(tkinter.END, entry)
    listbox.pack()
    tkinter.Button(root, text="OK", fg="black", command=root.quit).pack()
    root.mainloop()
    selected = listbox.get(listbox.curselection())
    print(selected + " is selected")
    root.destroy()
    return (selected, selectList.index(selected))
Create a list window from selectList, then return the selected string and its index. title: Window name. msg: Label of the list. Returns (selectedItem, selectedIndex).
def file_identifier(self):
    """Get the name of this UDF File Entry as a byte string ('/' when there
    is no file identifier)."""
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('UDF File Entry not initialized')
    return b'/' if self.file_ident is None else self.file_ident.fi
A method to get the name of this UDF File Entry as a byte string. Parameters: None. Returns: The UDF File Entry as a byte string.
def wait_for_tx(self, tx, max_seconds=120):
    """Wait for *tx* to show up on the blockchain.

    Args:
        tx (Transaction or UInt256 or str): Transaction or just the hash.
        max_seconds (float): maximum seconds to wait.  Default: 120.

    Returns:
        True: if the transaction was found.

    Raises:
        AttributeError: if *tx* is not Transaction/UInt256/str.
        TxNotFoundInBlockchainError: if not found within *max_seconds*.
    """
    if isinstance(tx, (str, UInt256)):
        tx_hash = str(tx)
    elif isinstance(tx, Transaction):
        tx_hash = tx.Hash.ToString()
    else:
        raise AttributeError("Supplied tx is type '%s', but must be Transaction or UInt256 or str" % type(tx))
    wait_event = Event()
    time_start = time.time()
    while True:
        _tx, height = Blockchain.Default().GetTransaction(tx_hash)
        if height > -1:
            return True
        # Re-poll every 3 seconds until found or timed out.
        wait_event.wait(3)
        seconds_passed = time.time() - time_start
        if seconds_passed > max_seconds:
            raise TxNotFoundInBlockchainError("Transaction with hash %s not found after %s seconds" % (tx_hash, int(seconds_passed)))
Wait for tx to show up on blockchain Args: tx (Transaction or UInt256 or str): Transaction or just the hash max_seconds (float): maximum seconds to wait for tx to show up. default: 120 Returns: True: if transaction was found Raises: AttributeError: if supplied tx is not Transaction or UInt256 or str TxNotFoundInBlockchainError: if tx is not found in blockchain after max_seconds
def complete_object_value(
    self,
    return_type: GraphQLObjectType,
    field_nodes: List[FieldNode],
    info: GraphQLResolveInfo,
    path: ResponsePath,
    result: Any,
) -> AwaitableOrValue[Dict[str, Any]]:
    """Complete an Object value by executing all sub-selections.

    When the type defines ``is_type_of``, the result is validated first;
    both synchronous and awaitable checks are supported.
    """
    if return_type.is_type_of:
        is_type_of = return_type.is_type_of(result, info)

        if isawaitable(is_type_of):
            # Defer the check; raise once the awaitable resolves falsy.
            async def await_check_then_collect():
                if not await is_type_of:
                    raise invalid_return_type_error(
                        return_type, result, field_nodes
                    )
                return self.collect_and_execute_subfields(
                    return_type, field_nodes, path, result
                )

            return await_check_then_collect()

        if not is_type_of:
            raise invalid_return_type_error(return_type, result, field_nodes)

    return self.collect_and_execute_subfields(
        return_type, field_nodes, path, result
    )
Complete an Object value by executing all sub-selections.
def update(self):
    """Refresh Monit daemon and service status from the server.

    Fetches the XML status document and rebuilds the service mapping.
    Recurses (after a 1s pause) while any service has a pending action or
    a transient monitor state, so callers observe settled status.
    """
    url = self.baseurl + '/_status?format=xml'
    response = self.s.get(url)
    response.raise_for_status()
    from xml.etree.ElementTree import XML
    root = XML(response.text)
    for serv_el in root.iter('service'):
        serv = Monit.Service(self, serv_el)
        self[serv.name] = serv
        # a pending action means the state is transient -- poll again
        if self[serv.name].pendingaction:
            time.sleep(1)
            return Monit.update(self)
        # monitorState == 2 appears to be a transient/initializing
        # state -- NOTE(review): confirm against Monit's XML schema
        if self[serv.name].monitorState == 2:
            time.sleep(1)
            return Monit.update(self)
Update Monit daemon and services status.
def cloud_front_origin_access_identity_exists(Id, region=None, key=None, keyid=None, profile=None):
    """Return True if a CloudFront origin access identity with the given
    resource ID exists, False otherwise.

    Id
        Resource ID of the CloudFront origin access identity.
    region, key, keyid, profile
        Standard AWS connection arguments, passed through unchanged.
    """
    auth = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile}
    identities = list_cloud_front_origin_access_identities(**auth) or []
    return any(identity['Id'] == Id for identity in identities)
Return True if a CloudFront origin access identity exists with the given Resource ID or False otherwise. Id Resource ID of the CloudFront origin access identity. region Region to connect to. key Secret key to use. keyid Access key to use. profile Dict, or pillar key pointing to a dict, containing AWS region/key/keyid. CLI Example: .. code-block:: bash salt myminion boto_cloudfront.cloud_front_origin_access_identity_exists Id=E30RBTSABCDEF0
def _call(self, x, out=None):
    """Apply the block-structured operators to the parts of ``x``.

    Out-of-place: accumulate ``op(x[j])`` into row ``i`` of a fresh zero
    element of the range. In-place: the first operator evaluated for a row
    writes directly into ``out[i]``; subsequent ones accumulate; rows no
    operator touches are explicitly zeroed.
    """
    if out is None:
        out = self.range.zero()
        for i, j, op in zip(self.ops.row, self.ops.col, self.ops.data):
            out[i] += op(x[j])
    else:
        has_evaluated_row = np.zeros(len(self.range), dtype=bool)
        for i, j, op in zip(self.ops.row, self.ops.col, self.ops.data):
            if not has_evaluated_row[i]:
                # first contribution to this row: write in place,
                # overwriting whatever `out[i]` previously held
                op(x[j], out=out[i])
            else:
                out[i] += op(x[j])
            has_evaluated_row[i] = True
        # rows with no operator must be zeroed, since `out` may hold
        # stale data from a previous call
        for i, evaluated in enumerate(has_evaluated_row):
            if not evaluated:
                out[i].set_zero()
    return out
Call the operators on the parts of ``x``.
def credit(self, amount, debit_account, description, debit_memo="", credit_memo="", datetime=None):
    """Post a credit of ``amount`` against this account and a matching
    debit against ``debit_account``.

    ``amount`` must be non-negative; the credit is recorded by posting
    its negation through ``self.post``.
    """
    assert amount >= 0
    return self.post(
        -amount,
        debit_account,
        description,
        self_memo=credit_memo,
        other_memo=debit_memo,
        datetime=datetime,
    )
Post a credit of 'amount' and a debit of -amount against this account and credit_account respectively. note amount must be non-negative.
def generate(self, labels, split_idx):
    """Generate peak-specific noise values, one per dimension.

    :param tuple labels: dimension labels of a peak; only the atom label
        (first character of each label) is used.
    :param int split_idx: index selecting which peak-list split's
        distribution parameters to use.
    :return: noise values for the dimensions, in peak order.
    :rtype: list
    """
    atom_labels = [label[0] for label in labels]
    distribution_function = distributions[self.distribution_name]["function"]
    noise = []
    for label in atom_labels:
        params = [self.parameters["{}_{}".format(label, param)][split_idx]
                  for param in self.distribution_parameter_names]
        if None in params:
            # missing parameters mean "no noise" for this dimension
            dim_noise = 0.0
        else:
            # Fix: previously a ValueError here was caught and re-raised as
            # a bare `raise ValueError`, discarding the distribution's
            # informative message; let it propagate unchanged instead.
            dim_noise = distribution_function(*params)
        noise.append(dim_noise)
    return noise
Generate peak-specific noise abstract method, must be reimplemented in a subclass. :param tuple labels: Dimension labels of a peak. :param int split_idx: Index specifying which peak list split parameters to use. :return: List of noise values for dimensions ordered as they appear in a peak. :rtype: :py:class:`list`
def wrap_case_result(raw, expr):
    """Wrap the raw result of a CASE expression in a Series, returning a
    bare scalar when the expression is scalar-valued.

    Parameters
    ----------
    raw : ndarray
        Raw results of executing the ``CASE`` expression.
    expr : ValueExpr
        The expression from which `raw` was computed.

    Returns
    -------
    Union[scalar, Series]
    """
    values = np.atleast_1d(raw)
    if np.any(pd.isnull(values)):
        # nulls present: let pandas infer a null-capable dtype
        series = pd.Series(values)
    else:
        pandas_dtype = constants.IBIS_TYPE_TO_PANDAS_TYPE[expr.type()]
        series = pd.Series(values, dtype=pandas_dtype)
    if series.size == 1 and isinstance(expr, ir.ScalarExpr):
        return series.item()
    return series
Wrap a CASE statement result in a Series and handle returning scalars. Parameters ---------- raw : ndarray[T] The raw results of executing the ``CASE`` expression expr : ValueExpr The expression from the which `raw` was computed Returns ------- Union[scalar, Series]
def syndic_cmd(self, data):
    """Take the now-clear load and forward the publication on to the client.

    :param data: decoded pub job data (tgt, fun, arg, ret, jid, to, ...);
        ``tgt_type`` defaults to 'glob' when absent.
    """
    if 'tgt_type' not in data:
        data['tgt_type'] = 'glob'
    kwargs = {}
    # only forward optional fields that are actually present in the load
    for field in ('master_id', 'user', ):
        if field in data:
            kwargs[field] = data[field]

    def timeout_handler(*args):
        # best effort: log and swallow errors so the syndic keeps running
        log.warning('Unable to forward pub data: %s', args[1])
        return True

    with tornado.stack_context.ExceptionStackContext(timeout_handler):
        self.local.pub_async(data['tgt'],
                             data['fun'],
                             data['arg'],
                             data['tgt_type'],
                             data['ret'],
                             data['jid'],
                             data['to'],
                             io_loop=self.io_loop,
                             callback=lambda _: None,
                             **kwargs)
Take the now clear load and forward it on to the client cmd
def get_method(name):
    """Return the PSD method registered under the given name.

    Raises KeyError (with a descriptive message) when no method has been
    registered under the normalized name.
    """
    key = _format_name(name)
    try:
        return METHODS[key]
    except KeyError as exc:
        exc.args = ("no PSD method registered with name {0!r}".format(key),)
        raise
Return the PSD method registered with the given name.
def get_app_index_dashboard(context):
    """Return the app-index admin dashboard defined by the user, or the
    default one.

    Looks for a per-app dashboard registered in ``Registry.registry``;
    when none is found, instantiates the class named by the
    ``ADMIN_TOOLS_APP_INDEX_DASHBOARD`` setting (or its default).
    """
    app = context['app_list'][0]
    model_list = []
    app_label = None
    app_title = app['name']
    admin_site = get_admin_site(context=context)
    for model, model_admin in admin_site._registry.items():
        if app['app_label'] == model._meta.app_label:
            # derive the dotted module prefix up to and including the
            # app label, used as the registry lookup key
            split = model.__module__.find(model._meta.app_label)
            app_label = model.__module__[0:split] + model._meta.app_label
            for m in app['models']:
                # match the admin display name against the model in order
                # to collect fully qualified model paths for this app
                if m['name'] == capfirst(model._meta.verbose_name_plural):
                    mod = '%s.%s' % (model.__module__, model.__name__)
                    model_list.append(mod)
    if app_label is not None and app_label in Registry.registry:
        return Registry.registry[app_label](app_title, model_list)
    return _get_dashboard_cls(getattr(
        settings,
        'ADMIN_TOOLS_APP_INDEX_DASHBOARD',
        'admin_tools.dashboard.dashboards.DefaultAppIndexDashboard'
    ), context)(app_title, model_list)
Returns the admin dashboard defined by the user or the default one.
async def wait_for_connection_lost(self) -> bool:
    """Wait until the TCP connection is closed or ``self.close_timeout``
    elapses.

    Return ``True`` if the connection is closed and ``False`` otherwise.
    """
    if not self.connection_lost_waiter.done():
        try:
            # shield() so the timeout cancels only this wait, not the
            # shared connection_lost_waiter future itself
            await asyncio.wait_for(
                asyncio.shield(self.connection_lost_waiter),
                self.close_timeout,
                loop=self.loop,
            )
        except asyncio.TimeoutError:
            pass
    # re-check rather than assume: the waiter may have completed just as
    # the timeout fired
    return self.connection_lost_waiter.done()
Wait until the TCP connection is closed or ``self.close_timeout`` elapses. Return ``True`` if the connection is closed and ``False`` otherwise.
def dct2(input, K=13):
    """Convert a log-power spectrum to MFCC using the normalized DCT-II.

    Parameters:
        input: (nframes, N) array of log-power-spectrum frames.
        K: number of cepstral coefficients to keep (default 13).
    """
    _, N = input.shape
    freqstep = numpy.pi / N
    cosines = dctmat(N, K, freqstep, False)
    return numpy.dot(input, cosines) * (2.0 / N)
Convert log-power-spectrum to MFCC using the normalized DCT-II
def list_websites(self):
    """Return all websites from the server (names are not keys)."""
    self.connect()
    return self.server.list_websites(self.session_id)
Return all websites, name is not a key
def handle_single_request(self, request_object):
    """Handle a single JSON-RPC request object and return the decoded
    response body, or None for notifications (requests without an id).

    :param request_object: a MethodCall or Notification
    :raises TypeError: if ``request_object`` is neither type
    :raises CalledServiceError: if the HTTP call returns an error
    """
    if not isinstance(request_object, (MethodCall, Notification)):
        raise TypeError("Invalid type for request_object")
    method_name = request_object.method_name
    params = request_object.params
    req_id = request_object.id
    request_body = self.build_request_body(method_name, params, id=req_id)
    http_request = self.build_http_request_obj(request_body)
    try:
        response = urllib.request.urlopen(http_request)
    except urllib.request.HTTPError as e:
        raise CalledServiceError(e)
    if not req_id:
        # notifications carry no id and expect no response body
        return
    response_body = json.loads(response.read().decode())
    return response_body
Handles a single request object and returns the raw response :param request_object:
def start(host, port=5959, tag='salt/engine/logstash', proto='udp'):
    """Listen to salt events and forward them to logstash.

    :param host: logstash host to send events to.
    :param port: logstash port (default 5959).
    :param tag: tag the events are logged under.
    :param proto: transport protocol, 'udp' or 'tcp'.
    :raises ValueError: if ``proto`` is neither 'udp' nor 'tcp'.
    """
    if proto == 'tcp':
        logstashHandler = logstash.TCPLogstashHandler
    elif proto == 'udp':
        logstashHandler = logstash.UDPLogstashHandler
    else:
        # Fix: an unknown proto previously left `logstashHandler` unbound
        # and crashed below with a confusing UnboundLocalError.
        raise ValueError("proto must be 'udp' or 'tcp', not {0!r}".format(proto))
    logstash_logger = logging.getLogger('python-logstash-logger')
    logstash_logger.setLevel(logging.INFO)
    logstash_logger.addHandler(logstashHandler(host, port, version=1))
    # attach to the master or minion event bus depending on where we run
    if __opts__.get('id').endswith('_master'):
        event_bus = salt.utils.event.get_master_event(
            __opts__,
            __opts__['sock_dir'],
            listen=True)
    else:
        event_bus = salt.utils.event.get_event(
            'minion',
            transport=__opts__['transport'],
            opts=__opts__,
            sock_dir=__opts__['sock_dir'],
            listen=True)
    log.debug('Logstash engine started')
    # forward events forever; this function does not return
    while True:
        event = event_bus.get_event()
        if event:
            logstash_logger.info(tag, extra=event)
Listen to salt events and forward them to logstash
def scale_calculator(multiplier, elements, rescale=None):
    """Compute a scale for each element: ``multiplier`` times the element's
    count, optionally remapped linearly into the ``rescale`` (min, max) range.

    Args:
        multiplier (int, float): base value multiplied by each count.
        elements (list, dict): list of (possibly repeated) objects, or a
            mapping of object -> count.
        rescale (tuple): optional (min, max) range to remap the scales into.

    Returns:
        dict: element -> scaled value.

    Raises:
        ValueError: if ``elements`` is neither a list nor a dict.
    """
    if isinstance(elements, list):
        scales = {item: multiplier * elements.count(item)
                  for item in set(elements)}
    elif isinstance(elements, dict):
        scales = {item: multiplier * int(count)
                  for item, count in elements.items()}
    else:
        raise ValueError('Input list of elements or dictionary of elements & counts')
    if not rescale:
        return scales
    lo, hi = min(scales.values()), max(scales.values())
    return {item: remap(value, lo, hi, rescale[0], rescale[1])
            for item, value in scales.items()}
Get a dictionary of scales for each element in elements. Examples: >>> scale_calculator(1, [2,7,8]) {8: 1, 2: 1, 7: 1} >>> scale_calculator(1, [2,2,2,3,4,5,5,6,7,8]) {2: 3, 3: 1, 4: 1, 5: 2, 6: 1, 7: 1, 8: 1} >>> scale_calculator(1, [2,2,2,3,4,5,5,6,7,8], rescale=(0.5,1)) {2: 1.0, 3: 0.5, 4: 0.5, 5: 0.75, 6: 0.5, 7: 0.5, 8: 0.5} >>> scale_calculator(1, {2:3, 3:1, 4:1, 5:2, 6:1, 7:1, 8:1}, rescale=(0.5,1)) {2: 1.0, 3: 0.5, 4: 0.5, 5: 0.75, 6: 0.5, 7: 0.5, 8: 0.5} >>> scale_calculator(1, [(2,2,2),(3,),(4,),(5,),(5,),(6,7,8)], rescale=(0.5,1)) {(2, 2, 2): 0.5, (3,): 0.5, (6, 7, 8): 0.5, (4,): 0.5, (5,): 1.0} >>> scale_calculator(1, {77:35, 80:35, 16:1}, rescale=(.99,1)) None Args: multiplier (int, float): Base float to be multiplied elements (list, dict): Dictionary which contains object:count or list of objects that may have repeats which will be counted rescale (tuple): Min and max values to rescale to Returns: dict: Scaled values of multiplier for each element in elements
def display(table, limit=0, vrepr=None, index_header=None, caption=None,
            tr_style=None, td_styles=None, encoding=None, truncate=None,
            epilogue=None):
    """Display a table inline within an IPython notebook."""
    from IPython.core.display import display_html
    rendered = _display_html(table, limit=limit, vrepr=vrepr,
                             index_header=index_header, caption=caption,
                             tr_style=tr_style, td_styles=td_styles,
                             encoding=encoding, truncate=truncate,
                             epilogue=epilogue)
    display_html(rendered, raw=True)
Display a table inline within an IPython notebook.
def as_translation_key(self):
    """Project this object down to a TranslationKey, which has fewer
    fields and can be used as a dictionary key."""
    values = {field: getattr(self, field) for field in TranslationKey._fields}
    return TranslationKey(**values)
Project Translation object or any other derived class into just a TranslationKey, which has fewer fields and can be used as a dictionary key.
def _get_item_from_search_response(self, response, type_): sections = sorted(response['sections'], key=lambda sect: sect['type'] == type_, reverse=True) for section in sections: hits = [hit for hit in section['hits'] if hit['type'] == type_] if hits: return hits[0]['result']
Returns either a Song or Artist result from search_genius_web
def output(self, context, *args, **kwargs):
    """Apply the reader's output_fields XOR output_type option to
    ``context`` before yielding control back.

    Raises UnrecoverableError when both options are set at once.
    """
    if self.output_fields and self.output_type:
        raise UnrecoverableError("Cannot specify both output_fields and output_type option.")
    if self.output_type:
        context.set_output_type(self.output_type)
    if self.output_fields:
        context.set_output_fields(self.output_fields)
    yield
Allow all readers to use eventually use output_fields XOR output_type options.
def get_by_name(self, name, style_type=None):
    """Find a style by its descriptive name.

    :Returns: the matching :class:`ooxml.doc.Style`; when no name matches,
        falls back to the default style for ``style_type`` if the last
        examined entry is falsy, otherwise returns the last examined style.
    """
    for st in self.styles.values():
        if st and st.name == name:
            return st
    # NOTE: mirrors historical behavior -- `st` here is whatever the final
    # iteration left behind, and is returned even when no name matched.
    if style_type and not st:
        st = self.styles.get(self.default_styles[style_type], None)
    return st
Find style by it's descriptive name. :Returns: Returns found style of type :class:`ooxml.doc.Style`.
def synchronized(lock):
    """Synchronization decorator: serialize all calls to the wrapped
    function behind the given mutex ``lock``."""
    @simple_decorator
    def _decorate(target):
        def _locked(*args, **kwargs):
            lock.acquire()
            try:
                return target(*args, **kwargs)
            finally:
                lock.release()
        return _locked
    return _decorate
Synchronization decorator. Allows setting a mutex on any function.
def id_getter(self, relation_name, strict=False):
    """Return a function that maps an old ID to its new ID for the
    enclosed ``relation_name`` (via ``self.get_new_id``)."""
    def _lookup(old_id):
        return self.get_new_id(relation_name, old_id, strict)
    return _lookup
Returns a function that accepts an old_id and returns the new ID for the enclosed relation name.
def assign_issue(self, issue, assignee):
    """Assign an issue to a user.

    ``None`` sets it to unassigned; ``-1`` sets it to Automatic.

    :param issue: the issue ID or key to assign
    :type issue: int or str
    :param assignee: the user to assign the issue to
    :type assignee: str
    :rtype: bool
    """
    url = self._options['server'] + \
        '/rest/api/latest/issue/' + str(issue) + '/assignee'
    response = self._session.put(url, data=json.dumps({'name': assignee}))
    raise_on_error(response)
    return True
Assign an issue to a user. None will set it to unassigned. -1 will set it to Automatic. :param issue: the issue ID or key to assign :type issue: int or str :param assignee: the user to assign the issue to :type assignee: str :rtype: bool
def deprecated(fn):
    """Mark a function as deprecated; emit a DeprecationWarning on every call.

    The warning message is the first line of ``fn``'s docstring, or a
    generic "<name> is deprecated." message when the function has no
    docstring (previously this case crashed with AttributeError on
    ``fn.__doc__.split``).
    """
    message = (fn.__doc__ or '').split('\n')[0] \
        or '{0} is deprecated.'.format(fn.__name__)

    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        warnings.warn(message, category=DeprecationWarning, stacklevel=2)
        return fn(*args, **kwargs)
    return wrapper
Mark a function as deprecated and warn the user on use.
def sample_size_necessary_under_cph(power, ratio_of_participants, p_exp, p_con, postulated_hazard_ratio, alpha=0.05):
    """Compute per-group sample sizes needed to detect a postulated hazard
    ratio under a Cox Proportional Hazard model.

    Parameters
    ----------
    power : float
        power to detect a hazard ratio as small as ``postulated_hazard_ratio``.
    ratio_of_participants : float
        ratio of participants in experimental group over control group.
    p_exp : float
        probability of failure in experimental group over period of study.
    p_con : float
        probability of failure in control group over period of study.
    postulated_hazard_ratio : float
        the postulated hazard ratio.
    alpha : float, optional (default=0.05)
        type I error rate.

    Returns
    -------
    (n_exp, n_con) : tuple of int
        sample sizes needed for the experimental and control groups.
    """
    z = stats.norm.ppf
    hr = postulated_hazard_ratio
    k = ratio_of_participants
    # total number of required events, from the standard CPH power formula
    m = (
        1.0 / k
        * ((k * hr + 1.0) / (hr - 1.0)) ** 2
        * (z(1.0 - alpha / 2.0) + z(power)) ** 2
    )
    denominator = k * p_exp + p_con
    n_exp = m * k / denominator
    n_con = m / denominator
    return int(np.ceil(n_exp)), int(np.ceil(n_con))
This computes the sample size for needed power to compare two groups under a Cox Proportional Hazard model. Parameters ---------- power : float power to detect the magnitude of the hazard ratio as small as that specified by postulated_hazard_ratio. ratio_of_participants: ratio of participants in experimental group over control group. p_exp : float probability of failure in experimental group over period of study. p_con : float probability of failure in control group over period of study postulated_hazard_ratio : float the postulated hazard ratio alpha : float, optional (default=0.05) type I error rate Returns ------- n_exp : integer the samples sizes need for the experiment to achieve desired power n_con : integer the samples sizes need for the control group to achieve desired power Examples -------- >>> from lifelines.statistics import sample_size_necessary_under_cph >>> >>> desired_power = 0.8 >>> ratio_of_participants = 1. >>> p_exp = 0.25 >>> p_con = 0.35 >>> postulated_hazard_ratio = 0.7 >>> n_exp, n_con = sample_size_necessary_under_cph(desired_power, ratio_of_participants, p_exp, p_con, postulated_hazard_ratio) >>> # (421, 421) References ----------- https://cran.r-project.org/web/packages/powerSurvEpi/powerSurvEpi.pdf See Also -------- power_under_cph
def _create_state_data(self, context, resp_args, relay_state): if "name_id_policy" in resp_args and resp_args["name_id_policy"] is not None: resp_args["name_id_policy"] = resp_args["name_id_policy"].to_string().decode("utf-8") return {"resp_args": resp_args, "relay_state": relay_state}
Returns a dict containing the state needed in the response flow. :type context: satosa.context.Context :type resp_args: dict[str, str | saml2.samlp.NameIDPolicy] :type relay_state: str :rtype: dict[str, dict[str, str] | str] :param context: The current context :param resp_args: Response arguments :param relay_state: Request relay state :return: A state as a dict
def omitted_parcov(self):
    """Get the omitted prior parameter covariance matrix (pyemu.Cov).

    Returns a reference; the matrix is lazily loaded on first access.
    """
    if self.__omitted_parcov is None:
        self.log("loading omitted_parcov")
        self.__load_omitted_parcov()
        # second identical log call closes the timing entry opened above
        # -- NOTE(review): presumably pyemu's start/stop log convention
        self.log("loading omitted_parcov")
    return self.__omitted_parcov
get the omitted prior parameter covariance matrix Returns ------- omitted_parcov : pyemu.Cov Note ---- returns a reference If ErrorVariance.__omitted_parcov is None, attribute is dynamically loaded
def get_info(self):
    """Return info about the currently fuzzed enclosed node, prefixing its
    path with this container's name (or '<no name>').

    Falls back to the superclass info when there is no current field.
    """
    field = self._current_field()
    if not field:
        return super(Container, self).get_info()
    info = field.get_info()
    prefix = self.name if self.name else '<no name>'
    info['path'] = '%s/%s' % (prefix, info['path'])
    return info
Get info regarding the current fuzzed enclosed node :return: info dictionary
def largest_connected_submatrix(C, directed=True, lcc=None):
    r"""Compute the count matrix on the largest connected set.

    Parameters
    ----------
    C : scipy.sparse matrix or dense ndarray
        Count matrix specifying edge weights.
    directed : bool, optional
        Whether to compute connected components for a directed or
        undirected graph. Default is True.
    lcc : (M,) ndarray, optional
        The largest connected set, if already computed.

    Returns
    -------
    C_cc : same kind as ``C``
        Count matrix restricted to the largest connected set of states.

    See also
    --------
    largest_connected_set
    """
    # Dense input: delegate to the sparse implementation, then convert back.
    if isdense(C):
        return sparse.connectivity.largest_connected_submatrix(csr_matrix(C),
                                                               directed=directed,
                                                               lcc=lcc).toarray()
    else:
        return sparse.connectivity.largest_connected_submatrix(C,
                                                               directed=directed,
                                                               lcc=lcc)
r"""Compute the count matrix on the largest connected set. Parameters ---------- C : scipy.sparse matrix Count matrix specifying edge weights. directed : bool, optional Whether to compute connected components for a directed or undirected graph. Default is True lcc : (M,) ndarray, optional The largest connected set Returns ------- C_cc : scipy.sparse matrix Count matrix of largest completely connected set of vertices (states) See also -------- largest_connected_set Notes ----- Viewing the count matrix as the adjacency matrix of a (directed) graph the larest connected submatrix is the adjacency matrix of the largest connected set of the corresponding graph. The largest connected submatrix can be efficiently computed using Tarjan's algorithm. References ---------- .. [1] Tarjan, R E. 1972. Depth-first search and linear graph algorithms. SIAM Journal on Computing 1 (2): 146-160. Examples -------- >>> import numpy as np >>> from msmtools.estimation import largest_connected_submatrix >>> C = np.array([[10, 1, 0], [2, 0, 3], [0, 0, 4]]) >>> C_cc_directed = largest_connected_submatrix(C) >>> C_cc_directed # doctest: +ELLIPSIS array([[10, 1], [ 2, 0]]...) >>> C_cc_undirected = largest_connected_submatrix(C, directed=False) >>> C_cc_undirected # doctest: +ELLIPSIS array([[10, 1, 0], [ 2, 0, 3], [ 0, 0, 4]]...)
def word_wrap_tree(parented_tree, width=0):
    """Line-wrap the leaves of an NLTK ParentedTree in place for
    pretty-printing, returning the (mutated) tree.

    A width of 0 leaves the tree untouched.
    """
    if width != 0:
        for idx, leaf in enumerate(parented_tree.leaves()):
            cleaned = textwrap.dedent(leaf).strip()
            position = parented_tree.leaf_treeposition(idx)
            parented_tree[position] = textwrap.fill(cleaned, width=width)
    return parented_tree
line-wrap an NLTK ParentedTree for pretty-printing
def real_main(release_url=None, tests_json_path=None, upload_build_id=None,
              upload_release_name=None):
    """Run the diff_my_images workflow to completion.

    Loads test definitions from ``FLAGS.tests_json_path`` (note: the
    ``tests_json_path`` parameter itself is never read here -- TODO
    confirm whether that is intentional), queues a root DiffMyImages work
    item, and waits for the coordinator to finish it.
    """
    coordinator = workers.get_coordinator()
    fetch_worker.register(coordinator)
    coordinator.start()
    data = open(FLAGS.tests_json_path).read()
    tests = load_tests(data)
    item = DiffMyImages(
        release_url,
        tests,
        upload_build_id,
        upload_release_name,
        heartbeat=workers.PrintWorkflow)
    item.root = True
    coordinator.input_queue.put(item)
    # block until the root item completes, then shut down cleanly
    coordinator.wait_one()
    coordinator.stop()
    coordinator.join()
Runs diff_my_images.
def populateFromRow(self, continuousSetRecord):
    """Populate this ContinuousSet's instance variables from the given
    database row record."""
    self._filePath = continuousSetRecord.dataurl
    self.setAttributesJson(continuousSetRecord.attributes)
Populates the instance variables of this ContinuousSet from the specified DB row.
def fill_dcnm_subnet_info(self, tenant_id, subnet, start, end, gateway,
                          sec_gateway, direc):
    """Build the DCNM subnet parameter dict for a tenant's firewall service.

    ``direc`` ('in' or anything else) selects the in- or out-service
    subnet name constant embedded in the generated subnet name.
    """
    serv_obj = self.get_service_obj(tenant_id)
    fw_id = serv_obj.get_fw_dict().get('fw_id')
    middle = (fw_const.IN_SERVICE_SUBNET if direc == 'in'
              else fw_const.OUT_SERVICE_SUBNET)
    # subnet name = first 4 chars of fw_id + service tag + last 4 chars
    name = fw_id[0:4] + middle + fw_id[len(fw_id) - 4:]
    return {
        'enable_dhcp': False,
        'tenant_id': tenant_id,
        'name': name,
        'cidr': subnet + '/24',
        'gateway_ip': gateway,
        'secondary_gw': sec_gateway,
        'ip_version': 4,
        'allocation_pools': [{'start': start, 'end': end}],
    }
Fills the DCNM subnet parameters. Function that fills the subnet parameters for a tenant required by DCNM.
def export_model(model, model_type, export_dir, model_column_fn):
    """Export the estimator to SavedModel format.

    Args:
        model: Estimator object.
        model_type: "wide", "deep" or "wide_deep".
        export_dir: directory to export the model to.
        model_column_fn: function returning (wide_columns, deep_columns).
    """
    wide_columns, deep_columns = model_column_fn()
    if model_type == 'wide':
        columns = wide_columns
    elif model_type == 'deep':
        columns = deep_columns
    else:
        columns = wide_columns + deep_columns
    feature_spec = tf.feature_column.make_parse_example_spec(columns)
    serving_input_fn = (
        tf.estimator.export.build_parsing_serving_input_receiver_fn(
            feature_spec))
    model.export_savedmodel(export_dir, serving_input_fn,
                            strip_default_attrs=True)
Export to SavedModel format. Args: model: Estimator object model_type: string indicating model type. "wide", "deep" or "wide_deep" export_dir: directory to export the model. model_column_fn: Function to generate model feature columns.
def getTmpFilename(self, tmp_dir="/tmp",prefix='tmp',suffix='.fasta',\
    include_class_id=False,result_constructor=FilePath):
    """Return a temporary filename with a .fasta suffix, as pplacer
    requires.

    Delegates to the superclass implementation, changing only the default
    ``suffix``.
    """
    return super(Pplacer,self).getTmpFilename(tmp_dir=tmp_dir,
                                              prefix=prefix,
                                              suffix=suffix,
                                              include_class_id=include_class_id,
                                              result_constructor=result_constructor)
Define Tmp filename to contain .fasta suffix, since pplacer requires the suffix to be .fasta
def get_device_by_id(self, device_id):
    """Search the connected devices for the one whose integer ID matches
    ``device_id``.

    Returns the device, or None (after a debug log) when absent.
    """
    for device in self.get_devices():
        if device.device_id == device_id:
            return device
    logger.debug('Did not find device with {}'.format(device_id))
    return None
Search the list of connected devices by ID. device_id param is the integer ID of the device
def env(self):
    """Build the execution context for row processors and row-generating
    notebooks/functions: the doc's env overlaid with envvars and props."""
    from copy import copy
    context = copy(self.doc.env)
    assert context is not None, 'Got a null execution context'
    context.update(self._envvar_env)
    context.update(self.all_props)
    return context
The execution context for rowprocessors and row-generating notebooks and functions.
def _get(self, url, query=None):
    """Wrapper for the HTTP GET request: rate-limit backoff is handled by
    ``retry_request``; successful responses are processed with
    ``ResourceBuilder``.

    In ``raw_mode`` the raw response is returned unprocessed. Non-200
    responses are converted to an error object that is raised or returned
    depending on ``self.raise_errors``.
    """
    if query is None:
        query = {}
    response = retry_request(self)(self._http_get)(url, query=query)
    if self.raw_mode:
        return response
    if response.status_code != 200:
        error = get_error(response)
        if self.raise_errors:
            raise error
        return error
    # locale='*' requests entries in all locales; build localized resources
    localized = query.get('locale', '') == '*'
    return ResourceBuilder(
        self.default_locale,
        localized,
        response.json(),
        max_depth=self.max_include_resolution_depth,
        reuse_entries=self.reuse_entries
    ).build()
Wrapper for the HTTP Request, Rate Limit Backoff is handled here, Responses are Processed with ResourceBuilder.
def warning(self, msg, pos=None):
    """Log ``msg`` as a warning, tagged with the location of ``pos``."""
    location = self.location(pos)
    self.log(msg, 'warning: ' + location)
Logs a warning message pertaining to the given SeqAtom.