code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _destroy(cls, cdata):
    """Free the Leptonica object referenced by *cdata*.

    Leptonica destroy functions take a pointer-to-pointer so they can
    NULL the caller's handle after freeing the underlying object.
    """
    handle = ffi.new('{} **'.format(cls.LEPTONICA_TYPENAME), cdata)
    cls.cdata_destroy(handle)
Destroy some cdata
def augmented_dickey_fuller(x, param):
    """Run the Augmented Dickey-Fuller unit-root hypothesis test on *x*.

    Returns the requested statistic(s) from the statsmodels ``adfuller``
    result; NaNs are returned when the test cannot be computed.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param param: contains dictionaries {"attr": x} with x str, either
        "teststat", "pvalue" or "usedlag"
    :type param: list
    :return: list of (feature name, value) tuples
    :return type: list
    """
    # The three except clauses were identical; merge them into one.
    try:
        res = adfuller(x)
    except (LinAlgError, ValueError, MissingDataError):
        res = np.NaN, np.NaN, np.NaN
    # Map each requested attribute to its position in the adfuller result.
    attr_index = {"teststat": 0, "pvalue": 1, "usedlag": 2}
    return [('attr_"{}"'.format(config["attr"]),
             res[attr_index[config["attr"]]]
             if config["attr"] in attr_index else np.NaN)
            for config in param]
The Augmented Dickey-Fuller test is a hypothesis test which checks whether a unit root is present in a time series sample. This feature calculator returns the value of the respective test statistic. See the statsmodels implementation for references and more details. :param x: the time series to calculate the feature of :type x: numpy.ndarray :param param: contains dictionaries {"attr": x} with x str, either "teststat", "pvalue" or "usedlag" :type param: list :return: the value of this feature :return type: float
def move(self, u_function):
    """Move a mesh by using an external function which prescribes the
    displacement at any point in space.

    Useful for manipulating ``dolfin`` meshes.

    :param u_function: callable mapping a mesh point to a displacement.
    :return: self, to allow method chaining.
    """
    if self.mesh:
        self.u = u_function
        # evaluate the displacement at every mesh vertex
        delta = [u_function(p) for p in self.mesh.coordinates()]
        movedpts = self.mesh.coordinates() + delta
        # push the displaced points into the VTK polydata in place
        self.polydata(False).GetPoints().SetData(numpy_to_vtk(movedpts))
        self.poly.GetPoints().Modified()
        self.u_values = delta
    else:
        colors.printc("Warning: calling move() but actor.mesh is", self.mesh, c=3)
    return self
Move a mesh by using an external function which prescribes the displacement at any point in space. Useful for manipulating ``dolfin`` meshes.
def render_profile_data(self, as_parsed):
    """Render the chosen profile entry, as it was parsed.

    :param as_parsed: the profile data as parsed from the source.
    :return: the rendered profile data.
    :raises DbtProfileError: if rendering recurses without bound, which
        indicates the profile contains a reference to itself.
    """
    try:
        return deep_map(self._render_profile_data, as_parsed)
    except RecursionException:
        raise DbtProfileError(
            'Cycle detected: Profile input has a reference to itself',
            project=as_parsed
        )
Render the chosen profile entry, as it was parsed.
def add_cell_footer(self):
    """Add footer cell to the notebook, unless one is already present."""
    logging.info('Adding footer cell')
    for cell in self.nb['cells']:
        if cell.cell_type == 'markdown':
            if 'pynb_footer_tag' in cell.source:
                # a footer was added by a previous run; don't duplicate it
                logging.debug('Footer cell already present')
                return
    # NOTE(review): `m` is read (m.format) before any assignment in this
    # scope, which raises UnboundLocalError at runtime -- presumably a
    # module-level footer template string was intended; confirm.
    m = self.add_cell_markdown(
        m.format(exec_time=self.exec_time,
                 exec_begin=self.exec_begin_dt,
                 class_name=self.__class__.__name__,
                 argv=str(sys.argv),
                 cells_name=self.cells_name))
Add footer cell
def _getDecoratorsName(node): decorators = [] if not node.decorators: return decorators for decorator in node.decorators.nodes: decorators.append(decorator.as_string()) return decorators
Return a list with names of decorators attached to this node. @param node: current node of pylint
def complex_randn(*args): return np.random.randn(*args) + 1j*np.random.randn(*args)
Return a complex array of samples drawn from a standard normal distribution. Parameters ---------- d0, d1, ..., dn : int Dimensions of the random array Returns ------- a : ndarray Random array of shape (d0, d1, ..., dn)
def add_callback(obj, callback, args=()):
    """Add a callback to an object.

    ``obj._callbacks`` grows lazily: ``None`` -> a single Node -> a dllist
    of Nodes, so objects with zero or one callback avoid list overhead.

    :param obj: object carrying a ``_callbacks`` slot.
    :param callback: callable to register.
    :param args: positional arguments stored alongside the callback.
    :return: the Node wrapping the callback (usable as a removal handle).
    """
    callbacks = obj._callbacks
    node = Node(callback, args)
    if callbacks is None:
        # first callback: store the bare node
        obj._callbacks = node
        return node
    if not isinstance(callbacks, dllist):
        # second callback: promote the single stored node to a dllist
        obj._callbacks = dllist()
        obj._callbacks.insert(callbacks)
        callbacks = obj._callbacks
    callbacks.insert(node)
    return node
Add a callback to an object.
def register_token_getter(self, provider):
    """Register callback to retrieve token from session.

    :param provider: name of a remote app registered on the ``oauth`` object.
    """
    app = oauth.remote_apps[provider]
    # `tokengetter` is a decorator on the remote app; apply it manually
    decorator = getattr(app, 'tokengetter')

    def getter(token=None):
        return self.token_getter(provider, token)

    decorator(getter)
Register callback to retrieve token from session
def get_property(self, index, doctype, name):
    """Return the mapped property *name* for the given index and doctype."""
    doctype_mapping = self.indices[index][doctype]
    return doctype_mapping.properties[name]
Returns a property of a given type. :return: a mapped property
def user_entry(entry_int, num_inst, command):
    """Validate user entry and return (index, validity flag).

    Aborts the program when a falsy entry (0) is given; otherwise converts
    a valid 1-based entry to a 0-based index, or reports an invalid entry.

    Args:
        entry_int (int): a number entered or 999 if a non-int was entered.
        num_inst (int): the largest valid number that can be entered.
        command (str): program command to display in prompt.
    Returns:
        tuple: (entry_idx, valid_entry)
    Raises:
        SystemExit: if the user enters 0 (the "abort" option).
    """
    if not entry_int:
        # user chose to abort
        print("{}aborting{} - {} instance\n".
              format(C_ERR, C_NORM, command))
        sys.exit()
    if 1 <= entry_int <= num_inst:
        # convert to a zero-based index
        return (entry_int - 1, True)
    print("{}Invalid entry:{} enter a number between 1"
          " and {}.".format(C_ERR, C_NORM, num_inst))
    return (entry_int, False)
Validate user entry and return index and validity flag. Processes the user entry and takes the appropriate action: abort if '0' entered, set validity flag and index if valid entry, else return invalid index and the still unset validity flag. Args: entry_int (int): a number entered or 999 if a non-int was entered. num_inst (int): the largest valid number that can be entered. command (str): program command to display in prompt. Returns: entry_idx (int): the dictionary index number of the targeted instance valid_entry (bool): specifies if entry_idx is valid. Raises: SystemExit: if the user enters 0 when they are choosing from the list it triggers the "abort" option offered to the user.
def uv_to_color(uv, image):
    """Get the color in a texture image.

    Parameters
    -------------
    uv : (n, 2) float
        UV coordinates on texture image
    image : PIL.Image
        Texture image

    Returns
    ----------
    colors : (n, 4) uint8
        RGBA color at each of the UV coordinates
    """
    if image is None or uv is None:
        return None
    coords = np.asanyarray(uv, dtype=np.float64)
    # U maps to pixel columns; V is flipped because image rows grow downward
    cols = (coords[:, 0] * (image.width - 1)).round().astype(np.int64) % image.width
    rows = ((1 - coords[:, 1]) * (image.height - 1)).round().astype(np.int64) % image.height
    rgba = np.asanyarray(image.convert('RGBA'))[rows, cols]
    assert rgba.ndim == 2 and rgba.shape[1] == 4
    return rgba
Get the color in a texture image. Parameters ------------- uv : (n, 2) float UV coordinates on texture image image : PIL.Image Texture image Returns ---------- colors : (n, 4) float RGBA color at each of the UV coordinates
def delete_objective_bank(self, objective_bank_id=None):
    """Deletes an ObjectiveBank.

    arg:    objective_bank_id (osid.id.Id): the Id of the
            ObjectiveBank to remove
    raise:  NullArgument - objective_bank_id is null
    raise:  InvalidArgument - objective_bank_id is not an osid Id
    raise:  OperationFailed - unable to complete request
    compliance: mandatory - This method must be implemented.
    """
    from dlkit.abstract_osid.id.primitives import Id as ABCId
    if objective_bank_id is None:
        raise NullArgument()
    if not isinstance(objective_bank_id, ABCId):
        raise InvalidArgument('argument type is not an osid Id')
    # confirm the bank exists (lookup raises if it does not) before deleting
    try:
        objective_bank = ObjectiveBankLookupSession(proxy=self._proxy, runtime=self._runtime).get_objective_bank(objective_bank_id)
    except Exception:
        raise
    url_path = construct_url('objective_banks', bank_id=objective_bank_id)
    result = self._delete_request(url_path)
    return objects.ObjectiveBank(result)
Deletes an ObjectiveBank. arg: objectiveBankId (osid.id.Id): the Id of the ObjectiveBank to remove raise: NotFound - objectiveBankId not found raise: NullArgument - objectiveBankId is null raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure compliance: mandatory - This method must be implemented.
def compile_regex_from_str(self, ft_str):
    """Given a string describing feature masks for a sequence of segments,
    return a compiled regex matching the corresponding strings.

    Args:
        ft_str (str): feature masks, each enclosed in square brackets.

    Returns:
        Pattern: compiled regular expression equivalent to `ft_str`
    """
    parts = []
    # each [...] group becomes one alternation over its matching segments
    for match in re.finditer(r'\[([^]]+)\]', ft_str):
        mask = fts(match.group(1))
        matching_segs = self.all_segs_matching_fts(mask)
        parts.append('({})'.format('|'.join(matching_segs)))
    return re.compile(''.join(parts))
Given a string describing features masks for a sequence of segments, return a regex matching the corresponding strings. Args: ft_str (str): feature masks, each enclosed in square brackets, in which the features are delimited by any standard delimiter. Returns: Pattern: regular expression pattern equivalent to `ft_str`
def itemgetters(*args):
    """Get a handful of items from each element of an iterable.

    Equivalent to mapping ``itemgetter(*args)`` over the iterable and
    materialising the result as a list.
    """
    getter = itemgetter(*args)

    def inner(iterable):
        return [getter(element) for element in iterable]

    return inner
Get a handful of items from an iterable. This is just map(itemgetter(...), iterable) with a list comprehension.
def activate(self, user):
    """Updates the `invitee` value and saves the instance.

    Provided as a way of extending the behavior.

    Args:
        user: the newly created user
    Returns:
        the linking organization user
    """
    # link the user to the organization first, then record the invitee
    org_user = self.organization.add_user(user, **self.activation_kwargs())
    self.invitee = user
    self.save()
    return org_user
Updates the `invitee` value and saves the instance Provided as a way of extending the behavior. Args: user: the newly created user Returns: the linking organization user
def RestrictFeedItemToAdGroup(client, feed_item, adgroup_id):
    """Restricts the feed item to an ad group.

    Args:
        client: an AdWordsClient instance.
        feed_item: The feed item.
        adgroup_id: The ad group ID.
    """
    feed_item_target_service = client.GetService(
        'FeedItemTargetService', 'v201809')
    # a FeedItemAdGroupTarget limits serving of this feed item to one ad group
    ad_group_target = {
        'xsi_type': 'FeedItemAdGroupTarget',
        'feedId': feed_item['feedId'],
        'feedItemId': feed_item['feedItemId'],
        'adGroupId': adgroup_id
    }
    operation = {'operator': 'ADD', 'operand': ad_group_target}
    response = feed_item_target_service.mutate([operation])
    new_ad_group_target = response['value'][0]
    print('Feed item target for feed ID %s and feed item ID %s was created to '
          'restrict serving to ad group ID %s'
          % (new_ad_group_target['feedId'],
             new_ad_group_target['feedItemId'],
             new_ad_group_target['adGroupId']))
Restricts the feed item to an ad group. Args: client: an AdWordsClient instance. feed_item: The feed item. adgroup_id: The ad group ID.
def build_transform(self):
    """Creates a basic transformation that was used to train the models."""
    cfg = self.cfg
    if cfg.INPUT.TO_BGR255:
        # models trained on [0, 255]-range BGR input: rescale only
        to_bgr_transform = T.Lambda(lambda x: x * 255)
    else:
        # otherwise swap RGB -> BGR channel order
        to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])
    normalize_transform = T.Normalize(
        mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD
    )
    transform = T.Compose(
        [
            T.ToPILImage(),
            T.Resize(self.min_image_size),
            T.ToTensor(),
            to_bgr_transform,
            normalize_transform,
        ]
    )
    return transform
Creates a basic transformation that was used to train the models
def log_print_request(method, url, query_params=None, headers=None, body=None):
    """Log an HTTP request data in a user-friendly representation.

    :param method: HTTP method
    :param url: URL
    :param query_params: Query parameters in the URL
    :param headers: Headers (dict)
    :param body: Body (raw body, string)
    :return: None
    """
    log_msg = '\n>>>>>>>>>>>>>>>>>>>>> Request >>>>>>>>>>>>>>>>>>> \n'
    log_msg += '\t> Method: %s\n' % method
    log_msg += '\t> Url: %s\n' % url
    if query_params is not None:
        log_msg += '\t> Query params: {}\n'.format(str(query_params))
    if headers is not None:
        log_msg += '\t> Headers:\n{}\n'.format(json.dumps(dict(headers), sort_keys=True, indent=4))
    if body is not None:
        # best effort: a malformed body must not break request logging, but
        # a bare `except:` would also swallow KeyboardInterrupt/SystemExit
        try:
            log_msg += '\t> Payload sent:\n{}\n'.format(_get_pretty_body(headers, body))
        except Exception:
            log_msg += "\t> Payload couldn't be formatted"
    logger.debug(log_msg)
Log an HTTP request data in a user-friendly representation. :param method: HTTP method :param url: URL :param query_params: Query parameters in the URL :param headers: Headers (dict) :param body: Body (raw body, string) :return: None
def mkpassword(length=16, chars=None, punctuation=None):
    """Generates a random ascii string - useful to generate authinfos.

    :param length: string wanted length
    :type length: ``int``
    :param chars: character population, defaults to alphabet (lower & upper)
        + numbers
    :type chars: ``str``, ``list``, ``set`` (sequence)
    :param punctuation: number of punctuation signs to include in string
    :type punctuation: ``int``
    :raises ValueError: if ``punctuation`` exceeds ``length``
    :rtype: ``str``
    """
    # NOTE(review): uses `random`, not `secrets` -- acceptable only if the
    # generated strings are not security sensitive; confirm intent.
    if chars is None:
        chars = string.ascii_letters + string.digits
    if punctuation:
        # FIX: previously a punctuation count larger than `length` silently
        # produced a result of the wrong length; fail loudly instead.
        if punctuation > length:
            raise ValueError('punctuation count cannot exceed total length')
        data = [random.choice(chars) for _ in range(length - punctuation)]
        data.extend(random.choice(PUNCTUATION) for _ in range(punctuation))
        random.shuffle(data)
    else:
        data = [random.choice(chars) for _ in range(length)]
    return ''.join(data)
Generates a random ascii string - useful to generate authinfos :param length: string wanted length :type length: ``int`` :param chars: character population, defaults to alphabet (lower & upper) + numbers :type chars: ``str``, ``list``, ``set`` (sequence) :param punctuation: number of punctuation signs to include in string :type punctuation: ``int`` :rtype: ``str``
def stop_global_driver(force=False):
    """Stops the global driver if running.

    No-op if no global driver is running.

    Parameters
    ----------
    force : bool, optional
        By default skein will check that the process associated with the
        driver PID is actually a skein driver. Setting ``force`` to ``True``
        will kill the process in all cases.
    """
    address, pid = _read_driver()
    if address is None:
        return
    if not force:
        # probe the address: if it doesn't answer as a driver but the PID
        # is still alive, refuse to kill an unrelated process
        try:
            Client(address=address)
        except ConnectionError:
            if pid_exists(pid):
                raise
    try:
        os.kill(pid, signal.SIGTERM)
    except OSError as exc:
        # ESRCH: process already gone; EPERM tolerated only when forcing
        ignore = (errno.ESRCH, errno.EPERM) if force else (errno.ESRCH,)
        if exc.errno not in ignore:
            raise
    # best-effort removal of the stale driver record file
    try:
        os.remove(os.path.join(properties.config_dir, 'driver'))
    except OSError:
        pass
Stops the global driver if running. No-op if no global driver is running. Parameters ---------- force : bool, optional By default skein will check that the process associated with the driver PID is actually a skein driver. Setting ``force`` to ``True`` will kill the process in all cases.
def checkIfRemoteIsNewer(self, localfile, remote_size, remote_modify):
    """Overrides checkIfRemoteIsNewer in Source class.

    :param localfile: str file path
    :param remote_size: str bytes
    :param remote_modify: str last modify date in the form 20160705042714
    :return: boolean True if remote file is newer else False
    """
    is_remote_newer = False
    status = os.stat(localfile)
    LOG.info(
        "\nLocal file size: %i"
        "\nLocal Timestamp: %s",
        status[ST_SIZE], datetime.fromtimestamp(status.st_mtime))
    remote_dt = Bgee._convert_ftp_time_to_iso(remote_modify)
    # any mismatch in size or mtime counts as "remote is newer"
    if remote_dt != datetime.fromtimestamp(status.st_mtime) or \
            status[ST_SIZE] != int(remote_size):
        is_remote_newer = True
        # NOTE(review): remote_size is documented as a str but formatted
        # with %i -- lazy logging will raise if this branch emits; confirm.
        LOG.info(
            "Object on server is has different size %i and/or date %s",
            remote_size, remote_dt)
    return is_remote_newer
Overrides checkIfRemoteIsNewer in Source class :param localfile: str file path :param remote_size: str bytes :param remote_modify: str last modify date in the form 20160705042714 :return: boolean True if remote file is newer else False
def _create_inbound_thread(self): inbound_thread = threading.Thread(target=self._process_incoming_data, name=__name__) inbound_thread.daemon = True inbound_thread.start() return inbound_thread
Internal Thread that handles all incoming traffic. :rtype: threading.Thread
def load(self, format=None, *, kwargs=None):
    """Deserialize object from the file.

    Auto detect format by file extension name if `format` is None;
    for example, `.json` will detect as `json`.

    * raise `FormatNotFoundError` on unknown format.
    * raise `SerializeError` on any serialize exceptions.
    """
    # FIX: the default was a shared mutable dict (`kwargs={}`); use None
    # as the sentinel and substitute a fresh dict per call instead.
    return load(self, format=format, kwargs={} if kwargs is None else kwargs)
deserialize object from the file. auto detect format by file extension name if `format` is None. for example, `.json` will detect as `json`. * raise `FormatNotFoundError` on unknown format. * raise `SerializeError` on any serialize exceptions.
def _get_dependencies_from_json(ireq, sources):
    """Retrieves dependencies for the install requirement from the JSON API.

    :param ireq: A single InstallRequirement
    :type ireq: :class:`~pip._internal.req.req_install.InstallRequirement`
    :param sources: index source dicts, each with a "url" entry
    :return: A set of dependency lines for generating new
        InstallRequirements, or None if unavailable.
    :rtype: set(str) or None
    """
    if os.environ.get("PASSA_IGNORE_JSON_API"):
        return
    # the JSON API does not report extras; let another resolver handle those
    if ireq.extras:
        return
    try:
        version = get_pinned_version(ireq)
    except ValueError:
        return
    url_prefixes = [
        proc_url[:-7]  # strip the trailing "/simple"
        for proc_url in (
            raw_url.rstrip("/")
            for raw_url in (source.get("url", "") for source in sources)
        )
        if proc_url.endswith("/simple")
    ]
    session = requests.session()
    # FIX: the session used to leak whenever dependencies were found,
    # because the early `return` skipped `session.close()`.
    try:
        for prefix in url_prefixes:
            url = "{prefix}/pypi/{name}/{version}/json".format(
                prefix=prefix,
                name=packaging.utils.canonicalize_name(ireq.name),
                version=version,
            )
            try:
                dependencies = _get_dependencies_from_json_url(url, session)
                if dependencies is not None:
                    return dependencies
            except Exception as e:
                print("unable to read dependencies via {0} ({1})".format(url, e))
    finally:
        session.close()
    return
Retrieves dependencies for the install requirement from the JSON API. :param ireq: A single InstallRequirement :type ireq: :class:`~pip._internal.req.req_install.InstallRequirement` :return: A set of dependency lines for generating new InstallRequirements. :rtype: set(str) or None
def waveform_to_examples(data, sample_rate):
    """Converts audio waveform into an array of examples for VGGish.

    Args:
        data: np.array of either one dimension (mono) or two dimensions
            (multi-channel, with the outer dimension representing channels).
            Each sample is generally expected to lie in [-1.0, +1.0].
        sample_rate: Sample rate of data.

    Returns:
        3-D np.array of shape [num_examples, num_frames, num_bands]:
        patches of log mel spectrogram.
    """
    import resampy
    # down-mix multi-channel audio to mono
    if len(data.shape) > 1:
        data = np.mean(data, axis=1)
    # resample to the rate the VGGish features are defined at
    if sample_rate != vggish_params.SAMPLE_RATE:
        data = resampy.resample(data, sample_rate, vggish_params.SAMPLE_RATE)
    log_mel = mel_features.log_mel_spectrogram(
        data,
        audio_sample_rate=vggish_params.SAMPLE_RATE,
        log_offset=vggish_params.LOG_OFFSET,
        window_length_secs=vggish_params.STFT_WINDOW_LENGTH_SECONDS,
        hop_length_secs=vggish_params.STFT_HOP_LENGTH_SECONDS,
        num_mel_bins=vggish_params.NUM_MEL_BINS,
        lower_edge_hertz=vggish_params.MEL_MIN_HZ,
        upper_edge_hertz=vggish_params.MEL_MAX_HZ)
    # frame the spectrogram into fixed-size examples
    features_sample_rate = 1.0 / vggish_params.STFT_HOP_LENGTH_SECONDS
    example_window_length = int(round(
        vggish_params.EXAMPLE_WINDOW_SECONDS * features_sample_rate))
    example_hop_length = int(round(
        vggish_params.EXAMPLE_HOP_SECONDS * features_sample_rate))
    log_mel_examples = mel_features.frame(
        log_mel,
        window_length=example_window_length,
        hop_length=example_hop_length)
    return log_mel_examples
Converts audio waveform into an array of examples for VGGish. Args: data: np.array of either one dimension (mono) or two dimensions (multi-channel, with the outer dimension representing channels). Each sample is generally expected to lie in the range [-1.0, +1.0], although this is not required. sample_rate: Sample rate of data. Returns: 3-D np.array of shape [num_examples, num_frames, num_bands] which represents a sequence of examples, each of which contains a patch of log mel spectrogram, covering num_frames frames of audio and num_bands mel frequency bands, where the frame length is vggish_params.STFT_HOP_LENGTH_SECONDS.
def guessoffset(args):
    """%prog guessoffset fastqfile

    Guess the quality offset of the fastqfile, whether 33 or 64.
    """
    p = OptionParser(guessoffset.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    fastqfile, = args
    ai = iter_fastq(fastqfile)
    rec = next(ai)
    offset = 64
    while rec:
        quality = rec.quality
        # values below 59 only occur in Phred+33 data; values above 74
        # only occur in Phred+64 data -- count evidence for each
        lowcounts = len([x for x in quality if x < 59])
        highcounts = len([x for x in quality if x > 74])
        diff = highcounts - lowcounts
        if diff > 10:
            break
        elif diff < -10:
            offset = 33
            break
        rec = next(ai)
    if offset == 33:
        print("Sanger encoding (offset=33)", file=sys.stderr)
    elif offset == 64:
        print("Illumina encoding (offset=64)", file=sys.stderr)
    return offset
%prog guessoffset fastqfile Guess the quality offset of the fastqfile, whether 33 or 64. See encoding schemes: <http://en.wikipedia.org/wiki/FASTQ_format> SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS............................... ..........................XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX ...............................IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII .................................JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL............................... !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefgh | | | | | 33 59 64 73 104 S - Sanger Phred+33, raw reads typically (0, 40) X - Solexa Solexa+64, raw reads typically (-5, 40) I - Illumina 1.3+ Phred+64, raw reads typically (0, 40) J - Illumina 1.5+ Phred+64, raw reads typically (3, 40) L - Illumina 1.8+ Phred+33, raw reads typically (0, 40) with 0=unused, 1=unused, 2=Read Segment Quality Control Indicator (bold)
def _expect_method(self, command):
    """Use the expect module to execute ipmitool commands and set status."""
    child = pexpect.spawn(self._ipmitool_path, self.args + command)
    # wait for the interactive password prompt
    i = child.expect([pexpect.TIMEOUT, 'Password: '], timeout=10)
    if i == 0:
        child.terminate()
        self.error = 'ipmitool command timed out'
        self.status = 1
    else:
        child.sendline(self.password)
        # wait for the command to finish (EOF) or time out
        i = child.expect([pexpect.TIMEOUT, pexpect.EOF], timeout=10)
        if i == 0:
            child.terminate()
            self.error = 'ipmitool command timed out'
            self.status = 1
        else:
            # nonzero exit status: the captured output is the error text
            if child.exitstatus:
                self.error = child.before
            else:
                self.output = child.before
            self.status = child.exitstatus
    child.close()
Use the expect module to execute ipmitool commands and set status
def sel_list_pres(ds_sfc_x):
    """Select proper pressure levels for model level data download.

    Returns the standard levels (in hPa, as strings) that bracket the
    surface-pressure range found in ``ds_sfc_x.sp``.
    """
    p_min = ds_sfc_x.sp.min().values
    p_max = ds_sfc_x.sp.max().values
    levels_hpa = [
        '1', '2', '3', '5', '7', '10', '20', '30', '50', '70',
        '100', '125', '150', '175', '200', '225', '250', '300',
        '350', '400', '450', '500', '550', '600', '650', '700',
        '750', '775', '800', '825', '850', '875', '900', '925',
        '950', '975', '1000',
    ]
    # work in Pa to compare against surface pressure
    levels_pa = pd.Series(levels_hpa).map(int) * 100
    # first standard level above the max and below the min pressure
    idx_upper = levels_pa[levels_pa > p_max].idxmin()
    idx_lower = levels_pa[levels_pa < p_min].idxmax()
    selected = levels_pa.loc[idx_lower:idx_upper] / 100
    return selected.map(int).map(str).to_list()
select proper levels for model level data download
def _read_data_type_rpl(self, length): _cmpr = self._read_binary(1) _padr = self._read_binary(1) _resv = self._read_fileng(2) _inti = int(_cmpr[:4], base=2) _inte = int(_cmpr[4:], base=2) _plen = int(_padr[:4], base=2) _ilen = 16 - _inti _elen = 16 - _inte _addr = list() for _ in (((length - 4) - _elen - _plen) // _ilen): _addr.append(ipaddress.ip_address(self._read_fileng(_ilen))) _addr.append(ipaddress.ip_address(self._read_fileng(_elen))) _pads = self._read_fileng(_plen) data = dict( cmpri=_inti, cmpre=_inte, pad=_plen, ip=tuple(_addr), ) return data
Read IPv6-Route RPL Source data. Structure of IPv6-Route RPL Source data [RFC 6554]: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Next Header | Hdr Ext Len | Routing Type | Segments Left | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | CmprI | CmprE | Pad | Reserved | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | . . . Addresses[1..n] . . . | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 route.next Next Header 1 8 route.length Header Extensive Length 2 16 route.type Routing Type 3 24 route.seg_left Segments Left 4 32 route.cmpri CmprI 4 36 route.cpmre CmprE 5 40 route.pad Pad Size 5 44 - Reserved 8 64 route.ip Addresses
def get_cursor_left_position(self, count=1):
    """Relative position for cursor left.

    Negative counts delegate to the rightward movement.
    """
    if count < 0:
        return self.get_cursor_right_position(-count)
    # never move past the start of the line
    steps = min(self.cursor_position_col, count)
    return -steps
Relative position for cursor left.
def make_annotation(self):
    """Returns a dictionary with all properties of the action and each of
    its action mentions."""
    annotation = dict()
    # copy every public, non-method attribute of the action
    for item in dir(self):
        if len(item) > 0 and item[0] != '_' and \
                not inspect.ismethod(getattr(self, item)):
            annotation[item] = getattr(self, item)
    # serialise the mentions recursively
    annotation['action_mentions'] = list()
    for action_mention in self.action_mentions:
        annotation_mention = action_mention.make_annotation()
        annotation['action_mentions'].append(annotation_mention)
    return annotation
Returns a dictionary with all properties of the action and each of its action mentions.
def check_url(url):
    """Check if resource at URL is fetchable (by trying to fetch it and
    checking for 200 status).

    Args:
        url (str): Url to check.

    Returns:
        A tuple of (True/False, response code).
    """
    request = urllib2.Request(url)
    try:
        # NOTE(review): `urlopen` is used unqualified while Request comes
        # from urllib2 -- presumably imported at module level; confirm.
        # Also urllib2.URLError (no HTTP code) is not caught here.
        response = urlopen(request)
        return True, response.code
    except urllib2.HTTPError as e:
        return False, e.code
Check if resource at URL is fetchable (by trying to fetch it and checking for 200 status). Args: url (str): Url to check. Returns: Returns a tuple of (True/False, response code)
def _valid_folder(self, base, name):
    """Return whether a folder can be searched."""
    valid = True
    fullpath = os.path.join(base, name)
    # invalid when not recursing, or when the folder-exclude check
    # rejects the (relative or bare) directory name
    if (
        not self.recursive or
        (
            self.folder_exclude_check is not None and
            not self.compare_directory(fullpath[self._base_len:] if self.dir_pathname else name)
        )
    ):
        valid = False
    # hidden folders are skipped unless explicitly shown
    if valid and (not self.show_hidden and util.is_hidden(fullpath)):
        valid = False
    # the user hook gets the final say only for still-valid folders
    return self.on_validate_directory(base, name) if valid else valid
Return whether a folder can be searched.
def intervals_to_boundaries(intervals, q=5):
    """Convert interval times into boundaries.

    Parameters
    ----------
    intervals : np.ndarray, shape=(n_events, 2)
        Array of interval start and end-times
    q : int
        Number of decimals to round to. (Default value = 5)

    Returns
    -------
    boundaries : np.ndarray
        Interval boundary times, including the end of the final interval
    """
    # round first so that near-equal boundaries collapse in the unique step
    rounded = np.round(intervals, decimals=q)
    return np.unique(np.ravel(rounded))
Convert interval times into boundaries. Parameters ---------- intervals : np.ndarray, shape=(n_events, 2) Array of interval start and end-times q : int Number of decimals to round to. (Default value = 5) Returns ------- boundaries : np.ndarray Interval boundary times, including the end of the final interval
def hookable(cls):
    """Initialise hookery in a class that declares hooks by decorating it
    with this decorator.

    Replaces *cls* with a same-named subclass that also inherits Hookable
    (whose metaclass initialises hook descriptors in sub-classes). Hook
    attributes declared on *cls* become HookDescriptors on the new class.
    Sub-classes of the result need not be decorated again.
    """
    assert isinstance(cls, type)
    hook_definitions = []
    if not issubclass(cls, Hookable):
        for k, v in list(cls.__dict__.items()):
            if isinstance(v, (ClassHook, InstanceHook)):
                # strip the raw hook from the original class; it is
                # re-attached below as a descriptor on the new class
                delattr(cls, k)
                if v.name is None:
                    v.name = k
                hook_definitions.append((k, v))
    hookable_cls = type(cls.__name__, (cls, Hookable), {})
    for k, v in hook_definitions:
        setattr(hookable_cls, k, HookDescriptor(defining_hook=v, defining_class=hookable_cls))
    return hookable_cls
Initialise hookery in a class that declares hooks by decorating it with this decorator. This replaces the class with another one which has the same name, but also inherits Hookable which has HookableMeta set as metaclass so that sub-classes of cls will have hook descriptors initialised properly. When you say: @hookable class My: before = Hook() then @hookable changes My.before to be a HookDescriptor which is then changed into Hook if anyone accesses it. There is no need to decorate sub-classes of cls with @hookable.
def DomainTokensCreate(self, domain_id, amount):
    """Create tokens that can be used by users who want to join the domain.

    Tokens are automatically deleted after usage. Only domain managers
    can create tokens.

    :param domain_id: id of the domain to create tokens for
    :param amount: number of tokens to create
    :return: True on success, False otherwise (error in ``self.__error__``)
    """
    if self.__SenseApiCall__('/domains/{0}/tokens.json'.format(domain_id), 'POST', parameters = {"amount":amount}):
        return True
    else:
        self.__error__ = "api call unsuccessful"
        return False
This method creates tokens that can be used by users who want to join the domain. Tokens are automatically deleted after usage. Only domain managers can create tokens.
def _configure_root_logger(self):
    """Initialise logging system.

    Verbose mode logs to stdout; otherwise a size-rotated log file is used.
    """
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)
    if self.args.verbose:
        handler = logging.StreamHandler(sys.stdout)
    else:
        handler = logging.handlers.RotatingFileHandler(
            common.LOG_FILE,
            maxBytes=common.MAX_LOG_SIZE,
            backupCount=common.MAX_LOG_COUNT
        )
    # the handler filters at INFO even though the root logger allows DEBUG
    handler.setLevel(logging.INFO)
    handler.setFormatter(logging.Formatter(common.LOG_FORMAT))
    root_logger.addHandler(handler)
Initialise logging system
def _get_servers_deque(servers, database):
    """Returns deque of servers for given tuple of servers and database name.

    The deque keeps the active server at the front; rotating it on failure
    remembers the last successful server between connect calls.
    """
    key = (servers, database)
    try:
        return _servers_deques[key]
    except KeyError:
        rotation = deque(servers)
        _servers_deques[key] = rotation
        return rotation
Returns deque of servers for given tuple of servers and database name. This deque has the active server at the beginning; if the first server is not accessible at the moment, the deque will be rotated: the second server will be moved to the first position, the third to the second position, etc., and the previously first server will be moved to the last position. This allows remembering the last successful server between calls to the connect function.
def icetea_main():
    """Main function for running Icetea.

    Calls sys.exit with the return code to exit.

    :return: Nothing.
    """
    # imported lazily so importing this module stays cheap
    from icetea_lib import IceteaManager
    manager = IceteaManager.IceteaManager()
    return_code = manager.run()
    sys.exit(return_code)
Main function for running Icetea. Calls sys.exit with the return code to exit. :return: Nothing.
def get_sections_2d_nts(self, sortby=None):
    """Get high GO IDs that are actually used to group current set of
    GO IDs, as (section name, namedtuple list) pairs."""
    return [
        (section_name, self.gosubdag.get_nts(hdrgos_actual, sortby=sortby))
        for section_name, hdrgos_actual in self.get_sections_2d()
    ]
Get high GO IDs that are actually used to group current set of GO IDs.
def _patch(self, uri, data):
    """Simple PATCH operation for a given path.

    The body is expected to list JSON-Patch operations to perform on the
    data: add, remove, replace, move, copy, test, e.g.
    ``[{"op": "test", "path": "/a/b/c", "value": "foo"}]``.

    :param uri: target URI
    :param data: JSON-serialisable list of patch operations
    :return: the response object on HTTP 204; otherwise logs and raises
    """
    headers = self._get_headers()
    response = self.session.patch(uri, headers=headers, data=json.dumps(data))
    # 204 No Content is the only success status expected from PATCH here
    if response.status_code == 204:
        return response
    else:
        logging.error(response.content)
        response.raise_for_status()
Simple PATCH operation for a given path. The body is expected to list operations to perform to update the data. Operations include: - add - remove - replace - move - copy - test [ { "op": "test", "path": "/a/b/c", "value": "foo" }, ]
def send_http_error(self, http_code, cim_error=None, cim_error_details=None, headers=None):
    """Send an HTTP response back to the WBEM server that indicates an
    error at the HTTP level.

    :param http_code: HTTP status code to send
    :param cim_error: optional value for the CIMError header
    :param cim_error_details: optional value for the CIMErrorDetails header
    :param headers: optional iterable of (header, value) pairs to add
    """
    self.send_response(http_code, http_client.responses.get(http_code, ''))
    self.send_header("CIMExport", "MethodResponse")
    if cim_error is not None:
        self.send_header("CIMError", cim_error)
    if cim_error_details is not None:
        self.send_header("CIMErrorDetails", cim_error_details)
    if headers is not None:
        for header, value in headers:
            self.send_header(header, value)
    self.end_headers()
    # record the error server-side as well
    self.log('%s: HTTP status %s; CIMError: %s, CIMErrorDetails: %s',
             (self._get_log_prefix(), http_code, cim_error, cim_error_details),
             logging.WARNING)
Send an HTTP response back to the WBEM server that indicates an error at the HTTP level.
def _dump(self, tree): schema = [] if tree.tables: for table in tree.tables: desc = self.describe(table, refresh=True, require=True) schema.append(desc.schema) else: for table in self.describe_all(): schema.append(table.schema) return "\n\n".join(schema)
Run a DUMP statement
def create_partition(self, org_name, part_name, dci_id, vrf_prof, service_node_ip=None, desc=None):
    """Create partition on the DCNM.

    :param org_name: name of organization to be created
    :param part_name: name of partition to be created
    :param dci_id: DCI ID
    :param vrf_prof: VRF profile for the partition
    :param service_node_ip: Specifies the Default route IP address.
    :param desc: string that describes organization
    :raises DfaClientRequestFailed: if the DCNM request does not succeed
    """
    # fall back to the organization name as the description
    desc = desc or org_name
    res = self._create_or_update_partition(org_name, part_name, desc,
                                           dci_id=dci_id,
                                           service_node_ip=service_node_ip,
                                           vrf_prof=vrf_prof)
    if res and res.status_code in self._resp_ok:
        LOG.debug("Created %s partition in DCNM.", part_name)
    else:
        LOG.error("Failed to create %(part)s partition in DCNM."
                  "Response: %(res)s", ({'part': part_name, 'res': res}))
        raise dexc.DfaClientRequestFailed(reason=self._failure_msg(res))
Create partition on the DCNM. :param org_name: name of organization to be created :param part_name: name of partition to be created :param dci_id: DCI ID :vrf_prof: VRF profile for the partition :param service_node_ip: Specifies the Default route IP address. :param desc: string that describes organization
def fsdecode(path, os_name=os.name, fs_encoding=FS_ENCODING, errors=None):
    """Decode given path.

    :param path: path will be decoded if using bytes
    :type path: bytes or str
    :param os_name: operative system name, defaults to os.name
    :type os_name: str
    :param fs_encoding: current filesystem encoding, defaults to autodetected
    :type fs_encoding: str
    :return: decoded path
    :rtype: str
    """
    # str (or anything non-bytes) passes through untouched
    if not isinstance(path, bytes):
        return path
    if not errors:
        # legacy Python and Windows cannot round-trip undecodable bytes
        strict_needed = PY_LEGACY or os_name == 'nt'
        errors = 'strict' if strict_needed else 'surrogateescape'
    return path.decode(fs_encoding, errors=errors)
Decode given path. :param path: path will be decoded if using bytes :type path: bytes or str :param os_name: operative system name, defaults to os.name :type os_name: str :param fs_encoding: current filesystem encoding, defaults to autodetected :type fs_encoding: str :return: decoded path :rtype: str
def _general_approximating_model(self, beta, T, Z, R, Q, h_approx): H = np.ones(self.data_length)*h_approx mu = np.zeros(self.data_length) return H, mu
Creates simplest kind of approximating Gaussian model Parameters ---------- beta : np.array Contains untransformed starting values for latent variables T, Z, R, Q : np.array State space matrices used in KFS algorithm h_approx : float Value to use for the H matrix Returns ---------- H : np.array Approximating measurement variance matrix mu : np.array Approximating measurement constants
def histpath(self):
    """Returns the full path to the console history file."""
    # imported lazily to keep module import cheap
    from os import path
    from fortpy import settings
    # history lives alongside fortpy's other cached artifacts
    return path.join(settings.cache_directory, "history")
Returns the full path to the console history file.
def get_action_cache_key(name, argument):
    """Get an action cache key string.

    The key is the action name, with the argument appended after ``::``
    when a truthy argument is given.
    """
    if argument:
        return '{}::{}'.format(name, argument)
    return str(name)
Get an action cache key string.
def remove_aliases(self_or_cls, aliases):
    """Remove a list of aliases.

    Drops every alias key whose value appears in *aliases*.
    """
    # FIX: iterate over a snapshot -- popping from the dict while iterating
    # .items() raises RuntimeError ("dictionary changed size") in Python 3.
    for key, value in list(self_or_cls.aliases.items()):
        if value in aliases:
            self_or_cls.aliases.pop(key)
Remove a list of aliases.
def ChangeUserStatus(self, Status):
    """Changes the online status for the current user.

    :Parameters:
      Status : `enums`.cus*
        New online status for the user.

    :note: This function waits until the online status changes.
      Alternatively, use the `CurrentUserStatus` property to perform an
      immediate change of status.
    """
    # no-op when already in the requested status (case-insensitive)
    if self.CurrentUserStatus.upper() == Status.upper():
        return
    self._ChangeUserStatus_Event = threading.Event()
    self._ChangeUserStatus_Status = Status.upper()
    # the UserStatus handler sets the event once the change is observed
    self.RegisterEventHandler('UserStatus', self._ChangeUserStatus_UserStatus)
    self.CurrentUserStatus = Status
    self._ChangeUserStatus_Event.wait()
    self.UnregisterEventHandler('UserStatus', self._ChangeUserStatus_UserStatus)
    del self._ChangeUserStatus_Event, self._ChangeUserStatus_Status
Changes the online status for the current user. :Parameters: Status : `enums`.cus* New online status for the user. :note: This function waits until the online status changes. Alternatively, use the `CurrentUserStatus` property to perform an immediate change of status.
def sync_streams(self):
    """Access the sync_streams.

    :returns: twilio.rest.sync.v1.service.sync_stream.SyncStreamList
    :rtype: twilio.rest.sync.v1.service.sync_stream.SyncStreamList
    """
    # lazily build and cache the list resource on first access
    if self._sync_streams is None:
        self._sync_streams = SyncStreamList(self._version, service_sid=self._solution['sid'], )
    return self._sync_streams
Access the sync_streams :returns: twilio.rest.sync.v1.service.sync_stream.SyncStreamList :rtype: twilio.rest.sync.v1.service.sync_stream.SyncStreamList
def prune(self):
    """Prune the branch of empty nodes.

    Depth-first: each child is pruned before being tested for emptiness,
    so a node that becomes empty once its own children are removed is
    dropped as well.
    """
    for child in self.children:
        child.prune()
    empties = [child for child in self.children if child.isempty(False)]
    for child in empties:
        self.children.remove(child)
Prune the branch of empty nodes.
def energy_ratio_by_chunks(x, param):
    """Calculates the sum of squares of chunk i out of N chunks expressed
    as a ratio with the sum of squares over the whole series.

    If the series length is not a multiple of num_segments, remaining
    points are distributed on the leading chunks (np.array_split rules).

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param param: contains dictionaries {"num_segments": N,
        "segment_focus": i} with N, i both ints
    :return: the feature values
    :return type: list of tuples (index, data)
    """
    total_energy = np.sum(x ** 2)
    results = []
    for cfg in param:
        num_segments = cfg["num_segments"]
        segment_focus = cfg["segment_focus"]
        assert segment_focus < num_segments
        assert num_segments > 0
        focus_chunk = np.array_split(x, num_segments)[segment_focus]
        ratio = np.sum(focus_chunk ** 2.0) / total_energy
        results.append(
            ("num_segments_{}__segment_focus_{}".format(num_segments, segment_focus),
             ratio))
    return results
Calculates the sum of squares of chunk i out of N chunks expressed as a ratio with the sum of squares over the whole series. Takes as input parameters the number num_segments of segments to divide the series into and segment_focus which is the segment number (starting at zero) to return a feature on. If the length of the time series is not a multiple of the number of segments, the remaining data points are distributed on the bins starting from the first. For example, if your time series consists of 8 entries, the first two bins will contain 3 and the last two values, e.g. `[ 0., 1., 2.], [ 3., 4., 5.]` and `[ 6., 7.]`. Note that the answer for `num_segments = 1` is a trivial "1" but we handle this scenario in case somebody calls it. Sum of the ratios should be 1.0. :param x: the time series to calculate the feature of :type x: numpy.ndarray :param param: contains dictionaries {"num_segments": N, "segment_focus": i} with N, i both ints :return: the feature values :return type: list of tuples (index, data)
def _handle_heading(self, token):
    """Handle a case where a heading is at the head of the tokens.

    Consumes tokens until the matching HeadingEnd, building the title
    from everything in between.

    :raises ParserError: if the heading is never closed.
    """
    level = token.level
    # open a new write buffer for the heading title
    self._push()
    while self._tokens:
        token = self._tokens.pop()
        if isinstance(token, tokens.HeadingEnd):
            title = self._pop()
            return Heading(title, level)
        else:
            self._write(self._handle_token(token))
    raise ParserError("_handle_heading() missed a close token")
Handle a case where a heading is at the head of the tokens.
def quantity(*args):
    """Create a quantity from a scalar, vector, string or dict.

    Example::

        q1 = quantity(1.0, "km/s")
        q2 = quantity("1km/s")
        q3 = quantity([1.0, 2.0], "km/s")

    :raises TypeError: if a single argument is not a str, dict, Quantity
        or QuantVec.
    """
    if len(args) == 1:
        if isinstance(args[0], str):
            return Quantity(from_string(args[0]))
        elif isinstance(args[0], dict):
            # a dict whose "value" is a sequence encodes a vector quantity
            if hasattr(args[0]["value"], "__len__"):
                return QuantVec(from_dict_v(args[0]))
            else:
                return Quantity(from_dict(args[0]))
        elif isinstance(args[0], (Quantity, QuantVec)):
            return args[0]
        else:
            # FIX: the message used to be truncated ("Invalid argument type for")
            raise TypeError("Invalid argument type for quantity: {}".format(
                type(args[0]).__name__))
    else:
        # (value, unit) form: a sequence value builds a vector quantity
        if hasattr(args[0], "__len__"):
            return QuantVec(*args)
        else:
            return Quantity(*args)
Create a quantity. This can be from a scalar or vector. Example:: q1 = quantity(1.0, "km/s") q2 = quantity("1km/s") q1 = quantity([1.0,2.0], "km/s")
def connect_container_to_network(self, container, net_id, ipv4_address=None,
                                 ipv6_address=None, aliases=None, links=None,
                                 link_local_ips=None):
    """Connect a container to a network.

    Args:
        container (str): container id/name to be connected to the network
        net_id (str): network id
        ipv4_address (str): IPv4 address on the network. Defaults to ``None``.
        ipv6_address (str): IPv6 address on the network. Defaults to ``None``.
        aliases (list): aliases for this endpoint. Defaults to ``None``.
        links (list): links for this endpoint. Defaults to ``None``.
        link_local_ips (list): link-local (IPv4/IPv6) addresses.
    """
    endpoint = self.create_endpoint_config(
        aliases=aliases,
        links=links,
        ipv4_address=ipv4_address,
        ipv6_address=ipv6_address,
        link_local_ips=link_local_ips
    )
    payload = {"Container": container, "EndpointConfig": endpoint}
    url = self._url("/networks/{0}/connect", net_id)
    response = self._post_json(url, data=payload)
    self._raise_for_status(response)
Connect a container to a network. Args: container (str): container-id/name to be connected to the network net_id (str): network id aliases (:py:class:`list`): A list of aliases for this endpoint. Names in that list can be used within the network to reach the container. Defaults to ``None``. links (:py:class:`list`): A list of links for this endpoint. Containers declared in this list will be linked to this container. Defaults to ``None``. ipv4_address (str): The IP address of this container on the network, using the IPv4 protocol. Defaults to ``None``. ipv6_address (str): The IP address of this container on the network, using the IPv6 protocol. Defaults to ``None``. link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6) addresses.
def _Moran_BV_Matrix_array(variables, w, permutations=0, varnames=None):
    """Base calculation for MORAN_BV_Matrix.

    Computes bivariate Moran statistics for every ordered pair of variables
    and returns them keyed by (i, j) index pairs, with ``varnames`` attached
    to each result.
    """
    # BUG FIX: k must be computed before it is used to build the default
    # varnames (previously a NameError when varnames was None).
    k = len(variables)
    if varnames is None:
        varnames = ['x{}'.format(i) for i in range(k)]
    rk = list(range(0, k - 1))
    results = {}
    for i in rk:
        for j in range(i + 1, k):
            y1 = variables[i]
            y2 = variables[j]
            results[i, j] = Moran_BV(y1, y2, w, permutations=permutations)
            results[j, i] = Moran_BV(y2, y1, w, permutations=permutations)
            results[i, j].varnames = {'x': varnames[i], 'y': varnames[j]}
            results[j, i].varnames = {'x': varnames[j], 'y': varnames[i]}
    return results
Base calculation for MORAN_BV_Matrix
def isCode(self, block, column):
    """Check if the character at the given column is code."""
    dataObject = block.userData()
    if dataObject is None:
        return self._syntax.isCode(None, column)
    return self._syntax.isCode(dataObject.data, column)
Check if the character at the given column is code
def safe_round(self, x):
    """Convert the value in column ``self.col_name`` of row ``x`` to a
    numeric value, clamping infinities, substituting ``self.default_val``
    for NaN, and rounding to int when ``self.subtype`` is 'integer'.
    """
    val = x[self.col_name]
    # Clamp infinities to the largest representable integer magnitude.
    if np.isposinf(val):
        val = sys.maxsize
    elif np.isneginf(val):
        val = -sys.maxsize
    # NaN falls back to the configured default.
    if np.isnan(val):
        val = self.default_val
    return int(round(val)) if self.subtype == 'integer' else val
Returns a converter that takes in a value and turns it into an integer, if necessary. Args: col_name(str): Name of the column. subtype(str): Numeric subtype of the values. Returns: function
def load_irac_psf(channel, show_progress=False):
    """Load a Spitzer IRAC PSF image for the given channel (1-4).

    Returns the primary HDU of the downloaded FITS file.
    """
    channel = int(channel)
    if not 1 <= channel <= 4:
        raise ValueError('channel must be 1, 2, 3, or 4')
    filename = 'irac_ch{0}_flight.fits'.format(channel)
    path = get_path(filename, location='remote', show_progress=show_progress)
    return fits.open(path)[0]
Load a Spitzer IRAC PSF image. Parameters ---------- channel : int (1-4) The IRAC channel number: * Channel 1: 3.6 microns * Channel 2: 4.5 microns * Channel 3: 5.8 microns * Channel 4: 8.0 microns show_progress : bool, optional Whether to display a progress bar during the download (default is `False`). Returns ------- hdu : `~astropy.io.fits.ImageHDU` The IRAC PSF in a FITS image HDU. Examples -------- .. plot:: :include-source: from astropy.visualization import LogStretch, ImageNormalize from photutils.datasets import load_irac_psf hdu1 = load_irac_psf(1) hdu2 = load_irac_psf(2) hdu3 = load_irac_psf(3) hdu4 = load_irac_psf(4) norm = ImageNormalize(hdu1.data, stretch=LogStretch()) fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2) ax1.imshow(hdu1.data, origin='lower', interpolation='nearest', norm=norm) ax1.set_title('IRAC Ch1 PSF') ax2.imshow(hdu2.data, origin='lower', interpolation='nearest', norm=norm) ax2.set_title('IRAC Ch2 PSF') ax3.imshow(hdu3.data, origin='lower', interpolation='nearest', norm=norm) ax3.set_title('IRAC Ch3 PSF') ax4.imshow(hdu4.data, origin='lower', interpolation='nearest', norm=norm) ax4.set_title('IRAC Ch4 PSF') plt.tight_layout() plt.show()
def release(self, forceRelease=False):
    """Release the lock (a directory at ``self.lockPath``).

    @param forceRelease <bool> default False - If True, release the lock
        even if we do not hold it.
    @return - True if the lock is released, otherwise False.
    """
    if not self.held:
        if forceRelease is False:
            return False
        # Forced release: pretend we hold the lock and fall through.
        self.held = True
    if not os.path.exists(self.lockPath):
        # Nothing to remove; we no longer hold anything.
        self.held = False
        self.acquiredAt = None
        return True
    if forceRelease is False:
        # If our hold exceeded maxLockAge, the lock has expired from under
        # us; report failure without removing it.
        if self.maxLockAge and time.time() > self.acquiredAt + self.maxLockAge:
            self.held = False
            self.acquiredAt = None
            return False
    self.acquiredAt = None
    try:
        os.rmdir(self.lockPath)
        self.held = False
        return True
    # BUG FIX: was a bare `except:` which also swallowed KeyboardInterrupt
    # and SystemExit; only filesystem errors should mean "not released".
    except OSError:
        self.held = False
        return False
release - Release the lock. @param forceRelease <bool> default False - If True, will release the lock even if we don't hold it. @return - True if lock is released, otherwise False
def derive_single_object_url_pattern(slug_url_kwarg, path, action):
    """Utility function called by class methods for single object views.

    Builds a URL regex of the form ``^<path>/<action>/<capture>/$`` where
    the capture group is the slug kwarg if given, else the primary key.
    """
    if slug_url_kwarg:
        capture = r'(?P<%s>[^/]+)' % slug_url_kwarg
    else:
        capture = r'(?P<pk>\d+)'
    return r'^%s/%s/%s/$' % (path, action, capture)
Utility function called by class methods for single object views
def python_job(self, function, parameters=None):
    """Register a python callable to run.

    function   : Python callable to execute
    parameters : Parameters to pass to the function

    If ``self.name`` is unset, the callable's ``__name__`` is used.
    Returns 0 on success.
    """
    if not callable(function):
        raise utils.StimelaCabRuntimeError('Object given as function is not callable')
    if self.name is None:
        self.name = function.__name__
    self.job = dict(function=function, parameters=parameters)
    return 0
Run a python function.

function : Python callable to execute
parameters : Parameters to pass to the function

If ``self.name`` is not already set, the callable's ``__name__`` is used.
Returns 0 on success.
def list_builds(self, field_selector=None, koji_task_id=None, running=None,
                labels=None):
    """List builds with matching fields.

    :param field_selector: str, field selector for Builds
    :param koji_task_id: str, only list builds for Koji Task ID
    :param running: if truthy, restrict to builds not in a finished state
    :param labels: label selector for Builds
    :return: list of BuildResponse
    """
    if running:
        # Exclude every finished state from the selector.
        exclusions = ["status!={status}".format(status=status.capitalize())
                      for status in BUILD_FINISHED_STATES]
        running_fs = ",".join(exclusions)
        if field_selector:
            field_selector = ','.join([field_selector, running_fs])
        else:
            field_selector = running_fs
    response = self.os.list_builds(field_selector=field_selector,
                                   koji_task_id=koji_task_id,
                                   labels=labels)
    return [BuildResponse(build, self)
            for build in response.json()["items"]]
List builds with matching fields :param field_selector: str, field selector for Builds :param koji_task_id: str, only list builds for Koji Task ID :return: BuildResponse list
def unicode_to_hex(unicode_string):
    """Return a string with the ``U+XXXX`` hexadecimal codepoint of each
    character in ``unicode_string``, space-separated.

    Returns ``None`` if ``unicode_string`` is ``None``.

    Example::

        a  => U+0061
        ab => U+0061 U+0062
    """
    if unicode_string is None:
        return None
    # %04X upper-cases and zero-pads to at least four hex digits.
    return u" ".join(u"U+%04X" % ord(ch) for ch in unicode_string)
Return a string containing the Unicode hexadecimal codepoint of each Unicode character in the given Unicode string. Return ``None`` if ``unicode_string`` is ``None``. Example:: a => U+0061 ab => U+0061 U+0062 :param str unicode_string: the Unicode string to convert :rtype: (Unicode) str
def edit_securitygroup(self, group_id, name=None, description=None):
    """Edit security group details.

    :param int group_id: The ID of the security group
    :param string name: The name of the security group
    :param string description: The description of the security group
    :return: result of editObject, or False when nothing was changed
    """
    changes = {}
    if name:
        changes['name'] = name
    if description:
        changes['description'] = description
    if not changes:
        # Nothing to update.
        return False
    return self.security_group.editObject(changes, id=group_id)
Edit security group details. :param int group_id: The ID of the security group :param string name: The name of the security group :param string description: The description of the security group
def _charlist(self, data) -> list:
    """Private method to return the variables in a SAS Data set that are of type char.

    :param data: SAS Data object to process
    :return: list of character variable names, casefolded
    :rtype: list
    """
    # NOTE(review): char_string is assigned self.sas.nosub, yet it is used
    # below as a .format() template for the submitted SAS code.  It looks
    # like the SAS code template string is missing here — verify against
    # the upstream source.
    char_string = nosub = self.sas.nosub
    # Temporarily disable 'nosub' so the generated SAS code actually runs,
    # then restore the saved value.
    self.sas.nosub = False
    ll = self.sas.submit(char_string.format(data.libref, data.table + data._dsopts()))
    self.sas.nosub = nosub
    # The variable names appear in the log between the "VARLIST=" and
    # "VARLISTend=" markers; extract that region.
    l2 = ll['LOG'].partition("VARLIST=\n")
    l2 = l2[2].rpartition("VARLISTend=\n")
    charlist1 = l2[0].split("\n")
    # Drop the trailing empty entry produced by the final newline.
    del charlist1[len(charlist1) - 1]
    charlist1 = [x.casefold() for x in charlist1]
    return charlist1
Private method to return the variables in a SAS Data set that are of type char :param data: SAS Data object to process :return: list of character variables :rtype: list
def delete(self):
    """Delete the object on the server.

    Returns without doing anything if the object is new (has no id).
    Reloads first when not yet loaded, so a current etag is available.
    """
    if not self.id:
        return
    if not self._loaded:
        self.reload()
    # BUG FIX: previously the HTTP delete only ran in the not-loaded branch;
    # already-loaded objects silently fell through without being deleted.
    return self.http_delete(self.id, etag=self.etag)
Deletes the object. Returns without doing anything if the object is new.
def generate(env):
    """Add Builders and construction variables for pdflatex to an Environment.

    Lazily creates the module-level pdflatex actions on first call, then
    registers the .ltx/.latex suffixes, actions and emitters on the
    environment's PDF builder.
    """
    global PDFLaTeXAction
    if PDFLaTeXAction is None:
        # Action that runs the pdflatex command line.
        PDFLaTeXAction = SCons.Action.Action('$PDFLATEXCOM', '$PDFLATEXCOMSTR')
    global PDFLaTeXAuxAction
    if PDFLaTeXAuxAction is None:
        # Action wrapping PDFLaTeXAuxFunction (handles aux-file reruns).
        PDFLaTeXAuxAction = SCons.Action.Action(PDFLaTeXAuxFunction, strfunction=SCons.Tool.tex.TeXLaTeXStrFunction)
    env.AppendUnique(LATEXSUFFIXES=SCons.Tool.LaTeXSuffixes)
    # Imported here rather than at module top — presumably to avoid an
    # import cycle; confirm.
    from . import pdf
    pdf.generate(env)
    bld = env['BUILDERS']['PDF']
    bld.add_action('.ltx', PDFLaTeXAuxAction)
    bld.add_action('.latex', PDFLaTeXAuxAction)
    bld.add_emitter('.ltx', SCons.Tool.tex.tex_pdf_emitter)
    bld.add_emitter('.latex', SCons.Tool.tex.tex_pdf_emitter)
    SCons.Tool.tex.generate_common(env)
Add Builders and construction variables for pdflatex to an Environment.
def _set(self):
    """Called internally by Client to indicate this request has finished.

    Sets the completion event and, if a completion callback was supplied,
    runs it with this request's id.
    """
    self.__event.set()
    callback = self._complete_func
    if callback:
        self.__run_completion_func(callback, self.id_)
Called internally by Client to indicate this request has finished
def from_config(cls, cfg, **kwargs):
    """Return an instance configured with the ``cfg`` dict.

    ``kwargs`` entries override matching keys in ``cfg``.
    """
    cfg = dict(cfg, **kwargs)
    pythonpath = cfg.get('pythonpath', [])
    if 'here' in cfg:
        pythonpath.append(cfg['here'])
    for path in pythonpath:
        sys.path.append(os.path.expanduser(path))
    # 'irc3d' for server classes, plain 'irc3' otherwise.
    prog = cls.server and 'irc3d' or 'irc3'
    if cfg.get('debug'):
        # NOTE(review): this mutates the *class* attribute, so the debug
        # category persists for all later instances — confirm intended.
        cls.venusian_categories.append(prog + '.debug')
    if cfg.get('interactive'):
        # Interactive mode uses the testing double of this class.
        import irc3.testing
        context = getattr(irc3.testing, cls.__name__)(**cfg)
    else:
        context = cls(**cfg)
    if cfg.get('raw'):
        context.include('irc3.plugins.log', venusian_categories=[prog + '.debug'])
    return context
return an instance configured with the ``cfg`` dict
def dloglikarray(self):
    """Derivative of `loglik` with respect to `paramsarray`.

    Returns a 1-D float array with one entry per parameter index in
    ``self._index_to_param``; string params are looked up directly in
    ``self.dloglik``, tuple params as a nested lookup.
    """
    # Local import: `scipy.ndarray` was a deprecated alias of numpy's
    # ndarray and has been removed from modern SciPy releases.
    import numpy
    assert self.dparamscurrent, "dloglikarray requires paramscurrent == True"
    nparams = len(self._index_to_param)
    dloglikarray = numpy.ndarray(shape=(nparams,), dtype='float')
    for (i, param) in self._index_to_param.items():
        if isinstance(param, str):
            dloglikarray[i] = self.dloglik[param]
        elif isinstance(param, tuple):
            dloglikarray[i] = self.dloglik[param[0]][param[1]]
    return dloglikarray
Derivative of `loglik` with respect to `paramsarray`.
def require_editable(f):
    """Decorator: make sure the registry key is editable before editing it.

    Raises RegistryKeyNotEditable when ``self._edit`` is falsy.
    """
    import functools

    # BUG FIX: without functools.wraps the wrapper hid the decorated
    # method's name and docstring.
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        if not self._edit:
            raise RegistryKeyNotEditable("The key is not set as editable.")
        return f(self, *args, **kwargs)
    return wrapper
Makes sure the registry key is editable before trying to edit it.
def lookup_hlr(self, phonenumber, params=None):
    """Retrieve the information of a specific HLR lookup."""
    if params is None:
        params = {}
    path = 'lookup/' + str(phonenumber) + '/hlr'
    response = self.request(path, 'GET', params)
    return HLR().load(response)
Retrieve the information of a specific HLR lookup.
def IsErrorSuppressedByNolint(category, linenum):
    """Returns true if the specified error category is suppressed on this line.

    Consults the global error_suppressions map populated by
    ParseNolintSuppressions/ResetNolintSuppressions.

    Args:
      category: str, the category of the error.
      linenum: int, the current line number.
    Returns:
      bool, True iff the error should be suppressed due to a NOLINT comment.
    """
    per_category = _error_suppressions.get(category, set())
    # A key of None holds blanket NOLINT suppressions for all categories.
    blanket = _error_suppressions.get(None, set())
    return linenum in per_category or linenum in blanket
Returns true if the specified error category is suppressed on this line. Consults the global error_suppressions map populated by ParseNolintSuppressions/ResetNolintSuppressions. Args: category: str, the category of the error. linenum: int, the current line number. Returns: bool, True iff the error should be suppressed due to a NOLINT comment.
def markdown(iterable, renderer=HTMLRenderer):
    """Output HTML with default settings.

    Enables inline and block-level HTML tags.
    """
    with renderer() as active:
        return active.render(Document(iterable))
Output HTML with default settings. Enables inline and block-level HTML tags.
def connect(self):
    """Create new connection unless we already have one.

    The connection is cached per-thread on ``self._local``.  On failure
    the server is marked dead and another server is tried.
    """
    if not getattr(self._local, 'conn', None):
        try:
            server = self._servers.get()
            logger.debug('Connecting to %s', server)
            self._local.conn = ClientTransport(server, self._framed_transport, self._timeout, self._recycle)
        except (Thrift.TException, socket.timeout, socket.error):
            # NOTE(review): if self._servers.get() itself raised, `server`
            # is unbound here and the logging/mark_dead lines would raise
            # NameError.  Also, the recursive retry is unbounded if every
            # server keeps failing — confirm self._servers eventually
            # raises or succeeds.
            logger.warning('Connection to %s failed.', server)
            self._servers.mark_dead(server)
            return self.connect()
    return self._local.conn
Create new connection unless we already have one.
def _EnforceProcessMemoryLimit(self, memory_limit):
    """Enforces a process memory limit.

    Args:
        memory_limit (int): maximum number of bytes the process is allowed
            to allocate, where 0 represents no limit and None a default of
            4 GiB.
    """
    # The resource module is unavailable on some platforms; do nothing then.
    if not resource:
        return
    if memory_limit is None:
        memory_limit = 4 * 1024 * 1024 * 1024  # default: 4 GiB
    elif memory_limit == 0:
        memory_limit = resource.RLIM_INFINITY
    resource.setrlimit(resource.RLIMIT_DATA, (memory_limit, memory_limit))
Enforces a process memory limit. Args: memory_limit (int): maximum number of bytes the process is allowed to allocate, where 0 represents no limit and None a default of 4 GiB.
def _check_array(self, X, **kwargs):
    """Validate the data argument X.

    NumPy arrays are first converted to single-block dask arrays, then
    passed through ``check_array`` with the given keyword arguments.
    """
    if isinstance(X, np.ndarray):
        # One chunk covering the whole array.
        X = da.from_array(X, X.shape)
    return check_array(X, **kwargs)
Validate the data arguments X and y. By default, NumPy arrays are converted to 1-block dask arrays. Parameters ---------- X, y : array-like
def astype(self, dtype, copy=True):
    """Return a copy of the array after casting to a specified type.

    Parameters
    ----------
    dtype : numpy.dtype or str
        The type of the returned array.
    copy : bool
        Default `True`. When `False` and the requested dtype equals the
        current dtype, `self` is returned instead of a copy.
    """
    if not copy and np.dtype(dtype) == self.dtype:
        return self
    converted = zeros(shape=self.shape, ctx=self.context,
                      dtype=dtype, stype=self.stype)
    self.copyto(converted)
    return converted
Return a copy of the array after casting to a specified type. Parameters ---------- dtype : numpy.dtype or str The type of the returned array. copy : bool Default `True`. By default, astype always returns a newly allocated ndarray on the same context. If this is set to `False`, and the dtype requested is the same as the ndarray's dtype, the ndarray is returned instead of a copy. Examples -------- >>> x = mx.nd.sparse.zeros('row_sparse', (2,3), dtype='float32') >>> y = x.astype('int32') >>> y.dtype <type 'numpy.int32'>
def construct_asset_path(self, asset_path, css_path, output_filename, variant=None):
    """Return a rewritten asset URL for a stylesheet."""
    css_dir = os.path.dirname(css_path).replace('\\', '/')
    public_path = self.absolute_path(asset_path, css_dir)
    if self.embeddable(public_path, variant):
        return "__EMBED__%s" % public_path
    if posixpath.isabs(asset_path):
        # Already absolute: leave untouched.
        return asset_path
    return self.relative_path(public_path, output_filename)
Return a rewritten asset URL for a stylesheet
def URLRabbitmqBroker(url, *, middleware=None):
    """Alias for the RabbitMQ broker that takes a connection URL as a
    positional argument.

    Parameters:
      url(str): A connection string.
      middleware(list[Middleware]): The middleware to add to this broker.
    """
    message = "Use RabbitmqBroker with the 'url' parameter instead of URLRabbitmqBroker."
    warnings.warn(message, DeprecationWarning, stacklevel=2)
    return RabbitmqBroker(url=url, middleware=middleware)
Alias for the RabbitMQ broker that takes a connection URL as a positional argument. Parameters: url(str): A connection string. middleware(list[Middleware]): The middleware to add to this broker.
def _format_description(ctx):
    """Format the description for a given `click.Command`.

    The help text is parsed as reStructuredText-ish lines; a ``\\b`` line
    turns on literal-block mode ('| ' prefixes) until the next blank line.
    """
    help_string = ctx.command.help or ctx.command.short_help
    if not help_string:
        return
    bar_enabled = False
    for line in statemachine.string2lines(
            help_string, tab_width=4, convert_whitespace=True):
        if line == '\b':
            bar_enabled = True
            continue
        if line == '':
            bar_enabled = False
        yield ('| ' + line) if bar_enabled else line
    yield ''
Format the description for a given `click.Command`. We parse this as reStructuredText, allowing users to embed rich information in their help messages if they so choose.
def latrec(radius, longitude, latitude):
    """Convert from latitudinal coordinates to rectangular coordinates.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/latrec_c.html

    :param radius: Distance of a point from the origin.
    :param longitude: Longitude of point in radians.
    :param latitude: Latitude of point in radians.
    :return: Rectangular coordinates of the point (3-element array).
    """
    rectan = stypes.emptyDoubleVector(3)
    libspice.latrec_c(ctypes.c_double(radius),
                      ctypes.c_double(longitude),
                      ctypes.c_double(latitude),
                      rectan)
    return stypes.cVectorToPython(rectan)
Convert from latitudinal coordinates to rectangular coordinates. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/latrec_c.html :param radius: Distance of a point from the origin. :type radius: float :param longitude: Longitude of point in radians. :type longitude: float :param latitude: Latitude of point in radians. :type latitude: float :return: Rectangular coordinates of the point. :rtype: 3-Element Array of floats
def get_choice(prompt, choices):
    """Asks for a single choice out of multiple items.

    Prints numbered options, reads the user's selection via get_input, and
    returns the chosen item.  An empty response exits the program.
    """
    print()
    valid = []
    for number, choice in enumerate(choices, start=1):
        print("\t{}): '{}'\n".format(number, choice))
        valid.append(str(number))
    response = get_input(prompt, tuple(valid) + ('',))
    if not response:
        print("Exiting...")
        exit()
    return choices[int(response) - 1]
Asks for a single choice out of multiple items. Given those items, and a prompt to ask the user with
def _check_download_dir(link, download_dir, hashes):
    """Check download_dir for a previously downloaded file with correct hash.

    If a correct file is found return its path, else None.
    """
    download_path = os.path.join(download_dir, link.filename)
    if not os.path.exists(download_path):
        return None
    logger.info('File was already downloaded %s', download_path)
    if hashes:
        try:
            hashes.check_against_path(download_path)
        except HashMismatch:
            # Stale/corrupt file: remove it and force a fresh download.
            logger.warning(
                'Previously-downloaded file %s has bad hash. '
                'Re-downloading.', download_path
            )
            os.unlink(download_path)
            return None
    return download_path
Check download_dir for previously downloaded file with correct hash If a correct file is found return its path else None
def getOutputElementCount(self, name):
    """Return the number of elements for the given output."""
    valid_outputs = ("activeCells", "predictedCells",
                     "predictedActiveCells", "winnerCells")
    if name not in valid_outputs:
        raise Exception("Invalid output name specified: %s" % name)
    return self.cellsPerColumn * self.columnCount
Return the number of elements for the given output.
def inspect_node(self, node_id):
    """Retrieve low-level information about a swarm node.

    Args:
        node_id (string): ID of the node to be inspected.

    Returns:
        A dictionary containing data about this node.
    """
    endpoint = self._url('/nodes/{0}', node_id)
    response = self._get(endpoint)
    return self._result(response, True)
Retrieve low-level information about a swarm node Args: node_id (string): ID of the node to be inspected. Returns: A dictionary containing data about this node. Raises: :py:class:`docker.errors.APIError` If the server returns an error.
def __type_check_attributes(self, node: yaml.Node, mapping: CommentedMap,
                            argspec: inspect.FullArgSpec) -> None:
    """Ensure all attributes have a matching constructor argument.

    Checks that every key in the mapping has a constructor argument of a
    matching type.  If the constructor accepts ``yatiml_extra``, extra
    attributes are allowed and no error is raised for them.

    Args:
        node: The node we're processing (used for error positions).
        mapping: The mapping with constructed subobjects.
        argspec: The full argument spec of the class's constructor.
    """
    logger.debug('Checking for extraneous attributes')
    logger.debug('Constructor arguments: {}, mapping: {}'.format(
        argspec.args, list(mapping.keys())))
    for key, value in mapping.items():
        # Only string keys can map onto Python constructor arguments.
        if not isinstance(key, str):
            raise RecognitionError(('{}{}YAtiML only supports strings'
                                    ' for mapping keys').format(
                                        node.start_mark, os.linesep))
        if key not in argspec.args and 'yatiml_extra' not in argspec.args:
            raise RecognitionError(
                ('{}{}Found additional attributes'
                 ' and {} does not support those').format(
                     node.start_mark, os.linesep, self.class_.__name__))
        # Known argument: its value must match the annotated type.
        if key in argspec.args and not self.__type_matches(
                value, argspec.annotations[key]):
            raise RecognitionError(('{}{}Expected attribute {} to be of'
                                    ' type {} but it is a(n) {}').format(
                                        node.start_mark, os.linesep, key,
                                        argspec.annotations[key], type(value)))
Ensure all attributes have a matching constructor argument.

This checks that there is a constructor argument with a matching type
for each existing attribute.

If the class has a yatiml_extra attribute, then extra attributes are
okay and no error will be raised if they exist.

Args:
    node: The node we're processing
    mapping: The mapping with constructed subobjects
    argspec: The full argument spec of the class's constructor,
        including self and yatiml_extra, if applicable
def get_node(self, goid, goobj):
    """Return pydot node."""
    fillcolor = self.go2color.get(goid, "white")
    bordercolor = self.objcolor.get_bordercolor(goid)
    return pydot.Node(
        self.get_node_text(goid, goobj),
        shape="box",
        style="rounded, filled",
        fillcolor=fillcolor,
        color=bordercolor)
Return pydot node.
def minimum_eigen_vector(x, num_steps, learning_rate, vector_prod_fn):
    """Computes the eigenvector corresponding to the minimum eigenvalue.

    Args:
      x: initial value of the eigenvector.
      num_steps: number of optimization steps.
      learning_rate: learning rate.
      vector_prod_fn: function which takes x and returns the product H*x.

    Returns:
      approximate value of the eigenvector of H with the smallest (by
      absolute value) eigenvalue, obtained by iterating eig_one_step.
    """
    vec = tf.nn.l2_normalize(x)
    for _step in range(num_steps):
        vec = eig_one_step(vec, learning_rate, vector_prod_fn)
    return vec
Computes eigenvector which corresponds to minimum eigenvalue. Args: x: initial value of eigenvector. num_steps: number of optimization steps. learning_rate: learning rate. vector_prod_fn: function which takes x and returns product H*x. Returns: approximate value of eigenvector. This function finds approximate value of eigenvector of matrix H which corresponds to smallest (by absolute value) eigenvalue of H. It works by solving optimization problem x^{T}*H*x -> min.
def show_xticklabels_for_all(self, row_column_list=None):
    """Show the x-axis tick labels for all specified subplots.

    :param row_column_list: a list containing (row, column) tuples to
        specify the subplots, or None to indicate *all* subplots.
    :type row_column_list: list or None
    """
    if row_column_list is None:
        for subplot in self.subplots:
            subplot.show_xticklabels()
        return
    for row, column in row_column_list:
        self.show_xticklabels(row, column)
Show the x-axis tick labels for all specified subplots. :param row_column_list: a list containing (row, column) tuples to specify the subplots, or None to indicate *all* subplots. :type row_column_list: list or None
def walk(self, dirpath):
    """Performs an os.walk on a local or SSH filepath."""
    if not self.is_ssh(dirpath):
        return os.walk(dirpath)
    self._check_ftp()
    remotepath = self._get_remote(dirpath)
    return self._sftp_walk(remotepath)
Performs an os.walk on a local or SSH filepath.
def as_view(cls, *class_args, **class_kwargs):
    """Return view function for use with the routing system, that dispatches
    request to appropriate handler method.

    A fresh instance of the view class is created on every request and
    ``dispatch_request`` is called on it.
    """
    def view(*args, **kwargs):
        # `view` is looked up in the enclosing scope at call time, so after
        # the decorator loop below it refers to the outermost (decorated)
        # function — the one that carries the `view_class` attribute.
        self = view.view_class(*class_args, **class_kwargs)
        return self.dispatch_request(*args, **kwargs)
    if cls.decorators:
        # Set __module__ before decorating so decorators see it.
        view.__module__ = cls.__module__
        for decorator in cls.decorators:
            view = decorator(view)
    # Expose metadata on the (outermost) view function for introspection.
    view.view_class = cls
    view.__doc__ = cls.__doc__
    view.__module__ = cls.__module__
    view.__name__ = cls.__name__
    return view
Return view function for use with the routing system, that dispatches request to appropriate handler method.
def hierarchical_map_vals(func, node, max_depth=None, depth=0):
    """Recursively apply ``func`` to the leaves of a dict tree.

    ``node`` is a dict-like tree whose leaves are non-mapping values.
    When ``max_depth`` is reached, ``map_dict_vals`` is applied to the
    remaining mapping instead of recursing further.  OrderedDict nodes
    are rebuilt as OrderedDicts, other mappings as plain dicts.
    """
    if not hasattr(node, 'items'):
        # Leaf: apply the function directly.
        return func(node)
    if max_depth is not None and depth >= max_depth:
        return map_dict_vals(func, node)
    mapped = [(key, hierarchical_map_vals(func, val, max_depth, depth + 1))
              for key, val in node.items()]
    return OrderedDict(mapped) if isinstance(node, OrderedDict) else dict(mapped)
node is a dict tree like structure with leaves of type list TODO: move to util_dict CommandLine: python -m utool.util_dict --exec-hierarchical_map_vals Example: >>> # ENABLE_DOCTEST >>> from utool.util_dict import * # NOQA >>> import utool as ut >>> item_list = [1, 2, 3, 4, 5, 6, 7, 8] >>> groupids_list = [[1, 2, 1, 2, 1, 2, 1, 2], [3, 2, 2, 2, 3, 1, 1, 1]] >>> tree = ut.hierarchical_group_items(item_list, groupids_list) >>> len_tree = ut.hierarchical_map_vals(len, tree) >>> result = ('len_tree = ' + ut.repr4(len_tree, nl=1)) >>> print(result) len_tree = { 1: {1: 1, 2: 1, 3: 2}, 2: {1: 2, 2: 2}, } Example1: >>> # DISABLE_DOCTEST >>> # UNSTABLE_DOCTEST >>> from utool.util_dict import * # NOQA >>> import utool as ut >>> depth = 4 >>> item_list = list(range(2 ** (depth + 1))) >>> num = len(item_list) // 2 >>> groupids_list = [] >>> total = 0 >>> for level in range(depth): ... num2 = len(item_list) // int((num * 2)) ... #nonflat_levelids = [([total + 2 * x + 1] * num + [total + 2 * x + 2] * num) for x in range(num2)] ... nonflat_levelids = [([1] * num + [2] * num) for x in range(num2)] ... levelids = ut.flatten(nonflat_levelids) ... groupids_list.append(levelids) ... total += num2 * 2 ... num //= 2 >>> print('groupids_list = %s' % (ut.repr4(groupids_list, nl=1),)) >>> print('depth = %r' % (len(groupids_list),)) >>> tree = ut.hierarchical_group_items(item_list, groupids_list) >>> print('tree = ' + ut.repr4(tree, nl=None)) >>> flat_tree_values = list(ut.iflatten_dict_values(tree)) >>> assert sorted(flat_tree_values) == sorted(item_list) >>> print('flat_tree_values = ' + str(flat_tree_values)) >>> #print('flat_tree_keys = ' + str(list(ut.iflatten_dict_keys(tree)))) >>> #print('iflatten_dict_items = ' + str(list(ut.iflatten_dict_items(tree)))) >>> len_tree = ut.hierarchical_map_vals(len, tree, max_depth=4) >>> result = ('len_tree = ' + ut.repr4(len_tree, nl=None)) >>> print(result)
def log(self, level, msg):
    """Write a diagnostic message to a log file or to standard output.

    Arguments:
      level -- Severity level of entry. One of: INFO, WARN, ERROR, FATAL.
      msg -- Message to write to log.
    """
    self._check_session()
    severity = level.upper()
    allowed_levels = ('INFO', 'WARN', 'ERROR', 'FATAL')
    if severity not in allowed_levels:
        raise ValueError('level must be one of: ' + ', '.join(allowed_levels))
    payload = {'log_level': severity, 'message': msg}
    self._rest.post_request('log', None, payload)
Write a diagnostic message to a log file or to standard output. Arguments: level -- Severity level of entry. One of: INFO, WARN, ERROR, FATAL. msg -- Message to write to log.
def get_rmse(self, data_x=None, data_y=None):
    """Get Root Mean Square Error using self.bestfit_func.

    args:
        data_x: array-like, default=self.args["x"]
        data_y: array-like, default=self.args["y"]

    Raises ValueError if data_x and data_y have different lengths.
    """
    if data_x is None:
        data_x = np.array(self.args["x"])
    if data_y is None:
        data_y = np.array(self.args["y"])
    if len(data_x) != len(data_y):
        raise ValueError("Lengths of data_x and data_y are different")
    residuals = self.bestfit_func(data_x) - data_y
    return np.sqrt(np.mean(residuals ** 2))
Get Root Mean Square Error using self.bestfit_func

args:
    data_x: array-like, default=self.args["x"]
        x values to evaluate the best-fit function at
    data_y: array-like, default=self.args["y"]
        observed y values to compare against

returns: the RMSE between bestfit_func(data_x) and data_y
raises: ValueError if data_x and data_y have different lengths
def notify(self, n: int = 1) -> None:
    """Wake ``n`` waiters.

    Already-done waiters are discarded and do not count toward ``n``.
    """
    to_wake = []
    while n and self._waiters:
        waiter = self._waiters.popleft()
        if not waiter.done():
            n -= 1
            to_wake.append(waiter)
    for waiter in to_wake:
        future_set_result_unless_cancelled(waiter, True)
Wake ``n`` waiters.
def loading(self):
    """Context manager for when you need to instantiate entities upon unpacking.

    While the block is active ``_initialized`` is False; it is set True
    once the block completes normally.
    """
    # NOTE(review): this is a generator used as a context manager per the
    # docstring — presumably decorated with @contextlib.contextmanager at
    # its definition site; confirm the decorator is present in the file.
    if getattr(self, '_initialized', False):
        raise ValueError("Already loading")
    self._initialized = False
    yield
    self._initialized = True
Context manager for when you need to instantiate entities upon unpacking