code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def pole_error(ax, fit, **kwargs):
    """Plot the error ellipse of a plane's pole on an `mplstereonet` axis."""
    ellipse = normal_errors(fit.axes, fit.covariance_matrix)
    coords = -N.array(ellipse)
    # One MOVETO to start the path, LINETO for every remaining vertex.
    codes = [Path.MOVETO] + [Path.LINETO] * (len(coords) - 1)
    plot_patch(ax, list(coords), codes, **kwargs)
Plot the error of the pole to a plane on an `mplstereonet` axis object.
def rebuild(self):
    """Rebuild this item according to the scene's current display mode."""
    self.markForRebuild(False)
    self._textData = []
    if self.rebuildBlocked():
        return
    scene = self.scene()
    if not scene:
        return
    mode = scene.currentMode()
    if mode == scene.Mode.Month:
        self.rebuildMonth()
    elif mode in (scene.Mode.Day, scene.Mode.Week):
        self.rebuildDay()
Rebuilds the current item in the scene.
def get_pid(self, name):
    """Return the path of the PID file for the named notebook."""
    work_folder = self.get_work_folder(name)
    return os.path.join(work_folder, "notebook.pid")
Get PID file name for a named notebook.
def size_in_days(self):
    """Return the number of days this period spans."""
    unit, instant, length = self
    if unit == DAY:
        return length
    if unit not in (MONTH, YEAR):
        raise ValueError("Cannot calculate number of days in {0}".format(unit))
    # The period's last day is one day before the start of the next period.
    last_day = self.start.offset(length, unit).offset(-1, DAY)
    return (last_day.date - self.start.date).days + 1
Return the size of the period in days. >>> period('month', '2012-2-29', 4).size_in_days 28 >>> period('year', '2012', 1).size_in_days 366
def _build_datasets(*args, **kwargs):
    """Collect positional and keyword data sets into one ordered mapping.

    Keys are data-set names, values are the data sets themselves.
    """
    collected = OrderedDict()
    _add_arg_datasets(collected, args)
    _add_kwarg_datasets(collected, kwargs)
    return collected
Build the datasets into a dict, where the keys are the name of the data set and the values are the data sets themselves. :param args: Tuple of unnamed data sets. :type args: `tuple` of varies :param kwargs: Dict of pre-named data sets. :type kwargs: `dict` of `unicode` to varies :return: The dataset dict. :rtype: `dict`
def _in_search_queryset(*, instance, index) -> bool:
    """Return whether *instance* is in its model's search queryset for *index*.

    Any failure is logged and treated as "not in queryset" rather than raised.
    """
    manager = instance.__class__.objects
    try:
        return manager.in_search_queryset(instance.id, index=index)
    except Exception:
        logger.exception("Error checking object in_search_queryset.")
        return False
Wrapper around the instance manager method.
def read(self):
    """Read the file and return its contents as validated JSON text.

    :raises JSONFileError: if the file is missing or not valid JSON
    """
    file_path = os.path.join(self.path, self.name)
    try:
        with open(file_path) as handle:
            text = handle.read()
    except FileNotFoundError as e:
        raise JSONFileError(e) from e
    try:
        json.loads(text)  # validate only; the raw text is what we return
    except (json.JSONDecodeError, TypeError) as e:
        raise JSONFileError(f"{e} Got {file_path}") from e
    return text
Returns the file contents as validated JSON text.
def completer_tokenize(cls, value, min_length=3):
    """Quick-and-dirty tokenizer for a completion suggester.

    Splits on spaces and apostrophes, keeps fragments strictly longer
    than *min_length*, and returns the unique collection of the original
    value, each fragment, and the fragments re-joined with spaces.
    """
    tokens = []
    for word in value.split(' '):
        tokens.extend(part for part in word.split("'") if len(part) > min_length)
    return list({value, ' '.join(tokens), *tokens})
Quick and dirty tokenizer for completion suggester
def crossplat_loop_run(coro) -> Any:
    """Run *coro* to completion on a fresh event loop, cross-platform.

    On Windows, restores default Ctrl+C handling and uses the proactor
    loop (needed for subprocess support there).
    """
    if sys.platform == 'win32':
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        loop = asyncio.ProactorEventLoop()
    else:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    try:
        return loop.run_until_complete(coro)
    finally:
        loop.close()
Cross-platform method for running a subprocess-spawning coroutine.
def getValue(self, name, scope=None, native=False):
    """Return the current value of a parameter from the GUI widgets.

    Scans the task's parameter list for a match on the fully scoped name
    (or the bare name when no scope is given) and reads the value from the
    corresponding GUI entry.  No validation is performed, and the value is
    not necessarily the same as the one saved in the model, which always
    lags behind the GUI.  Not efficient for reading all values at once.

    :param name: parameter name
    :param scope: optional scope prefix for the full name
    :param native: if True, convert the GUI string to the native type
    :raises RuntimeError: when no parameter matches
    """
    theParamList = self._taskParsObj.getParList()
    fullName = basicpar.makeFullName(scope, name)
    # entryNo[i] is the GUI entry widget paired with theParamList[i].
    for i in range(self.numParams):
        par = theParamList[i]
        entry = self.entryNo[i]
        if par.fullName() == fullName or \
           (scope is None and par.name == name):
            if native:
                return entry.convertToNative(entry.choice.get())
            else:
                return entry.choice.get()
    raise RuntimeError('Could not find par: "'+fullName+'"')
Return current par value from the GUI. This does not do any validation, and it is not necessarily the same value saved in the model, which is always behind the GUI setting, in time. This is NOT to be used to get all the values - it would not be efficient.
def directory_listing(conn: FTP, path: str) -> Tuple[List, List]:
    """Return (directories, files) for a single FTP directory listing."""
    raw_lines = deque()
    conn.dir(path, raw_lines.append)
    grouped = defaultdict(list)
    for entry_type, entry in map(parse_line, raw_lines):
        grouped[entry_type].append(entry)
    return grouped[ListingType.directory], grouped[ListingType.file]
Return the directories and files for single FTP listing.
def call_async(self, func: Callable, *args, **kwargs):
    """Call *func* in the event loop thread, resolving any awaitable result.

    Thin wrapper delegating to ``asyncio_extras.call_async`` with this
    object's loop.  Intended for use from worker threads only — do not
    call it from within the event loop thread.

    :param func: a regular function or a coroutine function
    :param args: positional arguments for the callable
    :param kwargs: keyword arguments for the callable
    :return: the return value of the call
    """
    return asyncio_extras.call_async(self.loop, func, *args, **kwargs)
Call the given callable in the event loop thread. This method lets you call asynchronous code from a worker thread. Do not use it from within the event loop thread. If the callable returns an awaitable, it is resolved before returning to the caller. :param func: a regular function or a coroutine function :param args: positional arguments to call the callable with :param kwargs: keyword arguments to call the callable with :return: the return value of the call
def showDecidePage(request, openid_request):
    """Render a page letting the user make a trust decision.

    @type openid_request: openid.server.server.CheckIDRequest
    """
    trust_root = openid_request.trust_root
    return_to = openid_request.return_to
    try:
        # verifyReturnTo yields a boolean; map it to a display string.
        # (The original used the `cond and "Valid" or "Invalid"` hack and
        # Python-2-only `except X, err` syntax.)
        trust_root_valid = "Valid" if verifyReturnTo(trust_root, return_to) else "Invalid"
    except DiscoveryFailure:
        trust_root_valid = "DISCOVERY_FAILED"
    except HTTPFetchingError:
        trust_root_valid = "Unreachable"
    pape_request = pape.Request.fromOpenIDRequest(openid_request)
    return direct_to_template(
        request,
        'server/trust.html',
        {'trust_root': trust_root,
         'trust_handler_url': getViewURL(request, processTrustResult),
         'trust_root_valid': trust_root_valid,
         'pape_request': pape_request,
         })
Render a page to the user so a trust decision can be made. @type openid_request: openid.server.server.CheckIDRequest
def _parse_subject(subject):
    """Return a dict of all attribute values present on an X509 Subject.

    Each numeric NID is processed at most once; attributes the binding
    cannot read (TypeError) are logged and skipped.
    """
    parsed = {}
    seen_nids = []
    for nid_name, nid_num in six.iteritems(subject.nid):
        if nid_num in seen_nids:
            continue
        try:
            value = getattr(subject, nid_name)
        except TypeError as err:
            log.debug("Missing attribute '%s'. Error: %s", nid_name, err)
            continue
        if value:
            parsed[nid_name] = value
        seen_nids.append(nid_num)
    return parsed
Returns a dict containing all values in an X509 Subject
def __parseKeyValueStore(self, data):
    """Parse a packed key/value store blob into a dictionary.

    *data* holds alternating key and value strings; the ``+1`` skips what
    is presumably a terminator byte after each string (depends on
    ``get_str`` — TODO confirm).  Uses ``<`` rather than the original
    ``!=`` so a malformed blob whose offsets overshoot the end terminates
    the loop instead of spinning forever.
    """
    offset = 0
    key_value_store = {}
    while offset < len(data):
        key = get_str(data, offset)
        offset += len(key) + 1
        value = get_str(data, offset)
        offset += len(value) + 1
        key_value_store[key] = value
    return key_value_store
Returns a dictionary filled with the keys and values of the key value store
def get_cell(self, row, col):
    """Return the JB_Object at the given 0-based location of the sheet.

    :param row: the 0-based index of the row
    :type row: int
    :param col: the 0-based index of the column
    :type col: int
    :return: the object in that cell
    :rtype: JB_Object
    """
    return javabridge.call(
        self.jobject, "getCell", "(II)Ljava/lang/Object;", row, col)
Returns the JB_Object at the specified location. :param row: the 0-based index of the row :type row: int :param col: the 0-based index of the column :type col: int :return: the object in that cell :rtype: JB_Object
def _install_signal_handlers(self):
    """Install SIGINT/SIGTERM handlers that request a graceful stop."""
    def request_stop(signum, frame):
        self._stop_requested = True
        self.log.info('stop requested, waiting for task to finish')
    for sig in (signal.SIGINT, signal.SIGTERM):
        signal.signal(sig, request_stop)
Sets up signal handlers for safely stopping the worker.
def send_point_data(events, additional):
    """Build data-point payloads per site and write them to InfluxDB.

    Write failures are logged per site and do not abort the other sites.
    """
    per_site = {}
    for (site, content_id), count in events.items():
        if not len(site) or not len(content_id):
            continue
        event, path = additional.get((site, content_id), (None, None))
        per_site.setdefault(site, []).append([content_id, event, path, count])
    for site, points in per_site.items():
        payload = [{
            "name": site,
            "columns": ["content_id", "event", "path", "value"],
            "points": points,
        }]
        try:
            INFLUXDB_CLIENT.write_points(payload)
        except Exception as e:
            LOGGER.exception(e)
creates data point payloads and sends them to influxdb
def on_assign(self, node):
    """Handle a simple assignment: evaluate once, bind to every target."""
    value = self.run(node.value)
    for target in node.targets:
        self.node_assign(target, value)
Simple assignment.
def get(_class, api, vid):
    """Return a Bus for vehicle ID *vid* using API instance *api*.

    Fetches the vehicle record once; the original issued the same API
    request twice and left the first result in an unused variable.
    """
    vehicle = api.vehicles(vid=vid)['vehicle']
    return _class.fromapi(api, vehicle)
Return a Bus object for a certain vehicle ID `vid` using API instance `api`.
def find_txt(xml_tree, path, default=''):
    """Extract a text value from an XML tree via XPath.

    Returns *default* when evaluating the XPath raises; returns '' when
    the XPath simply matches nothing.
    """
    value = ''
    try:
        matches = xml_tree.xpath(path)
        if len(matches) and matches[0] is not None:
            first = matches[0]
            # Element results expose .text; scalar XPath results are used as-is.
            value = first.text.strip() if isinstance(first, type(xml_tree)) else first
    except Exception:
        value = default
    return py23_compat.text_type(value)
Extracts the text value from an XML tree, using XPath. In case of error, will return a default value. :param xml_tree: the XML Tree object. Assumed is <type 'lxml.etree._Element'>. :param path: XPath to be applied, in order to extract the desired data. :param default: Value to be returned in case of error. :return: a str value.
def check_basic_auth(user, passwd):
    """Validate the current request's HTTP Basic Auth credentials."""
    auth = request.authorization
    if not auth:
        return auth  # no credentials supplied: falsy, same as the original
    return auth.username == user and auth.password == passwd
Checks user authentication using HTTP Basic Auth.
def send(self, out, addr=_MDNS_ADDR, port=_MDNS_PORT):
    """Send an outgoing packet.

    Tries each interface socket in turn and returns the byte count from
    the first successful send; returns -1 if every interface fails.
    NOTE(review): only the first interface that succeeds is used —
    confirm this is intended on multi-homed hosts.
    """
    for sock in self.intf.values():
        try:
            return sock.sendto(out.packet(), 0, (addr, port))
        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit; those now propagate.
            traceback.print_exc()
    return -1
Sends an outgoing packet.
def make_op_return_tx(data, private_key, blockchain_client=BlockchainInfoClient(),
                      fee=OP_RETURN_FEE, change_address=None, format='bin'):
    """Build and sign an OP_RETURN transaction embedding *data*.

    NOTE(review): the default ``BlockchainInfoClient()`` is constructed
    once at import time and shared across calls — confirm intended.
    """
    private_key_obj, from_address, inputs = analyze_private_key(
        private_key, blockchain_client)
    if not change_address:
        change_address = from_address
    outputs = make_op_return_outputs(data, inputs, change_address,
                                     fee=fee, format=format)
    # Sign each input in sequence, feeding the partially signed tx forward.
    # Starting signed_tx at the serialized tx also avoids the original's
    # NameError when `inputs` is empty (and replaces Python-2-only xrange).
    signed_tx = serialize_transaction(inputs, outputs)
    for i in range(len(inputs)):
        signed_tx = sign_transaction(signed_tx, i, private_key_obj.to_hex())
    return signed_tx
Builds and signs an OP_RETURN transaction.
def get_buildenv_graph(self):
    """Return the subgraph of the target graph induced by buildenv nodes.

    Collects every target's buildenv, expands the set with each buildenv's
    descendants in the target graph, and returns the induced DiGraph.
    """
    buildenvs = set(target.buildenv for target in self.targets.values() if target.buildenv)
    # reduce unions all descendant sets, seeded with the buildenvs themselves.
    return nx.DiGraph(self.target_graph.subgraph(reduce(
        lambda x, y: x | set(y),
        (get_descendants(self.target_graph, buildenv)
         for buildenv in buildenvs),
        buildenvs)))
Return a graph induced by buildenv nodes
def on_serial_port_change(self, serial_port):
    """Notify the session that a serial port's settings changed.

    in serial_port of type :class:`ISerialPort`

    raises :class:`VBoxErrorInvalidVmState` Session state prevents operation.
    raises :class:`VBoxErrorInvalidObjectState` Session type prevents operation.
    :raises TypeError: if *serial_port* is not an ISerialPort
    """
    if not isinstance(serial_port, ISerialPort):
        raise TypeError("serial_port can only be an instance of type ISerialPort")
    self._call("onSerialPortChange", in_p=[serial_port])
Triggered when settings of a serial port of the associated virtual machine have changed. in serial_port of type :class:`ISerialPort` raises :class:`VBoxErrorInvalidVmState` Session state prevents operation. raises :class:`VBoxErrorInvalidObjectState` Session type prevents operation.
def preferences(self, section=None):
    """Return registered preferences, optionally restricted to one section.

    :param section: section name, or None for all sections
    :type section: str
    :return: a list of registered preference instances
    """
    if section is not None:
        return [self[section][name] for name in self[section]]
    return [self[sec][name] for sec in self for name in self[sec]]
Return a list of all registered preferences or a list of preferences registered for a given section :param section: The section name under which the preference is registered :type section: str. :return: a list of :py:class:`prefs.BasePreference` instances
def install_hook(self, hook_name, hook_content):
    """Write a git hook for this repo and mark it read/write/executable.

    Args:
        hook_name (str): file name under .git/hooks
        hook_content (str): script body to write
    """
    hook_path = os.path.join(self.path, '.git/hooks', hook_name)
    with open(hook_path, 'w') as hook_file:
        hook_file.write(hook_content)
    mode = stat.S_IEXEC | stat.S_IREAD | stat.S_IWRITE
    os.chmod(hook_path, mode)
Install the repository hook for this repo. Args: hook_name (str) hook_content (str)
def extend_variables(raw_variables, override_variables):
    """Merge override variables over raw variables into one mapping.

    Args:
        raw_variables (list)
        override_variables (list)

    Returns:
        dict: extended variables mapping
    """
    if not raw_variables:
        return ensure_mapping_format(override_variables)
    if not override_variables:
        return ensure_mapping_format(raw_variables)
    merged = ensure_mapping_format(raw_variables)
    merged.update(ensure_mapping_format(override_variables))
    return merged
extend raw_variables with override_variables. override_variables will merge and override raw_variables. Args: raw_variables (list): override_variables (list): Returns: dict: extended variables mapping Examples: >>> raw_variables = [{"var1": "val1"}, {"var2": "val2"}] >>> override_variables = [{"var1": "val111"}, {"var3": "val3"}] >>> extend_variables(raw_variables, override_variables) { 'var1': 'val111', 'var2': 'val2', 'var3': 'val3' }
def log_file(self):
    """Return the absolute path to this job's log file.

    Defaults the configured name to "<job name>.log" and persists it.
    """
    if not self.get('log'):
        self.set('log', '%s.log' % (self.name))
    return os.path.join(self.initial_dir, self.get('log'))
The path to the log file for this job.
def _args2_fpath(dpath, fname, cfgstr, ext):
    r"""Construct a cache file path, condensing cfgstr so names stay short.

    Windows MAX_PATH is 260 characters and each path component is limited
    to 255, so long cfgstrs are shortened/hashed via consensed_cfgstr.

    Args:
        dpath (str): directory path
        fname (str): filename prefix
        cfgstr (str): configuration string (may be very long)
        ext (str): file extension; must start with a dot

    Returns:
        str: normalized file path
    """
    if len(ext) > 0 and ext[0] != '.':
        raise ValueError('Please be explicit and use a dot in ext')
    max_len = 128
    cfgstr_hashlen = 16
    prefix = fname
    fname_cfgstr = consensed_cfgstr(prefix, cfgstr, max_len=max_len,
                                    cfgstr_hashlen=cfgstr_hashlen)
    fpath = join(dpath, fname_cfgstr + ext)
    fpath = normpath(fpath)
    return fpath
r""" Ensures that the filename is not too long Internal util_cache helper function Windows MAX_PATH=260 characters Absolute length is limited to 32,000 characters Each filename component is limited to 255 characters Args: dpath (str): fname (str): cfgstr (str): ext (str): Returns: str: fpath CommandLine: python -m utool.util_cache --test-_args2_fpath Example: >>> # ENABLE_DOCTEST >>> from utool.util_cache import * # NOQA >>> from utool.util_cache import _args2_fpath >>> import utool as ut >>> dpath = 'F:\\data\\work\\PZ_MTEST\\_ibsdb\\_ibeis_cache' >>> fname = 'normalizer_' >>> cfgstr = u'PZ_MTEST_DSUUIDS((9)67j%dr%&bl%4oh4+)_QSUUIDS((9)67j%dr%&bl%4oh4+)zebra_plains_vsone_NN(single,K1+1,last,cks1024)_FILT(ratio<0.625;1.0,fg;1.0)_SV(0.01;2;1.57minIn=4,nRR=50,nsum,)_AGG(nsum)_FLANN(4_kdtrees)_FEATWEIGHT(ON,uselabel,rf)_FEAT(hesaff+sift_)_CHIP(sz450)' >>> ext = '.cPkl' >>> fpath = _args2_fpath(dpath, fname, cfgstr, ext) >>> result = str(ut.ensure_unixslash(fpath)) >>> target = 'F:/data/work/PZ_MTEST/_ibsdb/_ibeis_cache/normalizer_xfylfboirymmcpfg.cPkl' >>> ut.assert_eq(result, target)
def run_task(func):
    """Decorator running async *func* on an event loop behind a sync API.

    Use for main synchronous interface methods.
    """
    import functools

    @functools.wraps(func)  # preserve name/docstring of the wrapped function
    def _wrapped(*a, **k):
        try:
            loop = asyncio.get_event_loop()
        except RuntimeError:
            # No current loop in this thread (Python >= 3.10): create one.
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
        return loop.run_until_complete(func(*a, **k))
    return _wrapped
Decorator to wrap an async function in an event loop. Use for main sync interface methods.
def upgrade_juju(
        self, dry_run=False, reset_previous_upgrade=False,
        upload_tools=False, version=None):
    """Upgrade Juju on all machines in a model.  Not yet implemented.

    :param bool dry_run: Don't do the actual upgrade
    :param bool reset_previous_upgrade: Clear the previous (incomplete)
        upgrade status
    :param bool upload_tools: Upload local version of tools
    :param str version: Upgrade to a specific version
    """
    raise NotImplementedError()
Upgrade Juju on all machines in a model. :param bool dry_run: Don't do the actual upgrade :param bool reset_previous_upgrade: Clear the previous (incomplete) upgrade status :param bool upload_tools: Upload local version of tools :param str version: Upgrade to a specific version
def _offline_fcp_device(self, fcp, target_wwpn, target_lun, multipath): device = '0.0.%s' % fcp target = '%s:%s:%s' % (device, target_wwpn, target_lun) disk_offline = '/sbin/chzdev zfcp-lun %s -d' % target host_offline = '/sbin/chzdev zfcp-host %s -d' % fcp offline_dev = 'chccwdev -d %s' % fcp return '\n'.join((disk_offline, host_offline, offline_dev))
Build the Ubuntu shell commands that take a zFCP device offline.
def init_datamembers(self, rec):
    """Attach default containers to *rec* for each enabled optional attribute."""
    defaults = (
        ('synonym', list),
        ('xref', set),
        ('subset', set),
        ('comment', str),
    )
    for attr, factory in defaults:
        if attr in self.optional_attrs:
            setattr(rec, attr, factory())
    if 'relationship' in self.optional_attrs:
        rec.relationship = {}
        rec.relationship_rev = {}
Initialize current GOTerm with data members for storing optional attributes.
def DefaultExtension(schema_obj, form_obj, schemata=None):
    """Build the default module-extension mapping for the given schemata.

    Note: every schema key maps to the *same* extension dict object,
    mirroring the original behavior.
    """
    if schemata is None:
        schemata = ['systemconfig', 'profile', 'client']
    extension = {
        'schema': {"properties/modules": [schema_obj]},
        'form': {'modules': {'items/': form_obj}},
    }
    return dict.fromkeys(schemata, extension)
Create a default field
def _StartDebugger():
    """Configure and start the debugger hub client and breakpoints manager.

    Initializes the native module from _flags, wires the hub client's
    callbacks to the breakpoints manager, sets up authentication, and
    starts the client.  Mutates the module-level _hub_client and
    _breakpoints_manager.
    """
    global _hub_client
    global _breakpoints_manager
    cdbg_native.InitializeModule(_flags)
    _hub_client = gcp_hub_client.GcpHubClient()
    visibility_policy = _GetVisibilityPolicy()
    _breakpoints_manager = breakpoints_manager.BreakpointsManager(
        _hub_client, visibility_policy)
    capture_collector.SetLogger(logging.getLogger())
    # App Engine pretty printers improve captured-variable rendering.
    capture_collector.CaptureCollector.pretty_printers.append(
        appengine_pretty_printers.PrettyPrinter)
    _hub_client.on_active_breakpoints_changed = (
        _breakpoints_manager.SetActiveBreakpoints)
    _hub_client.on_idle = _breakpoints_manager.CheckBreakpointsExpiration
    _hub_client.SetupAuth(
        _flags.get('project_id'),
        _flags.get('project_number'),
        _flags.get('service_account_json_file'))
    _hub_client.InitializeDebuggeeLabels(_flags)
    _hub_client.Start()
Configures and starts the debugger.
def _render_binaries(files, written_files):
    """Copy binary files from source to target paths, skipping existing ones.

    :param files: mapping of source path -> target path
    :param written_files: targets already written this run; used to warn on
        collisions, and extended with every file actually copied
    """
    for source_path, target_path in files.items():
        needdir = os.path.dirname(target_path)
        assert needdir, "Target should have valid parent dir"
        try:
            os.makedirs(needdir)
        except OSError as err:
            # Ignore "already exists"; re-raise anything else.
            if err.errno != errno.EEXIST:
                raise
        if os.path.isfile(target_path):
            if target_path in written_files:
                # A file written earlier in this run is overwritten (warn only).
                LOG.warning("Previous stencil has already written file %s.",
                            target_path)
            else:
                print("Skipping existing file %s" % target_path)
                LOG.info("Skipping existing file %s", target_path)
                continue
        print("Writing rendered file %s" % target_path)
        LOG.info("Writing rendered file %s", target_path)
        shutil.copy(source_path, target_path)
        if os.path.exists(target_path):
            written_files.append(target_path)
Write binary contents from filetable into files. Using filetable for the input files, and the list of files, render all the templates into actual files on disk, forcing to overwrite the file as appropriate, and using the given open mode for the file.
def make_http_credentials(username=None, password=None):
    """Build the ``user:pass@`` auth prefix for an API URL.

    Returns '' when username is None or contains a colon; otherwise
    returns ``username[:password]@``.
    """
    if username is None or ':' in username:
        return ''
    credentials = username
    if credentials and password is not None:
        credentials += ":%s" % password
    return "%s@" % credentials
Build auth part for api_url.
def loglik(self, theta, t=None):
    """Return the log-likelihood at *theta*, summed over times 0..t.

    When t is None, sums over the full horizon (self.T - 1).  Returns a
    numpy array with one value per row of theta.
    """
    if t is None:
        t = self.T - 1
    total = np.zeros(shape=theta.shape[0])
    for step in range(t + 1):
        total += self.logpyt(theta, step)
    return total
log-likelihood at given parameter values. Parameters ---------- theta: dict-like theta['par'] is a ndarray containing the N values for parameter par t: int time (if set to None, the full log-likelihood is returned) Returns ------- l: float numpy.ndarray the N log-likelihood values
def openTypeHeadCreatedFallback(info):
    """Fallback: honor SOURCE_DATE_EPOCH (reproducible builds), else "now"."""
    from datetime import timezone
    if "SOURCE_DATE_EPOCH" in os.environ:
        # datetime.utcfromtimestamp is deprecated since Python 3.12; an
        # aware UTC datetime formats to the same wall-clock string
        # (assumes _date_format has no %z/%Z directive — TODO confirm).
        t = datetime.fromtimestamp(int(os.environ["SOURCE_DATE_EPOCH"]),
                                   tz=timezone.utc)
        return t.strftime(_date_format)
    else:
        return dateStringForNow()
Fallback to the environment variable SOURCE_DATE_EPOCH if set, otherwise now.
def get_version(tool_name, tool_command):
    """Get name and version of a tool defined by the given command.

    Args:
        tool_name (str): name of the tool.
        tool_command (str): Bash one-liner printing the tool's version.

    Returns:
        dict: {tool_name: Version} from the first matching output line,
        or empty dict when the command produced no lines.

    Exits the process when the tool is not installed.
    """
    result = {}
    for line in Bash(ShellConfig(script=tool_command, internal=True)).process():
        if line.find("command not found") >= 0:
            VersionsCheck.LOGGER.error("Required tool '%s' not found (stopping pipeline)!", tool_name)
            sys.exit(1)
        else:
            # First dotted number group on the line is taken as the version.
            # NOTE(review): IndexError if the line has no dotted number — confirm.
            version = list(re.findall(r'(\d+(\.\d+)+)+', line))[0][0]
            result = {tool_name: Version(str(version))}
            break
    return result
Get name and version of a tool defined by given command. Args: tool_name (str): name of the tool. tool_command (str): Bash one line command to get the version of the tool. Returns: dict: tool name and version or empty when no line has been found
def no_duplicates_sections2d(sections2d, prt=None):
    """Return True if no header GO ID appears in more than one section.

    Duplicates are optionally reported via *prt* (a writable stream).
    """
    counts = cx.Counter()
    for _, hdrgos in sections2d:
        counts.update(hdrgos)
    no_dups = True
    for goid, cnt in counts.most_common():
        if cnt == 1:
            break  # most_common is sorted; everything after is unique
        no_dups = False
        if prt is not None:
            prt.write("**SECTIONS WARNING FOUND: {N:3} {GO}\n".format(N=cnt, GO=goid))
    return no_dups
Check for duplicate header GO IDs in the 2-D sections variable.
def coverage():
    "generate coverage report and show in browser"
    coverage_index = path("build/coverage/index.html")
    # Remove any stale report before regenerating it via the test task.
    coverage_index.remove()
    sh("paver test")
    # Open the report only if the test run actually produced one.
    coverage_index.exists() and webbrowser.open(coverage_index)
generate coverage report and show in browser
def __instances(self):
    """Yield instances, caching them so the source generator can be re-read.

    Replays everything already cached, then continues draining the original
    iterable, appending each new item to the cache as it is yielded — so the
    cache fills as the generator empties, eventually serving reads alone.
    NOTE(review): interleaving two partially-consumed iterations of this
    generator can yield duplicates — confirm callers iterate one at a time.
    """
    for instance in self.__instances_cache:
        yield instance
    for instance in self.__instances_original:
        self.__instances_cache.append(instance)
        yield instance
Cache instances, allowing generators to be used and reused. This fills a cache as the generator gets emptied, eventually reading exclusively from the cache.
def ifo(self):
    """Return the single ifo in ifo_list; raise TypeError if there are more."""
    if len(self.ifo_list) != 1:
        message = "self.ifo_list must contain only one ifo to access the "
        message += "ifo property. %s." % (str(self.ifo_list),)
        raise TypeError(message)
    return self.ifo_list[0]
If only one ifo in the ifo_list this will be that ifo. Otherwise an error is raised.
def layer_prepostprocess(previous_value, x, sequence, dropout_rate, norm_type,
                         depth, epsilon, default_name, name=None,
                         dropout_broadcast_dims=None, layer_collection=None):
    """Apply a sequence of transforms to a layer's input or output.

    *sequence* is a string of steps applied left to right:
      a: add previous_value     z: zero_add previous_value
      n: apply normalization    d: apply dropout
    e.g. sequence=="dna" yields previous_value + normalize(dropout(x)).

    Args:
        previous_value: Tensor added as a residual ('a'/'z' steps)
        x: Tensor to transform
        sequence: step string, or "none" to return x unchanged
        dropout_rate: float dropout probability
        norm_type: normalization type (see apply_norm)
        depth: size of x's last dimension
        epsilon: normalization epsilon
        default_name: variable-scope default name
        name: optional variable-scope name
        dropout_broadcast_dims: optional list of ints < 3; dimensions to
            broadcast dropout decisions over (saves memory)
        layer_collection: tensorflow_kfac.LayerCollection (KFAC only) or None

    Returns:
        the transformed Tensor
    """
    with tf.variable_scope(name, default_name=default_name):
        if sequence == "none":
            return x
        for c in sequence:
            if c == "a":
                x += previous_value
            elif c == "z":
                x = zero_add(previous_value, x)
            elif c == "n":
                x = apply_norm(
                    x, norm_type, depth, epsilon,
                    layer_collection=layer_collection)
            else:
                # Only 'd' remains valid; anything else is a caller error.
                assert c == "d", ("Unknown sequence step %s" % c)
                x = dropout_with_broadcast_dims(
                    x, 1.0 - dropout_rate,
                    broadcast_dims=dropout_broadcast_dims)
        return x
Apply a sequence of functions to the input or output of a layer. The sequence is specified as a string which may contain the following characters: a: add previous_value n: apply normalization d: apply dropout z: zero add For example, if sequence=="dna", then the output is previous_value + normalize(dropout(x)) Args: previous_value: A Tensor, to be added as a residual connection ('a') x: A Tensor to be transformed. sequence: a string. dropout_rate: a float norm_type: a string (see apply_norm()) depth: an integer (size of last dimension of x). epsilon: a float (parameter for normalization) default_name: a string name: a string dropout_broadcast_dims: an optional list of integers less than 3 specifying in which dimensions to broadcast the dropout decisions. saves memory. layer_collection: A tensorflow_kfac.LayerCollection. Only used by the KFAC optimizer. Default is None. Returns: a Tensor
def printSysLog(self, logString):
    """Log one line at debug level, optionally capturing it in the results.

    The request id prefixes every entry.  When running under a daemon the
    daemon's logger is used instead of the local one.

    Input:
        logString: string to be logged.
    """
    if zvmsdklog.LOGGER.getloglevel() <= logging.DEBUG:
        if self.daemon == '':
            self.logger.debug(self.requestId + ": " + logString)
        else:
            self.daemon.logger.debug(self.requestId + ": " + logString)
    # Capture into the results structure regardless of log level.
    if self.captureLogs is True:
        self.results['logEntries'].append(self.requestId + ": " + logString)
    return
Log one or more lines. Optionally, add them to logEntries list. Input: Strings to be logged.
def _add(self, shard_uri, name):
    """Execute the ``addShard`` router command for *shard_uri* as *name*."""
    return self.router_command("addShard", (shard_uri, {"name": name}),
                               is_eval=False)
execute addShard command
def get_active_tasks(self):
    """Return the UUIDs of all tasks currently active on any worker."""
    active_by_host = self.celery.control.inspect().active() or dict()
    task_ids = []
    for tasks in active_by_host.values():
        task_ids.extend(task.get('id') for task in tasks)
    return task_ids
Return a list of UUIDs of active tasks.
def build_or_reuse_placeholder(tensor_spec):
    """Build a tf.placeholder from *tensor_spec*, or return the existing one.

    Args:
        tensor_spec (tf.TensorSpec): name/dtype/shape for the placeholder.

    Returns:
        tf.Tensor: a placeholder compatible with the spec.

    Raises:
        AssertionError: if a same-named tensor exists but is not a
            placeholder or is incompatible with the spec.
    """
    g = tfv1.get_default_graph()
    name = tensor_spec.name
    try:
        tensor = g.get_tensor_by_name(name + ':0')
        assert "Placeholder" in tensor.op.type, "Tensor {} exists but is not a placeholder!".format(name)
        assert tensor_spec.is_compatible_with(tensor), \
            "Tensor {} exists but is not compatible with the signature!".format(tensor)
        return tensor
    except KeyError:
        # Not found: create the placeholder at the top-level name scope so
        # its name matches the spec exactly.
        with tfv1.name_scope(None):
            ret = tfv1.placeholder(
                tensor_spec.dtype, shape=tensor_spec.shape, name=tensor_spec.name)
        return ret
Build a tf.placeholder from the metadata in the given tensor spec, or return an existing one. Args: tensor_spec (tf.TensorSpec): Returns: tf.Tensor:
def open_listing_page(trailing_part_of_url):
    """Fetch a BBC radio tracklisting page and return it as an lxml tree.

    :param trailing_part_of_url: a string like the pid, or "pid/segments.inc"
    Exits the process (status 1) on network or parse failure.
    """
    base_url = 'http://www.bbc.co.uk/programmes/'
    print("Opening web page: " + base_url + trailing_part_of_url)
    try:
        html = requests.get(base_url + trailing_part_of_url).text
    except (IOError, NameError):
        print("Error opening web page.")
        print("Check network connection and/or programme id.")
        sys.exit(1)
    try:
        return lxml.html.fromstring(html)
    except lxml.etree.ParserError:
        print("Error trying to parse web page.")
        print("Maybe there's no programme listing?")
        sys.exit(1)
Opens a BBC radio tracklisting page based on trailing part of url. Returns a lxml ElementTree derived from that page. trailing_part_of_url: a string, like the pid or e.g. pid/segments.inc
def keep_levels(self, level_indices):
    """Prune the tree rooted at this node so only the given levels remain.

    Level 0 (this node) is always kept, so an empty list leaves just this
    node with no children.  Indices that are not valid levels are ignored.
    Important: modifies the original tree in place!

    :param list level_indices: list of int level indices to keep
    :raises TypeError: if level_indices is not a list of int
    """
    if not isinstance(level_indices, list):
        self.log_exc(u"level_indices is not an instance of list", None, True, TypeError)
    for l in level_indices:
        if not isinstance(l, int):
            self.log_exc(u"level_indices contains an element not int", None, True, TypeError)
    prev_levels = self.levels
    # Normalize: always include the root level, drop out-of-range indices,
    # and process from the deepest kept level upward.
    level_indices = set(level_indices)
    if 0 not in level_indices:
        level_indices.add(0)
    level_indices = level_indices & set(range(self.height))
    level_indices = sorted(level_indices)[::-1]
    # First detach the children of every kept level...
    for l in level_indices:
        for node in prev_levels[l]:
            node.remove_children(reset_parent=False)
    # ...then re-attach each kept level's nodes to their nearest kept ancestor.
    for i in range(len(level_indices) - 1):
        l = level_indices[i]
        for node in prev_levels[l]:
            parent_node = node.ancestor(l - level_indices[i + 1])
            parent_node.add_child(node)
Rearrange the tree rooted at this node to keep only the given levels. The returned Tree will still be rooted at the current node, i.e. this function implicitly adds ``0`` to ``level_indices``. If ``level_indices`` is an empty list, only this node will be returned, with no children. Elements of ``level_indices`` that do not represent valid level indices (e.g., negative, or too large) will be ignored and no error will be raised. Important: this function modifies the original tree in place! :param list level_indices: the list of int, representing the levels to keep :raises: TypeError if ``level_indices`` is not a list or if it contains an element which is not an int
def ip():
    """Show the ip address, exiting with a red error message on failure."""
    ok, message = _hack_ip()
    if not ok:
        click.secho(click.style(message, fg='red'))
        sys.exit(1)
    click.secho(click.style(message, fg='green'))
Show ip address.
def _validate_scales(self, proposal):
    """Validate the proposed `scales` against the mark's scaled attributes.

    Every scaled trait must have a scale (unless it allows None), and each
    scale's range type must match the trait's 'rtype' metadata.

    :raises TraitError: on a missing scale or an rtype mismatch
    """
    scales = proposal.value
    for name in self.trait_names(scaled=True):
        trait = self.traits()[name]
        if name not in scales:
            if not trait.allow_none:
                raise TraitError("Missing scale for data attribute %s." % name)
        else:
            if scales[name].rtype != trait.get_metadata('rtype'):
                raise TraitError("Range type mismatch for scale %s." % name)
    return scales
Validates the `scales` based on the mark's scaled attributes metadata. First checks for missing scale and then for 'rtype' compatibility.
def has_active_subscription(self, plan=None):
    """Check whether this customer has an active subscription to *plan*.

    With plan=None: True iff exactly one valid subscription exists; raises
    TypeError when several exist (the question is ambiguous).  With a plan
    (Plan instance or plan id string): True iff any subscription to that
    plan is valid.

    :param plan: the plan to check, or None
    :type plan: Plan or string (plan ID)
    :returns: True if an active subscription exists, False otherwise
    :throws: TypeError if plan is None and multiple valid subscriptions exist
    """
    if plan is None:
        valid_subscriptions = self._get_valid_subscriptions()
        if len(valid_subscriptions) == 0:
            return False
        elif len(valid_subscriptions) == 1:
            return True
        else:
            raise TypeError(
                "plan cannot be None if more than one valid subscription exists for this customer."
            )
    else:
        # Accept either a Plan model instance or a plan id string.
        if isinstance(plan, StripeModel):
            plan = plan.id
        return any(
            [
                subscription.is_valid()
                for subscription in self.subscriptions.filter(plan__id=plan)
            ]
        )
Checks to see if this customer has an active subscription to the given plan. :param plan: The plan for which to check for an active subscription. If plan is None and there exists only one active subscription, this method will check if that subscription is valid. Calling this method with no plan and multiple valid subscriptions for this customer will throw an exception. :type plan: Plan or string (plan ID) :returns: True if there exists an active subscription, False otherwise. :throws: TypeError if ``plan`` is None and more than one active subscription exists for this customer.
def _paramf16(ins):
    """Push a 32-bit fixed-point parameter (DE:HL) onto the stack."""
    output = _f16_oper(ins.quad[1])
    output.extend(('push de', 'push hl'))
    return output
Pushes 32bit fixed point param into the stack
def match_entry_line(str_to_match, regex_obj=MAIN_REGEX_OBJ):
    """Match a mount-entry line against *regex_obj* and return its groups.

    :raises UnrecognizedMountEntry: when the line does not match
    """
    matched = regex_obj.match(str_to_match)
    if matched is None:
        raise UnrecognizedMountEntry(
            ('Line "%s" is unrecognized by overlay4u. '
             'This is only meant for use with Ubuntu Linux.') % str_to_match)
    return matched.groupdict()
Does a regex match of the mount entry string
def execute_on_keys(self, keys, entry_processor):
    """Apply *entry_processor* to the entries for *keys*; return the results.

    :param keys: (Collection), keys for the entries to be processed;
        none may be None.
    :param entry_processor: (object), stateful serializable object with a
        matching ``org.hazelcast.map.EntryProcessor`` registered server side.
    :return: (Sequence), map entries with the keys and processing results.
    """
    # Short-circuit before doing any serialization work; the original
    # serialized every key and only then checked for emptiness.
    if len(keys) == 0:
        return ImmediateFuture([])
    key_list = []
    for key in keys:
        check_not_none(key, "key can't be None")
        key_list.append(self._to_data(key))
    return self._encode_invoke(map_execute_on_keys_codec,
                               entry_processor=self._to_data(entry_processor),
                               keys=key_list)
Applies the user defined EntryProcessor to the entries mapped by the collection of keys. Returns the results mapped by each key in the collection. :param keys: (Collection), collection of the keys for the entries to be processed. :param entry_processor: (object), A stateful serializable object which represents the EntryProcessor defined on server side. This object must have a serializable EntryProcessor counter part registered on server side with the actual ``org.hazelcast.map.EntryProcessor`` implementation. :return: (Sequence), list of map entries which includes the keys and the results of the entry process.
def resolver(schema):
    """Default schema-name resolver: strip a trailing "Schema" suffix."""
    name = schema.__name__
    if not name.endswith("Schema"):
        return name
    stripped = name[:-len("Schema")]
    # A class named exactly "Schema" keeps its full name.
    return stripped or name
Default implementation of a schema name resolver function
def _parse_or_match(self, text, pos, method_name):
    """Run the named parse/match on the default grammar, then visit the tree.

    :raises RuntimeError: if no default grammar is associated with the class
    """
    if not self.grammar:
        raise RuntimeError(
            "The {cls}.{method}() shortcut won't work because {cls} was "
            "never associated with a specific "
            "grammar. Fill out its "
            "`grammar` attribute, and try again.".format(
                cls=self.__class__.__name__, method=method_name))
    tree = getattr(self.grammar, method_name)(text, pos=pos)
    return self.visit(tree)
Execute a parse or match on the default grammar, followed by a visitation. Raise RuntimeError if there is no default grammar specified.
def cross_boundary(prev_obj, obj):
    """Check permissions when traversal moves from *prev_obj* to *obj*.

    No-op on the first hop (prev_obj is None).  Secured attributes get
    their parent recorded; objects carrying pecan metadata trigger a
    security check unless *obj* is explicitly in their 'unlocked' list.
    """
    if prev_obj is None:
        return
    if isinstance(obj, _SecuredAttribute):
        obj.parent = prev_obj
    if hasattr(prev_obj, '_pecan'):
        if obj not in prev_obj._pecan.get('unlocked', []):
            handle_security(prev_obj)
Check permissions as we move between object instances.
def arbiter(rst, clk, req_vec, gnt_vec=None, gnt_idx=None, gnt_vld=None,
            gnt_rdy=None, ARBITER_TYPE="priority"):
    """Common front-end wrapper for the available arbiter implementations.

    :param ARBITER_TYPE: "priority" or "roundrobin"
    :raises ValueError: for an unknown arbiter type.  (The original used
        ``assert "<message>"``, which asserts a truthy string and never
        fires, then crashed with NameError on the unbound result.)
    """
    if ARBITER_TYPE == "priority":
        return arbiter_priority(req_vec, gnt_vec, gnt_idx, gnt_vld)
    if ARBITER_TYPE == "roundrobin":
        return arbiter_roundrobin(rst, clk, req_vec, gnt_vec, gnt_idx,
                                  gnt_vld, gnt_rdy)
    raise ValueError("Arbiter: Unknown arbiter type: {}".format(ARBITER_TYPE))
Wrapper that provides common interface to all arbiters
def _make_tonnetz_matrix():
    """Return the 6x12 tonnetz projection matrix.

    Each chroma bin (0-11) is mapped to sin/cos coordinates on three
    circles — fifths, minor thirds, major thirds — whose radii come from
    the module-level r_* constants.
    """
    pi = np.pi
    chroma = np.arange(12)
    # Circle of fifths: angle advances 7*pi/6 per chroma bin.
    fifth_x = r_fifth*(np.sin((7*pi/6) * chroma))
    fifth_y = r_fifth*(np.cos((7*pi/6) * chroma))
    # Minor thirds: 3*pi/2 per bin; major thirds: 2*pi/3 per bin.
    minor_third_x = r_minor_thirds*(np.sin(3*pi/2 * chroma))
    minor_third_y = r_minor_thirds*(np.cos(3*pi/2 * chroma))
    major_third_x = r_major_thirds*(np.sin(2*pi/3 * chroma))
    major_third_y = r_major_thirds*(np.cos(2*pi/3 * chroma))
    return np.vstack((fifth_x, fifth_y,
                      minor_third_x, minor_third_y,
                      major_third_x, major_third_y))
Return the tonnetz projection matrix.
def delete(self, storagemodel):
    """Delete the table entity backing *storagemodel*; return the model.

    A missing entity is logged at debug level and ignored.  The original
    returned from ``finally``, which silently swallowed *every* exception
    (not just AzureMissingResourceHttpError); unexpected errors now
    propagate.
    """
    modeldefinition = self.getmodeldefinition(storagemodel, True)
    pk = storagemodel.getPartitionKey()
    rk = storagemodel.getRowKey()
    try:
        modeldefinition['tableservice'].delete_entity(
            modeldefinition['tablename'], pk, rk)
        storagemodel._exists = False
    except AzureMissingResourceHttpError as e:
        log.debug('can not delete table entity: Table {}, PartitionKey {}, RowKey {} because {!s}'.format(
            modeldefinition['tablename'], pk, rk, e))
    return storagemodel
delete existing Entity
def _augment(self):
    """Augment the matching along a minimum-cost path found by _build_tree.

    Updates dual values for the ready set, then walks predecessor links
    from column j back to the root row istar, flipping the matching on
    each edge, and finally refreshes credit bookkeeping.
    """
    _pred, _ready, istar, j, mu = self._build_tree()
    # Dual update for all nodes made ready during the tree build.
    self._v[_ready] += self._d[_ready] - mu
    while True:
        i = _pred[j]
        self._y[j] = i
        # Swap the column matched to row i with j (alternating-path flip).
        k = j
        j = self._x[i]
        self._x[i] = k
        if i == istar:
            break
    self._update_cred()
Finds a minimum cost path and adds it to the matching
def diff_archives(archive1, archive2, verbosity=0, interactive=True):
    """Print differences between two archives (quiet when verbosity < 0)."""
    util.check_existing_filename(archive1)
    util.check_existing_filename(archive2)
    quiet = verbosity < 0
    if not quiet:
        util.log_info("Comparing %s with %s ..." % (archive1, archive2))
    res = _diff_archives(archive1, archive2, verbosity=verbosity,
                         interactive=interactive)
    if res == 0 and not quiet:
        util.log_info("... no differences found.")
Print differences between two archives.
def clear_on_run(self, prefix="Running Tests:"):
    """Clear the console and optionally print a heading before tests run."""
    command = 'cls' if platform.system() == 'Windows' else 'clear'
    os.system(command)
    if prefix:
        print(prefix)
Clears console before running the tests.
def as_percent(self, precision=2, *args, **kwargs):
    """Format the subset as percentages with the given decimal precision.

    :param precision: Decimal precision
    """
    # Note: as_percent(precision) here is the module-level formatter helper,
    # not recursion.  NOTE(review): args/kwargs are handed to Formatter as
    # plain tuples, not unpacked — confirm Formatter expects that.
    f = Formatter(as_percent(precision), args, kwargs)
    return self._add_formatter(f)
Format subset as percentages :param precision: Decimal precision :param subset: Pandas subset
def generic_find_fk_constraint_name(table, columns, referenced, insp):
    """Utility to find a foreign-key constraint name in alembic migrations.

    :param table: name of the table holding the constraint.
    :param columns: set of referred column names that must match exactly.
    :param referenced: name of the referred table.
    :param insp: inspector exposing ``get_foreign_keys(table)``.
    :return: the first matching constraint name, or None.
    """
    matches = (
        fk['name']
        for fk in insp.get_foreign_keys(table)
        if fk['referred_table'] == referenced
        and set(fk['referred_columns']) == columns
    )
    return next(matches, None)
Utility to find a foreign-key constraint name in alembic migrations
def _get_server(self):
    """Get server to use for request.

    Also processes the inactive server list, re-adding servers to the
    active pool once ``retry_interval`` seconds have elapsed since they
    were marked inactive.

    :return: the server at the head of the active pool.
    """
    with self._lock:
        # Examine each currently-inactive server exactly once per call.
        inactive_server_count = len(self._inactive_servers)
        for i in range(inactive_server_count):
            try:
                ts, server, message = heapq.heappop(self._inactive_servers)
            except IndexError:
                pass
            else:
                if (ts + self.retry_interval) > time():
                    # Retry interval not reached yet: keep it inactive.
                    heapq.heappush(self._inactive_servers, (ts, server, message))
                else:
                    self._active_servers.append(server)
                    # logger.warn is a deprecated alias; use warning().
                    logger.warning("Restored server %s into active pool", server)

        # if none is old enough, use oldest
        if not self._active_servers:
            ts, server, message = heapq.heappop(self._inactive_servers)
            self._active_servers.append(server)
            logger.info("Restored server %s into active pool", server)

        server = self._active_servers[0]
        self._roundrobin()

        return server
Get server to use for request. Also process inactive server list, re-add them after given interval.
def is_valid_group(group_name, nova_creds):
    """Checks to see if the configuration file contains a SUPERNOVA_GROUP
    configuration option matching ``group_name``.

    The special group name 'all' is always considered valid.

    :param group_name: group to look up.
    :param nova_creds: mapping of environment name to its config dict.
    :return: True when the group exists, False otherwise.
    """
    valid_groups = ['all']
    for environment in nova_creds.values():
        groups = environment.get('SUPERNOVA_GROUP', [])
        # A bare string is a single group name; normalize it to a list.
        if hasattr(groups, 'startswith'):
            groups = [groups]
        valid_groups.extend(groups)
    return group_name in valid_groups
Checks to see if the configuration file contains a SUPERNOVA_GROUP configuration option.
def cal_model_performance(obsl, siml):
    """Calculate model performance indexes and print them.

    :param obsl: observed value series.
    :param siml: simulated value series.
    """
    nse = MathClass.nashcoef(obsl, siml)
    r2 = MathClass.rsquare(obsl, siml)
    rmse = MathClass.rmse(obsl, siml)
    pbias = MathClass.pbias(obsl, siml)
    rsr = MathClass.rsr(obsl, siml)
    summary = ('NSE: %.2f, R-square: %.2f, PBIAS: %.2f%%, RMSE: %.2f, RSR: %.2f'
               % (nse, r2, pbias, rmse, rsr))
    print(summary)
Calculate model performance indexes.
def validate(bundle):
    """Validate a bundle object and all of its components.

    The bundle must be passed as a YAML decoded object.

    :param bundle: decoded bundle object.
    :return: a list of bundle errors, or an empty list if valid.
    """
    errors = []
    series, services, machines, relations = _validate_sections(
        bundle, errors.append)
    if errors:
        # Section-level problems make component validation meaningless.
        return errors
    _validate_series(series, 'bundle', errors.append)
    _validate_services(services, machines, errors.append)
    _validate_machines(machines, errors.append)
    _validate_relations(relations, services, errors.append)
    return errors
Validate a bundle object and all of its components. The bundle must be passed as a YAML decoded object. Return a list of bundle errors, or an empty list if the bundle is valid.
def _ProcessEntries(self, fd):
    """Extract entries from the xinetd config files.

    :param fd: file-like object holding an xinetd configuration.
    """
    parser = config_file.KeyValueParser(kv_sep="{", term="}", sep=None)
    data = utils.ReadFileBytesAsUnicode(fd)
    for entry in parser.ParseEntries(data):
        for section, cfg in iteritems(entry):
            # The body arrives as a one-element list, or an empty list
            # for an empty section.
            body = cfg[0].strip() if cfg else ""
            self._ParseSection(section, body)
Extract entries from the xinetd config files.
def search_string(self):
    """Returns the JSON string that LendingClub expects for its search.

    :return: the compacted JSON string, or False when the template
        renders to nothing.
    """
    self.__normalize()
    # Use a context manager so the template file handle is always closed
    # (the original call to open() leaked it).
    with open(self.tmpl_file) as tmpl_fd:
        tmpl_source = unicode(tmpl_fd.read())
    compiler = Compiler()
    template = compiler.compile(tmpl_source)
    out = template(self)
    if not out:
        return False
    out = ''.join(out)
    # Collapse the rendered template into compact JSON: drop newlines,
    # squeeze whitespace runs, strip trailing commas, and remove spacing
    # around JSON punctuation.  Raw strings avoid the invalid escape
    # sequences present in the original patterns.
    out = re.sub(r'\n', '', out)
    out = re.sub(r'\s{3,}', ' ', out)
    out = re.sub(r',\s*([}\]])', r'\1', out)
    out = re.sub(r'([{\[}\]])(,?)\s*([{\[}\]])', r'\1\2\3', out)
    out = re.sub(r'\s*([{\[\]}:,])\s*', r'\1', out)
    return out
Returns the JSON string that LendingClub expects for its search
def install(plugin_name, *args, **kwargs):
    """Install plugin packages based on specified Conda channels.

    :param plugin_name: plugin package name (or list of names); version
        specifiers such as ``package >=1.0.5`` are supported.
    :param args: extra arguments passed to the Conda ``install`` command.
    :return: Conda installation log object (parsed JSON output).
    """
    # Accept a single package name as well as a list of names.
    if isinstance(plugin_name, types.StringTypes):
        plugin_name = [plugin_name]
    conda_args = ['install', '-y', '--json'] + list(args) + plugin_name
    install_log_js = ch.conda_exec(*conda_args, verbose=False)
    # Conda may emit several NUL-separated JSON documents; the final one
    # is the actual install log.
    install_log = json.loads(install_log_js.split('\x00')[-1])
    # Record rollback info only for real (non dry-run) installs that
    # actually performed actions.
    if 'actions' in install_log and not install_log.get('dry_run'):
        _save_action({'conda_args': conda_args, 'install_log': install_log})
        logger.debug('Installed plugin(s): ```%s```', install_log['actions'])
    return install_log
Install plugin packages based on specified Conda channels. .. versionchanged:: 0.19.1 Do not save rollback info on dry-run. .. versionchanged:: 0.24 Remove channels argument. Use Conda channels as configured in Conda environment. Note that channels can still be explicitly set through :data:`*args`. Parameters ---------- plugin_name : str or list Plugin package(s) to install. Version specifiers are also supported, e.g., ``package >=1.0.5``. *args Extra arguments to pass to Conda ``install`` command. Returns ------- dict Conda installation log object (from JSON Conda install output).
def compute_permutation_for_rotation(positions_a, positions_b, lattice, symprec):
    """Get the overall permutation such that

        positions_a[perm[i]] == positions_b[i]   (modulo the lattice)

    or in numpy speak, positions_a[perm] == positions_b (modulo the
    lattice).  This version is optimized for the case where positions_a
    and positions_b are related by a rotation.

    :param positions_a: fractional coordinates (one site per row).
    :param positions_b: fractional coordinates (one site per row).
    :param lattice: lattice vectors, applied via ``lattice.T``.
    :param symprec: distance tolerance for the matching step.
    :return: integer permutation array.
    """
    def _sorted_by_lattice_distance(fracs):
        # Order sites by squared Cartesian distance from the origin (after
        # wrapping into the cell) so both inputs reach the expensive
        # matching step in a comparable order.
        carts = np.dot(fracs - np.rint(fracs), lattice.T)
        order = np.argsort(np.sum(carts ** 2, axis=1))
        return order, np.array(fracs[order], dtype='double', order='C')

    perm_a, sorted_a = _sorted_by_lattice_distance(positions_a)
    perm_b, sorted_b = _sorted_by_lattice_distance(positions_b)
    perm_between = _compute_permutation_c(sorted_a, sorted_b, lattice, symprec)
    # Undo both sorts: compose perm_a . perm_between . inverse(perm_b).
    return perm_a[perm_between][np.argsort(perm_b)]
Get the overall permutation such that positions_a[perm[i]] == positions_b[i] (modulo the lattice) or in numpy speak, positions_a[perm] == positions_b (modulo the lattice) This version is optimized for the case where positions_a and positions_b are related by a rotation.
def _get_upstream(self):
    """Return the remote and remote merge branch for the current branch.

    Results are cached on ``self._remote`` / ``self._branch``; git config
    is consulted only when either is missing.

    :raises Scm.LocalException: if the local branch cannot be determined
        or a git config lookup fails.
    """
    if not self._remote or not self._branch:
        branch = self.branch_name
        if not branch:
            raise Scm.LocalException('Failed to determine local branch')

        def get_local_config(key):
            # Read one value from the repository-local git config.
            value = self._check_output(['config', '--local', '--get', key],
                                       raise_type=Scm.LocalException)
            return value.strip()

        # ``or`` keeps any value that was already cached.
        self._remote = self._remote or get_local_config('branch.{}.remote'.format(branch))
        self._branch = self._branch or get_local_config('branch.{}.merge'.format(branch))
    return self._remote, self._branch
Return the remote and remote merge branch for the current branch
def getVals(self):
    """Returns value list for Munin Graph.

    @return: List of (name, value) pairs; value is None for names that
             are missing from the field dict.
    """
    values = []
    for name in self._fieldNameList:
        values.append((name, self._fieldValDict.get(name)))
    return values
Returns value list for Munin Graph @return: List of name-value pairs.
def _fetch_size(self, request: Request) -> int:
    """Return size of file, or None if the server cannot report it.

    Coroutine.
    """
    try:
        file_size = yield from self._commander.size(request.file_path)
    except FTPServerError:
        # Server could not provide a size; caller gets None.
        return
    return file_size
Return size of file. Coroutine.
def onScreen(x, y=None):
    """Returns whether the given xy coordinates are on the screen or not.

    Args:
      Either two separate values (x then y), or a single sequence
      argument holding both, e.g. onScreen(x, y) or onScreen([x, y]).

    Returns:
      bool: True if the coordinates are within the screen's current
      resolution, otherwise False.
    """
    x, y = _unpackXY(x, y)
    width, height = platformModule._size()
    return 0 <= int(x) < width and 0 <= int(y) < height
Returns whether the given xy coordinates are on the screen or not. Args: Either the arguments are two separate values, first arg for x and second for y, or there is a single argument of a sequence with two values, the first x and the second y. Example: onScreen(x, y) or onScreen([x, y]) Returns: bool: True if the xy coordinates are on the screen at its current resolution, otherwise False.
def create_task(self, ):
    """Create a task and store it in self.task.

    On success the dialog is accepted; on failure the error is logged
    and the dialog stays open.

    :returns: None
    :rtype: None
    :raises: None
    """
    depi = self.dep_cb.currentIndex()
    assert depi >= 0
    dep = self.deps[depi]
    deadline = self.deadline_de.dateTime().toPython()
    try:
        task = djadapter.models.Task(department=dep,
                                     project=self.element.project,
                                     element=self.element,
                                     deadline=deadline)
        task.save()
        self.task = task
        self.accept()
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit and
        # KeyboardInterrupt are no longer swallowed.
        log.exception("Could not create new task")
Create a task and store it in the self.task :returns: None :rtype: None :raises: None
def pipeline_status(url, pipeline_id, auth, verify_ssl):
    """Retrieve the current status for a pipeline.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of of the exported pipeline.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates

    Returns:
        dict: the response json
    """
    # Build the URL once so the request and the debug log cannot diverge
    # (the original logged a URL that omitted the pipeline ID).
    status_url = url + '/' + pipeline_id + '/status'
    status_result = requests.get(status_url, headers=X_REQ_BY, auth=auth,
                                 verify=verify_ssl)
    status_result.raise_for_status()
    logging.debug('Status request: ' + status_url)
    logging.debug(status_result.json())
    return status_result.json()
Retrieve the current status for a pipeline. Args: url (str): the host url in the form 'http://host:port/'. pipeline_id (str): the ID of of the exported pipeline. auth (tuple): a tuple of username, and password. verify_ssl (bool): whether to verify ssl certificates Returns: dict: the response json
def parse_numeric_code(self, force_hex=False):
    """Parses and returns the numeric code as an integer.

    The numeric code can be either base 10 or base 16, depending on
    where the message came from.

    :param force_hex: force the numeric code to be processed as base 16.
    :type force_hex: boolean

    :raises: ValueError
    """
    if not force_hex:
        try:
            return int(self.numeric_code)
        except ValueError:
            # Not valid base 10; fall through to the base-16 parse.
            pass
    # Let a ValueError here propagate to the caller (the original wrapped
    # this in a redundant try/except that simply re-raised).
    return int(self.numeric_code, 16)
Parses and returns the numeric code as an integer. The numeric code can be either base 10 or base 16, depending on where the message came from. :param force_hex: force the numeric code to be processed as base 16. :type force_hex: boolean :raises: ValueError
def rotate_x(self, deg):
    """Rotate mesh around x-axis.

    :param float deg: Rotation angle (degree)
    :return: self, with ``vectors`` rotated in place.
    """
    rad = math.radians(deg)
    cos_a = math.cos(rad)
    sin_a = math.sin(rad)
    # Homogeneous 4x4 rotation applied on the right of the vectors.
    rotation = numpy.array([
        [1.0, 0.0, 0.0, 0.0],
        [0.0, cos_a, sin_a, 0.0],
        [0.0, -sin_a, cos_a, 0.0],
        [0.0, 0.0, 0.0, 1.0],
    ])
    self.vectors = self.vectors.dot(rotation)
    return self
Rotate mesh around x-axis :param float deg: Rotation angle (degree) :return:
def add_voice_call_api(mock):
    """Add org.ofono.VoiceCallManager API to a mock.

    Installs the EmergencyNumbers property, a ``calls`` list consumed by
    GetCalls, and the VoiceCallManager D-Bus methods (several of which
    are stubbed out with NOT_IMPLEMENTED).
    """
    mock.AddProperty('org.ofono.VoiceCallManager', 'EmergencyNumbers',
                     ['911', '13373'])
    # Object paths of active calls; read by the GetCalls implementation.
    mock.calls = []
    mock.AddMethods('org.ofono.VoiceCallManager', [
        # Entries are (name, in_signature, out_signature, python body).
        ('GetProperties', '', 'a{sv}',
         'ret = self.GetAll("org.ofono.VoiceCallManager")'),
        ('Transfer', '', '', ''),
        ('SwapCalls', '', '', ''),
        ('ReleaseAndAnswer', '', '', ''),
        ('ReleaseAndSwap', '', '', ''),
        ('HoldAndAnswer', '', '', ''),
        ('SendTones', 's', '', ''),
        ('PrivateChat', 'o', 'ao', NOT_IMPLEMENTED),
        ('CreateMultiparty', '', 'o', NOT_IMPLEMENTED),
        ('HangupMultiparty', '', '', NOT_IMPLEMENTED),
        ('GetCalls', '', 'a(oa{sv})',
         'ret = [(c, objects[c].GetAll("org.ofono.VoiceCall")) for c in self.calls]')
    ])
Add org.ofono.VoiceCallManager API to a mock
def dataframe(self, spark, group_by='greedy', limit=None, sample=1, seed=42,
              decode=None, summaries=None, schema=None, table_name=None):
    """Convert the RDD returned by ``records`` to a Spark DataFrame.

    :param spark: a SparkSession object
    :param group_by: partition strategy for the objects
    :param limit: maximum number of objects to retrieve
    :param sample: fraction of results to return (ignored when limit set)
    :param seed: seed for reproducible sampling; None for random samples
    :param decode: optional transformation applied to retrieved objects
    :param summaries: iterable of per-item summaries; None to compute
    :param schema: Spark schema overriding automatic conversion
    :param table_name: register the dataframe as a SparkSQL temp view
    :return: a Spark DataFrame
    """
    rdd = self.records(spark.sparkContext, group_by, limit, sample, seed,
                       decode, summaries)
    if schema:
        df = spark.createDataFrame(rdd, schema=schema)
    else:
        # No explicit schema: infer one by converting each dict to a Row.
        df = rdd.map(lambda d: Row(**d)).toDF()
    if table_name:
        df.createOrReplaceTempView(table_name)
    return df
Convert RDD returned from records function to a dataframe :param spark: a SparkSession object :param group_by: specifies a partition strategy for the objects :param limit: maximum number of objects to retrieve :param decode: an optional transformation to apply to the objects retrieved :param sample: percentage of results to return. Useful to return a sample of the dataset. This parameter is ignored when 'limit' is set. :param seed: initialize internal state of the random number generator (42 by default). This is used to make the dataset sampling reproducible. It can be set to None to obtain different samples. :param summaries: an iterable containing the summary for each item in the dataset. If None, it will compute calling the summaries dataset. :param schema: a Spark schema that overrides automatic conversion to a dataframe :param table_name: allows resulting dataframe to easily be queried using SparkSQL :return: a Spark DataFrame
def gpg_error(exception, message):
    """Handles the output of subprocess errors in a way that is
    compatible with the log level.

    :param exception: subprocess error carrying ``cmd`` and ``output``.
    :param message: text for the raised error.
    :raises CryptoritoError: always.
    """
    command = ' '.join(str(part) for part in exception.cmd)
    LOGGER.debug("GPG Command %s", command)
    LOGGER.debug("GPG Output %s", exception.output)
    raise CryptoritoError(message)
Handles the output of subprocess errors in a way that is compatible with the log level
def close_db():
    """Close the connection to the database opened in `connect_db`.

    A no-op when no connection is currently open.
    """
    db = repo.Repo.db
    if db is None:
        return
    db.close()
    # Drop the shared handle everywhere it was published.
    repo.Repo.db = None
    base.Repo.db = None
    query.Repo.db = None
Close the connection to the database opened in `connect_db`
def delete(self, *, page_size=DEFAULT_BATCH_SIZE, **options):
    """Deletes all the entities that match this query.

    Note:
      Since Datastore doesn't provide a native way to delete entities by
      query, this method paginates through all the entities' keys and
      issues a single delete_multi call per page.

    Parameters:
      page_size(int, optional): number of keys fetched and deleted per
        batch.
      \**options(QueryOptions, optional)

    Returns:
      int: The number of deleted entities.
    """
    # Imported here to avoid a circular import with the model module.
    from .model import delete_multi
    deleted = 0
    # NOTE(review): the caller-supplied **options are overwritten here
    # and never reach paginate -- confirm whether they should be merged
    # into the keys_only QueryOptions instead.
    options = QueryOptions(self).replace(keys_only=True)
    for page in self.paginate(page_size=page_size, **options):
        keys = list(page)
        # Counted before delete_multi; assumes the call either deletes
        # the whole page or raises -- TODO confirm.
        deleted += len(keys)
        delete_multi(keys)
    return deleted
Deletes all the entities that match this query. Note: Since Datastore doesn't provide a native way to delete entities by query, this method paginates through all the entities' keys and issues a single delete_multi call per page. Parameters: \**options(QueryOptions, optional) Returns: int: The number of deleted entities.
def wr_xlsx(self, fout_xlsx):
    """Print grouped GOEA results into an xlsx file.

    :param fout_xlsx: output xlsx filename.
    """
    writer = WrXlsxSortedGos("GOEA", self.sortobj)
    # Render the p-value fields (names starting with 'p_') in
    # scientific notation.
    pval_fmt = {fld: '{:8.2e}' for fld in self.flds_cur if fld[:2] == 'p_'}
    writer.wr_xlsx_nts(fout_xlsx, self.desc2nts,
                       title=self.ver_list,
                       fld2fmt=pval_fmt,
                       prt_flds=self.flds_cur)
Print grouped GOEA results into an xlsx file.
def get_top_level_forum_url(self):
    """Returns the URL of the parent forum from which forums are marked
    as read, falling back to the forum index when there is no
    top-level forum.
    """
    if self.top_level_forum is None:
        return reverse('forum:index')
    return reverse('forum:forum', kwargs={
        'slug': self.top_level_forum.slug,
        'pk': self.kwargs['pk'],
    })
Returns the parent forum from which forums are marked as read.
def set_opengl_implementation(option):
    """Set the OpenGL implementation used by Spyder.

    See issue 7447 for the details.

    :param option: one of 'software', 'desktop' or 'gles'; anything else
        is ignored.
    """
    attributes = {
        'software': Qt.AA_UseSoftwareOpenGL,
        'desktop': Qt.AA_UseDesktopOpenGL,
        'gles': Qt.AA_UseOpenGLES,
    }
    if option not in attributes:
        return
    QCoreApplication.setAttribute(attributes[option])
    # Adjust the QtQuick scene graph backend only when QtQuick is
    # importable in this environment.
    if QQuickWindow is not None:
        backend = (QSGRendererInterface.Software if option == 'software'
                   else QSGRendererInterface.OpenGL)
        QQuickWindow.setSceneGraphBackend(backend)
Set the OpenGL implementation used by Spyder. See issue 7447 for the details.
def semimajor(P, M):
    """Compute the semimajor axis via Kepler's third law.

    P, M can be ``Quantity`` objects; otherwise default to day, M_sun.

    :param P: orbital period (interpreted as days when unitless).
    :param M: total mass (interpreted as solar masses when unitless).
    :return: semimajor axis as an AU ``Quantity``.
    """
    # isinstance (rather than a type equality check) also accepts
    # Quantity subclasses.
    if not isinstance(P, Quantity):
        P = P * u.day
    if not isinstance(M, Quantity):
        M = M * u.M_sun
    a = ((P / 2 / np.pi) ** 2 * const.G * M) ** (1. / 3)
    return a.to(u.AU)
P, M can be ``Quantity`` objects; otherwise default to day, M_sun
def load_from_args(args):
    """Given parsed commandline arguments, returns a list of ReadSource
    objects, or None when no read files were specified.
    """
    if not args.reads:
        return None

    if args.read_source_name:
        read_source_names = util.expand(
            args.read_source_name,
            'read_source_name',
            'read source',
            len(args.reads))
    else:
        read_source_names = util.drop_prefix(args.reads)

    # One filter callable per read-filter argument that was supplied.
    filters = [
        functools.partial(info[-1], getattr(args, name))
        for (name, info) in READ_FILTERS.items()
        if getattr(args, name) is not None
    ]

    return [load_bam(filename, name, filters)
            for (filename, name) in zip(args.reads, read_source_names)]
Given parsed commandline arguments, returns a list of ReadSource objects
def find_connectable_ip(host, port=None):
    """Resolve a hostname to an IP, preferring IPv4 addresses.

    We prefer IPv4 so that we don't change behavior from previous
    IPv4-only implementations, and because some drivers (e.g.,
    FirefoxDriver) do not support IPv6 connections.

    If the optional port number is provided, only IPs that listen on the
    given port are considered.

    :Args:
     - host - A hostname.
     - port - Optional port number.

    :Returns:
        A single IP address, as a string. If any IPv4 address is found,
        one is returned. Otherwise, if any IPv6 address is found, one is
        returned. If neither, then None is returned.
    """
    try:
        addrinfos = socket.getaddrinfo(host, None)
    except socket.gaierror:
        return None

    fallback_ipv6 = None
    for family, _, _, _, sockaddr in addrinfos:
        candidate = sockaddr[0]
        # Without a port, every resolved address counts as connectable.
        if port and not is_connectable(port, candidate):
            continue
        if family == socket.AF_INET:
            return candidate
        if family == socket.AF_INET6 and fallback_ipv6 is None:
            fallback_ipv6 = candidate
    return fallback_ipv6
Resolve a hostname to an IP, preferring IPv4 addresses. We prefer IPv4 so that we don't change behavior from previous IPv4-only implementations, and because some drivers (e.g., FirefoxDriver) do not support IPv6 connections. If the optional port number is provided, only IPs that listen on the given port are considered. :Args: - host - A hostname. - port - Optional port number. :Returns: A single IP address, as a string. If any IPv4 address is found, one is returned. Otherwise, if any IPv6 address is found, one is returned. If neither, then None is returned.
def set_value(self, var_name, value):
    """Set the value of a given variable to a given value.

    Parameters
    ----------
    var_name : str
        The name of the variable in the model whose value should be set.
    value : float
        The value the variable should be set to
    """
    # Translate an external variable name into the model's internal one.
    var_name = self.outside_name_map.get(var_name, var_name)
    print('%s=%.5f' % (var_name, 1e9 * value))
    if var_name == 'Precipitation':
        # Precipitation arrives scaled down by 1e9 -- presumably a unit
        # conversion; confirm against the caller.
        value = 1e9 * value
    self.state[self.species_name_map[var_name]] = value
Set the value of a given variable to a given value. Parameters ---------- var_name : str The name of the variable in the model whose value should be set. value : float The value the variable should be set to
def set_scale_alpha_from_selection(self):
    """Set scale marker to alpha for selected layer.

    With no selection the slider resets to 100 and is disabled.
    """
    selection = self.treeview_layers.get_selection()
    list_store, selected_iter = selection.get_selected()
    if selected_iter is None:
        # Nothing selected: reset and grey out the slider.
        self.adjustment_alpha.set_value(100)
        self.scale_alpha.set_sensitive(False)
        return
    surface_name, alpha = list_store[selected_iter]
    self.adjustment_alpha.set_value(alpha * 100)
    self.scale_alpha.set_sensitive(True)
Set scale marker to alpha for selected layer.
def observable_object_keys(instance):
    """Ensure observable-objects keys are non-negative integers.

    Yields a JSONError for every key that is not a string of decimal
    digits.
    """
    is_index = re.compile(r"^\d+$").match
    for key in instance['objects']:
        if is_index(key):
            continue
        yield JSONError("'%s' is not a good key value. Observable Objects "
                        "should use non-negative integers for their keys."
                        % key, instance['id'], 'observable-object-keys')
Ensure observable-objects keys are non-negative integers.