#vtb
def output(self, pin, value):
    # Only GPIO pins 0-15 are addressable; the original error message was
    # lost in extraction, so a generic one is used here.
    if pin < 0 or pin > 15:
        raise ValueError('Pin must be between 0 and 15 (inclusive).')
    self._output_pin(pin, value)
    self.mpsse_write_gpio()
Set the specified pin to the provided high/low value. Value should be either HIGH/LOW or a boolean (True = high).
#vtb
def group_and_sort_statements(stmt_list, ev_totals=None):
    def _count(stmt):
        if ev_totals is None:
            return len(stmt.evidence)
        else:
            return ev_totals[stmt.get_hash()]

    stmt_rows = defaultdict(list)
    stmt_counts = defaultdict(lambda: 0)
    arg_counts = defaultdict(lambda: 0)
    for key, s in _get_keyed_stmts(stmt_list):
        stmt_rows[key].append(s)
        stmt_counts[key] += _count(s)
        # NOTE: the statement-type literal was lost in extraction;
        # 'Conversion' is assumed from the (subj, objs_from, objs_to) key shape.
        if key[0] == 'Conversion':
            subj = key[1]
            for obj in key[2] + key[3]:
                arg_counts[(subj, obj)] += _count(s)
        else:
            arg_counts[key[1:]] += _count(s)

    def process_rows(stmt_rows):
        for key, stmts in stmt_rows.items():
            verb = key[0]
            inps = key[1:]
            sub_count = stmt_counts[key]
            arg_count = arg_counts[inps]
            # NOTE: 'Complex' is assumed; the original literal was lost.
            if verb == 'Complex' and sub_count == arg_count and len(inps) <= 2:
                if all([len(set(ag.name for ag in s.agent_list())) > 2
                        for s in stmts]):
                    continue
            new_key = (arg_count, inps, sub_count, verb)
            stmts = sorted(stmts,
                           key=lambda s: _count(s) + 1/(1+len(s.agent_list())),
                           reverse=True)
            yield new_key, verb, stmts

    sorted_groups = sorted(process_rows(stmt_rows),
                           key=lambda tpl: tpl[0], reverse=True)
    return sorted_groups
Group statements by type and arguments, and sort by prevalence. Parameters ---------- stmt_list : list[Statement] A list of INDRA statements. ev_totals : dict{int: int} A dictionary, keyed by statement hash (shallow), with counts of total evidence as the values. Including this will allow statements to be better sorted. Returns ------- sorted_groups : list[tuple] A list of tuples containing a sort key, the statement type, and a list of statements, also sorted by evidence count, for that key and type. The sort key contains a count of statements with those arguments, the arguments (normalized strings), the count of statements with those arguments and type, and then the statement type.
#vtb
def great_circle_distance(self, other):
    distance_latitude = math.radians(abs(self.latitude - other.latitude))
    distance_longitude = math.radians(abs(self.longitude - other.longitude))
    a = math.sin(distance_latitude / 2) * math.sin(distance_latitude / 2) \
        + math.cos(math.radians(self.latitude)) \
        * math.cos(math.radians(other.latitude)) \
        * math.sin(distance_longitude / 2) \
        * math.sin(distance_longitude / 2)
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return GeoPoint.EARTH_RADIUS_METERS * c
Return the great-circle distance, in meters, from these geographic coordinates to the specified other point, i.e., the shortest distance over the earth's surface, the 'as-the-crow-flies' distance between the points, ignoring any natural elevations of the ground. Haversine formula:: R = earth's radius (mean radius = 6,371km) Δlat = lat2 − lat1 Δlong = long2 − long1 a = sin²(Δlat / 2) + cos(lat1).cos(lat2).sin²(Δlong/2) c = 2.atan2(√a, √(1−a)) d = R.c @param other: a ``GeoPoint`` instance. @return: the great-circle distance, in meters, between these geographic coordinates and the specified other point.
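A minimal standalone sketch of the same haversine computation, useful for sanity-checking distances without a GeoPoint instance (the 6,371 km mean Earth radius comes from the formula notes above; the function name is ours):

import math

def haversine_m(lat1, lon1, lat2, lon2, radius_m=6371000.0):
    # Convert coordinate deltas to radians and apply the haversine formula.
    dlat = math.radians(lat2 - lat1)
    dlon = math.radians(lon2 - lon1)
    a = (math.sin(dlat / 2) ** 2
         + math.cos(math.radians(lat1)) * math.cos(math.radians(lat2))
         * math.sin(dlon / 2) ** 2)
    return radius_m * 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))

d = haversine_m(48.8566, 2.3522, 51.5074, -0.1278)  # Paris -> London, roughly 344 km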
#vtb
def get_eval_metrics(logits, labels, params):
    metrics = {
        "accuracy": _convert_to_eval_metric(padded_accuracy)(logits, labels),
        "accuracy_top5": _convert_to_eval_metric(padded_accuracy_top5)(
            logits, labels),
        "accuracy_per_sequence": _convert_to_eval_metric(
            padded_sequence_accuracy)(logits, labels),
        "neg_log_perplexity": _convert_to_eval_metric(padded_neg_log_perplexity)(
            logits, labels, params.vocab_size),
        "approx_bleu_score": _convert_to_eval_metric(bleu_score)(logits, labels),
        "rouge_2_fscore": _convert_to_eval_metric(rouge_2_fscore)(logits, labels),
        "rouge_L_fscore": _convert_to_eval_metric(rouge_l_fscore)(logits, labels),
    }
    # Prefix each metric name with "metrics/".
    metrics = {"metrics/%s" % k: v for k, v in six.iteritems(metrics)}
    return metrics
Return dictionary of model evaluation metrics.
#vtb
def enbw(data):
    N = len(data)
    return N * np.sum(data**2) / np.sum(data)**2
r"""Computes the equivalent noise bandwidth .. math:: ENBW = N \frac{\sum_{n=1}^{N} w_n^2}{\left(\sum_{n=1}^{N} w_n \right)^2} .. doctest:: >>> from spectrum import create_window, enbw >>> w = create_window(64, 'rectangular') >>> enbw(w) 1.0 The following table contains the ENBW values for some of the implemented windows in this module (with N=16384). They have been double checked against litterature (Source: [Harris]_, [Marple]_). If not present, it means that it has not been checked. =================== ============ ============= name ENBW litterature =================== ============ ============= rectangular 1. 1. triangle 1.3334 1.33 Hann 1.5001 1.5 Hamming 1.3629 1.36 blackman 1.7268 1.73 kaiser 1.7 blackmanharris,4 2.004 2. riesz 1.2000 1.2 riemann 1.32 1.3 parzen 1.917 1.92 tukey 0.25 1.102 1.1 bohman 1.7858 1.79 poisson 2 1.3130 1.3 hanningpoisson 0.5 1.609 1.61 cauchy 1.489 1.48 lanczos 1.3 =================== ============ =============
#vtb
def spherical_histogram(data=None, radial_bins="numpy", theta_bins=16,
                        phi_bins=16, transformed=False, *args, **kwargs):
    dropna = kwargs.pop("dropna", True)
    data = _prepare_data(data, transformed=transformed,
                         klass=SphericalHistogram, dropna=dropna)
    if isinstance(theta_bins, int):
        theta_range = (0, np.pi)
        # Bug fix: the original tested membership in the string "kwargs"
        # instead of the kwargs dict.
        if "theta_range" in kwargs:
            theta_range = kwargs["theta_range"]
        elif "range" in kwargs:
            theta_range = kwargs["range"][1]
        theta_range = list(theta_range) + [theta_bins + 1]
        theta_bins = np.linspace(*theta_range)
    if isinstance(phi_bins, int):
        phi_range = (0, 2 * np.pi)
        if "phi_range" in kwargs:
            phi_range = kwargs["phi_range"]
        elif "range" in kwargs:
            phi_range = kwargs["range"][2]
        phi_range = list(phi_range) + [phi_bins + 1]
        phi_bins = np.linspace(*phi_range)
    bin_schemas = binnings.calculate_bins_nd(data,
                                             [radial_bins, theta_bins, phi_bins],
                                             *args, check_nan=not dropna,
                                             **kwargs)
    weights = kwargs.pop("weights", None)
    frequencies, errors2, missed = histogram_nd.calculate_frequencies(
        data, ndim=3, binnings=bin_schemas, weights=weights)
    return SphericalHistogram(binnings=bin_schemas, frequencies=frequencies,
                              errors2=errors2, missed=missed)
Facade construction function for the SphericalHistogram.
#vtb
def _convert_operator(self, node_name, op_name, attrs, inputs):
    if op_name in convert_map:
        op_name, new_attrs, inputs = convert_map[op_name](attrs, inputs, self)
    else:
        raise NotImplementedError("Operator {} not implemented.".format(op_name))
    if isinstance(op_name, string_types):
        new_op = getattr(symbol, op_name, None)
        if not new_op:
            raise RuntimeError("Unable to map op_name {} to sym".format(op_name))
        if node_name is None:
            mxnet_sym = new_op(*inputs, **new_attrs)
        else:
            mxnet_sym = new_op(name=node_name, *inputs, **new_attrs)
        return mxnet_sym
    return op_name
Convert from onnx operator to mxnet operator. The converter must specify conversions explicitly for incompatible names, and apply handlers to operator attributes. Parameters ---------- :param node_name : str name of the node to be translated. :param op_name : str Operator name, such as Convolution, FullyConnected :param attrs : dict Dict of operator attributes :param inputs: list list of inputs to the operator Returns ------- :return mxnet_sym Converted mxnet symbol
#vtb
def default_software_reset_type(self, reset_type):
    assert isinstance(reset_type, Target.ResetType)
    assert reset_type in (Target.ResetType.SW_SYSRESETREQ,
                          Target.ResetType.SW_VECTRESET,
                          Target.ResetType.SW_EMULATED)
    self._default_software_reset_type = reset_type
! @brief Modify the default software reset method. @param self @param reset_type Must be one of the software reset types: Target.ResetType.SW_SYSRESETREQ, Target.ResetType.SW_VECTRESET, or Target.ResetType.SW_EMULATED.
#vtb
def get_choices_for(self, field):
    choices = self._fields[field].choices
    if isinstance(choices, six.string_types):
        # NOTE: the dict keys were lost in extraction; 'name' and 'value'
        # are assumed from the documented (name, value) return pairs.
        return [(d['name'], d['value'])
                for d in self._choices_manager.get_all(choices)]
    else:
        return choices
Get the choices for the given field. Args: field (str): Name of field. Returns: List of tuples. [(name, value),...]
#vtb
def set_time(self, vfy_time):
    param = _lib.X509_VERIFY_PARAM_new()
    param = _ffi.gc(param, _lib.X509_VERIFY_PARAM_free)
    # NOTE: the strftime format was lost in extraction; '%s' (seconds since
    # the epoch) is assumed, since the OpenSSL call expects a Unix timestamp.
    _lib.X509_VERIFY_PARAM_set_time(param, int(vfy_time.strftime('%s')))
    _openssl_assert(_lib.X509_STORE_set1_param(self._store, param) != 0)
Set the time against which the certificates are verified. Normally the current time is used. .. note:: For example, you can determine if a certificate was valid at a given time. .. versionadded:: 17.0.0 :param datetime vfy_time: The verification time to set on this store. :return: ``None`` if the verification time was successfully set.
#vtb
def default_username_algo(email):
    # NOTE: the body was lost in extraction apart from the return statement;
    # a urlsafe base64 of the SHA1 of the email is a common implementation
    # and is assumed here (base64, hashlib and smart_bytes imported at
    # module level).
    username = base64.urlsafe_b64encode(
        hashlib.sha1(smart_bytes(email)).digest()).rstrip(b'=')
    return smart_text(username)
Generate username for the Django user. :arg str/unicode email: the email address to use to generate a username :returns: str/unicode
#vtb
def push_new_themes(catalog, portal_url, apikey):
    ckan_portal = RemoteCKAN(portal_url, apikey=apikey)
    # NOTE: the action name and dict keys were lost in extraction;
    # 'group_list', 'themeTaxonomy' and 'id' are assumed from the CKAN API
    # and the DataJson taxonomy layout.
    existing_themes = ckan_portal.call_action('group_list')
    new_themes = [theme['id'] for theme in catalog['themeTaxonomy']
                  if theme['id'] not in existing_themes]
    pushed_names = []
    for new_theme in new_themes:
        name = push_theme_to_ckan(
            catalog, portal_url, apikey, identifier=new_theme)
        pushed_names.append(name)
    return pushed_names
Takes a catalog and writes the taxonomy themes that are not already present. Args: catalog (DataJson): The source catalog containing the taxonomy. portal_url (str): The URL of the target CKAN portal. apikey (str): The apikey of a user with permissions to create or update themes. Returns: str: The ids of the created themes.
#vtb
def vecs_to_datmesh(x, y):
    # meshgrid and zeros come from numpy (imported at module level).
    x, y = meshgrid(x, y)
    out = zeros(x.shape + (2,), dtype=float)
    out[:, :, 0] = x
    out[:, :, 1] = y
    return out
Converts input arguments x and y to a 2d meshgrid, suitable for calling Means, Covariances and Realizations.
#vtb
def get_nonoauth_parameters(self):
    # NOTE: the prefix literal was lost in extraction; 'oauth_' is assumed,
    # since OAuth protocol parameters carry that prefix.
    return dict([(k, v) for k, v in self.items()
                 if not k.startswith('oauth_')])
Get any non-OAuth parameters.
#vtb
def _row_to_str(self, row):
    # NOTE: the string literals below were lost in extraction; an empty
    # accumulator, space padding, and a trailing newline are assumed from
    # the documented output format.
    _row_text = ''
    for col, width in self.col_widths.items():
        _row_text += self.COLUMN_SEP
        l_pad, r_pad = self._split_int(width - len(row[col]))
        _row_text += '{}{}{}'.format(' ' * (l_pad + self.PADDING),
                                     row[col],
                                     ' ' * (r_pad + self.PADDING))
    _row_text += self.COLUMN_SEP + '\n'
    return _row_text
Converts a list of strings to a correctly spaced and formatted row string. e.g. ['some', 'foo', 'bar'] --> '| some | foo | bar |' :param row: list :return: str
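A standalone sketch of the same padding logic for a single row, assuming _split_int halves the slack (remainder to the right) and using the docstring's '|' separator:

def row_to_str(cells, widths, sep='|', padding=1):
    out = ''
    for text, width in zip(cells, widths):
        slack = width - len(text)
        l_pad, r_pad = slack // 2, slack - slack // 2
        out += sep + ' ' * (l_pad + padding) + text + ' ' * (r_pad + padding)
    return out + sep + '\n'

# Reproduces the docstring example:
assert row_to_str(['some', 'foo', 'bar'], [4, 3, 3]) == '| some | foo | bar |\n'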
#vtb
def qos(self, prefetch_size=0, prefetch_count=0, is_global=False):
    args = Writer()
    args.write_long(prefetch_size).\
        write_short(prefetch_count).\
        write_bit(is_global)
    self.send_frame(MethodFrame(self.channel_id, 60, 10, args))
    self.channel.add_synchronous_cb(self._recv_qos_ok)
Set QoS on this channel.
#vtb
def kw_changelist_view(self, request: HttpRequest, extra_context=None, **kw):
    return self.changelist_view(request, extra_context)
Changelist view which allows key-value arguments. :param request: HttpRequest :param extra_context: Extra context dict :param kw: Key-value dict :return: See changelist_view()
#vtb
def setCurrentProfile(self, prof):
    if prof is None:
        self.clearActive()
        return
    profile = None
    blocked = self.signalsBlocked()
    self.blockSignals(True)
    for act in self._profileGroup.actions():
        if prof in (act.profile(), act.profile().name()):
            act.setChecked(True)
            profile = act.profile()
        else:
            act.setChecked(False)
    self.blockSignals(blocked)
    if profile == self._currentProfile and not self._viewWidget.isEmpty():
        return
    self._currentProfile = profile
    if self._viewWidget and profile and not blocked:
        self._viewWidget.restoreProfile(profile)
    if not blocked:
        self.loadProfileFinished.emit(profile)
        self.currentProfileChanged.emit(profile)
Sets the current profile for this toolbar to the given profile. :param prof | <projexui.widgets.xviewwidget.XViewProfile> || <str>
#vtb
def clear_annotation_data(self):
    self.genes = set()
    self.annotations = []
    self.term_annotations = {}
    self.gene_annotations = {}
Clear annotation data. Returns ------- None
#vtb
def fast_maxwell_boltzmann(mass, file_name=None, return_code=False):
    # NOTE: the hasattr() argument literals were lost in extraction;
    # 'shape' (ndarray) and '__len__' (sequence) are assumed for the two
    # dispatch branches. The generated-code indentation is normalized.
    code = "def maxwell_boltzmann(v, T):\n"
    code += "    if hasattr(v, 'shape'):\n"
    code += "        d = 1\n"
    code += "        m = %s\n" % mass
    code += "        f = np.sqrt(m/2/np.pi/k_B_num/T)**d\n"
    code += "        f = f * np.exp(-m*v**2/2/k_B_num/T)\n"
    code += "        return f\n"
    code += "    elif hasattr(v, '__len__'):\n"
    code += "        d = len(v)\n"
    code += "        m = %s\n" % mass
    code += "        f = np.sqrt(m/2/np.pi/k_B_num/T)**d\n"
    code += "        vsquare = sum([v[i]**2 for i in range(d)])\n"
    code += "        f = f * np.exp(-m*vsquare/2/k_B_num/T)\n"
    code += "        return f\n"
    code += "    else:\n"
    code += "        d = 1\n"
    code += "        m = %s\n" % mass
    code += "        f = np.sqrt(m/2/np.pi/k_B_num/T)**d\n"
    code += "        f = f * np.exp(-m*v**2/2/k_B_num/T)\n"
    code += "        return f\n"
    if file_name is not None:
        f = open(file_name + ".py", "w")  # open() instead of Python 2's file()
        f.write(code)
        f.close()
    maxwell_boltzmann = code
    if not return_code:
        exec maxwell_boltzmann  # Python 2 exec statement, as in the source
    return maxwell_boltzmann
r"""Return a function that returns values of a Maxwell-Boltzmann distribution. >>> from fast import Atom >>> mass = Atom("Rb", 87).mass >>> f = fast_maxwell_boltzmann(mass) >>> print f(0, 273.15+20) 0.00238221482739 >>> import numpy as np >>> v = np.linspace(-600, 600, 101) >>> dist = f(v, 273.15+20) >>> dv = v[1]-v[0] >>> print sum(dist)*dv 0.999704711134
#vtb
def deserialize(self, xml_input, *args, **kwargs):
    return xmltodict.parse(xml_input, *args, **kwargs)
Convert XML to dict object
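A quick illustration of what xmltodict.parse returns: elements become nested dicts, attributes are prefixed with '@', and text content lands under '#text':

import xmltodict

doc = xmltodict.parse('<a href="x">hi</a>')
assert doc['a']['@href'] == 'x'
assert doc['a']['#text'] == 'hi'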
#vtb
def cmd_join(self, connection, sender, target, payload):
    if payload:
        connection.join(payload)
    else:
        raise ValueError("No channel given")
Asks the bot to join a channel
#vtb
def sqliteRowsToDicts(sqliteRows):
    return map(lambda r: dict(zip(r.keys(), r)), sqliteRows)
Unpacks sqlite rows as returned by fetchall into an array of simple dicts. :param sqliteRows: array of rows returned from fetchall DB call :return: array of dicts, keyed by the column names.
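Usage sketch; note that rows must come from a connection with row_factory set to sqlite3.Row so they expose keys(), and that under Python 3 map() is lazy, so wrap the result in list() if you need a real array:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.row_factory = sqlite3.Row
conn.execute('CREATE TABLE t (id INTEGER, name TEXT)')
conn.execute("INSERT INTO t VALUES (1, 'a')")
rows = conn.execute('SELECT * FROM t').fetchall()
assert list(sqliteRowsToDicts(rows)) == [{'id': 1, 'name': 'a'}]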
#vtb
def make_strain_from_inj_object(self, inj, delta_t, detector_name,
                                distance_scale=1):
    detector = Detector(detector_name)
    # NOTE: the injection field names were lost in extraction; 'approximant',
    # 'tc', 'ra', 'dec' and 'polarization' are assumed from PyCBC's
    # injection-table conventions.
    hp, hc = ringdown_td_approximants[inj['approximant']](
        inj, delta_t=delta_t, **self.extra_args)
    hp._epoch += inj['tc']
    hc._epoch += inj['tc']
    if distance_scale != 1:
        hp /= distance_scale
        hc /= distance_scale
    signal = detector.project_wave(hp, hc, inj['ra'], inj['dec'],
                                   inj['polarization'])
    return signal
Make a h(t) strain time-series from an injection object as read from an hdf file. Parameters ----------- inj : injection object The injection object to turn into a strain h(t). delta_t : float Sample rate to make injection at. detector_name : string Name of the detector used for projecting injections. distance_scale: float, optional Factor to scale the distance of an injection with. The default (=1) is no scaling. Returns -------- signal : float h(t) corresponding to the injection.
#vtb
def acoustic_similarity_directories(directories, analysis_function,
                                    distance_function, stop_check=None,
                                    call_back=None, multiprocessing=True):
    # NOTE: the call_back message and file-extension literals were lost in
    # extraction; '.wav' is assumed from the error message below, and a
    # generic progress message is used.
    files = []
    if call_back is not None:
        call_back('Mapping directories...')
        call_back(0, len(directories))
        cur = 0
    for d in directories:
        if not os.path.isdir(d):
            continue
        if stop_check is not None and stop_check():
            return
        if call_back is not None:
            cur += 1
            if cur % 3 == 0:
                call_back(cur)
        files += [os.path.join(d, x) for x in os.listdir(d)
                  if x.lower().endswith('.wav')]
    if len(files) == 0:
        raise (ConchError("The directories specified do not contain "
                          "any wav files"))
    if call_back is not None:
        call_back('Mapping files...')
        call_back(0, len(files) * len(files))
        cur = 0
    path_mapping = list()
    for x in files:
        for y in files:
            if stop_check is not None and stop_check():
                return
            if call_back is not None:
                cur += 1
                if cur % 20 == 0:
                    call_back(cur)
            if not x.lower().endswith('.wav'):
                continue
            if not y.lower().endswith('.wav'):
                continue
            if x == y:
                continue
            path_mapping.append((x, y))
    result = acoustic_similarity_mapping(path_mapping, analysis_function,
                                         distance_function, stop_check,
                                         call_back, multiprocessing)
    return result
Analyze many directories. Parameters ---------- directories : list of str List of fully specified paths to the directories to be analyzed
#vtb
def clear(self):
    self.prop_dt_map = dict()
    self.prop_data = dict()
    self.rev_lookup = defaultdict(set)
Convenience function to empty this fastrun container.
#vtb
def flush_all(self, conn):
    # NOTE: the command and error-message literals were lost in extraction;
    # b'flush_all\r\n' and a generic message are assumed from the memcached
    # text protocol.
    command = b'flush_all\r\n'
    response = yield from self._execute_simple_command(
        conn, command)
    if const.OK != response:
        raise ClientException('Memcached flush_all failed', response)
Its effect is to invalidate all existing items immediately
#vtb
def spearmanr(x, y):
    from scipy import stats
    if not x or not y:
        return 0
    corr, pvalue = stats.spearmanr(x, y)
    return corr
Michiel de Hoon's library (available in BioPython or standalone as PyCluster) returns Spearman rsb which does include a tie correction. >>> x = [5.05, 6.75, 3.21, 2.66] >>> y = [1.65, 26.5, -5.93, 7.96] >>> z = [1.65, 2.64, 2.64, 6.95] >>> round(spearmanr(x, y), 4) 0.4 >>> round(spearmanr(x, z), 4) -0.6325
#vtb
def get_profiles(self):
    out = set(x.profile for x in self.requires if x.profile)
    out.update(x.profile for x in self.removes if x.profile)
    return out
Returns set of profile names referenced in this Feature :returns: set of profile names
#vtb
def group_dashboard(request, group_slug):
    groups = get_user_groups(request.user)
    group = get_object_or_404(groups, slug=group_slug)
    tenants = get_user_tenants(request.user, group)
    # NOTE: the permission name, context keys and template path were lost in
    # extraction; the values below are assumed placeholders.
    can_edit_group = request.user.has_perm('change_tenantgroup', group)
    count = len(tenants)
    if count == 1:
        return redirect(tenants[0])
    context = {
        'group': group,
        'tenants': tenants,
        'count': count,
        'can_edit_group': can_edit_group,
    }
    return render(request, 'group_dashboard.html', context)
Dashboard for managing a TenantGroup.
#vtb
def broadcast(self, event):
    try:
        if event.broadcasttype == "users":
            if len(self._users) > 0:
                self.log("Broadcasting to all users:", event.content,
                         lvl=network)
                for useruuid in self._users.keys():
                    self.fireEvent(
                        send(useruuid, event.content, sendtype="user"))
        elif event.broadcasttype == "clients":
            if len(self._clients) > 0:
                self.log("Broadcasting to all clients: ", event.content,
                         lvl=network)
                for client in self._clients.values():
                    self.fireEvent(write(client.sock, event.content),
                                   "wsserver")
        elif event.broadcasttype == "socks":
            if len(self._sockets) > 0:
                self.log("Emergency?! Broadcasting to all sockets: ",
                         event.content)
                for sock in self._sockets:
                    self.fireEvent(write(sock, event.content), "wsserver")
    except Exception as e:
        self.log("Error during broadcast: ", e, type(e), lvl=critical)
Broadcasts an event either to all users or clients, depending on event flag
#vtb
def BTC(cpu, dest, src):
    # NOTE: the operand-type literals were lost in extraction; 'register'
    # and 'memory' are assumed from the two branches' semantics.
    if dest.type == 'register':
        value = dest.read()
        pos = src.read() % dest.size
        cpu.CF = value & (1 << pos) == 1 << pos
        dest.write(value ^ (1 << pos))
    elif dest.type == 'memory':
        addr, pos = cpu._getMemoryBit(dest, src)
        base, size, ty = cpu.get_descriptor(cpu.DS)
        addr += base
        value = cpu.read_int(addr, 8)
        cpu.CF = value & (1 << pos) == 1 << pos
        value = value ^ (1 << pos)
        cpu.write_int(addr, value, 8)
    else:
        raise NotImplementedError(f"Unknown operand for BTC: {dest.type}")
Bit test and complement. Selects the bit in a bit string (specified with the first operand, called the bit base) at the bit-position designated by the bit offset operand (second operand), stores the value of the bit in the CF flag, and complements the selected bit in the bit string. :param cpu: current CPU. :param dest: bit base operand. :param src: bit offset operand.
#vtb
def get(self, date=datetime.date.today(), country=None):
    if not country:
        country = self.country
    if country == "all":
        raise ValueError("You need to specify a country")
    if not isinstance(date, str) and not isinstance(date, int):
        date = date.year
    cpi = self.data.get(country.upper(), {}).get(str(date))
    if not cpi:
        raise ValueError("Missing CPI data for {} for {}".format(
            country, date))
    return CPIResult(date=date, value=cpi)
Get the CPI value for a specific time. Defaults to today. This uses the closest method internally but sets limit to one day.
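A usage sketch of the lookup pattern (how the CPI object is constructed is library-specific, so treat the constructor as an assumption; the point is the call shape):

cpi = CPI()                                    # hypothetical construction
result = cpi.get(date=2015, country="sweden")  # year as int is accepted
print(result.date, result.value)               # CPIResult is a (date, value) pair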
#vtb
def check(f):
    # The attribute name was lost in extraction; 'wrapped_fn' is recovered
    # from the attribute set below.
    if hasattr(f, 'wrapped_fn'):
        return f
    else:
        @wraps(f)
        def decorated(*args, **kwargs):
            return check_conditions(f, args, kwargs)
        decorated.wrapped_fn = f
        return decorated
Wraps the function with a decorator that runs all of the pre/post conditions.
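A usage sketch of the decorator; check_conditions and the condition registry come from the surrounding library. The wrapped_fn marker makes repeated decoration a no-op:

@check
def sqrt(x):
    return x ** 0.5

assert check(sqrt) is sqrt  # second application returns the function unchanged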
#vtb
def upload(self, file_path, timeout=-1):
    return self._client.upload(file_path, timeout=timeout)
Upload an SPP ISO image file or a hotfix file to the appliance. The API supports upload of one hotfix at a time into the system. For the successful upload of a hotfix, ensure its original name and extension are not altered. Args: file_path: Full path to firmware. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: dict: Information about the updated firmware bundle.
#vtb
def _load_wm_map(exclude_auto=None):
    # NOTE: many string literals in this function (file names, separators,
    # reader/namespace names) were lost in extraction. Values marked
    # 'assumed' below are illustrative reconstructions, not the originals.
    exclude_auto = [] if not exclude_auto else exclude_auto
    path_here = os.path.dirname(os.path.abspath(__file__))
    ontomap_file = os.path.join(path_here, 'wm_ontomap.tsv')  # assumed
    mappings = {}

    def make_hume_prefix_map():
        hume_ont = os.path.join(path_here, 'hume_ontology.rdf')  # assumed
        graph = rdflib.Graph()
        graph.parse(os.path.abspath(hume_ont), format='nt')  # assumed
        entry_map = {}
        for node in graph.all_nodes():
            entry = node.split('#')[1]  # assumed separator
            if '/' not in entry:
                entry_map[entry] = None
                continue
            parts = entry.split('/')
            prefix, real_entry = parts[0], '/'.join(parts[1:])
            entry_map[real_entry] = prefix
        return entry_map

    hume_prefix_map = make_hume_prefix_map()

    def add_hume_prefix(hume_entry):
        prefix = hume_prefix_map[hume_entry]
        return '%s/%s' % (prefix, hume_entry)  # assumed format

    def map_entry(reader, entry):
        # Reader and namespace names assumed: eidos/UN, hume/HUME, sofia/SOFIA.
        if reader == 'eidos':
            namespace = 'UN'
            entry = entry.replace(' ', '_')
            entry_id = entry
        elif reader == 'hume':
            namespace = 'HUME'
            entry = entry.replace(' ', '_')
            entry_id = add_hume_prefix(entry)
        elif reader == 'sofia':
            namespace = 'SOFIA'
            parts = entry.split('/')[1:]
            parts = ['_'.join([p.capitalize() for p in part.split()])
                     for part in parts]
            entry_id = '/'.join(parts)
        else:
            return reader, entry
        return namespace, entry_id

    with open(ontomap_file, 'r') as fh:
        for line in fh.readlines():
            s, se, t, te, score = line.strip().split('\t')  # assumed delimiter
            score = float(score)
            s, se = map_entry(s, se)
            t, te = map_entry(t, te)
            if (s, t) not in exclude_auto:
                if (s, se, t) in mappings:
                    if mappings[(s, se, t)][1] < score:
                        mappings[(s, se, t)] = ((t, te), score)
                else:
                    mappings[(s, se, t)] = ((t, te), score)
            if (t, s) not in exclude_auto:
                if (t, te, s) in mappings:
                    if mappings[(t, te, s)][1] < score:
                        mappings[(t, te, s)] = ((s, se), score)
                else:
                    mappings[(t, te, s)] = ((s, se), score)
    ontomap = []
    for s, ts in mappings.items():
        ontomap.append(((s[0], s[1]), ts[0], ts[1]))

    override_file = os.path.join(path_here, 'wm_ontomap_overrides.tsv')  # assumed
    override_mappings = []
    with open(override_file, 'r') as fh:
        for row in fh.readlines():
            if 'OVERRIDE' not in row:  # assumed marker
                continue
            _, te, _, se = row.strip().split('\t')  # assumed delimiter
            s = 'UN'    # assumed
            t = 'HUME'  # assumed
            se = se.replace(' ', '_')
            te = te.replace(' ', '_')
            if se.startswith('/'):
                se = se[1:]
            override_mappings.append((s, se, t, te))
    for s, se, t, te in override_mappings:
        found = False
        for idx, ((so, seo), (eo, teo), score) in enumerate(ontomap):
            if (s, se, t) == (so, seo, eo):
                ontomap[idx] = ((s, se), (t, te), 1.0)
                found = True
        if not found:
            ontomap.append(((s, se), (t, te), 1.0))
    return ontomap
Load an ontology map for world models. exclude_auto : None or list[tuple] A list of ontology mappings for which automated mappings should be excluded, e.g. [(HUME, UN)] would result in not using mappings from HUME to UN.
#vtb
def unregister_transform(self, node_class, transform, predicate=None):
    self.transforms[node_class].remove((transform, predicate))
Unregister the given transform.
#vtb
def cwd_filt2(depth):
    # NOTE: string literals were lost in extraction; '~', '' and 'win32'
    # are assumed from the surrounding logic and the platform check.
    full_cwd = os.getcwdu()  # Python 2, as in the source
    cwd = full_cwd.replace(HOME, "~").split(os.sep)
    if '~' in cwd and len(cwd) == depth + 1:
        depth += 1
    drivepart = ''
    if sys.platform == 'win32' and len(cwd) > depth:
        drivepart = os.path.splitdrive(full_cwd)[0]
    out = drivepart + '/'.join(cwd[-depth:])
    return out or os.sep
Return the last depth elements of the current working directory. $HOME is always replaced with '~'. If depth==0, the full path is returned.
#vtb
def cancel(self):
    if not self.id:
        # The original message was garbled in extraction; this wording
        # is assumed.
        raise TypeError(u"You can't cancel an event that hasn't been created yet.")
    self.refresh_change_key()
    self.service.send(soap_request.delete_event(self))
    return None
Cancels an event in Exchange. :: event = service.calendar().get_event(id='KEY HERE') event.cancel() This will send notifications to anyone who has not declined the meeting.
#vtb
def recovery(self, using=None, **kwargs):
    return self._get_connection(using).indices.recovery(index=self._name,
                                                        **kwargs)
The indices recovery API provides insight into on-going shard recoveries for the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.recovery`` unchanged.
#vtb
def getLabelByName(self, name):
    name = name.lower()
    if name in self.stimLabels:
        return self.stimLabels[name]
    else:
        return None
Gets a label widget by it component name :param name: name of the AbstractStimulusComponent which this label is named after :type name: str :returns: :class:`DragLabel<sparkle.gui.drag_label.DragLabel>`
#vtb
def _gen_doc(cls, summary, full_name, identifier, example_call, doc_str,
             show_examples):
    # NOTE: the join/split separators and the docstring template were lost
    # in extraction; newline separators and a generic five-slot template
    # are assumed.
    example_call = '\n'.join(map(str.strip, example_call.split('\n')[1:]))
    ret = docstrings.dedents(
        '%s\n\n%s\n%s\n%s\n\n%s' % (summary, full_name, identifier,
                                    example_call, doc_str))
    if show_examples:
        ret += '\n\n' + cls._gen_examples(identifier)
    return ret
Generate the documentation docstring for a PlotMethod
#vtb
def parse_name_altree(record):
    name_tuple = split_name(record.value)
    # ALTREE stores unknown names as '?'; undo that. The literals below were
    # lost in extraction and are assumed from the docstring.
    if name_tuple[1] == '?':
        name_tuple = (name_tuple[0], '', name_tuple[2])
    maiden = record.sub_tag_value("SURN")
    if maiden:
        # The maiden name is appended to the family name in parens.
        ending = ' (' + maiden + ')'
        surname = name_tuple[1]
        if surname.endswith(ending):
            surname = surname[:-len(ending)].rstrip()
            if surname == '?':
                surname = ''
        name_tuple = (name_tuple[0], surname, name_tuple[2], maiden)
    return name_tuple
Parse NAME structure assuming ALTREE dialect. In ALTREE dialect maiden name (if present) is saved as SURN sub-record and is also appended to family name in parens. Given name is saved in GIVN sub-record. Few examples: No maiden name: 1 NAME John /Smith/ 2 GIVN John With maiden name: 1 NAME Jane /Smith (Ivanova)/ 2 GIVN Jane 2 SURN Ivanova No maiden name 1 NAME Mers /Daimler (-Benz)/ 2 GIVN Mers Because family name can also contain parens it's not enough to parse family name and guess maiden name from it, we also have to check for SURN record. ALTREE also replaces empty names with question mark, we undo that too. :param record: NAME record :return: tuple with 3 or 4 elements, first three elements of tuple are the same as returned from :py:meth:`split_name` method, fourth element (if present) denotes maiden name.
#vtb
def _coerce_json_to_collection(self, json_repr):
    if isinstance(json_repr, dict):
        collection = json_repr
    else:
        try:
            collection = anyjson.loads(json_repr)
        except:
            # The log message was lost in extraction; a generic one is used.
            _LOG.warn('Could not parse JSON collection string')
            return None
    return collection
Use to ensure that a JSON string (if found) is parsed to the equivalent dict in python. If the incoming value is already parsed, do nothing. If a string fails to parse, return None.
#vtb
def getDirectory(*args):
    result = QtGui.QFileDialog.getDirectory(*args)
    if type(result) is not tuple:
        return result, bool(result)
    else:
        return result
Normalizes the getDirectory method between the different Qt wrappers. :return (<str> filename, <bool> accepted)
#vtb
def get_configuration_set_by_id(self, id):
    for cs in self.configuration_sets:
        if cs.id == id:
            return cs
    return None
Finds a configuration set in the component by its ID. @param id The ID of the configuration set to search for. @return The ConfigurationSet object for the set, or None if it was not found.
#vtb
def onBatchRejected(self, ledger_id):
    if ledger_id == POOL_LEDGER_ID:
        if isinstance(self.poolManager, TxnPoolManager):
            self.get_req_handler(POOL_LEDGER_ID).onBatchRejected()
    elif self.get_req_handler(ledger_id):
        self.get_req_handler(ledger_id).onBatchRejected()
    else:
        # The log message was lost in extraction; a generic one is assumed.
        logger.debug('{} got no request handler for ledger {}'
                     .format(self, ledger_id))
    self.audit_handler.post_batch_rejected(ledger_id)
    self.execute_hook(NodeHooks.POST_BATCH_REJECTED, ledger_id)
A batch of requests has been rejected, if stateRoot is None, reject the current batch. :param ledger_id: :param stateRoot: state root after the batch was created :return:
#vtb
def getR(self, i=5, j=6):
    if self.refresh is True:
        self.getMatrix()
    return self.transM[i - 1, j - 1]
return transport matrix element, indexed by i, j; by default, return dispersion value, i.e. getR(5,6) in [m] :param i: row index, with initial index of 1 :param j: col index, with initial index of 1 :return: transport matrix element
#vtb
def dynamic_content_item_variant_delete(self, item_id, id, **kwargs):
    api_path = "/api/v2/dynamic_content/items/{item_id}/variants/{id}.json"
    api_path = api_path.format(item_id=item_id, id=id)
    return self.call(api_path, method="DELETE", **kwargs)
https://developer.zendesk.com/rest_api/docs/core/dynamic_content#delete-variant
#vtb
def _read_linguas_from_files(env, linguas_files=None):
    import SCons.Util
    import SCons.Environment
    global _re_comment
    global _re_lang
    if not SCons.Util.is_List(linguas_files) \
            and not SCons.Util.is_String(linguas_files) \
            and not isinstance(linguas_files, SCons.Node.FS.Base) \
            and linguas_files:
        linguas_files = []
    if linguas_files is None:
        return []
    fnodes = env.arg2nodes(linguas_files)
    linguas = []
    for fnode in fnodes:
        contents = _re_comment.sub("", fnode.get_text_contents())
        ls = [l for l in _re_lang.findall(contents) if l]
        linguas.extend(ls)
    return linguas
Parse `LINGUAS` file and return list of extracted languages
#vtb
def winsorize(x, axis=0, limits=0.01):
    # Operate on a copy so the caller's data is untouched.
    x = x.copy()
    if isinstance(x, pd.DataFrame):
        return x.apply(_winsorize_wrapper, axis=axis, args=(limits, ))
    else:
        return pd.Series(_winsorize_wrapper(x, limits).values,
                         index=x.index)
`Winsorize <https://en.wikipedia.org/wiki/Winsorizing>`_ values based on limits
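Usage sketch, assuming the underlying _winsorize_wrapper applies scipy-style winsorization (clipping the extreme fraction given by limits to the nearest retained values):

import pandas as pd

s = pd.Series([1.0, 2.0, 3.0, 100.0])
winsorize(s, limits=0.25)  # -> values clipped to [2.0, 2.0, 3.0, 3.0]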
#vtb
def AFF4Path(self, client_urn):
    if not self.HasField("pathtype"):
        raise ValueError("Can't determine AFF4 path without a valid pathtype.")
    first_component = self[0]
    dev = first_component.path
    if first_component.HasField("offset"):
        # Device path uses 512-byte sector offsets.
        dev += ":{}".format(first_component.offset // 512)
    if (len(self) > 1 and first_component.pathtype == PathSpec.PathType.OS
            and self[1].pathtype == PathSpec.PathType.TSK):
        result = [self.AFF4_PREFIXES[PathSpec.PathType.TSK], dev]
        start = 1
    else:
        result = [self.AFF4_PREFIXES[first_component.pathtype]]
        start = 0
    for p in self[start:]:  # bug fix: the original indexed self[start]
        component = p.path
        if p.HasField("offset"):
            component += ":{}".format(p.offset // 512)
        if p.HasField("stream_name"):
            component += ":" + p.stream_name
        result.append(component)
    return client_urn.Add("/".join(result))
Returns the AFF4 URN this pathspec will be stored under. Args: client_urn: A ClientURN. Returns: A urn that corresponds to this pathspec. Raises: ValueError: If pathspec is not of the correct type.
#vtb
def absent(
        name,
        force=False,
        region=None,
        key=None,
        keyid=None,
        profile=None,
        remove_lc=False):
    # NOTE: the dict keys, messages and __salt__ function names were lost in
    # extraction; they are restored here following the usual boto_asg state
    # conventions and should be treated as assumed.
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile)
    if asg is None:
        ret['result'] = False
        ret['comment'] = 'Failed to check autoscale group existence.'
    elif asg:
        if __opts__['test']:
            ret['comment'] = 'Autoscale group set to be deleted.'
            ret['result'] = None
            if remove_lc:
                msg = 'Launch configuration {0} is set to be deleted.'.format(
                    asg['launch_config_name'])
                ret['comment'] = ' '.join([ret['comment'], msg])
            return ret
        deleted = __salt__['boto_asg.delete'](name, force, region, key,
                                              keyid, profile)
        if deleted:
            if remove_lc:
                lc_deleted = __salt__['boto_asg.delete_launch_configuration'](
                    asg['launch_config_name'], region, key, keyid, profile)
                if lc_deleted:
                    if 'launch_config' not in ret['changes']:
                        ret['changes']['launch_config'] = {}
                    ret['changes']['launch_config']['deleted'] = \
                        asg['launch_config_name']
                else:
                    ret['result'] = False
                    ret['comment'] = ' '.join(
                        [ret['comment'],
                         'Failed to delete launch configuration.'])
            ret['changes']['old'] = asg
            ret['changes']['new'] = None
            ret['comment'] = 'Deleted autoscale group.'
        else:
            ret['result'] = False
            ret['comment'] = 'Failed to delete autoscale group.'
    else:
        ret['comment'] = 'Autoscale group does not exist.'
    return ret
Ensure the named autoscale group is deleted. name Name of the autoscale group. force Force deletion of autoscale group. remove_lc Delete the launch config as well. region The region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid.
#vtb
def getBody(self, url, method='GET', headers={}, data=None, socket=None):
    # NOTE: the method default and the header key were lost in extraction;
    # 'GET' and 'Cookie' are assumed placeholders.
    if 'Cookie' not in headers:
        headers['Cookie'] = []
    return self.request(url, method, headers, data, socket)
Make an HTTP request and return the body
#vtb
def requiv_contact_min(b, component, solve_for=None, **kwargs):
    hier = b.get_hierarchy()
    if not len(hier.get_value()):
        raise NotImplementedError(
            "constraint for requiv_contact_min requires hierarchy")
    component_ps = _get_system_ps(b, component)
    parentorbit = hier.get_parent_of(component)
    parentorbit_ps = _get_system_ps(b, parentorbit)
    # NOTE: qualifier literals were lost in extraction; 'requiv_min', 'q'
    # and 'sma' are assumed from the variable names.
    requiv_min = component_ps.get_parameter(qualifier='requiv_min')
    q = parentorbit_ps.get_parameter(qualifier='q')
    sma = parentorbit_ps.get_parameter(qualifier='sma')
    if solve_for in [None, requiv_min]:
        lhs = requiv_min
        rhs = roche_requiv_contact_L1(
            q, sma, hier.get_primary_or_secondary(component, return_ind=True))
    else:
        raise NotImplementedError(
            "requiv_contact_min can only be solved for requiv_min")
    return lhs, rhs, {'component': component}
Create a constraint to determine the critical (at L1) value of requiv at which a contact will underflow. This will only be used for contacts for requiv_min :parameter b: the :class:`phoebe.frontend.bundle.Bundle` :parameter str component: the label of the star in which this constraint should be built :parameter str solve_for: if 'requiv_max' should not be the derived/constrained parameter, provide which other parameter should be derived :returns: lhs (Parameter), rhs (ConstraintParameter), args (list of arguments that were passed to this function)
#vtb
def authenticate(self):
    log.info("Authenticating to HP Cloud...")
    creds = self.creds
    # Key names recovered from the credentials examples in the docstring.
    access_key_id = creds.get('access_key_id', '')
    secret_access_key = creds.get('secret_access_key', '')
    if access_key_id and secret_access_key:
        self.nova_client.client.os_access_key_id = access_key_id
        self.nova_client.client.os_secret_key = secret_access_key
    self.nova_client.authenticate()
Authenticate against the HP Cloud Identity Service. This is the first step in any hpcloud.com session, although this method is automatically called when accessing higher-level methods/attributes. **Examples of Credentials Configuration** - Bare minimum for authentication using HP API keys: .. code-block:: yaml deployer_credentials: hpcloud: auth_url: https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/ tenant_name: farley.mowat-tenant1 access_key_id: MZOFIE9S83FOS248FIE3 secret_access_key: EU859vjksor73gkY378f9gkslbkrabcxwfyW2loo - With multiple *compute* availability zones activated, the region must also be specified (due to current limitations in the OpenStack client libraries): .. code-block:: yaml deployer_credentials: hpcloud: auth_url: https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/ tenant_name: farley.mowat-tenant1 access_key_id: MZOFIE9S83FOS248FIE3 secret_access_key: EU859vjksor73gkY378f9gkslbkrabcxwfyW2loo region_name: az-1.region-a.geo-1 - Using ``username`` and ``password`` is also allowed, but discouraged: .. code-block:: yaml deployer_credentials: hpcloud: auth_url: https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/ tenant_name: farley.mowat-tenant1 username: farley.mowat password: NeverCryW0lf When both API keys and ``username+password`` are specified, the API keys are used.
#vtb
def frombed(args):
    from jcvi.formats.fasta import Fasta
    from jcvi.formats.bed import Bed
    from jcvi.utils.cbook import fill

    p = OptionParser(frombed.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 3:
        sys.exit(not p.print_help())

    bedfile, contigfasta, readfasta = args
    prefix = bedfile.rsplit(".", 1)[0]
    contigfile = prefix + ".contig"
    idsfile = prefix + ".ids"

    contigfasta = Fasta(contigfasta)
    readfasta = Fasta(readfasta)
    bed = Bed(bedfile)
    checksum = "00000000 checksum."
    fw_ids = open(idsfile, "w")
    fw = open(contigfile, "w")
    for ctg, reads in bed.sub_beds():
        ctgseq = contigfasta[ctg]
        # NOTE: the header format strings were lost in extraction; the
        # templates below are assumed sketches of the BAMBUS .contig format.
        ctgline = "##{0} {1} {2} bases, {3}".format(
            ctg, len(reads), len(ctgseq), checksum)
        print(ctg, file=fw_ids)
        print(ctgline, file=fw)
        print(fill(ctgseq.seq), file=fw)
        for b in reads:
            read = b.accn
            strand = b.strand
            readseq = readfasta[read]
            rc = " [RC]" if strand == "-" else ""
            readlen = len(readseq)
            rstart, rend = 1, readlen
            if strand == "-":
                rstart, rend = rend, rstart
            readrange = "{{{0} {1}}}".format(rstart, rend)
            conrange = "<{0} {1}>".format(b.start, b.end)
            readline = "#{0}{1} {2} bases, {3} {4} {5}".format(
                read, rc, readlen, checksum, readrange, conrange)
            print(readline, file=fw)
            print(fill(readseq.seq), file=fw)
    logging.debug("Mapped contigs written to `{0}`.".format(contigfile))
    logging.debug("Contig IDs written to `{0}`.".format(idsfile))
%prog frombed bedfile contigfasta readfasta Convert read placement to contig format. This is useful before running BAMBUS.
#vtb
def is_instance_of(self, some_class):
    try:
        if not isinstance(self.val, some_class):
            # NOTE: the fallback literal and error message were lost in
            # extraction; the wording below is assumed.
            if hasattr(self.val, '__name__'):
                t = self.val.__name__
            elif hasattr(self.val, '__class__'):
                t = self.val.__class__.__name__
            else:
                t = 'unknown'
            self._err('Expected <%s:%s> to be instance of class <%s>, but was not.'
                      % (self.val, t, some_class.__name__))
    except TypeError:
        raise TypeError('given arg must be a class')
    return self
Asserts that val is an instance of the given class.
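Usage sketch in the assertpy style this method follows (assert_that is assumed as the entry point that wraps a value):

assert_that('foo').is_instance_of(str)  # passes and returns self for chaining
assert_that(3).is_instance_of(str)      # reports the failure via self._err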
#vtb
def _set_autobw_threshold_table_summary(self, v, load=False):
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=autobw_threshold_table_summary.autobw_threshold_table_summary,
            is_container='container',
            presence=False,
            yang_name="autobw-threshold-table-summary",
            rest_name="autobw-threshold-table-summary",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            # NOTE: the extension/namespace literals were lost in extraction;
            # the values below are assumed from the YANG module named in the
            # docstring.
            extensions={u'tailf-common': {
                u'info': u'MPLS Auto Bandwidth Threshold Table Summary',
                u'cli-suppress-show-path': None}},
            namespace='urn:brocade.com:mgmt:brocade-mpls-operational',
            defining_module='brocade-mpls-operational',
            yang_type='container',
            is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': ('autobw_threshold_table_summary must be of a '
                             'type compatible with container'),
            'defined-type': "container",
            'generated-type': 'YANGDynClass(...)',
        })
    self.__autobw_threshold_table_summary = t
    if hasattr(self, '_set'):
        self._set()
Setter method for autobw_threshold_table_summary, mapped from YANG variable /mpls_state/autobw_threshold_table_summary (container) If this variable is read-only (config: false) in the source YANG file, then _set_autobw_threshold_table_summary is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_autobw_threshold_table_summary() directly. YANG Description: MPLS Auto Bandwidth Threshold TableSummary
#vtb
def get_token(self, hash):
    tokens_snapshot = self.db.prefixed_db(
        NotificationPrefix.PREFIX_TOKEN).snapshot()
    try:
        val = tokens_snapshot.get(hash.ToBytes())
        if val:
            event = SmartContractEvent.FromByteArray(val)
            return event
    except Exception as e:
        logger.error("Smart contract event with contract hash %s not found: %s "
                     % (hash.ToString(), e))
    return None
Looks up a token by hash Args: hash (UInt160): The token to look up Returns: SmartContractEvent: A smart contract event with a contract that is an NEP5 Token
#vtb
def _AddEvent(self, event):
    # Attribute name recovered from its use on the next line.
    if hasattr(event, 'event_data_row_identifier'):
        event_data_identifier = identifiers.SQLTableIdentifier(
            self._CONTAINER_TYPE_EVENT_DATA, event.event_data_row_identifier)
        lookup_key = event_data_identifier.CopyToString()
        event_data_identifier = self._event_data_identifier_mappings[lookup_key]
        event.SetEventDataIdentifier(event_data_identifier)
    self._storage_writer.AddEvent(event)
Adds an event. Args: event (EventObject): event.
#vtb
async def traverse(self, func):
    async_executor = self
    if inspect.isasyncgenfunction(func):
        async for result in func(*async_executor.args):
            yield result
    else:
        yield await func(*async_executor.args)
Traverses an async function or generator, yielding each result. This function is private. The class should be used as an iterator instead of using this method.
#vtb
def load_texture(self, texture_version):
    import numpy as np
    # NOTE: the template path literals were lost in extraction; the paths
    # below are illustrative placeholders based on the docstring.
    lowres_tex_template = 'texture_coordinates/lowres_%d.obj' % texture_version
    highres_tex_template = 'texture_coordinates/highres_%d.obj' % texture_version
    from lace.mesh import Mesh
    from lace.cache import sc
    mesh_with_texture = Mesh(filename=sc(lowres_tex_template))
    if not np.all(mesh_with_texture.f.shape == self.f.shape):
        mesh_with_texture = Mesh(filename=sc(highres_tex_template))
    self.transfer_texture(mesh_with_texture)
Expect a texture version number as an integer, load the texture version from /is/ps/shared/data/body/template/texture_coordinates/. Currently there are versions [0, 1, 2, 3] available.
#vtb
def list(args):
    jm = setup(args)
    jm.list(job_ids=get_ids(args.job_ids),
            print_array_jobs=args.print_array_jobs,
            print_dependencies=args.print_dependencies,
            status=args.status,
            long=args.long,
            print_times=args.print_times,
            ids_only=args.ids_only,
            names=args.names)
Lists the jobs in the given database.
#vtb
def do_lzop_get(creds, url, path, decrypt, do_retry):
    # NOTE: the assertion and log-message literals were lost in extraction;
    # '.lzo' and the messages below are assumed, since the function expects
    # lzop-compressed input.
    assert url.endswith('.lzo'), 'Expect an lzop-compressed file'
    with files.DeleteOnError(path) as decomp_out:
        key = _uri_to_key(creds, url)
        with get_download_pipeline(PIPE, decomp_out.f, decrypt) as pl:
            g = gevent.spawn(write_and_return_error, key, pl.stdin)
            exc = g.get()
            if exc is not None:
                raise exc
        logger.info(
            msg='completed download and decompression',
            detail='Downloaded and decompressed "{url}" to "{path}"'
            .format(url=url, path=path))
    return True
Get and decompress a URL This streams the content directly to lzop; the compressed version is never stored on disk.
#vtb
def split_by(self, layer, sep=' '):
    # Default separator recovered from the docstring (default: ' ').
    if not self.is_tagged(layer):
        self.tag(layer)
    return self.split_given_spans(self.spans(layer), sep=sep)
Split the text into multiple instances defined by elements of given layer. The spans for layer elements are extracted and feed to :py:meth:`~estnltk.text.Text.split_given_spans` method. Parameters ---------- layer: str String determining the layer that is used to define the start and end positions of resulting splits. sep: str (default: ' ') The separator to use to join texts of multilayer elements. Returns ------- list of Text
#vtb
def locate_profile(profile='default'):
    # Default literal assumed: IPython's default profile name.
    from IPython.core.profiledir import ProfileDir, ProfileDirError
    try:
        pd = ProfileDir.find_profile_dir_by_name(get_ipython_dir(), profile)
    except ProfileDirError:
        raise IOError("Couldn't find profile %r" % profile)
    return pd.location
Find the path to the folder associated with a given profile. I.e. find $IPYTHONDIR/profile_whatever.
#vtb
def zone_helper(zone):
    if zone is None:
        return None
    elif isinstance(zone, Zone):
        return zone.href
    elif zone.startswith('http'):  # literal assumed: hrefs are returned as-is
        return zone
    return Zone.get_or_create(name=zone).href
Zone finder by name. If zone doesn't exist, create it and return the href :param str zone: name of zone (if href, will be returned as is) :return str href: href of zone
#vtb
def stop_scan(self):
    try:
        self.bable.stop_scan(sync=True)
    except bable_interface.BaBLEException:
        # Scan already stopped; ignore.
        pass
    self.scanning = False
Stop scanning.
#vtb
def build_requirements(docs_path, package_name="yacms"):
    mezz_string = "yacms=="
    project_path = os.path.join(docs_path, "..")
    requirements_file = os.path.join(project_path, package_name,
                                     "project_template", "requirements.txt")
    with open(requirements_file, "r") as f:
        requirements = f.readlines()
    with open(requirements_file, "w") as f:
        f.write("yacms==%s\n" % __version__)
        for requirement in requirements:
            if requirement.strip() and not requirement.startswith(mezz_string):
                f.write(requirement)
Updates the requirements file with yacms's version number.
#vtb
def forward(self, X):
    s = X[:-2]
    f = X[-2]
    w = X[-1]
    batch_size = len(f)
    x_idx = self._cuda(
        torch.as_tensor(np.arange(1, self.settings["lstm_dim"] + 1)).repeat(
            batch_size, 1
        )
    )
    outputs = self._cuda(torch.Tensor([]))
    for i in range(len(s)):
        state_word = self.lstms[0].init_hidden(batch_size)
        output = self.lstms[0].forward(s[i][0], s[i][1], state_word)
        outputs = torch.cat((outputs, output), 1)
    features = torch.cat((x_idx, f), 1)  # typo fixed: was "feaures"
    weights = torch.cat((outputs, w), 1)
    return self.sparse_linear(features, weights)
Forward function. :param X: The input (batch) of the model contains word sequences for lstm, features and feature weights. :type X: For word sequences: a list of torch.Tensor pair (word sequence and word mask) of shape (batch_size, sequence_length). For features: torch.Tensor of shape (batch_size, sparse_feature_size). For feature weights: torch.Tensor of shape (batch_size, sparse_feature_size). :return: The output of LSTM layer. :rtype: torch.Tensor of shape (batch_size, num_classes)
#vtb
def info(self):
    print("\n--- File Info ---")
    for key, val in self.file_header.items():
        # NOTE: the header-key and separator literals were lost in
        # extraction; 'src_raj'/'src_dej' and ':' are assumed from sigproc
        # header conventions.
        if key == 'src_raj':
            val = val.to_string(unit=u.hour, sep=':')
        if key == 'src_dej':
            val = val.to_string(unit=u.deg, sep=':')
        print("%16s : %32s" % (key, val))
    print("\n%16s : %32s" % ("Num ints in file", self.n_ints_in_file))
    print("%16s : %32s" % ("File shape", self.file_shape))
    print("--- Selection Info ---")
    print("%16s : %32s" % ("Data selection shape", self.selection_shape))
    print("%16s : %32s" % ("Minimum freq (MHz)", self.container.f_start))
    print("%16s : %32s" % ("Maximum freq (MHz)", self.container.f_stop))
Print header information and other derived information.
#vtb
def get_page_of_iterator(iterator, page_size, page_number):
    try:
        page_number = validate_page_number(page_number)
    except (PageNotAnInteger, EmptyPage):
        page_number = 1
    start = (page_number - 1) * page_size
    end = (page_number * page_size) + 1  # one extra item to detect a next page
    skipped_items = list(islice(iterator, start))
    items = list(islice(iterator, end))
    if len(items) == 0 and page_number != 1:
        # Ran past the end; fall back to the first page.
        items = skipped_items
        page_number = 1
    has_next = len(items) > page_size
    items = items[:page_size]
    return NoCountPage(items, page_number, page_size, has_next)
Get a page from an iterator, handling invalid input from the page number by defaulting to the first page.
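The slicing arithmetic, shown standalone with itertools.islice: one extra item beyond the page is pulled so has_next can be decided without counting the whole iterator:

from itertools import islice

it = iter(range(25))
page_size, page_number = 10, 2
start, end = (page_number - 1) * page_size, page_number * page_size + 1
skipped = list(islice(it, start))   # items 0..9
items = list(islice(it, end))       # items 10..24 (at most 21 requested)
has_next = len(items) > page_size   # True, so a third page exists
page_items = items[:page_size]      # items 10..19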
#vtb
def mm_top1(
        n_items, data, initial_params=None, alpha=0.0, max_iter=10000,
        tol=1e-8):
    return _mm(n_items, data, initial_params, alpha, max_iter, tol, _mm_top1)
Compute the ML estimate of model parameters using the MM algorithm. This function computes the maximum-likelihood (ML) estimate of model parameters given top-1 data (see :ref:`data-top1`), using the minorization-maximization (MM) algorithm [Hun04]_, [CD12]_. If ``alpha > 0``, the function returns the maximum a-posteriori (MAP) estimate under a (peaked) Dirichlet prior. See :ref:`regularization` for details. Parameters ---------- n_items : int Number of distinct items. data : list of lists Top-1 data. initial_params : array_like, optional Parameters used to initialize the iterative procedure. alpha : float, optional Regularization parameter. max_iter : int, optional Maximum number of iterations allowed. tol : float, optional Maximum L1-norm of the difference between successive iterates to declare convergence. Returns ------- params : numpy.ndarray The ML estimate of model parameters.
#vtb
def update_issue_remote_link_by_id(self, issue_key, link_id, url, title,
                                   global_id=None, relationship=None):
    # NOTE: dict keys and the endpoint template were lost in extraction;
    # the values below follow Jira's remote-link REST API and are assumed.
    data = {'object': {'url': url, 'title': title}}
    if global_id:
        data['globalId'] = global_id
    if relationship:
        data['relationship'] = relationship
    url = 'rest/api/2/issue/{issue_key}/remotelink/{link_id}'.format(
        issue_key=issue_key, link_id=link_id)
    return self.put(url, data=data)
Update existing Remote Link on Issue :param issue_key: str :param link_id: str :param url: str :param title: str :param global_id: str, OPTIONAL: :param relationship: str, Optional. Default by built-in method: 'Web Link'
#vtb
def update_reach_number_data(self):
    if not self.rapid_connect_file:
        log("Missing rapid_connect_file. "
            "Please set before running this function ...",
            "ERROR")
    if not self.riv_bas_id_file:
        log("Missing riv_bas_id_file. "
            "Please set before running this function ...",
            "ERROR")
    rapid_connect_table = np.loadtxt(self.rapid_connect_file,
                                     ndmin=2, delimiter=",", dtype=int)
    self.IS_riv_tot = int(rapid_connect_table.shape[0])
    self.IS_max_up = int(rapid_connect_table[:, 2].max())
    riv_bas_id_table = np.loadtxt(self.riv_bas_id_file,
                                  ndmin=1, delimiter=",",
                                  usecols=(0,), dtype=int)
    self.IS_riv_bas = int(riv_bas_id_table.size)
    if not self.for_tot_id_file:
        self.IS_for_tot = 0
        log("Missing for_tot_id_file. Skipping ...", "WARNING")
    else:
        for_tot_id_table = np.loadtxt(self.for_tot_id_file,
                                      ndmin=1, delimiter=",",
                                      usecols=(0,), dtype=int)
        self.IS_for_tot = int(for_tot_id_table.size)
    if not self.for_use_id_file:
        self.IS_for_use = 0
        log("Missing for_use_id_file. Skipping ...", "WARNING")
    else:
        for_use_id_table = np.loadtxt(self.for_use_id_file,
                                      ndmin=1, delimiter=",",
                                      usecols=(0,), dtype=int)
        self.IS_for_use = int(for_use_id_table.size)
Update the reach number data for the namelist based on input files. .. warning:: You need to make sure you set *rapid_connect_file* and *riv_bas_id_file* before running this function. Example: .. code:: python from RAPIDpy import RAPID rapid_manager = RAPID( rapid_connect_file='../rapid-io/input/rapid_connect.csv', riv_bas_id_file='../rapid-io/input/riv_bas_id.csv', ) rapid_manager.update_reach_number_data() Example with forcing data: .. code:: python from RAPIDpy import RAPID rapid_manager = RAPID( rapid_connect_file='../rapid-io/input/rapid_connect.csv', riv_bas_id_file='../rapid-io/input/riv_bas_id.csv', Qfor_file='../rapid-io/input/qfor_file.csv', for_tot_id_file='../rapid-io/input/for_tot_id_file.csv', for_use_id_file='../rapid-io/input/for_use_id_file.csv', ZS_dtF=3*60*60, BS_opt_for=True ) rapid_manager.update_reach_number_data()
#vtb
def expand(self, url):
    url = self.clean_url(url)
    # NOTE: the f-string and several payload keys/defaults were lost in
    # extraction; the values below follow the Adf.ly API and are assumed.
    expand_url = f'{self.api_url}expand'
    payload = {
        'domain': getattr(self, 'domain', 'adf.ly'),
        'advert_type': getattr(self, 'type', 'int'),
        'group_id': getattr(self, 'group_id', None),
        '_api_key': self.api_key,
        '_user_id': self.user_id,
        'url': url,
    }
    response = self._post(expand_url, data=payload)
    if not response.ok:
        raise BadAPIResponseException(response.content)
    try:
        data = response.json()
    except json.decoder.JSONDecodeError:
        raise BadAPIResponseException('API response could not be decoded')
    if data.get('errors'):
        errors = ','.join(i['msg'] for i in data['errors'])
        raise ShorteningErrorException(errors)
    if not data.get('data'):
        raise BadAPIResponseException(response.content)
    return data['data'][0]['url']
Expand implementation for Adf.ly Args: url: the URL you want to expand Returns: A string containing the expanded URL Raises: BadAPIResponseException: If the data is malformed or we got a bad status code on API response ShorteningErrorException: If the API Returns an error as response
#vtb
def get_anchor_point(self, anchor_name):
    if anchor_name in self._possible_anchors:
        return TikZNodeAnchor(self.handle, anchor_name)
    else:
        try:
            # Separator assumed: numeric anchors named like 'angle_30'.
            anchor = int(anchor_name.split('_')[1])
        except:
            anchor = None
        if anchor is not None:
            return TikZNodeAnchor(self.handle, str(anchor))
    raise ValueError('Invalid anchor name: "{}"'.format(anchor_name))
Return an anchor point of the node, if it exists.
#vtb
def correlation(T, obs1, obs2=None, times=(1), maxtime=None, k=None, ncv=None,
                return_times=False):
    # NOTE: the 'kind' literals were lost in extraction; 'numeric' is assumed.
    T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True,
                                        kind='numeric')
    n = T.shape[0]
    obs1 = _types.ensure_ndarray(obs1, ndim=1, size=n, kind='numeric')
    obs2 = _types.ensure_ndarray_or_None(obs2, ndim=1, size=n, kind='numeric')
    times = _types.ensure_int_vector(times, require_order=True)
    if _issparse(T):
        return sparse.fingerprints.correlation(T, obs1, obs2=obs2,
                                               times=times, k=k, ncv=ncv)
    else:
        return dense.fingerprints.correlation(T, obs1, obs2=obs2,
                                              times=times, k=k)
r"""Time-correlation for equilibrium experiment. Parameters ---------- T : (M, M) ndarray or scipy.sparse matrix Transition matrix obs1 : (M,) ndarray Observable, represented as vector on state space obs2 : (M,) ndarray (optional) Second observable, for cross-correlations times : array-like of int (optional), default=(1) List of times (in tau) at which to compute correlation maxtime : int, optional, default=None Maximum time step to use. Equivalent to . Alternative to times. k : int (optional) Number of eigenvalues and eigenvectors to use for computation ncv : int (optional) The number of Lanczos vectors generated, `ncv` must be greater than k; it is recommended that ncv > 2*k Returns ------- correlations : ndarray Correlation values at given times times : ndarray, optional time points at which the correlation was computed (if return_times=True) References ---------- .. [1] Noe, F, S Doose, I Daidone, M Loellmann, M Sauer, J D Chodera and J Smith. 2010. Dynamical fingerprints for probing individual relaxation processes in biomolecular dynamics with simulations and kinetic experiments. PNAS 108 (12): 4822-4827. Notes ----- **Auto-correlation** The auto-correlation of an observable :math:`a(x)` for a system in equilibrium is .. math:: \mathbb{E}_{\mu}[a(x,0)a(x,t)]=\sum_x \mu(x) a(x, 0) a(x, t) :math:`a(x,0)=a(x)` is the observable at time :math:`t=0`. It can be propagated forward in time using the t-step transition matrix :math:`p^{t}(x, y)`. The propagated observable at time :math:`t` is :math:`a(x, t)=\sum_y p^t(x, y)a(y, 0)`. Using the eigenvlaues and eigenvectors of the transition matrix the autocorrelation can be written as .. math:: \mathbb{E}_{\mu}[a(x,0)a(x,t)]=\sum_i \lambda_i^t \langle a, r_i\rangle_{\mu} \langle l_i, a \rangle. **Cross-correlation** The cross-correlation of two observables :math:`a(x)`, :math:`b(x)` is similarly given .. math:: \mathbb{E}_{\mu}[a(x,0)b(x,t)]=\sum_x \mu(x) a(x, 0) b(x, t) Examples -------- >>> import numpy as np >>> from msmtools.analysis import correlation >>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]]) >>> a = np.array([1.0, 0.0, 0.0]) >>> times = np.array([1, 5, 10, 20]) >>> corr = correlation(T, a, times=times) >>> corr array([ 0.40909091, 0.34081364, 0.28585667, 0.23424263])
#vtb
def setup(self, phase, entry_pressure='throat.capillary_pressure',
          pore_volume='pore.volume', throat_volume='throat.volume'):
    # NOTE: the default literals and dictionary keys were lost in
    # extraction; the defaults are restored from the docstring, and the
    # 'throat.*'/'pore.*' array keys are assumed from OpenPNM conventions.
    self.settings['phase'] = phase.name
    if pore_volume:
        self.settings['pore_volume'] = pore_volume
    if throat_volume:
        self.settings['throat_volume'] = throat_volume
    if entry_pressure:
        self.settings['entry_pressure'] = entry_pressure
    self['throat.entry_pressure'] = phase[self.settings['entry_pressure']]
    self['throat.sorted'] = sp.argsort(self['throat.entry_pressure'], axis=0)
    self['throat.order'] = 0
    self['throat.order'][self['throat.sorted']] = sp.arange(0, self.Nt)
    self['throat.invasion_sequence'] = -1
    self['pore.invasion_sequence'] = -1
    self._tcount = 0
r""" Set up the required parameters for the algorithm Parameters ---------- phase : OpenPNM Phase object The phase to be injected into the Network. The Phase must have the capillary entry pressure values for the system. entry_pressure : string The dictionary key to the capillary entry pressure. If none is supplied then the current value is retained. The default is 'throat.capillary_pressure'. pore_volume : string The dictionary key to the pore volume. If none is supplied then the current value is retained. The default is 'pore.volume'. throat_volume : string The dictionary key to the throat volume. If none is supplied then the current value is retained. The default is 'throat.volume'.
#vtb
def add_metadata(self, metadata_matrix, meta_index_store):
    assert isinstance(meta_index_store, IndexStore)
    assert len(metadata_matrix.shape) == 2
    assert metadata_matrix.shape[0] == self.get_num_docs()
    return self._make_new_term_doc_matrix(
        new_X=self._X,
        new_y=None,
        new_category_idx_store=None,
        new_y_mask=np.ones(self.get_num_docs()).astype(bool),
        new_mX=metadata_matrix,
        new_term_idx_store=self._term_idx_store,
        new_metadata_idx_store=meta_index_store)
Returns a new corpus with a the metadata matrix and index store integrated. :param metadata_matrix: scipy.sparse matrix (# docs, # metadata) :param meta_index_store: IndexStore of metadata values :return: TermDocMatrixWithoutCategories
#vtb
def permission_set(self, name, func=None):
    if func is None:
        return functools.partial(self.predicate, name)
    self.permission_sets[name] = func
    return func
Define a new permission set (directly, or as a decorator). E.g.:: @authz.permission_set('HTTP') def is_http_perm(perm): return perm.startswith('http.')
#vtb
def clean(self):
    errors = {}
    cleaned = {}
    for name, validator in self.validate_schema.items():
        val = getattr(self, name, None)
        try:
            cleaned[name] = validator.to_python(val)
        except formencode.api.Invalid, err:  # Python 2 syntax, as in the source
            errors[name] = err
    if errors:
        # The message literal was lost in extraction; a generic one is used.
        raise ValidationError('Validation failed', errors)
    return cleaned
Cleans the data and throws ValidationError on failure
#vtb
def folderitem(self, obj, item, index):
    obj = api.get_object(obj)
    uid = api.get_uid(obj)
    url = api.get_url(obj)
    title = api.get_title(obj)
    if self.show_categories_enabled():
        category = obj.getCategoryTitle()
        if category not in self.categories:
            self.categories.append(category)
        item["category"] = category
    rr = self.referenceresults.get(uid, {})
    item["Title"] = title
    item["replace"]["Title"] = get_link(url, value=title)
    item["allow_edit"] = self.get_editable_columns()
    item["required"] = self.get_required_columns()
    item["selected"] = rr and True or False
    item["result"] = rr.get("result", "")
    item["min"] = rr.get("min", "")
    item["max"] = rr.get("max", "")
    after_icons = ""
    if obj.getAccredited():
        after_icons += get_image("accredited.png", title=_("Accredited"))
    if obj.getAttachmentOption() == "r":
        after_icons += get_image("attach_reqd.png",
                                 title=_("Attachment required"))
    if obj.getAttachmentOption() == "n":
        after_icons += get_image("attach_no.png",
                                 title=_("Attachment not permitted"))
    if after_icons:
        item["after"]["Title"] = after_icons
    return item
Service triggered each time an item is iterated in folderitems. The use of this service prevents the extra-loops in child objects. :obj: the instance of the class to be foldered :item: dict containing the properties of the object to be used by the template :index: current index of the item
#vtb
def inplace_filter(func, sequence):
    target = 0
    for source in xrange(len(sequence)):
        if func(sequence[source]):
            sequence[target] = sequence[source]
            target += 1
    del sequence[target:]
Like Python's filter() builtin, but modifies the sequence in place. Example: >>> l = range(10) >>> inplace_filter(lambda x: x > 5, l) >>> l [6, 7, 8, 9] Performance considerations: the function iterates over the sequence, shuffling surviving members down and deleting whatever top part of the sequence is left empty at the end, so sequences whose surviving members are predominantly at the bottom will be processed faster.
#vtb
def is_all_field_none(self):
    if self._BillingInvoice is not None:
        return False
    if self._DraftPayment is not None:
        return False
    if self._MasterCardAction is not None:
        return False
    if self._Payment is not None:
        return False
    if self._PaymentBatch is not None:
        return False
    if self._RequestResponse is not None:
        return False
    if self._ScheduleInstance is not None:
        return False
    if self._TabResultResponse is not None:
        return False
    if self._WhitelistResult is not None:
        return False
    return True
:rtype: bool
#vtb def biclique(self, xmin, xmax, ymin, ymax):
    Aside = sum((self.maximum_hline_bundle(y, xmin, xmax)
                 for y in range(ymin, ymax + 1)), [])
    Bside = sum((self.maximum_vline_bundle(x, ymin, ymax)
                 for x in range(xmin, xmax + 1)), [])
    return Aside, Bside
Compute a maximum-sized complete bipartite graph contained in the
rectangle defined by ``xmin, xmax, ymin, ymax``, where each chain of
qubits is either a vertical line or a horizontal line.

INPUTS:
    xmin, xmax, ymin, ymax: integers defining the bounds of a rectangle
    where we look for unbroken chains. These ranges include both
    endpoints.

OUTPUT:
    (A_side, B_side): a tuple of two lists containing lists of qubits.
    The lists found in ``A_side`` and ``B_side`` are chains of qubits.
    These lists of qubits are arranged so that

    >>> [zip(chain, chain[1:]) for chain in A_side]

    and

    >>> [zip(chain, chain[1:]) for chain in B_side]

    are lists of valid couplers.
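An illustrative call, assuming an object ``busgraph`` exposing the method above; the rectangle bounds are arbitrary:

    # Maximum biclique within the 4x4 rectangle spanning (0,0)..(3,3).
    A_side, B_side = busgraph.biclique(0, 3, 0, 3)
    # Per the docstring, consecutive qubits in each chain form valid couplers:
    couplers = [list(zip(chain, chain[1:])) for chain in A_side + B_side]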
#vtb def _make_cmap(colors, position=None, bit=False):
    bit_rgb = np.linspace(0, 1, 256)
    if position is None:
        position = np.linspace(0, 1, len(colors))
    else:
        if len(position) != len(colors):
            sys.exit("position length must be the same as colors")
        elif position[0] != 0 or position[-1] != 1:
            sys.exit("position must start with 0 and end with 1")
    if bit:
        # Map 8-bit channel values [0, 255] into the arithmetic range [0, 1].
        colors = [tuple(bit_rgb[int(channel)] for channel in color)
                  for color in colors]
    palette = [(pos, (float(r), float(g), float(b), float(a)))
               for pos, (r, g, b, a) in zip(position, colors)]
    cmap = Colormap(*palette)
    return cmap
_make_cmap takes a list of tuples which contain RGBA values. The RGBA
values may either be in 8-bit [0 to 255] (in which case ``bit`` must be
set to True when called) or arithmetic [0 to 1] (default). _make_cmap
returns a cmap with equally spaced colors. Arrange your tuples so that
the first color is the lowest value for the colorbar and the last is
the highest. ``position`` contains values from 0 to 1 to dictate the
location of each color.
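A usage sketch; the colors are invented, and the RGBA-tuple format follows the unpacking in the code above:

    # Blue -> white -> red colormap with explicit, evenly spaced stops.
    colors = [(0.0, 0.0, 1.0, 1.0),   # blue
              (1.0, 1.0, 1.0, 1.0),   # white
              (1.0, 0.0, 0.0, 1.0)]   # red
    cmap = _make_cmap(colors, position=[0.0, 0.5, 1.0])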
#vtb def fromOPEndpointURL(cls, op_endpoint_url):
    service = cls()
    service.server_url = op_endpoint_url
    service.type_uris = [OPENID_IDP_2_0_TYPE]
    return service
Construct an OP-Identifier OpenIDServiceEndpoint object for a given
OP Endpoint URL

@param op_endpoint_url: The URL of the endpoint
@rtype: OpenIDServiceEndpoint
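A minimal sketch; the endpoint URL is a placeholder:

    endpoint = OpenIDServiceEndpoint.fromOPEndpointURL("https://op.example.com/endpoint")
    assert endpoint.server_url == "https://op.example.com/endpoint"
    assert OPENID_IDP_2_0_TYPE in endpoint.type_uris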
#vtb def get_field_mappings(self, field):
    retdict = {}
    retdict['analyzed'] = False
    retdict['indexed'] = False
    for (key, val) in iteritems(field):
        if key in self.mappings:
            if (key == 'type' and (val == "long" or val == "integer"
                                   or val == "double" or val == "float")):
                val = "number"
            retdict[key] = val
            if key == 'index' and val != "no":
                retdict['indexed'] = True
            if val == "analyzed":
                retdict['analyzed'] = True
    return retdict
Converts ES field mappings to .kibana field mappings
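An illustrative translation, assuming ``type`` and ``index`` are present in ``self.mappings`` (the converter instance and input dict are invented, and the exact dictionary keys follow the reconstruction above):

    field = {"type": "long", "index": "not_analyzed"}
    converter.get_field_mappings(field)
    # -> {'analyzed': False, 'indexed': True, 'type': 'number', 'index': 'not_analyzed'}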
#vtb def bind(self, server, net=None, address=None):
    if _debug:
        NetworkServiceAccessPoint._debug("bind %r net=%r address=%r", server, net, address)
    if net in self.adapters:
        raise RuntimeError("already bound")
    adapter = NetworkAdapter(self, net)
    self.adapters[net] = adapter
    if _debug:
        NetworkServiceAccessPoint._debug("    - adapters[%r]: %r", net, adapter)
    if address and not self.local_address:
        self.local_adapter = adapter
        self.local_address = address
    bind(adapter, server)
Create a network adapter object and bind.
#vtb def extract_ast_species(ast):
    species_id = "None"
    species_label = "None"
    species = [
        (species_id, species_label)
        for (species_id, species_label) in ast.species
        if species_id
    ]
    if len(species) == 1:
        (species_id, species_label) = species[0]
        if not species_id:
            species_id = "None"
            species_label = "None"
    log.debug(f"AST Species: {ast.species}  Species: {species}  SpeciesID: {species_id}")
    return (species_id, species_label)
Extract species from ast.species set of tuples (id, label)
#vtb def accepts(*argtypes, **kwargtypes):
    theseargtypes = [T.TypeFactory(a) for a in argtypes]
    thesekwargtypes = {k: T.TypeFactory(a) for k, a in kwargtypes.items()}
    def _decorator(func):
        f = func.__wrapped__ if hasattr(func, "__wrapped__") else func
        try:
            argtypes = inspect.getcallargs(f, *theseargtypes, **thesekwargtypes)
            argtypes = {k: v if issubclass(type(v), T.Type) else T.Constant(v)
                        for k, v in argtypes.items()}
        except TypeError:
            raise E.ArgumentTypeError("Invalid argument specification to @accepts in %s" % func.__qualname__)
        kwargname = U.get_func_kwargs_name(func)
        if kwargname in argtypes.keys():
            argtypes[kwargname] = T.KeywordArguments()
        posargname = U.get_func_posargs_name(func)
        if posargname in argtypes.keys():
            argtypes[posargname] = T.PositionalArguments()
        if U.has_fun_prop(func, "argtypes"):
            raise ValueError("Cannot set argument types twice")
        U.set_fun_prop(func, "argtypes", argtypes)
        return _wrap(func)
    return _decorator
A function decorator to specify argument types of the function.

Types may be specified either in the order that they appear in the
function or via keyword arguments (just as if you were calling the
function).

Example usage:

| @accepts(Positive0)
| def square_root(x):
|     ...
#vtb def add_at(self, moment: float, fn_process: Callable, *args: Any, **kwargs: Any):
    delay = moment - self.now()
    if delay < 0.0:
        raise ValueError(
            f"The given moment to start the process ({moment:f}) is in the past "
            f"(now is {self.now():f})."
        )
    return self.add_in(delay, fn_process, *args, **kwargs)
Adds a process to the simulation, scheduled to start at the given exact
time on the simulated clock. Moments in the past relative to the current
simulated time are forbidden and raise a ValueError. See method add()
for more details.
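A hedged usage sketch; the simulator instance and the process signature are assumptions:

    def greeter(sim):
        print("hello at t =", sim.now())

    sim.add_at(10.0, greeter)   # starts greeter exactly at t = 10.0;
                                # raises ValueError if 10.0 is already in the past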
#vtb def m2i(self, pkt, s):
    diff_tag, s = BER_tagging_dec(s, hidden_tag=self.ASN1_tag,
                                  implicit_tag=self.implicit_tag,
                                  explicit_tag=self.explicit_tag,
                                  safe=self.flexible_tag)
    if diff_tag is not None:
        if self.implicit_tag is not None:
            self.implicit_tag = diff_tag
        elif self.explicit_tag is not None:
            self.explicit_tag = diff_tag
    codec = self.ASN1_tag.get_codec(pkt.ASN1_codec)
    if self.flexible_tag:
        return codec.safedec(s, context=self.context)
    else:
        return codec.dec(s, context=self.context)
The good thing about safedec is that it may still decode ASN1 even if there is a mismatch between the expected tag (self.ASN1_tag) and the actual tag; the decoded ASN1 object will simply be put into an ASN1_BADTAG object. However, safedec prevents the raising of exceptions needed for ASN1F_optional processing. Thus we use 'flexible_tag', which should be False with ASN1F_optional. Regarding other fields, we might need to know whether encoding went as expected or not. Noticeably, input methods from cert.py expect certain exceptions to be raised. Hence default flexible_tag is False.
#vtb def regex(pattern, flags: int = 0):
    def f(_, m):
        m.matches = [i for i in _.p.finditer(m.text or m.caption or "")]
        return bool(m.matches)
    return create("Regex", f, p=re.compile(pattern, flags))
Filter messages that match a given RegEx pattern.

Args:
    pattern (``str``):
        The RegEx pattern as string, it will be applied to the text of a
        message. When a pattern matches, all the `Match Objects
        <https://docs.python.org/3/library/re.html#match-objects>`_ are
        stored in the *matches* field of the
        :class:`Message <pyrogram.Message>` itself.

    flags (``int``, *optional*):
        RegEx flags.
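A usage sketch in Pyrogram's decorator style; the ``app`` instance and the handler are illustrative:

    import re

    @app.on_message(Filters.regex(r"\bhello\b", flags=re.IGNORECASE))
    def on_hello(client, message):
        # All Match Objects are available on the message itself.
        print(message.matches[0].group(0))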
#vtb def create_aggregator(self, subordinates):
    if not isinstance(subordinates, list):
        raise TypeError("subordinates can only be an instance of type list")
    for a in subordinates[:10]:
        # Only the first few elements of the list are type-checked.
        if not isinstance(a, IEventSource):
            raise TypeError("array can only contain objects of type IEventSource")
    result = self._call("createAggregator", in_p=[subordinates])
    result = IEventSource(result)
    return result
Creates an aggregator event source, collecting events from multiple
sources. This way a single listener can listen for events coming from
multiple sources, using a single blocking :py:func:`get_event` on the
returned aggregator.

in subordinates of type :class:`IEventSource`
    Subordinate event source this one aggregates.

return result of type :class:`IEventSource`
    Event source aggregating passed sources.
#vtb def write_json(json_obj, filename, mode="w", print_pretty=True):
    with open(filename, mode) as filey:
        if print_pretty:
            filey.writelines(print_json(json_obj))
        else:
            filey.writelines(json.dumps(json_obj))
    return filename
write_json will (optionally, pretty print) a json object to file

Parameters
==========
json_obj: the dict to print to json
filename: the output file to write to
mode: the mode with which to open the output file (default "w")
print_pretty: if True, will use nicer formatting
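A minimal round-trip sketch (the file name and dict are arbitrary):

    import json

    path = write_json({"name": "demo", "count": 3}, "demo.json")
    with open(path) as f:
        data = json.load(f)   # {'name': 'demo', 'count': 3}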
#vtb def make_mujoco_env(env_id, seed, reward_scale=1.0):
    rank = MPI.COMM_WORLD.Get_rank()
    myseed = seed + 1000 * rank if seed is not None else None
    set_global_seeds(myseed)
    env = gym.make(env_id)
    logger_path = None if logger.get_dir() is None else os.path.join(logger.get_dir(), str(rank))
    env = Monitor(env, logger_path, allow_early_resets=True)
    env.seed(seed)
    if reward_scale != 1.0:
        from baselines.common.retro_wrappers import RewardScaler
        env = RewardScaler(env, reward_scale)
    return env
Create a wrapped, monitored gym.Env for MuJoCo.
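An illustrative call; the environment id assumes a MuJoCo gym registration is installed:

    env = make_mujoco_env("HalfCheetah-v2", seed=0, reward_scale=0.1)
    obs = env.reset()
    obs, reward, done, info = env.step(env.action_space.sample())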
#vtb def is_finished(self):
    if self._total_time > self._global_time_limit:
        logger.warning("Exceeded global time limit {} / {}".format(
            self._total_time, self._global_time_limit))
        return True
    trials_done = all(trial.is_finished() for trial in self._trials)
    return trials_done and self._search_alg.is_finished()
Returns whether all trials have finished running.