code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def request_uri(self):
    """Absolute path, including the query string when one is present."""
    uri = self.path if self.path else '/'
    if self.query is None:
        return uri
    return uri + '?' + self.query
Absolute path including the query string.
def update_time_range(form_data):
    """Move legacy ``since``/``until`` keys into a single ``time_range`` key."""
    if 'since' not in form_data and 'until' not in form_data:
        return
    since = form_data.pop('since', '') or ''
    until = form_data.pop('until', '') or ''
    form_data['time_range'] = '{} : {}'.format(since, until)
Move since and until to time_range.
def get_base_branch(cherry_pick_branch):
    """Return the maintenance branch encoded in a cherry_picker branch name.

    E.g. returns '2.7' for 'backport-1a2b3c4-2.7'.  Raises ValueError when
    the name is not of a form cherry_picker would have created.
    """
    prefix, sha, base_branch = cherry_pick_branch.split("-", 2)
    if prefix != "backport":
        raise ValueError(
            'branch name is not prefixed with "backport-". Is this a cherry_picker branch?'
        )
    if re.match("[0-9a-f]{7,40}", sha) is None:
        raise ValueError(f"branch name has an invalid sha: {sha}")
    # Both helpers raise when the sha / version are not recognized.
    validate_sha(sha)
    version_from_branch(base_branch)
    return base_branch
Return '2.7' from 'backport-sha-2.7'. Raises ValueError if the specified branch name is not of a form that cherry_picker would have created.
def table_columns(self):
    """Yield rows describing the columns of tables in the current database."""
    query = self.TABLE_COLUMNS_QUERY % self.database
    with self.conn.cursor() as cur:
        cur.execute(query)
        yield from cur
Yields column names.
def conn_aws(cred, crid):
    """Establish a libcloud EC2 connection and map it to its connection id.

    Aborts with a user-facing message on SSL or credential errors.
    """
    driver = get_driver(Provider.EC2)
    try:
        aws_obj = driver(
            cred['aws_access_key_id'],
            cred['aws_secret_access_key'],
            region=cred['aws_default_region'],
        )
    except SSLError as err:
        abort_err("\r SSL Error with AWS: {}".format(err))
    except InvalidCredsError as err:
        abort_err("\r Error with AWS Credentials: {}".format(err))
    return {crid: aws_obj}
Establish connection to AWS service.
def _create_skt(self):
    """Create and bind the authentication socket.

    IPv6 is used when the configured address contains a ':'.
    Raises BindException when the bind fails.
    """
    log.debug('Creating the auth socket')
    family = socket.AF_INET6 if ':' in self.auth_address else socket.AF_INET
    self.socket = socket.socket(family, socket.SOCK_STREAM)
    try:
        self.socket.bind((self.auth_address, self.auth_port))
    except socket.error as msg:
        error_string = 'Unable to bind (auth) to port {} on {}: {}'.format(
            self.auth_port, self.auth_address, msg)
        log.error(error_string, exc_info=True)
        raise BindException(error_string)
Create the authentication socket.
def binary_float_to_decimal_float(number: Union[float, str]) -> float:
    """Convert a binary fraction (e.g. 0.101) to its decimal value (0.625).

    :param number: Binary floating point, as float or string.
    :return: Decimal floating point representation.
    """
    if isinstance(number, str):
        n_sign = -1 if number[0] == '-' else 1
    elif isinstance(number, float):
        n_sign = np.sign(number)
        number = str(number)
    # Sum each fractional bit weighted by the matching negative power of two.
    deci = 0
    for position, digit in enumerate(number.split('.')[-1]):
        deci += float(digit) / 2 ** (position + 1)
    return deci * n_sign
Convert binary floating point to decimal floating point. :param number: Binary floating point. :return: Decimal floating point representation of binary floating point.
def coerce(self, value, resource):
    """Return a dict mapping each sub-resource name to its solved value.

    Arguments
    ---------
    value : ?
        The value to get some resources from.
    resource : dataql.resources.Object
        Holds the sub-resources to solve against ``value``.

    Returns
    -------
    dict
        Keys are the resources' ``name`` attributes, values the solved values.
    """
    result = {}
    for sub in resource.resources:
        result[sub.name] = self.registry.solve_resource(value, sub)
    return result
Get a dict with attributes from ``value``. Arguments --------- value : ? The value to get some resources from. resource : dataql.resources.Object The ``Object`` object used to obtain this value from the original one. Returns ------- dict A dictionary containing the wanted resources for the given value. Key are the ``name`` attributes of the resources, and the values are the solved values.
def get_details(self, ids):
    """Locu Venue Details API call wrapper.

    Args:
        ids: a single venue id, or a list of ids (only the first 5 are used).
    """
    if isinstance(ids, list):
        id_param = ';'.join(ids[:5]) + '/'
    else:
        id_param = str(ids) + '/'
    header, content = self._http_request(id_param)
    resp = json.loads(content)
    if not self._is_http_response_ok(header):
        error = resp.get('error_message', 'Unknown Error')
        raise HttpException(header.status, header.reason, error)
    return resp
Locu Venue Details API Call Wrapper Args: list of ids : ids of a particular venues to get insights about. Can process up to 5 ids
def emotes(self, emotes):
    """Set the emotes from the value of the emotes tag.

    :param emotes: the key of the emotes tag
    :type emotes: :class:`str`
    :returns: None
    """
    if emotes is None:
        self._emotes = []
    else:
        self._emotes = [Emote.from_str(estr) for estr in emotes.split('/')]
Set the emotes :param emotes: the key of the emotes tag :type emotes: :class:`str` :returns: None :rtype: None :raises: None
def find_default_container(builder, default_container=None, use_biocontainers=None):
    """Default finder for default containers.

    Returns ``default_container`` unchanged when given; otherwise, when
    ``use_biocontainers`` is set, derives one from the software requirements.
    """
    if default_container or not use_biocontainers:
        return default_container
    return get_container_from_software_requirements(use_biocontainers, builder)
Default finder for default containers.
def add_data_from_jsonp(self, data_src, data_name='json_data',
                        series_type="map", name=None, **kwargs):
    """Add data directly from an https (JSONP) source.

    ``data_src`` is the https link serving the data as JSONP.
    """
    self.jsonp_data_flag = True
    self.jsonp_data_url = json.dumps(data_src)
    # 'data' would clash with the regular data attribute; prefix it.
    if data_name == 'data':
        data_name = 'json_' + data_name
    self.jsonp_data = data_name
    self.add_data_set(RawJavaScriptText(data_name), series_type,
                      name=name, **kwargs)
add data directly from a https source the data_src is the https link for data using jsonp
def remove(self, observableElement):
    """Remove an observable element if it is registered; otherwise do nothing.

    :param str observableElement: the name of the observable element
    """
    if observableElement not in self._observables:
        return
    self._observables.remove(observableElement)
Remove an observable element. :param str observableElement: the name of the observable element
def create_job(self, job_template_uri):
    """Create a job from the given job template URI and return its results."""
    payload = json.dumps({'jobTemplateUri': job_template_uri})
    endpoint = self._build_url('jobs')
    headers = {'Content-Type': 'application/json'}
    data = self._query_api('POST', endpoint, None, headers, payload)
    return data['results']
Creates a job
def append(self, node):
    """Append a new child node here, or recurse into unfinished children.

    The node is attached directly when its parent matches our key and this
    node has no elapsed time recorded yet (i.e. it is still running).
    """
    if node.parent == self.key and not self.elapsed_time:
        self.children.append(node)
        return
    for child in self.children:
        if not child.elapsed_time:
            child.append(node)
To append a new child.
def _should_trigger_abbreviation(self, buffer):
    """Return True when any configured abbreviation should trigger on the input.

    @param buffer Input buffer to be checked (as string)
    """
    for abbr in self.abbreviations:
        if self.__checkInput(buffer, abbr):
            return True
    return False
Checks whether, based on the settings for the abbreviation and the given input, the abbreviation should trigger. @param buffer Input buffer to be checked (as string)
def is_option(value, *options):
    """Check that ``value`` is one of a set of string options.

    >>> vtor = Validator()
    >>> vtor.check('option("yoda", "jedi")', 'yoda')
    'yoda'

    :raises VdtTypeError: when ``value`` is not a string.
    :raises VdtValueError: when ``value`` is not one of ``options``.
    """
    if not isinstance(value, string_types):
        raise VdtTypeError(value)
    # Idiom fix: ``value not in options`` instead of ``not value in options``.
    if value not in options:
        raise VdtValueError(value)
    return value
This check matches the value to any of a set of options. >>> vtor = Validator() >>> vtor.check('option("yoda", "jedi")', 'yoda') 'yoda' >>> vtor.check('option("yoda", "jedi")', 'jed') # doctest: +SKIP Traceback (most recent call last): VdtValueError: the value "jed" is unacceptable. >>> vtor.check('option("yoda", "jedi")', 0) # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type.
def _logging_callback(level, domain, message, data):
    """Forward a libgphoto2 logging record to Python's standard logging.

    :param level: libgphoto2 logging level
    :param domain: component the message originates from
    :param message: logging message
    :param data: other data in the logging record (unused)
    """
    domain_name = ffi.string(domain).decode()
    text = ffi.string(message).decode()
    logger = LOGGER.getChild(domain_name)
    # Unknown libgphoto2 levels are silently dropped.
    if level not in LOG_LEVELS:
        return
    logger.log(LOG_LEVELS[level], text)
Callback that outputs libgphoto2's logging message via Python's standard logging facilities. :param level: libgphoto2 logging level :param domain: component the message originates from :param message: logging message :param data: Other data in the logging record (unused)
def zoomset_cb(self, setting, value, channel):
    """Update the zoom text in the info panel when the main window is zoomed."""
    if not self.gui_up:
        return
    info = channel.extdata._info_info
    if info is None:
        return
    scale_x, scale_y = value
    if scale_x == scale_y:
        text = self.fv.scale2text(scale_x)
    else:
        text = "X: %s Y: %s" % (self.fv.scale2text(scale_x),
                                self.fv.scale2text(scale_y))
    info.winfo.zoom.set_text(text)
This callback is called when the main window is zoomed.
def redirect_ext(to, params_=None, anchor_=None, permanent_=False,
                 args=None, kwargs=None):
    """Advanced redirect which can include GET parameters and an anchor."""
    cls = HttpResponsePermanentRedirect if permanent_ else HttpResponseRedirect
    return cls(resolve_url_ext(to, params_, anchor_, args, kwargs))
Advanced redirect which can includes GET-parameters and anchor.
def _find_fld_pkt_val(self, pkt, val):
    """Given a Packet instance `pkt` and the value `val` to be set, return
    the Field subclass to be used, and the updated `val` if necessary.
    """
    fld = self._iterate_fields_cond(pkt, val, True)
    # NOTE(review): when `val` still equals the packet's default and the field
    # was never explicitly set, the default is rewritten to the chosen field's
    # default — intent inferred from scapy's MultipleTypeField; confirm.
    dflts_pkt = pkt.default_fields
    if val == dflts_pkt[self.name] and self.name not in pkt.fields:
        dflts_pkt[self.name] = fld.default
        val = fld.default
    return fld, val
Given a Packet instance `pkt` and the value `val` to be set, returns the Field subclass to be used, and the updated `val` if necessary.
def unregister_child(self, child):
    """Unregister an existing child no longer owned by the current instance.

    :param child: The child instance.
    """
    # Raises ValueError when the child was never registered.
    idx = self._children.index(child)
    del self._children[idx]
    # Stop listening for the child's close event.
    child.on_closed.disconnect(self.unregister_child)
Unregister an existing child that is no longer to be owned by the current instance. :param child: The child instance.
def balance(self):
    """Pad records or shapes with empty/null entries until the counts match,
    keeping all three files in sync."""
    # Pad with null geometry until shapes catch up with records.
    while self.shpNum < self.recNum:
        self.null()
    # Pad with empty attribute records until records catch up with shapes.
    while self.recNum < self.shpNum:
        self.record()
Adds corresponding empty attributes or null geometry records depending on which type of record was created to make sure all three files are in synch.
def _reset(self, **kwargs):
    """Reset the object's attributes.

    Accepts servers as either unflattened or flattened UUID strings or
    Server objects.
    """
    super(Tag, self)._reset(**kwargs)
    self._api_name = self.name
    # NOTE(review): membership is tested on self.servers but the value is read
    # from kwargs['servers'] — looks inconsistent; confirm against callers.
    if 'server' in self.servers:
        self.servers = kwargs['servers']['server']
    # Normalize plain UUID strings into unpopulated Server objects.
    if self.servers and isinstance(self.servers[0], six.string_types):
        self.servers = [Server(uuid=server, populated=False) for server in self.servers]
Reset the objects attributes. Accepts servers as either unflattened or flattened UUID strings or Server objects.
def cleanup(context):
    """Remove and recreate work_dir, artifact_dir and task_log_dir between runs.

    Args:
        context (scriptworker.context.Context): the scriptworker context.
    """
    for name in ('work_dir', 'artifact_dir', 'task_log_dir'):
        path = context.config[name]
        if os.path.exists(path):
            log.debug("rm({})".format(path))
            rm(path)
        makedirs(path)
Clean up the work_dir and artifact_dir between task runs, then recreate. Args: context (scriptworker.context.Context): the scriptworker context.
def InstallTemplatePackage():
    """Call pip to install the grr-response-templates package matching the
    installed grr-response-core major.minor version."""
    virtualenv_bin = os.path.dirname(sys.executable)
    extension = os.path.splitext(sys.executable)[1]
    pip = "%s/pip%s" % (virtualenv_bin, extension)
    core_version = pkg_resources.get_distribution("grr-response-core").version
    major_minor_version = ".".join(core_version.split(".")[0:2])
    subprocess.check_call([
        sys.executable, pip, "install", "--upgrade", "-f",
        "https://storage.googleapis.com/releases.grr-response.com/index.html",
        "grr-response-templates==%s.*" % major_minor_version
    ])
Call pip to install the templates.
def spec(self):
    """Return a SourceSpec describing this source."""
    from ambry_sources.sources import SourceSpec
    info = self.dict
    info['url'] = self.url
    return SourceSpec(**info)
Return a SourceSpec to describe this source
def figure(size=(8, 8), *args, **kwargs):
    """Create a pyplot figure.

    Parameters
    ----------
    size : 2-tuple
        Size of the view window in inches.
    *args, **kwargs
        Forwarded to ``plt.figure``.

    Returns
    -------
    pyplot figure
        The current figure.
    """
    return plt.figure(figsize=size, *args, **kwargs)
Creates a figure. Parameters ---------- size : 2-tuple size of the view window in inches args : list extra args passed to the pyplot figure kwargs : list keyword args passed to the pyplot figure Returns ------- pyplot figure the current figure
def report_stderr(host, stderr):
    """Print a process's stderr lines, if any, after a host banner.

    :param host: the host where the process is running
    :type host: str
    :param stderr: the std error of that process
    :type stderr: paramiko.channel.Channel
    """
    lines = stderr.readlines()
    if not lines:
        return
    print("STDERR from {host}:".format(host=host))
    for line in lines:
        print(line.rstrip(), file=sys.stderr)
Take a stderr and print its lines to output if lines are present. :param host: the host where the process is running :type host: str :param stderr: the std error of that process :type stderr: paramiko.channel.Channel
def lazy_result(f):
    """Decorate a click callback so its result is a lazily-evaluated LocalProxy."""
    @wraps(f)
    def decorated(ctx, param, value):
        # Defer the actual call until the proxy is first used.
        return LocalProxy(lambda: f(ctx, param, value))
    return decorated
Decorate function to return LazyProxy.
def get_network(self, org, segid):
    """Return the given network from DCNM, or None when the lookup fails.

    :param org: name of organization.
    :param segid: segmentation id of the network.
    """
    network_info = {
        'organizationName': org,
        'partitionName': self._part_name,
        'segmentId': segid,
    }
    res = self._get_network(network_info)
    # Truthiness check: error responses may be falsy objects.
    if res and res.status_code in self._resp_ok:
        return res.json()
    return None
Return given network from DCNM. :param org: name of organization. :param segid: segmentation id of the network.
def getFixedStarList(IDs, date):
    """Return a FixedStarList for the given star IDs at the given date."""
    return FixedStarList([getFixedStar(ID, date) for ID in IDs])
Returns a list of fixed stars.
def current_state(self):
    """Return a ``field -> value`` dict of the current state of the instance.

    Both ``name`` and ``attname`` (e.g. ``user`` and ``user_id``) are
    included for each local field.
    """
    field_names = set()
    # Fix: list comprehensions were previously (mis)used purely for their
    # side effects; use a plain loop instead.
    for field in self._meta.local_fields:
        field_names.add(field.name)
        field_names.add(field.attname)
    return {name: getattr(self, name) for name in field_names}
Returns a ``field -> value`` dict of the current state of the instance.
def get_random_name():
    """Return a random lowercase name."""
    chars = []
    current_value = random.randint(1, 2 ** 8 - 1)
    while current_value > 0:
        offset = current_value % 26
        current_value -= random.randint(1, 26)
        chars.append(chr(offset + ord('a')))
    name = ''.join(chars)
    assert re.match(VALID_PACKAGE_RE, name)
    return name
Return random lowercase name
def getAllKws(self):
    """Split all element keywords into two categories.

    :return: tuple ``(kws_ele, kws_bl)`` — magnetic elements and beamlines.
    """
    kws_ele, kws_bl = [], []
    skip = ('_prefixstr', '_epics')
    for ele in self.all_elements:
        if ele in skip:
            continue
        if self.getElementType(ele).lower() == u'beamline':
            kws_bl.append(ele)
        else:
            kws_ele.append(ele)
    return (kws_ele, kws_bl)
extract all keywords into two categories kws_ele: magnetic elements kws_bl: beamline elements return (kws_ele, kws_bl)
def is_delimiter(line):
    """True if a line consists only of a single repeated punctuation character."""
    if not line:
        return False
    first = line[0]
    return first in punctuation and first * len(line) == line
True if a line consists only of a single punctuation character.
def get_cluster_interfaces(cluster, extra_cond=lambda nic: True):
    """Get the (device, name) pairs of the cluster NICs matching a criteria.

    Only mountable, non-management Ethernet interfaces that also satisfy
    ``extra_cond`` are returned, sorted.

    Args:
        cluster(str): the cluster to consider
        extra_cond(lambda): boolean lambda that takes the nic (dict)
    """
    def keep(nic):
        return (nic['mountable']
                and nic['interface'] == 'Ethernet'
                and not nic['management']
                and extra_cond(nic))

    return sorted((nic['device'], nic['name'])
                  for nic in get_nics(cluster) if keep(nic))
Get the network interfaces names corresponding to a criteria. Note that the cluster is passed (not the individual node names), thus it is assumed that all nodes in a cluster have the same interface names and the same configuration. In addition to ``extra_cond``, only the mountable and Ethernet interfaces are returned. Args: cluster(str): the cluster to consider extra_cond(lambda): boolean lambda that takes the nic(dict) as parameter
def lp10(self, subset_k, subset_p, weights={}):
    """Force reactions in K above epsilon while minimizing support of P.

    This program forces reactions in subset K to attain flux > epsilon
    while minimizing the sum of absolute flux values for reactions in
    subset P (L1-regularization).
    """
    # NOTE(review): mutable default argument; safe only because it is never
    # mutated here.
    if self._z is None:
        self._add_minimization_vars()
    # Split K by flip status: flipped reactions are forced negative instead.
    positive = set(subset_k) - self._flipped
    negative = set(subset_k) & self._flipped
    v = self._v.set(positive)
    cs = self._prob.add_linear_constraints(v >= self._epsilon)
    self._temp_constr.extend(cs)
    v = self._v.set(negative)
    cs = self._prob.add_linear_constraints(v <= -self._epsilon)
    self._temp_constr.extend(cs)
    # Maximizing the negative weighted sum == minimizing the weighted L1
    # support of P.
    self._prob.set_objective(self._z.expr(
        (rxnid, -weights.get(rxnid, 1)) for rxnid in subset_p))
    self._solve()
Force reactions in K above epsilon while minimizing support of P. This program forces reactions in subset K to attain flux > epsilon while minimizing the sum of absolute flux values for reactions in subset P (L1-regularization).
def run(self, resources):
    """Run the flash step over a jlink connection.

    Args:
        resources (dict): A dictionary containing the required resources that
            we needed access to in order to perform this step.

    Raises:
        ArgumentError: when the connection is not a jlink port.
    """
    port = resources['connection']._port
    if not port.startswith('jlink'):
        # Bug fix: this previously referenced an undefined ``args`` variable,
        # raising NameError instead of the intended ArgumentError.
        raise ArgumentError("FlashBoardStep is currently only possible through jlink",
                            invalid_port=port)
    hwman = resources['connection']
    debug = hwman.hwman.debug(self._debug_string)
    debug.flash(self._file)
Runs the flash step Args: resources (dict): A dictionary containing the required resources that we needed access to in order to perform this step.
def new_as_dict(self, raw=True, vars=None):
    """Convert an INI file to a nested ``{section: {option: value}}`` dict.

    Values are unrepr'd into Python objects; invalid values raise ValueError.
    """
    result = {}
    for section in self.sections():
        section_dict = result.setdefault(section, {})
        for option in self.options(section):
            value = self.get(section, option, raw=raw, vars=vars)
            try:
                value = cherrypy.lib.reprconf.unrepr(value)
            except Exception:
                x = sys.exc_info()[1]
                msg = ("Config error in section: %r, option: %r, "
                       "value: %r. Config values must be valid Python." %
                       (section, option, value))
                raise ValueError(msg, x.__class__.__name__, x.args)
            section_dict[option] = value
    return result
Convert an INI file to a dictionary
def run_crbox(self, spstring, form, output="", wavecat="INDEF",
              lowave=0, hiwave=30000):
    """Calcspec has a bug; use countrate instead, forcing it to use a box
    function of uniform transmission as the obsmode.
    """
    # Fix: local was named ``range``, shadowing the builtin.
    wave_range = hiwave - lowave
    midwave = wave_range / 2.0
    iraf.countrate(spectrum=spstring, magnitude="",
                   instrument="box(%f,%f)" % (midwave, wave_range),
                   form=form, wavecat=wavecat, output=output)
Calcspec has a bug. We will use countrate instead, and force it to use a box function of uniform transmission as the obsmode.
def name_for_scalar_relationship(base, local_cls, referred_cls, constraint):
    """Name scalar relationships ``<referred_class>_ref`` to avoid collisions."""
    return referred_cls.__name__.lower() + "_ref"
Overriding naming schemes.
def _init_alphabet_from_tokens(self, tokens):
    """Initialize the alphabet with every character of every token plus escapes."""
    chars = set()
    for token in tokens:
        chars.update(token)
    self._alphabet = chars | _ESCAPE_CHARS
Initialize alphabet from an iterable of token or subtoken strings.
def set_item(self, key, value):
    """Set the item by key: update the existing row in place, or append a
    new row when the key is not present yet."""
    existing = list(self.keys())
    try:
        col = existing.index(key)
    except ValueError:
        # New key: append a fresh entry, then fill in its value.
        self.set_value(0, len(self), str(key))
        self.set_value(1, len(self) - 1, str(value))
    else:
        self.set_value(1, col, str(value))
Sets the item by key, and refills the table sorted.
def list_attr(self, recursive=False):
    """Gets all attributes from the symbol.

    Example
    -------
    >>> data = mx.sym.Variable('data', attr={'mood': 'angry'})
    >>> data.list_attr()
    {'mood': 'angry'}

    Returns
    -------
    ret : Dict of str to str
        A dictionary mapping attribute keys to values.
    """
    if recursive:
        raise DeprecationWarning("Symbol.list_attr with recursive=True has been deprecated. "
                                 "Please use attr_dict instead.")
    size = mx_uint()
    pairs = ctypes.POINTER(ctypes.c_char_p)()
    f_handle = _LIB.MXSymbolListAttrShallow
    check_call(f_handle(self.handle, ctypes.byref(size), ctypes.byref(pairs)))
    # The C API returns a flat [key, value, key, value, ...] array.
    return {py_str(pairs[i * 2]): py_str(pairs[i * 2 + 1]) for i in range(size.value)}
Gets all attributes from the symbol. Example ------- >>> data = mx.sym.Variable('data', attr={'mood': 'angry'}) >>> data.list_attr() {'mood': 'angry'} Returns ------- ret : Dict of str to str A dictionary mapping attribute keys to values.
def clear(self):
    """Remove all XRolloutItem children from the widget."""
    # Suppress signals and repaints while tearing children down.
    self.blockSignals(True)
    self.setUpdatesEnabled(False)
    for item in self.findChildren(XRolloutItem):
        item.setParent(None)
        item.deleteLater()
    self.setUpdatesEnabled(True)
    self.blockSignals(False)
Clears out all of the rollout items from the widget.
def subscribe(self, sender=None, iface=None, signal=None, object=None,
              arg0=None, flags=0, signal_fired=None):
    """Subscribe to matching D-Bus signals.

    ``signal_fired(sender, object, iface, signal, params)`` is invoked for
    every matching signal; an event loop is required to receive callbacks.

    Returns a Subscription usable as a context manager to unsubscribe later.
    """
    if signal_fired is None:
        callback = lambda *args: None
    else:
        def callback(con, sender, object, iface, signal, params):
            signal_fired(sender, object, iface, signal, params.unpack())
    return Subscription(self.con, sender, iface, signal, object, arg0, flags, callback)
Subscribes to matching signals. Subscribes to signals on connection and invokes signal_fired callback whenever the signal is received. To receive signal_fired callback, you need an event loop. https://github.com/LEW21/pydbus/blob/master/doc/tutorial.rst#setting-up-an-event-loop Parameters ---------- sender : string, optional Sender name to match on (unique or well-known name) or None to listen from all senders. iface : string, optional Interface name to match on or None to match on all interfaces. signal : string, optional Signal name to match on or None to match on all signals. object : string, optional Object path to match on or None to match on all object paths. arg0 : string, optional Contents of first string argument to match on or None to match on all kinds of arguments. flags : SubscriptionFlags, optional signal_fired : callable, optional Invoked when there is a signal matching the requested data. Parameters: sender, object, iface, signal, params Returns ------- Subscription An object you can use as a context manager to unsubscribe from the signal later. See Also -------- See https://developer.gnome.org/gio/2.44/GDBusConnection.html#g-dbus-connection-signal-subscribe for more information.
def validateAllServers(self):
    """Return status information about the ArcGIS Servers federated with
    Portal for ArcGIS."""
    url = self._url + "/servers/validate"
    params = {"f": "json"}
    # Bug fix: ``self._proxy_ur`` was a typo for ``self._proxy_url`` and
    # raised AttributeError whenever this method ran.
    return self._get(url=url,
                     param_dict=params,
                     proxy_port=self._proxy_port,
                     proxy_url=self._proxy_url)
This operation provides status information about a specific ArcGIS Server federated with Portal for ArcGIS. Parameters: serverId - unique id of the server
def create_xref(self):
    """Create Class, Method, String and Field crossreferences for all classes.

    With multiple DEX files, call this only after all DEX files have been
    added, otherwise the crossreferences might be wrong!
    """
    log.debug("Creating Crossreferences (XREF)")
    started = time.time()
    for klass in self._get_all_classes():
        self._create_xref(klass)
    log.info("End of creating cross references (XREF)")
    minutes, seconds = divmod(int(time.time() - started), 60)
    log.info("run time: {:0d}min {:02d}s".format(minutes, seconds))
Create Class, Method, String and Field crossreferences for all classes in the Analysis. If you are using multiple DEX files, this function must be called when all DEX files are added. If you call the function after every DEX file, the crossreferences might be wrong!
def _get_binned_arrays(self, wavelengths, flux_unit, area=None, vegaspec=None):
    """Sample the binned observation; return (wavelengths, fluxes) in user units."""
    x = self._validate_binned_wavelengths(wavelengths)
    y = self.sample_binned(wavelengths=x, flux_unit=flux_unit, area=area,
                           vegaspec=vegaspec)
    # Convert back to the caller's wavelength unit when a Quantity was given.
    if isinstance(wavelengths, u.Quantity):
        return x.to(wavelengths.unit, u.spectral()), y
    return x, y
Get binned observation in user units.
def rollback(self):
    """Abandon the current transaction.

    All messages published during the current transaction session are
    discarded by the remote server and must be published again. A new
    transaction session starts as soon as the command has been executed.
    """
    self._tx_active = False
    return self._channel.rpc_request(specification.Tx.Rollback())
Abandon the current transaction. Rollback all messages published during the current transaction session to the remote server. Note that all messages published during this transaction session will be lost, and will have to be published again. A new transaction session starts as soon as the command has been executed. :return:
def has_in_url_path(url, subs):
    """Test if any of `subs` strings is present in the `url` path."""
    path = urlparse.urlsplit(url).path
    # Idiom fix: generator instead of a materialized list inside any().
    return any(sub in path for sub in subs)
Test if any of `subs` strings is present in the `url` path.
async def finish_pairing(self, pin):
    """Finish the SRP pairing process using the device-provided PIN."""
    self.srp.step1(pin)
    pub_key, proof = self.srp.step2(self._atv_pub_key, self._atv_salt)
    # Sequence 0x03: send our public key and proof.
    msg = messages.crypto_pairing({
        tlv8.TLV_SEQ_NO: b'\x03',
        tlv8.TLV_PUBLIC_KEY: pub_key,
        tlv8.TLV_PROOF: proof})
    resp = await self.protocol.send_and_receive(
        msg, generate_identifier=False)
    pairing_data = _get_pairing_data(resp)
    atv_proof = pairing_data[tlv8.TLV_PROOF]
    log_binary(_LOGGER, 'Device', Proof=atv_proof)
    encrypted_data = self.srp.step3()
    # Sequence 0x05: exchange encrypted data and derive the credentials.
    msg = messages.crypto_pairing({
        tlv8.TLV_SEQ_NO: b'\x05',
        tlv8.TLV_ENCRYPTED_DATA: encrypted_data})
    resp = await self.protocol.send_and_receive(
        msg, generate_identifier=False)
    pairing_data = _get_pairing_data(resp)
    encrypted_data = pairing_data[tlv8.TLV_ENCRYPTED_DATA]
    return self.srp.step4(encrypted_data)
Finish pairing process.
def extract_changesets(objects):
    """Provide information about each changeset in an OpenStreetMap diff file.

    Parameters
    ----------
    objects : osc_decoder class
        A class containing OpenStreetMap object dictionaries.

    Returns
    -------
    changeset_collation : dict
        Dict of dicts keyed by changeset id, holding per-changeset metadata
        and the actions performed in that changeset.
    """
    def add_changeset_info(collation, axis, item):
        # Record (or overwrite) this changeset's metadata from the item.
        if axis not in collation:
            collation[axis] = {}
        first = collation[axis]
        first["id"] = axis
        first["username"] = item["username"]
        first["uid"] = item["uid"]
        first["timestamp"] = item["timestamp"]
        collation[axis] = first
    changeset_collation = {}
    for node in objects.nodes.values():
        _collate_data(changeset_collation, node['changeset'], node['action'])
        add_changeset_info(changeset_collation, node['changeset'], node)
    for way in objects.ways.values():
        _collate_data(changeset_collation, way['changeset'], way['action'])
        add_changeset_info(changeset_collation, way['changeset'], way)
    for relation in objects.relations.values():
        _collate_data(changeset_collation, relation['changeset'], relation['action'])
        add_changeset_info(changeset_collation, relation['changeset'], relation)
    return changeset_collation
Provides information about each changeset present in an OpenStreetMap diff file. Parameters ---------- objects : osc_decoder class A class containing OpenStreetMap object dictionaries. Returns ------- changeset_collation : dict A dictionary of dictionaries with each changeset as a separate key, information about each changeset as attributes in that dictionary, and the actions performed in the changeset as keys.
def _verify_password(self, raw_password, hashed_password):
    """Verify a plaintext password against its hash via the passlib context.

    Raises through PraetorianError when Praetorian was never initialized.
    """
    PraetorianError.require_condition(
        self.pwd_ctx is not None,
        "Praetorian must be initialized before this method is available",
    )
    return self.pwd_ctx.verify(raw_password, hashed_password)
Verifies that a plaintext password matches the hashed version of that password using the stored passlib password context
def _format_snapshots(snapshots: List[icontract._Snapshot],
                      prefix: Optional[str] = None) -> List[str]:
    """Format snapshots as reST.

    :param snapshots: snapshots capturing argument values before the call
    :param prefix: prefix to be prepended to the ``:OLD:`` directive
    :return: list of lines describing the snapshots
    """
    if not snapshots:
        return []
    header = ":OLD:" if prefix is None else ":{} OLD:".format(prefix)
    lines = [header]
    for snapshot in snapshots:
        text = _capture_as_text(capture=snapshot.capture)
        lines.append(" * :code:`.{}` = :code:`{}`".format(snapshot.name, text))
    return lines
Format snapshots as reST. :param snapshots: snapshots defined to capture the argument values of a function before the invocation :param prefix: prefix to be prepended to ``:OLD:`` directive :return: list of lines describing the snapshots
def serialize(self, data):
    """Return the data as a serialized JSON string (datetimes pre-converted).

    :param dict data: The data to serialize
    :rtype: str
    """
    prepared = self._serialize_datetime(data)
    return json.dumps(prepared, ensure_ascii=False)
Return the data as serialized string. :param dict data: The data to serialize :rtype: str
def Font(name=None, source="sys", italic=False, bold=False, size=20):
    """Unified font loading.

    :param name: system-font name or file path; None loads the default system font
    :type name: str
    :param source: "sys" for a system font, or "file" to load from a file
    :type source: str
    """
    assert source in ["sys", "file"]
    if not name:
        return pygame.font.SysFont(pygame.font.get_default_font(), size,
                                   bold=bold, italic=italic)
    if source == "sys":
        return pygame.font.SysFont(name, size, bold=bold, italic=italic)
    font = pygame.font.Font(name, size)
    font.set_italic(italic)
    font.set_bold(bold)
    return font
Unifies loading of fonts. :param name: name of system-font or filepath, if None is passed the default system-font is loaded :type name: str :param source: "sys" for system font, or "file" to load a file :type source: str
def load_model(
        self, the_metamodel, filename, is_main_model, encoding='utf-8',
        add_to_local_models=True):
    """Load a single model, or return the cached instance.

    Args:
        the_metamodel: the metamodel used to load the model
        filename: the model to be loaded (if not cached)
        is_main_model: whether this is the main model being loaded
        encoding: file encoding used when parsing
        add_to_local_models: also record the model in the local cache
    Returns:
        the loaded/cached model
    """
    if not self.local_models.has_model(filename):
        if self.all_models.has_model(filename):
            new_model = self.all_models.filename_to_model[filename]
        else:
            # Not cached anywhere yet: parse the file, resolving references
            # through this scope provider's callback.
            new_model = the_metamodel.internal_model_from_file(
                filename,
                pre_ref_resolution_callback=lambda
                other_model: self.pre_ref_resolution_callback(other_model),
                is_main_model=is_main_model, encoding=encoding)
            self.all_models.filename_to_model[filename] = new_model
        if add_to_local_models:
            self.local_models.filename_to_model[filename] = new_model
    assert self.all_models.has_model(filename)
    return self.all_models.filename_to_model[filename]
load a single model Args: the_metamodel: the metamodel used to load the model filename: the model to be loaded (if not cached) Returns: the loaded/cached model
def _non_reducing_slice(slice_):
    """Ensure that a slice doesn't reduce to a Series or scalar.

    Any user-passed `subset` should have this called on it so downstream
    code always works with DataFrames.
    """
    # A bare string/array/list selects columns: promote to a 2-d slice.
    kinds = (ABCSeries, np.ndarray, Index, list, str)
    if isinstance(slice_, kinds):
        slice_ = IndexSlice[:, slice_]

    def pred(part):
        # True when the part does *not* reduce a dimension,
        # e.g. slice(None) or a list of labels.
        return ((isinstance(part, slice) or is_list_like(part))
                and not isinstance(part, tuple))

    if not is_list_like(slice_):
        if not isinstance(slice_, slice):
            # A scalar: wrap twice so both axes keep their dimension.
            slice_ = [[slice_]]
        else:
            # slice(a, b, c) already keeps the dimension.
            slice_ = [slice_]
    else:
        # Wrap each reducing part in a list so it keeps its dimension.
        slice_ = [part if pred(part) else [part] for part in slice_]
    return tuple(slice_)
Ensure that a slice doesn't reduce to a Series or Scalar. Any user-passed `subset` should have this called on it to make sure we're always working with DataFrames.
def add_type(cls, typ):
    """Register a type for jb_reftrack nodes.

    A type specifies how the reference should be handled. Types must be
    added before the plugin is initialized.

    :param typ: a new type specifier, e.g. "asset"
    :type typ: str
    :raises: :class:`TypeError` if ``typ`` is not a string
    """
    if not isinstance(typ, basestring):
        raise TypeError("The type should be a string. But is %s" % type(typ))
    cls.types.append(typ)
Register a type for jb_reftrack nodes. A type specifies how the reference should be handled. For example the type shader will connect shaders with the parent when it the shaders are loaded. Default types are :data:`JB_ReftrackNode.types`. .. Note:: You have to add types before you initialize the plugin! :param typ: a new type specifier, e.g. \"asset\" :type typ: str :returns: None :rtype: None :raises: :class:`TypeError`
async def connect(self):
    """Create a new asynchronous connection to the RabbitMQ instance.

    Connects, opens a channel, optionally sets the global QoS and declares
    the configured exchange. After that, the client is ready to publish or
    consume messages.

    :raises exceptions.StreamConnectionError: when initialization fails.
    """
    if self.connected or self.is_connecting:
        return
    self._is_connecting = True
    try:
        logger.info("Connecting to RabbitMQ...")
        self._transport, self._protocol = await aioamqp.connect(
            **self._connection_parameters)
        logger.info("Getting channel...")
        self._channel = await self._protocol.channel()
        if self._global_qos is not None:
            logger.info("Setting prefetch count on connection (%s)",
                        self._global_qos)
            await self._channel.basic_qos(0, self._global_qos, 1)
        logger.info("Connecting to exchange '%s (%s)'",
                    self._exchange_name, self._exchange_type)
        await self._channel.exchange(self._exchange_name, self._exchange_type)
    except Exception:
        # Fix: ``except (aioamqp.AmqpClosedConnection, Exception)`` was
        # redundant — Exception already subsumes it.
        logger.error("Error initializing RabbitMQ connection", exc_info=True)
        self._is_connecting = False
        raise exceptions.StreamConnectionError
    self._is_connecting = False
Create new asynchronous connection to the RabbitMQ instance. This will connect, declare exchange and bind itself to the configured queue. After that, client is ready to publish or consume messages. :return: Does not return anything.
def unsnip(tag=None, start=-1):
    """Retrieve a tagged or untagged snippet into the next IPython input cell."""
    import IPython
    ip = IPython.get_ipython()
    # Idiom fix: membership test directly on the dict instead of ``.keys()``,
    # and implicit truthiness instead of ``len(...) > 0``.
    if tag in _tagged_inputs:
        if _tagged_inputs[tag]:
            ip.set_next_input(_tagged_inputs[tag][start])
    elif _last_inputs:
        ip.set_next_input(_last_inputs[start])
This function retrieves a tagged or untagged snippet.
def is_reassignment_pending(self):
    """Return True if there are reassignment tasks pending in ZooKeeper."""
    in_progress_plan = self.zk.get_pending_plan()
    if not in_progress_plan:
        return False
    in_progress_partitions = in_progress_plan['partitions']
    self.log.info(
        'Previous re-assignment in progress for {count} partitions.'
        ' Current partitions in re-assignment queue: {partitions}'
        .format(
            count=len(in_progress_partitions),
            partitions=in_progress_partitions,
        )
    )
    return True
Return True if there are reassignment tasks pending.
def _enum_from_direction(direction):
    """Convert a direction string to its enum; ints pass through unchanged.

    Args:
        direction (str): One of Query.ASCENDING or Query.DESCENDING.

    Raises:
        ValueError: If ``direction`` is not a valid direction.
    """
    if isinstance(direction, int):
        return direction
    if direction == Query.ASCENDING:
        return enums.StructuredQuery.Direction.ASCENDING
    if direction == Query.DESCENDING:
        return enums.StructuredQuery.Direction.DESCENDING
    raise ValueError(
        _BAD_DIR_STRING.format(direction, Query.ASCENDING, Query.DESCENDING))
Convert a string representation of a direction to an enum. Args: direction (str): A direction to order by. Must be one of :attr:`~.firestore.Query.ASCENDING` or :attr:`~.firestore.Query.DESCENDING`. Returns: int: The enum corresponding to ``direction``. Raises: ValueError: If ``direction`` is not a valid direction.
def _draw_outer_connector(context, width, height):
    """Draw the outer connector for container states.

    Connectors for container states can be connected from the inside and the
    outside, so the connector is split in two parts: a rectangle on the
    inside and an arrow on the outside. This method draws the outer arrow.

    :param context: Cairo context
    :param float width: width of the connector
    :param float height: height of the connector
    """
    c = context
    # Proportions of the arrow tip and the gap between inner and outer parts.
    arrow_height = height / 2.5
    gap = height / 6.
    connector_height = (height - gap) / 2.
    # Relative path: move to the left edge, draw the top, the right side,
    # then the two arrow flanks, and close.
    c.rel_move_to(-width / 2., -gap / 2.)
    c.rel_line_to(width, 0)
    c.rel_line_to(0, -(connector_height - arrow_height))
    c.rel_line_to(-width / 2., -arrow_height)
    c.rel_line_to(-width / 2., arrow_height)
    c.close_path()
Draw the outer connector for container states Connector for container states can be connected from the inside and the outside. Thus the connector is split in two parts: A rectangle on the inside and an arrow on the outside. This method draws the outer arrow. :param context: Cairo context :param float port_size: The side length of the port
def _to_link_header(self, link): try: bucket, key, tag = link except ValueError: raise RiakError("Invalid link tuple %s" % link) tag = tag if tag is not None else bucket url = self.object_path(bucket, key) header = '<%s>; riaktag="%s"' % (url, tag) return header
Convert the link tuple to a link header string. Used internally.
def active_brokers(self):
    """Set of brokers that are neither inactive nor decommissioned."""
    result = set()
    for broker in six.itervalues(self.brokers):
        if not broker.inactive and not broker.decommissioned:
            result.add(broker)
    return result
Set of brokers that are not inactive or decommissioned.
def remove_message(self, message):
    """Remove a message from the batch; unknown messages are ignored."""
    if message in self.__messages:
        self.__messages.remove(message)
Remove a message from the batch
def build(self):
    """Generate a TermDocMatrix from data in parameters.

    Returns
    ----------
    term_doc_matrix : TermDocMatrix
        The object that this factory class builds.

    Raises
    ------
    CategoryTextIterNotSetError
        If no category/text iterable was provided.
    """
    if self._category_text_iter is None:
        raise CategoryTextIterNotSetError()
    nlp = self.get_nlp()
    # Lazily clean each raw text before parsing.
    category_document_iter = (
        (category, self._clean_function(raw_text))
        for category, raw_text in self._category_text_iter
    )
    # Parse with spaCy, skipping documents that are empty after cleaning.
    term_doc_matrix = self._build_from_category_spacy_doc_iter(
        (
            (category, nlp(text))
            for (category, text) in category_document_iter
            if text.strip() != ''
        )
    )
    return term_doc_matrix
Generate a TermDocMatrix from data in parameters. Returns ---------- term_doc_matrix : TermDocMatrix The object that this factory class builds.
def info(vm, info_type='all', key='uuid'):
    """Lookup info on a running kvm.

    vm : string
        vm to be targeted
    info_type : string [all|block|blockstats|chardev|cpus|kvm|pci|spice|version|vnc]
        info type to return
    key : string [uuid|alias|hostname]
        value type of 'vm' parameter

    CLI Example:

    .. code-block:: bash

        salt '*' vmadm.info 186da9ab-7392-4f55-91a5-b8f1fe770543
        salt '*' vmadm.info 186da9ab-7392-4f55-91a5-b8f1fe770543 vnc
        salt '*' vmadm.info nacl key=alias
        salt '*' vmadm.info nacl vnc key=alias
    """
    valid_info_types = ('all', 'block', 'blockstats', 'chardev', 'cpus',
                        'kvm', 'pci', 'spice', 'version', 'vnc')
    if info_type not in valid_info_types:
        return {'Error': 'Requested info_type is not available'}
    if key not in ('uuid', 'alias', 'hostname'):
        return {'Error': 'Key must be either uuid, alias or hostname'}
    vm = lookup('{0}={1}'.format(key, vm), one=True)
    if 'Error' in vm:
        return vm
    cmd = 'vmadm info {uuid} {type}'.format(uuid=vm, type=info_type)
    res = __salt__['cmd.run_all'](cmd)
    retcode = res['retcode']
    if retcode != 0:
        return {'Error': res['stderr'] if 'stderr' in res else _exit_status(retcode)}
    return salt.utils.json.loads(res['stdout'])
Lookup info on running kvm vm : string vm to be targeted info_type : string [all|block|blockstats|chardev|cpus|kvm|pci|spice|version|vnc] info type to return key : string [uuid|alias|hostname] value type of 'vm' parameter CLI Example: .. code-block:: bash salt '*' vmadm.info 186da9ab-7392-4f55-91a5-b8f1fe770543 salt '*' vmadm.info 186da9ab-7392-4f55-91a5-b8f1fe770543 vnc salt '*' vmadm.info nacl key=alias salt '*' vmadm.info nacl vnc key=alias
def require(*requirements, **kwargs):
    """Decorator that verifies requirements before calling the function.

    :param requirements: Requirement objects that should be verified.
    :param none_on_failure: If true, return ``None`` instead of letting
        ``requirement.require()`` raise a PrerequisiteFailedError.
    """
    none_on_failure = kwargs.get('none_on_failure', False)

    def inner(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            for req in requirements:
                if none_on_failure:
                    # Soft check: bail out quietly when unavailable.
                    # (getattr with a constant name is plain attribute access.)
                    if not req.is_available:
                        return None
                else:
                    # Hard check: the requirement raises its own error.
                    req.require()
            return f(*args, **kwargs)
        return wrapper
    return inner
Decorator that can be used to require requirements. :param requirements: List of requirements that should be verified :param none_on_failure: If true, does not raise a PrerequisiteFailedError, but instead returns None
def get_config(self, view=None):
    """Retrieve the service's configuration.

    @param view: View to materialize ('full' or 'summary')
    @return: 2-tuple (service config dictionary, role type configurations)
    """
    path = self._path() + '/config'
    # The old `view and dict(view=view) or None` idiom is fragile (it
    # breaks whenever the middle operand is falsy); use a conditional.
    params = dict(view=view) if view else None
    resp = self._get_resource_root().get(path, params=params)
    return self._parse_svc_config(resp, view)
Retrieve the service's configuration. Retrieves both the service configuration and role type configuration for each of the service's supported role types. The role type configurations are returned as a dictionary, whose keys are the role type name, and values are the respective configuration dictionaries. The 'summary' view contains strings as the dictionary values. The full view contains ApiConfig instances as the values. @param view: View to materialize ('full' or 'summary') @return: 2-tuple (service config dictionary, role type configurations)
def calibration_template(self):
    """Gather all information necessary to recreate calibration objects.

    :returns: dict -- template docs for the tone curve calibration and
        state dicts for each noise calibration stimulus
    """
    tone_doc = self.tone_calibrator.stimulus.templateDoc()
    noise_docs = [stim.stateDict() for stim in self.bs_calibrator.get_stims()]
    return {'tone_doc': tone_doc, 'noise_doc': noise_docs}
Gets the template documentation for the both the tone curve calibration and noise calibration :returns: dict -- all information necessary to recreate calibration objects
def startLoading(self):
    """Start loading this item for the batch."""
    if not super(XBatchItem, self).startLoading():
        return
    tree = self.treeWidget()
    if not isinstance(tree, XOrbTreeWidget):
        # Not attached to an orb tree: remove this batch item instead.
        self.takeFromTree()
        return
    tree._loadBatch(self, self.batch())
Starts loading this item for the batch.
def add_grammar(self, customization_id, grammar_name, grammar_file,
                content_type, allow_overwrite=None, **kwargs):
    """Add a grammar file to a custom language model.

    :param str customization_id: The customization ID (GUID) of the custom
        language model to be used for the request.
    :param str grammar_name: The name of the new grammar.
    :param str grammar_file: A plain text file that contains the grammar in
        the format specified by the `Content-Type` header (UTF-8).
    :param str content_type: The format (MIME type) of the grammar file.
    :param bool allow_overwrite: If `true`, overwrite an existing grammar
        with the same name; otherwise the request fails on a name clash.
    :param dict headers: A `dict` containing the request headers.
    :return: A `DetailedResponse` containing the result, headers and HTTP
        status code.
    :rtype: DetailedResponse
    """
    # All four positional arguments are mandatory.
    for arg_name, arg in (('customization_id', customization_id),
                          ('grammar_name', grammar_name),
                          ('grammar_file', grammar_file),
                          ('content_type', content_type)):
        if arg is None:
            raise ValueError(arg_name + ' must be provided')

    headers = {'Content-Type': content_type}
    if 'headers' in kwargs:
        headers.update(kwargs.get('headers'))
    headers.update(get_sdk_headers('speech_to_text', 'V1', 'add_grammar'))

    url = '/v1/customizations/{0}/grammars/{1}'.format(
        *self._encode_path_vars(customization_id, grammar_name))
    return self.request(
        method='POST',
        url=url,
        headers=headers,
        params={'allow_overwrite': allow_overwrite},
        data=grammar_file,
        accept_json=True)
Add a grammar. Adds a single grammar file to a custom language model. Submit a plain text file in UTF-8 format that defines the grammar. Use multiple requests to submit multiple grammar files. You must use credentials for the instance of the service that owns a model to add a grammar to it. Adding a grammar does not affect the custom language model until you train the model for the new data by using the **Train a custom language model** method. The call returns an HTTP 201 response code if the grammar is valid. The service then asynchronously processes the contents of the grammar and automatically extracts new words that it finds. This can take a few seconds to complete depending on the size and complexity of the grammar, as well as the current load on the service. You cannot submit requests to add additional resources to the custom model or to train the model until the service's analysis of the grammar for the current request completes. Use the **Get a grammar** method to check the status of the analysis. The service populates the model's words resource with any word that is recognized by the grammar that is not found in the model's base vocabulary. These are referred to as out-of-vocabulary (OOV) words. You can use the **List custom words** method to examine the words resource and use other words-related methods to eliminate typos and modify how words are pronounced as needed. To add a grammar that has the same name as an existing grammar, set the `allow_overwrite` parameter to `true`; otherwise, the request fails. Overwriting an existing grammar causes the service to process the grammar file and extract OOV words anew. Before doing so, it removes any OOV words associated with the existing grammar from the model's words resource unless they were also added by another resource or they have been modified in some way with the **Add custom words** or **Add a custom word** method. 
The service limits the overall amount of data that you can add to a custom model to a maximum of 10 million total words from all sources combined. Also, you can add no more than 30 thousand OOV words to a model. This includes words that the service extracts from corpora and grammars and words that you add directly. **See also:** * [Working with grammars](https://cloud.ibm.com/docs/services/speech-to-text/) * [Add grammars to the custom language model](https://cloud.ibm.com/docs/services/speech-to-text/). :param str customization_id: The customization ID (GUID) of the custom language model that is to be used for the request. You must make the request with credentials for the instance of the service that owns the custom model. :param str grammar_name: The name of the new grammar for the custom language model. Use a localized name that matches the language of the custom model and reflects the contents of the grammar. * Include a maximum of 128 characters in the name. * Do not include spaces, slashes, or backslashes in the name. * Do not use the name of an existing grammar or corpus that is already defined for the custom model. * Do not use the name `user`, which is reserved by the service to denote custom words that are added or modified by the user. :param str grammar_file: A plain text file that contains the grammar in the format specified by the `Content-Type` header. Encode the file in UTF-8 (ASCII is a subset of UTF-8). Using any other encoding can lead to issues when compiling the grammar or to unexpected results in decoding. The service ignores an encoding that is specified in the header of the grammar. :param str content_type: The format (MIME type) of the grammar file: * `application/srgs` for Augmented Backus-Naur Form (ABNF), which uses a plain-text representation that is similar to traditional BNF grammars. * `application/srgs+xml` for XML Form, which uses XML elements to represent the grammar. 
:param bool allow_overwrite: If `true`, the specified grammar overwrites an existing grammar with the same name. If `false`, the request fails if a grammar with the same name already exists. The parameter has no effect if a grammar with the same name does not already exist. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse
def get_object_detail(self, request, obj):
    """Handle GET requests for the details of the given object as JSON."""
    # Prefer the detail-specific field list when it is configured.
    display_fields = self.display_detail_fields or self.display_fields
    payload = self.serialize(obj, ['id'] + list(display_fields))
    return HttpResponse(self.json_dumps(payload),
                        content_type='application/json')
Handles get requests for the details of the given object.
def addSourceLocation(self, sourceLocationUri, weight):
    """Add a relevant source identified by its geographic location.

    @param sourceLocationUri: uri of the location where the sources should
        be geographically located
    @param weight: importance of the provided list of sources
        (typically in range 1 - 50)
    """
    assert isinstance(weight, (float, int)), "weight value has to be a positive or negative integer"
    entry = {"uri": sourceLocationUri, "wgt": weight}
    self.topicPage["sourceLocations"].append(entry)
add a list of relevant sources by identifying them by their geographic location @param sourceLocationUri: uri of the location where the sources should be geographically located @param weight: importance of the provided list of sources (typically in range 1 - 50)
def systemInformationType13():
    """SYSTEM INFORMATION TYPE 13, Section 9.1.43a.

    Builds the layered packet from its four components.
    """
    return (L2PseudoLength(l2pLength=0x00) /
            TpPd(pd=0x6) /
            MessageType(mesType=0x0) /
            Si13RestOctets())
SYSTEM INFORMATION TYPE 13 Section 9.1.43a
def save_params(step_num, model, trainer, ckpt_dir):
    """Save the model parameters and trainer states, marked by *step_num*."""
    # Both checkpoint files share the same zero-padded step stem.
    stem = os.path.join(ckpt_dir, '%07d' % step_num)
    param_path = stem + '.params'
    trainer_path = stem + '.states'
    logging.info('[step %d] Saving checkpoints to %s, %s.',
                 step_num, param_path, trainer_path)
    model.save_parameters(param_path)
    trainer.save_states(trainer_path)
Save the model parameter, marked by step_num.
def getXML(self):
    """Return the XML representation of the turtle's path elements.

    Concatenates the XML of every pysvg element that makes up the path
    and returns it as a single string.
    """
    # str.join avoids the quadratic cost of repeated string concatenation.
    return ''.join(element.getXML() for element in self._svgElements)
Retrieves the pysvg elements that make up the turtles path and returns them as String in an xml representation.
def eccentricity(self, directed=None, weighted=None):
    """Maximum distance from each vertex to any other vertex."""
    # Column-wise max over the all-pairs shortest-path matrix.
    distances = self.shortest_path(directed=directed, weighted=weighted)
    return distances.max(axis=0)
Maximum distance from each vertex to any other vertex.
def _MultiStream(cls, fds):
    """Effectively streams data from multiple opened AFF4ImageBase objects.

    Args:
        fds: A list of opened AFF4Stream (or AFF4Stream descendants) objects.

    Yields:
        Tuples (fd, chunk, exception) where chunk is a binary blob of data
        and fd is the object from the fds argument it belongs to. If one or
        more chunks of an fd are missing, a single (fd, None,
        MissingChunksError) tuple is yielded for that fd instead of its
        data -- so very large files may still come out truncated if the
        missing chunk is discovered after earlier chunks were yielded.
    """
    missing_chunks_by_fd = {}
    # Walk chunk paths in bounded read-ahead batches so only a limited
    # number of chunk objects are opened at a time.
    for chunk_fd_pairs in collection.Batch(
            cls._GenerateChunkPaths(fds), cls.MULTI_STREAM_CHUNKS_READ_AHEAD):
        chunks_map = dict(chunk_fd_pairs)
        contents_map = {}
        # Bulk-open every chunk in this batch and read its contents.
        for chunk_fd in FACTORY.MultiOpen(
                chunks_map, mode="r", token=fds[0].token):
            if isinstance(chunk_fd, AFF4Stream):
                fd = chunks_map[chunk_fd.urn]
                contents_map[chunk_fd.urn] = chunk_fd.read()
        # Record any chunk that failed to open or came back empty.
        for chunk_urn, fd in chunk_fd_pairs:
            if chunk_urn not in contents_map or not contents_map[chunk_urn]:
                missing_chunks_by_fd.setdefault(fd, []).append(chunk_urn)
        # Yield data only for fds with no missing chunks recorded so far.
        for chunk_urn, fd in chunk_fd_pairs:
            if fd in missing_chunks_by_fd:
                continue
            yield fd, contents_map[chunk_urn], None
    # Report each fd that had missing chunks with a single error tuple.
    for fd, missing_chunks in iteritems(missing_chunks_by_fd):
        e = MissingChunksError(
            "%d missing chunks (multi-stream)." % len(missing_chunks),
            missing_chunks=missing_chunks)
        yield fd, None, e
Effectively streams data from multiple opened AFF4ImageBase objects. Args: fds: A list of opened AFF4Stream (or AFF4Stream descendants) objects. Yields: Tuples (fd, chunk, exception) where fd is an object from the fds argument and chunk is a binary blob of data belonging to it. If one or more chunks are missing, exception will be a MissingChunksError while chunk will be None. _MultiStream does its best to skip the file entirely if one of its chunks is missing, but in case of very large files it's still possible to yield a truncated file.
def get_batch_header_values(self):
    """Scrape the "Batch Header" values from the original input file.

    Returns a dict mapping header names to their "Batch Data" values,
    or None when no batch rows were found or one of the two is missing
    (the latter case is also reported via ``self.error``).
    """
    lines = self.getOriginalFile().data.splitlines()
    reader = csv.reader(lines)
    # The original `batch_headers = batch_data = []` aliased both names
    # to the SAME list; bind them independently.
    batch_headers = []
    batch_data = []
    for row in reader:
        if not any(row):
            continue
        marker = row[0].strip().lower()
        if marker == 'batch header':
            batch_headers = [x.strip() for x in row][1:]
            continue
        if marker == 'batch data':
            batch_data = [x.strip() for x in row][1:]
            break
    if not (batch_data or batch_headers):
        # Neither section present: nothing to report.
        return None
    if not (batch_data and batch_headers):
        # Exactly one section present: flag the inconsistency.
        self.error("Missing batch headers or data")
        return None
    return dict(zip(batch_headers, batch_data))
Scrape the "Batch Header" values from the original input file
def get(self, robj, r=None, pr=None, timeout=None, basic_quorum=None,
        notfound_ok=None, head_only=False):
    """Fetch an object.

    Abstract: concrete transports must override this method.
    """
    raise NotImplementedError
Fetches an object.
def load(path: str) -> "Store":
    """Load password store from file.

    Decrypts the file when it is GPG-encrypted; otherwise reads the raw
    bytes. Raises AssertionError for legacy YAML stores.
    """
    if _gpg.is_encrypted(path):
        src_bytes = _gpg.decrypt(path)
    else:
        # Use a context manager so the handle is closed deterministically
        # (the original left the file object to the garbage collector).
        with open(path, "rb") as fh:
            src_bytes = fh.read()
    src = src_bytes.decode("utf-8")
    ext = _gpg.unencrypted_ext(path)
    assert ext not in [
        ".yml",
        ".yaml",
    ], "YAML support was removed in version 0.12.0"
    entries = _parse_entries(src)
    return Store(path, entries)
Load password store from file.
def parse_spec(self):
    """Parse the specification of how to recruit participants.

    Example: recruiters = bots: 5, mturk: 1
    """
    spec = get_config().get("recruiters")
    # Each match yields a (recruiter name, participant count) pair.
    return [
        (match.group(1), int(match.group(2)))
        for match in self.SPEC_RE.finditer(spec)
    ]
Parse the specification of how to recruit participants. Example: recruiters = bots: 5, mturk: 1
async def read_line(stream: asyncio.StreamReader) -> bytes:
    """Read a single line from ``stream``.

    ``stream`` is an :class:`~asyncio.StreamReader`. Raises ValueError
    for over-long lines or lines lacking a CRLF terminator. Returns
    :class:`bytes` without the trailing CRLF.
    """
    line = await stream.readline()
    # Length is checked first so a truncated over-long read reports
    # "too long" rather than "without CRLF".
    if len(line) > MAX_LINE:
        raise ValueError("Line too long")
    if not line.endswith(b"\r\n"):
        raise ValueError("Line without CRLF")
    return line[:-2]
Read a single line from ``stream``. ``stream`` is an :class:`~asyncio.StreamReader`. Return :class:`bytes` without CRLF.
def mousePressEvent(self, event):
    """Override Qt method: raise parent and this widget on any press."""
    # Bring the parent first, then this widget, to the top of the
    # window stacking order.
    self.parent.raise_()
    self.raise_()
    if event.button() == Qt.RightButton:
        # NOTE(review): deliberate no-op? Right-clicks are currently
        # swallowed without a context menu -- confirm this is intended.
        pass
override Qt method
def gunzip(gzip_file, file_gunzip=None):
    """Unzip a ``.gz`` file and return the filename of the unzipped file.

    When *file_gunzip* is not given, the target name is the source name
    with its extension stripped.
    """
    target = file_gunzip if file_gunzip is not None else os.path.splitext(gzip_file)[0]
    gzip_open_to(gzip_file, target)
    return target
Unzip .gz file. Return filename of unzipped file.
def getKendallTauScore(myResponse, otherResponse):
    """Return the normalized Kendall tau distance between two responses.

    Counts value pairs ranked in opposite order by the two responses and
    normalizes by the total number of pairs n*(n-1)/2 (result in [0, 1]).
    Returns 0 when there are fewer than two values.
    """
    # Materialize the values: dict views are not indexable on Python 3.
    list1 = list(myResponse.values())
    list2 = list(otherResponse.values())
    n = len(list1)
    if n <= 1:
        return 0
    kt = 0
    for i in range(n - 1):
        for j in range(i + 1, n):
            # A pair is discordant when the two lists order it oppositely.
            if ((list1[i] > list1[j] and list2[i] < list2[j]) or
                    (list1[i] < list1[j] and list2[i] > list2[j])):
                kt += 1
    return (kt * 2) / (n * (n - 1))
Returns the Kendall Tau Score
def return_standard_conf():
    """Return the sample config file with a fresh random app secret.

    NOTE(review): Python 2 only -- ``os.urandom(24).encode('hex')`` and
    %-interpolation into the bytes returned by ``resource_string`` both
    break on Python 3 (``binascii.hexlify`` would be needed there).
    """
    result = resource_string(__name__, 'daemon/dagobahd.yml')
    # Substitute a random hex secret into the template's %(app_secret)s slot.
    result = result % {'app_secret': os.urandom(24).encode('hex')}
    return result
Return the sample config file.
def get_response_example(self, resp_spec):
    """Get a response example from a response spec.

    Returns a definition example, a list of examples for arrays, a
    property-spec example for plain types, or '' when no example can
    be derived.
    """
    if 'schema' not in resp_spec:
        return ''
    schema = resp_spec['schema']
    if '$ref' in schema:
        definition_name = self.get_definition_name_from_ref(schema['$ref'])
        return self.definitions_example[definition_name]
    if 'items' in schema and schema['type'] == 'array':
        items = schema['items']
        if '$ref' in items:
            definition_name = self.get_definition_name_from_ref(items['$ref'])
        else:
            if 'type' in items:
                definition_name = self.get_definition_name_from_ref(items)
                return [definition_name]
            # Fix: the original concatenated a dict onto a str, which
            # raises TypeError; use lazy %-formatting (and the
            # non-deprecated `warning`).
            logging.warning("No item type in: %s", schema)
            return ''
        return [self.definitions_example[definition_name]]
    if 'type' in schema:
        return self.get_example_from_prop_spec(schema)
    return ''
Get a response example from a response spec.
async def monitor_mode(self, poll_devices=False, device=None, workdir=None):
    """Connect to the IM and place it in monitoring mode."""
    print("Running monitor mode")
    # Establish the modem connection before switching modes.
    await self.connect(poll_devices, device, workdir)
    self.plm.monitor_mode()
Place the IM in monitoring mode.
def getSignalParameters(fitParams, n_std=3):
    """Return (minimum, average, maximum) of the signal peak.

    The minimum is clipped at the background level fitParams[0][1];
    the bounds span *n_std* standard deviations around the peak center.
    """
    signal = getSignalPeak(fitParams)
    center = signal[1]
    spread = signal[2]
    upper = center + n_std * spread
    lower = center - n_std * spread
    if lower < fitParams[0][1]:
        lower = fitParams[0][1]
    return lower, center, upper
return minimum, average, maximum of the signal peak
def set_series_resistance(self, channel, value, resistor_index=None):
    """Set the current series resistance value for the specified channel.

    Parameters
    ----------
    channel : int
        Analog channel index (0 updates ``R_hv``, otherwise ``R_fb``).
    value : float
        Series resistance value.
    resistor_index : int, optional
        Series resistor channel index; defaults to the result of
        :attr:`series_resistor_index` for the channel.

    Returns
    -------
    int
        Return code from the embedded call.
    """
    if resistor_index is None:
        resistor_index = self.series_resistor_index(channel)
    try:
        # Keep the host-side calibration cache in sync (best effort).
        if channel == 0:
            self.calibration.R_hv[resistor_index] = value
        else:
            self.calibration.R_fb[resistor_index] = value
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit. Calibration data may be absent; the device update
        # below still proceeds regardless.
        pass
    return self._set_series_resistance(channel, value)
Set the current series resistance value for the specified channel. Parameters ---------- channel : int Analog channel index. value : float Series resistance value. resistor_index : int, optional Series resistor channel index. If :data:`resistor_index` is not specified, the resistor-index from the current context _(i.e., the result of :attr:`series_resistor_index`)_ is used. Otherwise, the series-resistor is temporarily set to the value of :data:`resistor_index` to set the resistance before restoring back to the original value. See definition of :meth:`safe_series_resistor_index_read` decorator. Returns ------- int Return code from embedded call.
def join(self, timeout=None):
    """Join all threads in this group.

    If the optional *timeout* (seconds) is given, give up once the shared
    deadline has passed. Returns True if the threads were successfully
    joined, False if a timeout occurred.
    """
    if timeout is None:
        # No deadline: block until every thread finishes.
        for thread in self.__threads:
            thread.join()
    else:
        # All threads share one deadline; each join gets the remainder.
        deadline = _time() + timeout
        for thread in self.__threads:
            delay = deadline - _time()
            if delay <= 0:
                return False
            # NOTE(review): assumes `thread.join(delay)` returns a truthy
            # success flag. Stdlib threading.Thread.join returns None,
            # which would make this always return False -- confirm these
            # are custom thread objects whose join reports success.
            if not thread.join(delay):
                return False
    return True
Join all threads in this group. If the optional "timeout" argument is given, give up after that many seconds. This method returns True if the threads were successfully joined, False if a timeout occurred.
def get_subreddit_image(self, subreddit, id):
    """Return the Gallery_image with *id* submitted to the subreddit gallery.

    :param subreddit: The subreddit the image has been submitted to.
    :param id: The id of the image we want.
    """
    url = "{0}/3/gallery/r/{1}/{2}".format(self._base_url, subreddit, id)
    return Gallery_image(self._send_request(url), self)
Return the Gallery_image with the id submitted to subreddit gallery :param subreddit: The subreddit the image has been submitted to. :param id: The id of the image we want.
def public(self):
    """Return the public ssh-key.

    :returns: The public ssh-key as string
    """
    endpoint = self.mist_client.uri + '/keys/' + self.id + "/public"
    return self.request(endpoint).get().json()
Return the public ssh-key :returns: The public ssh-key as string
def main():
    """Measure capnp serialization performance of Random (Python 2 script)."""
    r = Random(42)
    # --- Serialization benchmark: build and fill a fresh message per loop.
    startSerializationTime = time.time()
    for i in xrange(_SERIALIZATION_LOOPS):
        builderProto = RandomProto.new_message()
        r.write(builderProto)
    elapsedSerializationTime = time.time() - startSerializationTime
    # Serialize the last builder once; readers below all parse these bytes.
    builderBytes = builderProto.to_bytes()
    # --- Deserialization benchmark: a reader is traversal-limited, so a
    # fresh reader is created every _MAX_DESERIALIZATION_LOOPS_PER_READER reads.
    startDeserializationTime = time.time()
    deserializationCount = 0
    while deserializationCount < _DESERIALIZATION_LOOPS:
        readerProto = RandomProto.from_bytes(
            builderBytes,
            traversal_limit_in_words=_TRAVERSAL_LIMIT_IN_WORDS,
            nesting_limit=_NESTING_LIMIT)
        numReads = min(_DESERIALIZATION_LOOPS - deserializationCount,
                       _MAX_DESERIALIZATION_LOOPS_PER_READER)
        for _ in xrange(numReads):
            r.read(readerProto)
        deserializationCount += numReads
    elapsedDeserializationTime = time.time() - startDeserializationTime
    # Report totals and per-loop averages (Python 2 print statements).
    print _SERIALIZATION_LOOPS, "Serialization loops in", \
        elapsedSerializationTime, "seconds."
    print "\t", elapsedSerializationTime/_SERIALIZATION_LOOPS, "seconds per loop."
    print deserializationCount, "Deserialization loops in", \
        elapsedDeserializationTime, "seconds."
    print "\t", elapsedDeserializationTime/deserializationCount, "seconds per loop."
Measure capnp serialization performance of Random