code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def get_downbeat_steps(self):
    """Return the indices of time steps that contain downbeats.

    Returns
    -------
    list
        Indices of time steps that contain downbeats; empty when no
        downbeat track is set.
    """
    if self.downbeat is None:
        return []
    return np.nonzero(self.downbeat)[0].tolist()
Return the indices of time steps that contain downbeats. Returns ------- downbeat_steps : list The indices of time steps that contain downbeats.
def parse_blast(blast_string):
    """Clean up HTML BLAST results.

    Walks the <pre> blocks of an HTML-formatted BLAST results page and
    collects each match's ID together with its raw alignment block.

    Parameters
    ----------
    blast_string : str
        A complete webpage of standard BLAST results.

    Returns
    -------
    out : 2-tuple
        (list of match IDs, list of their alignment blocks).
    """
    soup = BeautifulSoup(str(blast_string), "html.parser")
    all_blasts = []
    all_blast_ids = []
    # Fix: the compiled pattern was previously built but never used;
    # re.findall() was called with the raw string instead.
    prog = re.compile('></a>....:')
    for item in soup.find_all('pre'):
        if len(item.find_all('a')) == 1:
            all_blasts.append(item)
            # ID is the 4 characters preceding the trailing ':'.
            blast_id = prog.findall(str(item))[0][-5:-1]
            all_blast_ids.append(blast_id)
    return (all_blast_ids, all_blasts)
Clean up HTML BLAST results This function requires BeautifulSoup and the re module It goes through the complicated output returned by the BLAST search and provides a list of matches, as well as the raw text file showing the alignments for each of the matches. This function works best with HTML formatted input Inputs ------ get_blast() uses this function internally Parameters ---------- blast_string : str A complete webpage of standard BLAST results Returns ------- out : 2-tuple A tuple consisting of a list of PDB matches, and a list of their alignment text files (unformatted)
def get_bundle_by_id(self, bundle_id):
    """Retrieve the installed bundle with the given ID.

    :param bundle_id: ID of an installed bundle
    :return: The requested bundle
    :raise BundleException: The ID is invalid
    """
    if bundle_id == 0:
        # ID 0 is the framework itself.
        return self
    with self.__bundles_lock:
        try:
            return self.__bundles[bundle_id]
        except KeyError:
            raise BundleException("Invalid bundle ID {0}".format(bundle_id))
Retrieves the bundle with the given ID :param bundle_id: ID of an installed bundle :return: The requested bundle :raise BundleException: The ID is invalid
def resetToPreviousLoc(self):
    """Reset the dragger's rect to the place where dragging started."""
    self.rect.left, self.rect.top = self.startDraggingX, self.startDraggingY
Resets the loc of the dragger to place where dragging started. This could be used in a test situation if the dragger was dragged to an incorrect location.
def check_imts(self, imts):
    """Make sure the IMTs are recognized by all GSIMs in the logic tree.

    Raises ValueError when an 'SA' IMT falls outside a GSIM's period range.
    """
    for trt in self.values:
        for gsim in self.values[trt]:
            for attr in dir(gsim):
                coeffs = getattr(gsim, attr)
                if not isinstance(coeffs, CoeffsTable):
                    continue
                for imt in imts:
                    if not imt.startswith('SA'):
                        continue
                    try:
                        coeffs[from_string(imt)]
                    except KeyError:
                        raise ValueError(
                            '%s is out of the period range defined '
                            'for %s' % (imt, gsim))
Make sure the IMTs are recognized by all GSIMs in the logic tree
def the_one(cls):
    """Get the single global HelpUrlExpert object, creating it lazily."""
    instance = cls.THE_ONE
    if instance is None:
        instance = cls(settings.HELP_TOKENS_INI_FILE)
        cls.THE_ONE = instance
    return instance
Get the single global HelpUrlExpert object.
def _gregorian_to_ssweek(date_value):
    """Sunday-starting-week (year, week, day) for the given Gregorian date."""
    week_one_start = _ssweek_year_start(date_value.year)
    week = (date_value - week_one_start).days // 7 + 1
    # weekday() is 0-based Monday; shift to 1-based.
    day = date_value.weekday() + 1
    return (date_value.year, week, day)
Sunday-starting week year, week and day for the given Gregorian calendar date
def post_authenticate(self):
    """Bind the login model to the session and debounce login_date updates.

    The timestamp is only refreshed when it is over 15 minutes old, so a
    write does not occur on every API hit.
    """
    goldman.sess.login = self
    now = dt.now()
    if not self.login_date:
        self.login_date = now
    else:
        minutes_stale = (now - self.login_date).seconds / 60
        if minutes_stale > 15:
            self.login_date = now
    if self.dirty:
        goldman.sess.store.update(self)
Update the login_date timestamp Initialize the thread local sess.login property with the authenticated login model. The login_date update will be debounced so writes don't occur on every hit of the API. If the login_date was modified within 15 minutes then don't update it.
def format_style(number: int) -> str:
    """Return an escape code for a style, by number.

    Raises InvalidStyle for numbers not in the known style table.
    """
    if str(number) in _stylenums:
        return codeformat(number)
    raise InvalidStyle(number)
Return an escape code for a style, by number. This handles invalid style numbers.
def generate_output_path(args, project_path):
    """Generate a timestamped default output directory under <project_path>/results.

    Note: ``args`` is accepted for interface compatibility but is not used.
    """
    stamp = time.strftime('%Y.%m.%d_%H.%M.%S', time.localtime())
    dirname = 'results_{}_{}'.format(stamp, str(datetime.now().microsecond))
    return os.path.join(project_path, 'results', dirname)
Generate default output directory
def ec_construct_private(num):
    """Build an elliptic-curve private key from public and private attributes.

    :param num: Dict with 'x', 'y', 'd' and 'crv' values
    :return: An EllipticCurvePrivateKey instance
    """
    curve = NIST2SEC[as_unicode(num['crv'])]()
    pub_numbers = ec.EllipticCurvePublicNumbers(num['x'], num['y'], curve)
    priv_numbers = ec.EllipticCurvePrivateNumbers(num['d'], pub_numbers)
    return priv_numbers.private_key(default_backend())
Given a set of values on public and private attributes build a elliptic curve private key instance. :param num: A dictionary with public and private attributes and their values :return: A cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey instance.
def _pickle_batch(self): payload = pickle.dumps(self.batch) header = struct.pack("!L", len(payload)) message = header + payload return message
Pickle the metrics into a form that can be understood by the graphite pickle connector.
def on_message(self, handler, msg):
    """Forward ``msg`` to every other handler when remote debugging;
    otherwise just print it."""
    if not self.remote_debugging:
        print(msg)
        return
    for other in self.handlers:
        if other != handler:
            other.write_message(msg, True)
In remote debugging mode this simply acts as a forwarding proxy for the two clients.
def update_many(cls, filter, update, upsert=False):
    """Update every document matching ``filter`` with ``update``.

    Upserts a new document when ``upsert`` is True and nothing matches.
    Returns the raw driver result.
    """
    result = cls.collection.update_many(filter, update, upsert)
    return result.raw_result
Updates all documents that pass the filter with the update value Will upsert a new document if upsert=True and no document is filtered
async def _register(self): if self.registered: return self._registration_attempts += 1 self.connection.throttle = False if self.password: await self.rawmsg('PASS', self.password) await self.set_nickname(self._attempt_nicknames.pop(0)) await self.rawmsg('USER', self.username, '0', '*', self.realname)
Perform IRC connection registration.
def DropConnection(self, connection):
    """Attempt to cleanly drop the connection.

    MySQL errors raised while closing the cursor or the handle are ignored.
    """
    # Two independent try blocks: a cursor-close failure must not
    # prevent the database handle from being closed as well.
    try:
        connection.cursor.close()
    except MySQLdb.Error:
        pass
    try:
        connection.dbh.close()
    except MySQLdb.Error:
        pass
Attempt to cleanly drop the connection.
def check_conditions(f, args, kwargs):
    """Run all pre- and postconditions attached to ``f``, plus those on
    overridden versions of it in base classes, invoking ``f`` in between."""
    member_function = is_member_function(f)
    check_preconditions(f, args, kwargs)
    base_classes = []
    if member_function:
        # Drop the class itself and ``object`` from the MRO.
        base_classes = inspect.getmro(type(args[0]))[1:-1]
    for clz in base_classes:
        check_preconditions(getattr(clz, f.func_name, None), args, kwargs)
    return_value = f(*args, **kwargs)
    check_postconditions(f, return_value)
    for clz in base_classes:
        check_postconditions(getattr(clz, f.func_name, None), return_value)
    return return_value
This is what runs all of the conditions attached to a method, along with the conditions on the superclasses.
def shutdown(self):
    """Shuts down this HazelcastClient.

    No-op when the client lifecycle is not live.  The teardown order
    (caches, statistics, partition service, heartbeat, cluster, reactor)
    is deliberate; do not reorder.
    """
    if self.lifecycle.is_live:
        self.lifecycle.fire_lifecycle_event(LIFECYCLE_STATE_SHUTTING_DOWN)
        self.near_cache_manager.destroy_all_near_caches()
        self.statistics.shutdown()
        self.partition_service.shutdown()
        self.heartbeat.shutdown()
        self.cluster.shutdown()
        self.reactor.shutdown()
        self.lifecycle.fire_lifecycle_event(LIFECYCLE_STATE_SHUTDOWN)
        self.logger.info("Client shutdown.", extra=self._logger_extras)
Shuts down this HazelcastClient.
def size(self):
    """Number of nodes in the trie rooted here, including this node."""
    total = 1
    if self.children:
        total += sum(child.size() for child in self.children.values())
    return total
Size is number of nodes under the trie, including the current node
def rewrite_references_json(json_content, rewrite_json):
    """General-purpose references-json rewriting by matching the ``id`` value.

    For each reference dict whose ``id`` appears in ``rewrite_json``, the
    replacement key/value pairs are written into the reference in place.

    :param json_content: list of reference dicts (mutated and returned)
    :param rewrite_json: dict mapping reference id -> dict of replacements
    :return: the (mutated) ``json_content`` list
    """
    for ref in json_content:
        # Hoist the repeated lookup; built-in .items() replaces the
        # six/compat iteritems helper with identical behavior.
        ref_id = ref.get("id")
        if ref_id and ref_id in rewrite_json:
            for key, value in rewrite_json[ref_id].items():
                ref[key] = value
    return json_content
general purpose references json rewriting by matching the id value
def correct_rytov_output(radius, sphere_index, medium_index, radius_sampling):
    r"""Error-correct Rytov-fitted radius and refractive index.

    Applies polynomial corrections in :math:`x = n_\text{Ryt}/n_\text{med} - 1`
    using coefficients looked up for the given ``radius_sampling``.

    Returns
    -------
    (radius_sc, sphere_index_sc) : tuple of float
        Systematically corrected radius [m] and refractive index.
    """
    params = get_params(radius_sampling)
    x = sphere_index / medium_index - 1
    radius_sc = radius * (params["ra"] * x**2
                          + params["rb"] * x
                          + params["rc"])
    sphere_index_sc = sphere_index + medium_index * (params["na"] * x**2
                                                     + params["nb"] * x)
    return radius_sc, sphere_index_sc
r"""Error-correction of refractive index and radius for Rytov This method corrects the fitting results for `radius` :math:`r_\text{Ryt}` and `sphere_index` :math:`n_\text{Ryt}` obtained using :func:`qpsphere.models.rytov` using the approach described in :cite:`Mueller2018` (eqns. 3,4, and 5). .. math:: n_\text{Ryt-SC} &= n_\text{Ryt} + n_\text{med} \cdot \left( a_n x^2 + b_n x + c_n \right) r_\text{Ryt-SC} &= r_\text{Ryt} \cdot \left( a_r x^2 +b_r x + c_r \right) &\text{with} x = \frac{n_\text{Ryt}}{n_\text{med}} - 1 The correction factors are given in :data:`qpsphere.models.mod_rytov_sc.RSC_PARAMS`. Parameters ---------- radius: float Fitted radius of the sphere :math:`r_\text{Ryt}` [m] sphere_index: float Fitted refractive index of the sphere :math:`n_\text{Ryt}` medium_index: float Refractive index of the surrounding medium :math:`n_\text{med}` radius_sampling: int Number of pixels used to sample the sphere radius when computing the Rytov field. Returns ------- radius_sc: float Systematically corrected radius of the sphere :math:`r_\text{Ryt-SC}` [m] sphere_index_sc: float Systematically corrected refractive index of the sphere :math:`n_\text{Ryt-SC}` See Also -------- correct_rytov_sc_input: the inverse of this method
def complete_worker(self, text, line, begidx, endidx):
    """Tab-complete worker subcommands matching the typed prefix."""
    return [cmd for cmd in PsiturkNetworkShell.worker_commands
            if cmd.startswith(text)]
Tab-complete worker command.
def can_add_post(self, topic, user):
    """Given a topic, check whether the user can append posts to it.

    Locked topics additionally require the 'can_reply_to_locked_topics'
    permission.
    """
    allowed = self._perform_basic_permission_check(
        topic.forum, user, 'can_reply_to_topics')
    if topic.is_locked:
        allowed &= self._perform_basic_permission_check(
            topic.forum, user, 'can_reply_to_locked_topics')
    return allowed
Given a topic, checks whether the user can append posts to it.
def CMS(data, format="PEM"):
    """Factory creating a CMS object from a received message.

    Parses PEM- or DER-encoded CMS data and returns a SignedData,
    EnvelopedData or EncryptedData object depending on the content type.
    """
    bio = Membio(data)
    if format == "PEM":
        ptr = libcrypto.PEM_read_bio_CMS(bio.bio, None, None, None)
    else:
        ptr = libcrypto.d2i_CMS_bio(bio.bio, None)
    if ptr is None:
        raise CMSError("Error parsing CMS data")
    typeoid = Oid(libcrypto.OBJ_obj2nid(libcrypto.CMS_get0_type(ptr)))
    name = typeoid.shortname()
    if name == "pkcs7-signedData":
        return SignedData(ptr)
    if name == "pkcs7-envelopedData":
        return EnvelopedData(ptr)
    if name == "pkcs7-encryptedData":
        return EncryptedData(ptr)
    raise NotImplementedError("cannot handle " + name)
Factory function to create CMS objects from received messages. Parses CMS data and returns either SignedData or EnvelopedData object. format argument can be either "PEM" or "DER". It determines object type from the contents of received CMS structure.
def get_index_url(self, resource=None, **kwargs):
    """Build the url of the resource's index.

    :param resource: name of the resource or None (use own resource)
    :param kwargs: additional keyword arguments for url construction
    :return: url of the resource's index
    """
    if resource == self.get_resource_name():
        url_kwargs = self.default_kwargs_for_urls()
    else:
        url_kwargs = {}
    url_kwargs.update(kwargs)
    endpoint = '{}_index'.format(resource or self.get_resource_name())
    return self.get_full_url(self.app.reverse(endpoint, **url_kwargs))
Builds the url of the resource's index. :param resource: name of the resource or None :param kwargs: additional keyword arguments to build the url :return: url of the resource's index
def _exit(self, status_code):
    """Properly kill the Python process, including zombie threads.

    sys.exit only terminates the current thread, so when other threads
    are still alive, the hard os._exit is used instead.
    """
    if threading.active_count() > 1:
        os._exit(status_code)
    else:
        sys.exit(status_code)
Properly kill Python process including zombie threads.
def member_profile_view(request, targetUsername):
    """View a member's profile.

    Users viewing their own (non-anonymous) profile are redirected to
    the dedicated 'my_profile' page.
    """
    if targetUsername == request.user.username and targetUsername != ANONYMOUS_USERNAME:
        return HttpResponseRedirect(reverse('my_profile'))
    targetUser = get_object_or_404(User, username=targetUsername)
    targetProfile = get_object_or_404(UserProfile, user=targetUser)
    context = {
        'page_name': "{0}'s Profile".format(targetUsername),
        'targetUser': targetUser,
        'targetProfile': targetProfile,
        'number_of_threads': Thread.objects.filter(owner=targetProfile).count(),
        'number_of_messages': Message.objects.filter(owner=targetProfile).count(),
        'number_of_requests': Request.objects.filter(owner=targetProfile).count(),
        "rooms": Room.objects.filter(current_residents=targetProfile),
        "prev_rooms": PreviousResident.objects.filter(resident=targetProfile),
    }
    return render_to_response('member_profile.html', context,
                              context_instance=RequestContext(request))
View a member's Profile.
def activate_next(self, _previous=False):
    """Activate the next (or previous) value in sorted key order, wrapping
    around at the ends."""
    options = sorted(self.values.keys())
    try:
        index = options.index(self.get_current_value())
    except ValueError:
        # Unknown current value: start from the first option.
        index = 0
    step = -1 if _previous else 1
    chosen = options[(index + step) % len(options)]
    self.values[chosen]()
Activate next value.
def getfile2(url, auth=None, outdir=None):
    """Fetch a file over HTTP(S) using requests, streaming it to disk.

    Supports optional HTTP authentication; writes into ``outdir`` when given.
    """
    import requests
    print("Retrieving: %s" % url)
    fn = os.path.split(url)[-1]
    if outdir is not None:
        fn = os.path.join(outdir, fn)
    kwargs = {'stream': True}
    if auth is not None:
        kwargs['auth'] = auth
    r = requests.get(url, **kwargs)
    # Stream in ~1 MB chunks so large files never sit fully in memory.
    with open(fn, 'wb') as fd:
        for chunk in r.iter_content(1000000):
            fd.write(chunk)
Function to fetch files using requests Works with https authentication
def GetCacheValueByObject(self, vfs_object):
    """Retrieve the cache value for a cached VFS object.

    Returns (identifier, cache value), or (None, None) when not cached.
    Raises RuntimeError if a stored cache value is missing/falsy.
    """
    for identifier, cache_value in self._values.items():
        if not cache_value:
            raise RuntimeError('Missing cache value.')
        if cache_value.vfs_object == vfs_object:
            return identifier, cache_value
    return None, None
Retrieves the cache value for the cached object. Args: vfs_object (object): VFS object that was cached. Returns: tuple[str, ObjectsCacheValue]: identifier and cache value object or (None, None) if not cached. Raises: RuntimeError: if the cache value is missing.
def session_demo_danger_callback(da_children, session_state=None, **kwargs):
    """Update output based just on the session state."""
    if not session_state:
        return "Session state not yet available"
    demo = session_state.get('bootstrap_demo_state', "NOTHING")
    count = session_state.get("ind_use", "NOT SET")
    return ("Session state contains: " + str(demo)
            + " and the page render count is " + str(count))
Update output based just on state
def import_participant_element(diagram_graph, participants_dictionary, participant_element):
    """Add a BPMN 'participant' element to the participants dictionary.

    Participants without a process reference also get their own node in
    ``diagram_graph``.
    """
    participant_id = participant_element.getAttribute(consts.Consts.id)
    name = participant_element.getAttribute(consts.Consts.name)
    process_ref = participant_element.getAttribute(consts.Consts.process_ref)
    if process_ref == '':
        diagram_graph.add_node(participant_id)
        node = diagram_graph.node[participant_id]
        node[consts.Consts.type] = consts.Consts.participant
        node[consts.Consts.process] = participant_id
    participants_dictionary[participant_id] = {
        consts.Consts.name: name,
        consts.Consts.process_ref: process_ref,
    }
Adds 'participant' element to the collaboration dictionary. :param diagram_graph: NetworkX graph representing a BPMN process diagram, :param participants_dictionary: dictionary with participant element attributes. Key is participant ID, value is a dictionary of participant attributes, :param participant_element: object representing a BPMN XML 'participant' element.
def _build_meta(text: str, title: str) -> DocstringMeta:
    """Build a docstring element from one section entry.

    :param text: docstring element text
    :param title: title of the section containing the element
    """
    meta = _sections[title]
    # A returns-entry whose first word has no ":" carries only a description.
    if meta == "returns" and ":" not in text.split()[0]:
        return DocstringMeta([meta], description=text)
    before, desc = text.split(":", 1)
    if desc:
        # Drop a single leading space after the colon.
        desc = desc[1:] if desc[0] == " " else desc
        if "\n" in desc:
            first_line, rest = desc.split("\n", 1)
            desc = first_line + "\n" + inspect.cleandoc(rest)
        desc = desc.strip("\n")
    # "name (type)" form for typed parameters.
    m = re.match(r"(\S+) \((\S+)\)$", before)
    if meta == "param" and m:
        arg_name, type_name = m.group(1, 2)
        args = [meta, type_name, arg_name]
    else:
        args = [meta, before]
    return DocstringMeta(args, description=desc)
Build docstring element. :param text: docstring element text :param title: title of section containing element :return:
def _set_italian_leading_zeros_for_phone_number(national_number, numobj):
    """Set the leading-zero fields of a PhoneNumber from its national number
    string.

    A single leading zero sets ``italian_leading_zero``; additional zeros
    (leaving at least one trailing digit) set ``number_of_leading_zeros``.
    """
    if len(national_number) > 1 and national_number[0] == U_ZERO:
        numobj.italian_leading_zero = True
        zeros = 1
        while (zeros < len(national_number) - 1 and
               national_number[zeros] == U_ZERO):
            zeros += 1
        if zeros != 1:
            numobj.number_of_leading_zeros = zeros
A helper function to set the values related to leading zeros in a PhoneNumber.
def normalize_version(version):
    """Canonicalize the provided `version`.

    ``None`` passes through.  Integers 1..40 are returned as ints; Micro QR
    Code version names are returned as uppercased identifiers.  Otherwise a
    VersionError is raised.
    """
    if version is None:
        return None
    error = False
    try:
        version = int(version)
        error = version < 1
    except (ValueError, TypeError):
        # Not an integer: try to interpret it as a Micro QR version name.
        try:
            version = consts.MICRO_VERSION_MAPPING[version.upper()]
        except (KeyError, AttributeError):
            error = True
    # Precedence note: `and` binds tighter than `or`, so this reads as
    # error OR ((version outside 1..40) AND (not a Micro QR version)).
    if error or not 0 < version < 41 and version not in consts.MICRO_VERSIONS:
        raise VersionError('Unsupported version "{0}". '
                           'Supported: {1} and 1 .. 40'
                           .format(version, ', '.join(sorted(consts.MICRO_VERSION_MAPPING.keys()))))
    return version
\ Canonicalizes the provided `version`. If the `version` is ``None``, this function returns ``None``. Otherwise this function checks if `version` is an integer or a Micro QR Code version. In case the string represents a Micro QR Code version, an uppercased string identifier is returned. If the `version` does not represent a valid version identifier (aside from ``None``), a VersionError is raised. :param version: An integer, a string or ``None``. :raises: VersionError: In case the version is not ``None`` and does not represent a valid (Micro) QR Code version. :rtype: int, str or ``None``
def get_protein_data_pgrouped(proteindata, p_acc, headerfields):
    """Parse protein-grouped data for one protein into a tsv output dict."""
    base_report = get_protein_data_base(proteindata, p_acc, headerfields)
    return get_cov_protnumbers(proteindata, p_acc, base_report)
Parses protein data for a certain protein into tsv output dictionary
def mutex():
    """Tests the implementation of mutex.

    CLI Example:

        salt '*' sysbench.mutex
    """
    # Test matrix: mutex counts crossed with lock/loop settings.
    nums = [50, 50, 50, 500, 500, 500, 1000, 1000, 1000]
    lock_counts = [10000, 25000, 50000, 10000, 25000, 50000, 10000, 25000, 50000]
    loop_counts = [2500, 5000, 10000, 10000, 2500, 5000, 5000, 10000, 2500]
    test_command = ('sysbench --num-threads=250 --test=mutex '
                    '--mutex-num={0} --mutex-locks={1} --mutex-loops={2} run ')
    ret_val = {}
    for num, locks, loops in zip(nums, lock_counts, loop_counts):
        key = 'Mutex: {0} Locks: {1} Loops: {2}'.format(num, locks, loops)
        result = __salt__['cmd.run'](test_command.format(num, locks, loops))
        ret_val[key] = _parser(result)
    return ret_val
Tests the implementation of mutex CLI Examples: .. code-block:: bash salt '*' sysbench.mutex
def full_name(self):
    """Return the member's full dotted name, including the prefix when set."""
    if self.prefix is None:
        return self.member
    return '.'.join([self.prefix, self.member])
Return full name of member
def _hide_loading_page(self):
    """Hide animation shown while the kernel is loading."""
    self.infowidget.hide()
    self.shellwidget.show()
    # Swap the info page back to a blank page so the spinner is gone the
    # next time the info widget is shown.
    self.info_page = self.blank_page
    self.set_info_page()
    # One-shot handler: disconnect so the prompt-ready signal no longer
    # triggers this method.
    self.shellwidget.sig_prompt_ready.disconnect(self._hide_loading_page)
Hide animation shown while the kernel is loading.
def chunks(iterable, size):
    """Split a (possibly very large) iterable into evenly sized chunks.

    Yields lists of at most ``size`` items; the final chunk may be shorter.
    """
    it = iter(iterable)
    while True:
        chunk = list(islice(it, size))
        if not chunk:
            return
        yield chunk
Splits a very large list into evenly sized chunks. Returns an iterator of lists that are no more than the size passed in.
def uninit_ui(self):
    """Delete the tooltip

    :returns: None
    :rtype: None
    :raises: None
    """
    self.lay.removeWidget(self.tool_pb)
    # deleteLater defers widget destruction to the Qt event loop.
    self.tooltip.deleteLater()
    self.tool_pb.deleteLater()
Delete the tooltip :returns: None :rtype: None :raises: None
def _get_subject_alternative_names(self, ext):
    """Return a list of Subject Alternative Name values for the given x509
    extension object."""
    values = []
    for san in ext.value:
        value = san.value
        if isinstance(value, string):
            values.append(value)
        elif isinstance(value, x509.Name):
            # Directory names: stringify each RDN.
            for rdn in value.rdns:
                values.append(self._name_attribute_to_string(rdn))
    return values
Return a list of Subject Alternative Name values for the given x509 extension object.
def _findbytes(self, bytes_, start, end, bytealigned): assert self._datastore.offset == 0 assert bytealigned is True bytepos = (start + 7) // 8 found = False p = bytepos finalpos = end // 8 increment = max(1024, len(bytes_) * 10) buffersize = increment + len(bytes_) while p < finalpos: buf = bytearray(self._datastore.getbyteslice(p, min(p + buffersize, finalpos))) pos = buf.find(bytes_) if pos != -1: found = True p += pos break p += increment if not found: return () return (p * 8,)
Quicker version of find when everything's whole byte and byte aligned.
def _array_io(self, action, array, frames):
    """Check array and call low-level IO function.

    The array must be C-contiguous, 1-D for mono streams or 2-D with the
    second axis matching the channel count.
    """
    if (array.ndim not in (1, 2) or
            array.ndim == 1 and self.channels != 1 or
            array.ndim == 2 and array.shape[1] != self.channels):
        raise ValueError("Invalid shape: {0!r}".format(array.shape))
    if not array.flags.c_contiguous:
        raise ValueError("Data must be C-contiguous")
    ctype = self._check_dtype(array.dtype.name)
    assert array.dtype.itemsize == _ffi.sizeof(ctype)
    # Zero-copy: cast the numpy buffer's raw address to a matching C pointer.
    cdata = _ffi.cast(ctype + '*', array.__array_interface__['data'][0])
    return self._cdata_io(action, cdata, ctype, frames)
Check array and call low-level IO function.
def give_satellite_json(self):
    """Get the json information for a satellite link.

    Provides the information exposed by a daemon on its HTTP interface.

    :return: dict of livestate plus common link properties
    """
    daemon_properties = ['type', 'name', 'uri', 'spare', 'configuration_sent',
                         'realm_name', 'manage_sub_realms', 'active', 'reachable',
                         'alive', 'passive', 'last_check', 'polling_interval',
                         'max_check_attempts']
    livestate, livestate_output = self.get_livestate()
    res = {"livestate": livestate, "livestate_output": livestate_output}
    for sat_prop in daemon_properties:
        # Missing attributes are reported with a placeholder value.
        res[sat_prop] = getattr(self, sat_prop, 'not_yet_defined')
    return res
Get the json information for a satellite. This to provide information that will be exposed by a daemon on its HTTP interface. :return: dictionary of information common to all the links :rtype: dict
def _set_expressions(self, expressions): self.expressions = {} for key, item in expressions.items(): self.expressions[key] = {'function': item}
Extract expressions and variables from the user provided expressions.
def query(cls, database, map_fun, reduce_fun, language='javascript', **options):
    """Execute a CouchDB temporary view and map result rows back to objects
    of this mapping via ``cls._wrap_row``.

    Pass ``include_docs=True`` to load the full document for every row.
    """
    return database.query(map_fun,
                          reduce_fun=reduce_fun,
                          language=language,
                          wrapper=cls._wrap_row,
                          **options)
Execute a CouchDB temporary view and map the result values back to objects of this mapping. Note that by default, any properties of the document that are not included in the values of the view will be treated as if they were missing from the document. If you want to load the full document for every row, set the ``include_docs`` option to ``True``.
def container_config_delete(name, config_key, remote_addr=None, cert=None, key=None, verify_cert=True):
    """Delete a container config value.

    name: container name; config_key: key to delete.  remote_addr /
    cert / key / verify_cert configure the optional remote LXD server.
    """
    target = container_get(name, remote_addr, cert, key, verify_cert, _raw=True)
    return _delete_property_dict_item(target, 'config', config_key)
Delete a container config value name : Name of the container config_key : The config key to delete remote_addr : A URL to a remote Server, you also have to give cert and key if you provide remote_addr and it's a TCP Address! Examples: https://myserver.lan:8443 /var/lib/mysocket.sock cert : PEM Formatted SSL Certificate. Examples: ~/.config/lxc/client.crt key : PEM Formatted SSL Key. Examples: ~/.config/lxc/client.key verify_cert : True Whether to verify the cert, this is by default True but in most cases you want to turn it off as LXD normally uses self-signed certificates.
def items(self):
    """Return the payment's items as a list.

    The upstream payload stores a single item as a bare dict and several
    items as a list; a single item is wrapped so callers always get a list.
    """
    item = self.transaction['items']['item']
    # Fix: isinstance instead of `type(x) == list`, which broke for
    # list subclasses and is non-idiomatic.
    if isinstance(item, list):
        return item
    return [item]
Lista dos items do pagamento
def entity_categories(self, entity_id):
    """Get the list of entity categories for an entity id.

    :param entity_id: Entity id
    :return: list of entity category strings (empty when none)
    """
    return self.entity_attributes(entity_id).get(ENTITY_CATEGORY, [])
Get a list of entity categories for an entity id. :param entity_id: Entity id :return: Entity categories :type entity_id: string :rtype: [string]
def log_to_api(self):
    """Best-effort API logger.

    Sends accumulated entries to the ThreatConnect API and does nothing
    if the attempt fails.
    """
    if not self.entries:
        return
    try:
        self.session.post(
            '/v2/logs/app',
            headers={'Content-Type': 'application/json'},
            json=self.entries,
        )
    except Exception:
        # Deliberate best-effort: logging must never break the app.
        pass
Best effort API logger. Send logs to the ThreatConnect API and do nothing if the attempt fails.
def outfile_maker(inname, outext='.out', outname='', outdir='', append_to_name=''):
    """Create a default output file path based on the input name.

    Args:
        inname: Path to input file.
        outext: Extension for the output file (with the "."), default ".out".
        outname: Optional explicit output file name (without extension).
        outdir: Optional output directory (defaults to the input's directory).
        append_to_name: Optional suffix appended to the base name.

    Returns:
        str: Path to the final output destination.
    """
    orig_dir, orig_name, orig_ext = split_folder_and_path(inname)
    base = outname or orig_name
    folder = outdir or orig_dir
    if append_to_name:
        base += append_to_name
    return op.join(folder, '{}{}'.format(base, outext))
Create a default name for an output file based on the inname name, unless a output name is specified. Args: inname: Path to input file outext: Optional specified extension for output file (with the "."). Default is ".out". outfile: Optional specified name of output file. outdir: Optional path to output directory. Returns: str: Path to final output destination. Examples: >>> outfile_maker(inname='P00001.fasta') 'P00001.out' >>> outfile_maker(inname='P00001') 'P00001.out' >>> outfile_maker(inname='P00001.fasta', append_to_name='_new') 'P00001_new.out' >>> outfile_maker(inname='P00001.fasta', outext='.mao') 'P00001.mao' >>> outfile_maker(inname='P00001.fasta', outext='.mao', append_to_name='_new') 'P00001_new.mao' >>> outfile_maker(inname='P00001.fasta', outext='.new', outname='P00001_aligned') 'P00001_aligned.new' >>> outfile_maker(inname='P00001.fasta', outname='P00001_aligned') 'P00001_aligned.out' >>> outfile_maker(inname='P00001.fasta', outname='P00001_aligned', append_to_name='_new') 'P00001_aligned_new.out' >>> outfile_maker(inname='P00001.fasta', outname='P00001_aligned', outdir='/my/dir/') '/my/dir/P00001_aligned.out' >>> outfile_maker(inname='/test/other/dir/P00001.fasta', append_to_name='_new') '/test/other/dir/P00001_new.out' >>> outfile_maker(inname='/test/other/dir/P00001.fasta', outname='P00001_aligned') '/test/other/dir/P00001_aligned.out' >>> outfile_maker(inname='/test/other/dir/P00001.fasta', outname='P00001_aligned', outdir='/my/dir/') '/my/dir/P00001_aligned.out'
def generate_sample_sls_module(env_root, module_dir=None):
    """Generate a skeleton Serverless sample module under ``env_root``."""
    if module_dir is None:
        module_dir = os.path.join(env_root, 'sampleapp.sls')
    generate_sample_module(module_dir)
    template_files = ['config-dev-us-east-1.json', 'handler.py',
                      'package.json', 'serverless.yml']
    for fname in template_files:
        shutil.copyfile(
            os.path.join(ROOT, 'templates', 'serverless', fname),
            os.path.join(module_dir, fname),
        )
    LOGGER.info("Sample Serverless module created at %s", module_dir)
Generate skeleton Serverless sample module.
def format_expose(expose):
    """Convert port number(s), as used in the Dockerfile ``EXPOSE`` command,
    to a tuple of strings.

    :param expose: Port numbers: int, string, or a list/tuple of those.
    :return: A tuple, to be separated by spaces before inserting in a
        Dockerfile.
    :rtype: tuple
    """
    if isinstance(expose, six.string_types):
        return expose,
    # Fix: `collections.Iterable` was removed in Python 3.10; fall back
    # through collections.abc when available.
    abc_mod = getattr(collections, 'abc', collections)
    if isinstance(expose, abc_mod.Iterable):
        # Fix: py3 ``map`` is lazy; the documented return type is a tuple.
        return tuple(six.text_type(e) for e in expose)
    return six.text_type(expose),
Converts a port number or multiple port numbers, as used in the Dockerfile ``EXPOSE`` command, to a tuple. :param: Port numbers, can be as integer, string, or a list/tuple of those. :type expose: int | unicode | str | list | tuple :return: A tuple, to be separated by spaces before inserting in a Dockerfile. :rtype: tuple
def decisionviz(model, X, y, colors=None, classes=None, features=None, show_scatter=True, step_size=0.0025, markers=None, pcolormesh_alpha=0.8, scatter_alpha=1.0, title=None, **kwargs):
    """Quick-method wrapper around DecisionBoundariesVisualizer.

    Instantiates the visualizer with the given options, runs
    fit/draw/poof on (X, y) and returns the matplotlib axes.
    """
    viz = DecisionBoundariesVisualizer(
        model, X, y,
        colors=colors,
        classes=classes,
        features=features,
        show_scatter=show_scatter,
        step_size=step_size,
        markers=markers,
        pcolormesh_alpha=pcolormesh_alpha,
        scatter_alpha=scatter_alpha,
        title=title,
        **kwargs)
    viz.fit_draw_poof(X, y, **kwargs)
    return viz.ax
DecisionBoundariesVisualizer is a bivariate data visualization algorithm that plots the decision boundaries of each class. This helper function is a quick wrapper to utilize the DecisionBoundariesVisualizers for one-off analysis. Parameters ---------- model : the Scikit-Learn estimator, required Should be an instance of a classifier, else the __init__ will return an error. x : matrix, required The feature name that corresponds to a column name or index postion in the matrix that will be plotted against the x-axis y : array, required The feature name that corresponds to a column name or index postion in the matrix that will be plotted against the y-axis classes : a list of class names for the legend, default: None If classes is None and a y value is passed to fit then the classes are selected from the target vector. features : list of strings, default: None The names of the features or columns show_scatter : boolean, default: True If boolean is True, then a scatter plot with points will be drawn on top of the decision boundary graph step_size : float percentage, default: 0.0025 Determines the step size for creating the numpy meshgrid that will later become the foundation of the decision boundary graph. The default value of 0.0025 means that the step size for constructing the meshgrid will be 0.25%% of differenes of the max and min of x and y for each feature. markers : iterable of strings, default: ,od*vh+ Matplotlib style markers for points on the scatter plot points pcolormesh_alpha : float, default: 0.8 Sets the alpha transparency for the meshgrid of model boundaries scatter_alpha : float, default: 1.0 Sets the alpha transparency for the scatter plot points title : string, default: stringified feature_one and feature_two Sets the title of the visualization kwargs : keyword arguments passed to the super class. Returns ------- ax : matplotlib axes Returns the axes that the decision boundaries graph were drawn on.
def _parse_patterns(self, pattern):
    """Parse patterns into positive path-splits and a compiled negative
    matcher."""
    self.pattern = []
    self.npatterns = None
    npattern = []
    for p in pattern:
        if _wcparse.is_negative(p, self.flags):
            # Strip the leading negation character; all negatives are
            # compiled together below.
            npattern.append(p[1:])
        else:
            self.pattern.extend(
                [_wcparse.WcPathSplit(x, self.flags).split() for x in _wcparse.expand_braces(p, self.flags)]
            )
    if npattern:
        # Negative patterns are matched with NEGATE/REALPATH toggled off.
        self.npatterns = _wcparse.compile(npattern, self.flags ^ (_wcparse.NEGATE | _wcparse.REALPATH))
    # Only negative patterns given: match everything ('**') and let the
    # negative matcher filter the results.
    if not self.pattern and self.npatterns is not None:
        self.pattern.append(_wcparse.WcPathSplit((b'**' if self.is_bytes else '**'), self.flags).split())
Parse patterns.
def to_inches(value, units):
    """Convert value to inches.

    Parameters
    ----------
    value : float
        Value to be converted.
    units : str
        Units of value; one of 'in', 'cm', 'mm'.

    Raises PlotnineError for unknown units.
    """
    if units == 'in':
        return value
    if units == 'cm':
        return value / 2.54
    if units == 'mm':
        return value / (2.54 * 10)
    raise PlotnineError("Unknown units '{}'".format(units))
Convert value to inches Parameters ---------- value : float Value to be converted units : str Units of value. Must be one of `['in', 'cm', 'mm']`.
def fetch_entities(self):
    """Fetch entities for which we have data and group them by domain."""
    # NOTE(review): the SQL literal appears to have been lost here —
    # text() is called with no argument; TODO confirm the intended query.
    query = text()
    response = self.perform_query(query)
    entities = {}
    domains = set()
    for [entity] in response:
        # Entity ids look like "<domain>.<name>"; group by domain.
        domain = entity.split(".")[0]
        domains.add(domain)
        entities.setdefault(domain, []).append(entity)
    self._domains = list(domains)
    self._entities = entities
    print("There are {} entities with data".format(len(entities)))
Fetch entities for which we have data.
def createDocument_(self, initDict=None):
    """Create and return a document, empty or populated with ``initDict``."""
    return self.documentClass(self, {} if initDict is None else initDict)
create and returns a completely empty document or one populated with initDict
def recursive_iterator(func):
    """Decorates a function by optimizing it for iterator recursion.

    Requires function arguments to be pickleable.
    """
    # Memoize iterators per argument set; the pickled args serve as the
    # hashable cache key.
    tee_store = {}
    @_coconut.functools.wraps(func)
    def recursive_iterator_func(*args, **kwargs):
        hashable_args_kwargs = _coconut.pickle.dumps((args, kwargs), _coconut.pickle.HIGHEST_PROTOCOL)
        try:
            to_tee = tee_store[hashable_args_kwargs]
        except _coconut.KeyError:
            to_tee = func(*args, **kwargs)
        # tee() so the stored iterator is not consumed by the caller.
        tee_store[hashable_args_kwargs], to_return = _coconut_tee(to_tee)
        return to_return
    return recursive_iterator_func
Decorates a function by optimizing it for iterator recursion. Requires function arguments to be pickleable.
def diff_medians(array_one, array_two):
    """Difference in (masked) medians between two arrays of values.

    Returns ``median(array_one) - median(array_two)``; inputs are passed
    through ``check_array`` first, so non-finite values are ignored.
    """
    array_one = check_array(array_one)
    array_two = check_array(array_two)
    return np.ma.median(array_one) - np.ma.median(array_two)
Computes the difference in medians between two arrays of values. Given arrays will be flattened (to 1D array) regardless of dimension, and any non-finite/NaN values will be ignored. Parameters ---------- array_one, array_two : iterable Two arrays of values, possibly of different length. Returns ------- diff_medians : float scalar measuring the difference in medians, ignoring NaNs/non-finite values. Raises ------ ValueError If one or more of the arrays are empty.
def unperturbed_hamiltonian(states):
    r"""Return the unperturbed atomic Hamiltonian for the given states.

    The result is diagonal with entries :math:`\hbar\omega_i`.
    """
    n_states = len(states)
    H0 = np.zeros((n_states, n_states), complex)
    for i, state in enumerate(states):
        H0[i, i] = hbar * state.omega
    return H0
r"""Return the unperturbed atomic hamiltonian for given states. We calculate the atomic hamiltonian in the basis of the ground states of \ rubidium 87 (in GHz). >>> g = State("Rb", 87, 5, 0, 1/Integer(2)) >>> magnetic_states = make_list_of_states([g], "magnetic") >>> print(np.diag(unperturbed_hamiltonian(magnetic_states))/hbar/2/pi*1e-9) [-4.2717+0.j -4.2717+0.j -4.2717+0.j 2.563 +0.j 2.563 +0.j 2.563 +0.j 2.563 +0.j 2.563 +0.j]
def _run_select(self): return self._connection.select( self.to_sql(), self.get_bindings(), not self._use_write_connection )
Run the query as a "select" statement against the connection. :return: The result :rtype: list
def check_status_logfile(self, checker_func):
    """Check on the status of this job by inspecting its logfile.

    *checker_func* is called with ``self.logfile``; its result is stored
    on ``self.status`` and returned.
    """
    new_status = checker_func(self.logfile)
    self.status = new_status
    return new_status
Check on the status of this particular job using the logfile
def _del_flow_entry(self, datapath, in_port, dst, src=None): del_flow = self._del_flow_func.get(datapath.ofproto.OFP_VERSION) assert del_flow del_flow(datapath, in_port, dst, src)
remove a flow entry.
def interpolate_string(self, testString, section):
    """Expand ``${option}`` and ``${section|option}`` placeholders.

    ``${example}`` is replaced with the value of *example* in *section*;
    ``${common|example}`` with the value of *example* in ``[common]``.
    Nested interpolation is not supported.

    Parameters
    ----------
    testString : str
        The string to parse and interpolate.
    section : str
        The section used for single-part placeholders.

    Returns
    -------
    str
        The interpolated string.
    """
    match = re.search(r"\$\{.*?\}", testString)
    while match:
        repString = match.group(0)[2:-1]
        parts = repString.split('|')
        if len(parts) == 1:
            try:
                testString = testString.replace(
                    '${'+repString+'}', self.get(section, parts[0]))
            except ConfigParser.NoOptionError:
                print("Substitution failed")
                raise
        if len(parts) == 2:
            try:
                testString = testString.replace(
                    '${'+repString+'}', self.get(parts[0], parts[1]))
            except ConfigParser.NoOptionError:
                print("Substitution failed")
                raise
        match = re.search(r"\$\{.*?\}", testString)
    return testString
Take a string and replace all example of ExtendedInterpolation formatting within the string with the exact value. For values like ${example} this is replaced with the value that corresponds to the option called example ***in the same section*** For values like ${common|example} this is replaced with the value that corresponds to the option example in the section [common]. Note that in the python3 config parser this is ${common:example} but python2.7 interprets the : the same as a = and this breaks things Nested interpolation is not supported here. Parameters ---------- testString : String The string to parse and interpolate section : String The current section of the ConfigParser object Returns ---------- testString : String Interpolated string
def validate_uuid(value):
    """UUID 128-bit validator.

    Falsy values and existing ``UUID`` instances pass through unchanged;
    anything else is parsed as a version-4 UUID string.

    :raises ValidationError: if the value cannot be parsed as a UUID.
    """
    if not value or isinstance(value, UUID):
        return value
    try:
        return UUID(str(value), version=4)
    except (AttributeError, ValueError):
        raise ValidationError('not a valid UUID')
UUID 128-bit validator
def publish_pdb_state(self):
    """Publish Variable Explorer state and Pdb step through send_spyder_msg."""
    if self._pdb_obj and self._do_publish_pdb_state:
        state = dict(namespace_view=self.get_namespace_view(),
                     var_properties=self.get_var_properties(),
                     step=self._pdb_step)
        self.send_spyder_msg('pdb_state', content={'pdb_state': state})
    # re-arm publishing for the next call regardless of whether we sent
    self._do_publish_pdb_state = True
Publish Variable Explorer state and Pdb step through send_spyder_msg.
def _set_repo(self, url):
    """Set the underlying repo object for *url*.

    HTTP(S) URLs create a ``Proxy`` repo; anything else a ``Local`` repo.

    :raises GritError: if the url is invalid or the repo cannot be opened.
    """
    if url.startswith('http'):
        try:
            self.repo = Proxy(url)
        # Fixed: ``except X, e`` is Python-2-only syntax; ``as`` works on
        # both Python 2.6+ and Python 3.
        except ProxyError as e:
            log.exception('Error setting repo: %s' % url)
            raise GritError(e)
    else:
        try:
            self.repo = Local(url)
        except NotGitRepository:
            raise GritError('Invalid url: %s' % url)
        except Exception as e:
            log.exception('Error setting repo: %s' % url)
            raise GritError(e)
sets the underlying repo object
def mouse_release_event(self, x, y, button):
    """Report left (1) and right (2) mouse button releases with position."""
    button_names = {1: "Left", 2: "Right"}
    name = button_names.get(button)
    if name is not None:
        print(name, "mouse button released @", x, y)
Reports left and right mouse button releases + position
def create_transcript_file(video_id, language_code, file_format, resource_fs, static_dir):
    """Write an SRT transcript file for a video to the given file system.

    Arguments:
        video_id (str): Id of the video the transcript is attached to.
        language_code (str): Language code of the transcript.
        file_format (str): Format of the stored transcript data.
        resource_fs (SubFS): File system to write the transcript to.
        static_dir (str): Directory in which to store the file.

    Returns the transcript filename when transcript data exists, else None.
    """
    filename = '{video_id}-{language_code}.srt'.format(
        video_id=video_id, language_code=language_code
    )
    data = get_video_transcript_data(video_id, language_code)
    if not data:
        return None
    content = Transcript.convert(
        data['content'],
        input_format=file_format,
        output_format=Transcript.SRT
    )
    create_file_in_fs(content, filename, resource_fs, static_dir)
    return filename
Writes transcript file to file system. Arguments: video_id (str): Video id of the video transcript file is attached. language_code (str): Language code of the transcript. file_format (str): File format of the transcript file. static_dir (str): The Directory to store transcript file. resource_fs (SubFS): The file system to store transcripts.
async def get(self, source_, *args, **kwargs):
    """Get a single model instance.

    :param source_: model or base query for lookup

    Example::

        async def my_async_func():
            obj1 = await objects.get(MyModel, id=1)
            obj2 = await objects.get(MyModel, MyModel.id==1)
            obj3 = await objects.get(MyModel.select().where(MyModel.id==1))

    All will return a `MyModel` instance with ``id = 1``.
    """
    await self.connect()
    if isinstance(source_, peewee.Query):
        query, model = source_, source_.model
    else:
        query, model = source_.select(), source_
    filters = list(args)
    filters.extend(getattr(model, key) == value for key, value in kwargs.items())
    if filters:
        query = query.where(*filters)
    try:
        rows = await self.execute(query)
        return list(rows)[0]
    except IndexError:
        raise model.DoesNotExist
Get the model instance. :param source_: model or base query for lookup Example:: async def my_async_func(): obj1 = await objects.get(MyModel, id=1) obj2 = await objects.get(MyModel, MyModel.id==1) obj3 = await objects.get(MyModel.select().where(MyModel.id==1)) All will return `MyModel` instance with `id = 1`
def to_dataframe(self, extra_edges_columns=[]):
    """Return this network as a pandas DataFrame (equivalent to SIF).

    :return: Network as DataFrame.
    """
    raw_network = self.session.get(self.__url).json()
    return df_util.to_dataframe(raw_network, edges_attr_cols=extra_edges_columns)
Return this network in pandas DataFrame. :return: Network as DataFrame. This is equivalent to SIF.
def _clauses(lexer, varname, nvars):
    """Return a tuple of DIMACS CNF clauses parsed from *lexer*."""
    tok = next(lexer)
    lexer.unpop_token(tok)
    # a NOT operator or integer literal starts another clause
    if type(tok) is OP_not or type(tok) is IntegerToken:
        head = _clause(lexer, varname, nvars)
        return (head, ) + _clauses(lexer, varname, nvars)
    return tuple()
Return a tuple of DIMACS CNF clauses.
def get_nts(self, goids=None, sortby=None):
    """Given GO IDs, return a list of namedtuples (one per GO ID)."""
    results = []
    if goids is None:
        goids = self.go_sources
    else:
        chk_goids(goids, "GoSubDag::get_nts")
    if goids:
        ntobj = cx.namedtuple("NtGo", " ".join(self.prt_attr['flds']))
        go2nt = self.get_go2nt(goids)
        for goid, ntgo in self._get_sorted_go2nt(go2nt, sortby):
            assert ntgo is not None, "{GO} NOT IN go2nt".format(GO=goid)
            if goid == ntgo.GO:
                results.append(ntgo)
            else:
                # alternate GO ID: emit a copy keyed by this GO ID
                fields = ntgo._asdict()
                fields['GO'] = goid
                results.append(ntobj(**fields))
    return results
Given GO IDs, get a list of namedtuples.
def prevPlot(self):
    """Move the displayed plot back to the previous one, if any."""
    index = self.stacker.currentIndex()
    if index > 0:
        self.stacker.setCurrentIndex(index - 1)
Moves the displayed plot to the previous one
def to_timedelta(value, strict=True):
    """Convert a duration to a ``timedelta``.

    Accepts an int (seconds), a ``timedelta`` (returned as-is), an
    "H:M:S" string, or an ``(hours, minutes, seconds)`` tuple.

    With ``strict=True`` (the default), out-of-range hours/minutes/seconds
    in a string or tuple raise ``StrictnessError``.
    """
    if isinstance(value, timedelta):
        return value
    if isinstance(value, int):
        return timedelta(seconds=value)
    if isinstance(value, str):
        hours, minutes, seconds = _parse(value, strict)
    elif isinstance(value, tuple):
        check_tuple(value, strict)
        hours, minutes, seconds = value
    else:
        raise TypeError(
            'Value %s (type %s) not supported' % (
                value, type(value).__name__
            )
        )
    return timedelta(hours=hours, minutes=minutes, seconds=seconds)
converts duration string to timedelta strict=True (by default) raises StrictnessError if either hours, minutes or seconds in duration string exceed allowed values
def calibration_stimulus(self, mode):
    """Get the stimulus model for calibration.

    :param mode: Type of stimulus to get: 'tone' or 'noise'
    :type mode: str
    :returns: :class:`StimulusModel<sparkle.stim.stimulus_model.StimulusModel>`
    """
    calibrators = {'tone': 'tone_calibrator', 'noise': 'bs_calibrator'}
    attr = calibrators.get(mode)
    if attr is not None:
        return getattr(self, attr).stimulus
Gets the stimulus model for calibration :param mode: Type of stimulus to get: tone or noise :type mode: str :returns: :class:`StimulusModel<sparkle.stim.stimulus_model.StimulusModel>`
def get_file_hash(fpath, blocksize=65536, hasher=None, stride=1, hexdigest=False):
    r"""Hash the contents of a file.

    For stronger hashes use ``hasher=hashlib.sha256`` and keep ``stride=1``.

    Args:
        fpath (str): path of the file to hash
        blocksize (int): read size per chunk; affects speed
        hasher (None): hash object; defaults to sha1 (fast but insecure)
        stride (int): when > 1, skip ``stride - 1`` blocks between reads
            for faster but less accurate hashing (result then depends on
            blocksize)
        hexdigest (bool): return a hex string instead of raw digest bytes
    """
    if hasher is None:
        hasher = hashlib.sha1()
    with open(fpath, 'rb') as file_:
        while True:
            chunk = file_.read(blocksize)
            if not chunk:
                break
            hasher.update(chunk)
            if stride > 1:
                # relative seek past the blocks we are skipping
                file_.seek(blocksize * (stride - 1), 1)
    return hasher.hexdigest() if hexdigest else hasher.digest()
r""" For better hashes use hasher=hashlib.sha256, and keep stride=1 Args: fpath (str): file path string blocksize (int): 2 ** 16. Affects speed of reading file hasher (None): defaults to sha1 for fast (but insecure) hashing stride (int): strides > 1 skip data to hash, useful for faster hashing, but less accurate, also makes hash dependant on blocksize. References: http://stackoverflow.com/questions/3431825/generating-a-md5-checksum-of-a-file http://stackoverflow.com/questions/5001893/when-should-i-use-sha-1-and-when-should-i-use-sha-2 CommandLine: python -m utool.util_hash --test-get_file_hash python -m utool.util_hash --test-get_file_hash:0 python -m utool.util_hash --test-get_file_hash:1 Example: >>> # DISABLE_DOCTEST >>> from utool.util_hash import * # NOQA >>> fpath = ut.grab_test_imgpath('patsy.jpg') >>> #blocksize = 65536 # 2 ** 16 >>> blocksize = 2 ** 16 >>> hasher = None >>> stride = 1 >>> hashbytes_20 = get_file_hash(fpath, blocksize, hasher, stride) >>> result = repr(hashbytes_20) >>> print(result) '7\x07B\x0eX<sRu\xa2\x90P\xda\xb2\x84?\x81?\xa9\xd9' '\x13\x9b\xf6\x0f\xa3QQ \xd7"$\xe9m\x05\x9e\x81\xf6\xf2v\xe4' '\x16\x00\x80Xx\x8c-H\xcdP\xf6\x02\x9frl\xbf\x99VQ\xb5' Example: >>> # DISABLE_DOCTEST >>> from utool.util_hash import * # NOQA >>> #fpath = ut.grab_file_url('http://en.wikipedia.org/wiki/List_of_comets_by_type') >>> fpath = ut.unixjoin(ut.ensure_app_resource_dir('utool'), 'tmp.txt') >>> ut.write_to(fpath, ut.lorium_ipsum()) >>> blocksize = 2 ** 3 >>> hasher = None >>> stride = 2 >>> hashbytes_20 = get_file_hash(fpath, blocksize, hasher, stride) >>> result = repr(hashbytes_20) >>> print(result) '5KP\xcf>R\xf6\xffO:L\xac\x9c\xd3V+\x0e\xf6\xe1n' Ignore: file_ = open(fpath, 'rb')
def _http_get(self, url, query):
    """Perform the HTTP GET request and return the ``requests`` response.

    When authorization is not sent as a header, the access token is added
    to the query parameters.

    :raises RateLimitExceededError: on an HTTP 429 response.
    """
    if not self.authorization_as_header:
        query.update({'access_token': self.access_token})
    self._normalize_query(query)
    kwargs = {
        'params': query,
        'headers': self._request_headers()
    }
    if self._has_proxy():
        kwargs['proxies'] = self._proxy_parameters()
    response = requests.get(self._url(url), **kwargs)
    if response.status_code == 429:
        raise RateLimitExceededError(response)
    return response
Performs the HTTP GET Request.
def eval_nonagg_call(self, exp):
    """Helper for eval_callx; evaluate a CallX that consumes a single value."""
    args = self.eval(exp.args)
    if exp.f == 'coalesce':
        primary, fallback = args
        return fallback if primary is None else primary
    if exp.f == 'unnest':
        # NOTE: re-evaluates the args, matching the original behavior
        return self.eval(exp.args)[0]
    if exp.f in ('to_tsquery', 'to_tsvector'):
        return set(self.eval(exp.args.children[0]).split())
    raise NotImplementedError('unk_function', exp.f)
helper for eval_callx; evaluator for CallX that consume a single value
def search(self, **kwargs):
    """Search object group permissions general via extended search.

    :param search: Dict containing QuerySets to find permissions.
    :param include: Array of fields to include on the response.
    :param exclude: Array of fields to exclude on the response.
    :param fields: Array of fields overriding the defaults.
    :param kind: 'detail' for detailed results or 'basic'.
    :return: Dict containing object group permissions general.
    """
    url = self.prepare_url('api/v3/object-group-perm-general/', kwargs)
    return super(ApiObjectGroupPermissionGeneral, self).get(url)
Method to search object group permissions general based on extends search. :param search: Dict containing QuerySets to find object group permissions general. :param include: Array containing fields to include on response. :param exclude: Array containing fields to exclude on response. :param fields: Array containing fields to override default fields. :param kind: Determine if result will be detailed ('detail') or basic ('basic'). :return: Dict containing object group permissions general
def parse(self, data, extent, desc_tag):
    """Parse data into a UDF Unallocated Space Descriptor.

    Parameters:
     data - The data to parse.
     extent - The extent this descriptor currently lives at.
     desc_tag - A UDFTag object representing the Descriptor Tag.
    Returns:
     Nothing.
    """
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('UDF Unallocated Space Descriptor already initialized')
    unpacked = struct.unpack_from(self.FMT, data, 0)
    _, self.vol_desc_seqnum, num_alloc, _ = unpacked
    self.desc_tag = desc_tag
    # the spec requires zero allocation descriptors here
    if num_alloc != 0:
        raise pycdlibexception.PyCdlibInvalidISO('UDF Unallocated Space Descriptor allocated descriptors is not 0')
    self.orig_extent_loc = extent
    self._initialized = True
Parse the passed in data into a UDF Unallocated Space Descriptor. Parameters: data - The data to parse. extent - The extent that this descriptor currently lives at. desc_tag - A UDFTag object that represents the Descriptor Tag. Returns: Nothing.
def namedTempFileReader(self) -> NamedTempFileReader:
    """Named Temporary File Reader

    Return an object compatible with NamedTemporaryFile, used for reading
    this file's contents. It is still deleted when the object falls out of
    scope. This works around Windows' inability to read a
    NamedTemporaryFile while it is being written to.
    """
    directory = self._directory()
    # Fixed typo in the assertion message ("receieved" -> "received").
    assert isinstance(directory, Directory), (
        "Expected Directory, received %s" % directory)
    return NamedTempFileReader(directory, self)
Named Temporary File Reader This provides an object compatible with NamedTemporaryFile, used for reading this files contents. This will still delete after the object falls out of scope. This solves the problem on windows where a NamedTemporaryFile can not be read while it's being written to
def ismethoddescriptor(object):
    """Return true if the object is a method descriptor.

    But not if ismethod(), isclass() or isfunction() are true. For
    example, this is true of int.__add__: such an object has a __get__
    attribute but not a __set__ attribute.
    """
    if ismethod(object) or isclass(object) or isfunction(object):
        return False
    return hasattr(object, "__get__") and not hasattr(object, "__set__")
Return true if the object is a method descriptor. But not if ismethod() or isclass() or isfunction() are true. This is new in Python 2.2, and, for example, is true of int.__add__. An object passing this test has a __get__ attribute but not a __set__ attribute, but beyond that the set of attributes varies. __name__ is usually sensible, and __doc__ often is. Methods implemented via descriptors that also pass one of the other tests return false from the ismethoddescriptor() test, simply because the other tests promise more -- you can, e.g., count on having the im_func attribute (etc) when an object passes ismethod().
def provider(func=None, *, singleton=False, injector=None):
    """Decorator to mark a function as a provider.

    Args:
        singleton (bool): whether the provided value is a shared instance.
            When False (the default) the provider runs for every injection.
        injector (Injector): when given, the wrapped provider is registered
            with this injector immediately.

    Example:
        @diay.provider(singleton=True)
        def myfunc() -> MyClass:
            return MyClass(args)
    """
    def decorator(fn):
        wrapped = _wrap_provider_func(fn, {'singleton': singleton})
        if injector:
            injector.register_provider(wrapped)
        return wrapped
    # support both @provider and @provider(...) usage
    return decorator(func) if func else decorator
Decorator to mark a function as a provider. Args: singleton (bool): The returned value should be a singleton or shared instance. If False (the default) the provider function will be invoked again for every time it's needed for injection. injector (Injector): If provided, the function is immediately registered as a provider with the injector instance. Example: @diay.provider(singleton=True) def myfunc() -> MyClass: return MyClass(args)
def construct_chunk(cls, chunk_type, payload, encoding='utf-8'):
    """Construct and return a single chunk (packed header + payload bytes).

    ``str`` payloads are encoded with *encoding*; any other non-bytes
    payload raises ``TypeError``.
    """
    if isinstance(payload, str):
        body = payload.encode(encoding)
    elif isinstance(payload, bytes):
        body = payload
    else:
        raise TypeError('cannot encode type: {}'.format(type(payload)))
    return struct.pack(cls.HEADER_FMT, len(body), chunk_type) + body
Construct and return a single chunk.
def resend_transaction_frames(self, connection, transaction):
    """Resend the frames sent in the specified (aborted) transaction.

    @param connection: The client connection that aborted the transaction.
    @type connection: L{coilmq.server.StompConnection}
    @param transaction: The transaction id that was aborted.
    @type transaction: C{str}
    """
    frames = self._transaction_frames[connection][transaction]
    for frame in frames:
        self.send(frame)
Resend the messages that were ACK'd in specified transaction. This is called by the engine when there is an abort command. @param connection: The client connection that aborted the transaction. @type connection: L{coilmq.server.StompConnection} @param transaction: The transaction id (which was aborted). @type transaction: C{str}
def remove(self):
    """Remove all cgroups this instance represents from the system.

    The instance is not usable afterwards.
    """
    for path in self.paths:
        remove_cgroup(path)
    del self.paths
    del self.per_subsystem
Remove all cgroups this instance represents from the system. This instance is afterwards not usable anymore!
def rename(self, new_lid):
    """Rename the Thing to *new_lid*. `ADVANCED USERS ONLY`

    Changes the local id of this Thing; creating another Thing with the
    old lid afterwards produces a completely new Thing.

    Raises IOTException if the infrastructure detects a problem, and
    LinkException on communication problems with the infrastructure.

    `new_lid` (required) (string) the new local identifier of the Thing
    """
    logger.info("rename(new_lid=\"%s\") [lid=%s]", new_lid, self.__lid)
    event = self._client._request_entity_rename(self.__lid, new_lid)
    self._client._wait_and_except_if_failed(event)
    self.__lid = new_lid
    # NOTE(review): self.__lid was just updated, so both arguments below
    # are the new lid — confirm this is intended (matches original code).
    self._client._notify_thing_lid_change(self.__lid, new_lid)
Rename the Thing. `ADVANCED USERS ONLY` This can be confusing. You are changing the local id of a Thing to `new_lid`. If you create another Thing using the "old_lid", the system will oblige, but it will be a completely _new_ Thing. Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException) containing the error if the infrastructure detects a problem Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException) if there is a communications problem between you and the infrastructure `new_lid` (required) (string) the new local identifier of your Thing
def _FormatIPToken(self, token_data): data = ''.join(['{0:02x}'.format(byte) for byte in token_data.data]) return {'IPv4_Header': data}
Formats an IPv4 packet header token as a dictionary of values. Args: token_data (bsm_token_data_ip): AUT_IP token data. Returns: dict[str, str]: token values.
def probe_git():
    """Return a git repository instance if one exists.

    Logs a warning and returns None outside a repository; exits with
    status 1 when the repository has uncommitted changes.
    """
    try:
        repo = git.Repo()
    except git.InvalidGitRepositoryError:
        LOGGER.warning(
            "We highly recommend keeping your model in a git repository."
            " It allows you to track changes and to easily collaborate with"
            " others via online platforms such as https://github.com.\n")
        return None
    if repo.is_dirty():
        LOGGER.critical(
            "Please git commit or git stash all changes before running"
            " the memote suite.")
        sys.exit(1)
    return repo
Return a git repository instance if it exists.
def metrics(self):
    """Calculate and return the monitor metrics as (name, value) pairs."""
    master_id = self.instances.masterId
    master_throughput, backup_throughput = self.getThroughputs(master_id)
    ratio = self.instance_throughput_ratio(master_id)
    ordered_counts = {i: r[0] for i, r in self.numOrderedRequests.items()}
    ordered_durations = {i: r[1] for i, r in self.numOrderedRequests.items()}
    return [
        ("{} Monitor metrics:".format(self), None),
        ("Delta", self.Delta),
        ("Lambda", self.Lambda),
        ("Omega", self.Omega),
        ("instances started", self.instances.started),
        ("ordered request counts", ordered_counts),
        ("ordered request durations", ordered_durations),
        ("master request latencies", self.masterReqLatencies),
        ("client avg request latencies",
         {i: self.getLatency(i) for i in self.instances.ids}),
        ("throughput", {i: self.getThroughput(i) for i in self.instances.ids}),
        ("master throughput", master_throughput),
        ("total requests", self.totalRequests),
        ("avg backup throughput", backup_throughput),
        ("master throughput ratio", ratio),
    ]
Calculate and return the metrics.
def get_home(self):
    """Get the home location as a MAVLink mission item, or None.

    Prefers a received HOME_POSITION message; falls back to waypoint 0
    from the waypoint loader.
    """
    messages = self.master.messages
    if 'HOME_POSITION' in messages:
        h = messages['HOME_POSITION']
        return mavutil.mavlink.MAVLink_mission_item_message(
            self.target_system, self.target_component,
            0, 0, mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, 0, 0, 0, 0, 0, 0,
            h.latitude * 1.0e-7, h.longitude * 1.0e-7, h.altitude * 1.0e-3)
    if self.wploader.count() > 0:
        return self.wploader.wp(0)
    return None
get home location
def filter_geometry(queryset, **filters):
    """Spatial-lookup filter helper.

    Accepts spatial lookup types as keywords (e.g. ``intersects=geom``)
    instead of the usual ``geometryfield__lookuptype`` form.
    """
    fieldname = geo_field(queryset).name
    lookups = {}
    for lookup, geom in filters.items():
        lookups['%s__%s' % (fieldname, lookup)] = geom
    return queryset.filter(**lookups)
Helper function for spatial lookups filters. Provide spatial lookup types as keywords without underscores instead of the usual "geometryfield__lookuptype" format.
def logout(self):
    """Log the currently authenticated user out, invalidating existing tokens.

    Removes this client's entry for ``self.uri`` from the UAA cache file
    and rewrites the cache.
    """
    data = self._read_uaa_cache()
    if self.uri in data:
        # Fixed: the original called list.remove() while iterating the
        # same list, which can skip entries; rebuild the list instead.
        data[self.uri] = [
            client for client in data[self.uri]
            if client['id'] != self.client['id']
        ]
        with open(self._cache_path, 'w') as output:
            output.write(json.dumps(data, sort_keys=True, indent=4))
Log currently authenticated user out, invalidating any existing tokens.
def default(self):
    """Return the default value used when generating new instances.

    String defaults are converted via ``valueFromString``.

    :return <variant>
    """
    # NOTE: ``unicode`` implies this code targets Python 2
    value = self.__default
    if isinstance(value, (str, unicode)):
        return self.valueFromString(value)
    return value
Returns the default value for this column to return when generating new instances. :return <variant>
def post_worker_init(worker):
    """Gunicorn hook: print a runserver-style banner after worker launch.

    Mimics the behaviour of Django's stock runserver command.
    """
    quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'
    bind_urls = ', '.join('http://{0}/'.format(b) for b in worker.cfg.bind)
    banner = (
        "Django version {djangover}, Gunicorn version {gunicornver}, "
        "using settings {settings!r}\n"
        "Starting development server at {urls}\n"
        "Quit the server with {quit_command}.\n"
    ).format(
        djangover=django.get_version(),
        gunicornver=gunicorn.__version__,
        settings=os.environ.get('DJANGO_SETTINGS_MODULE'),
        urls=bind_urls,
        quit_command=quit_command,
    )
    sys.stdout.write(banner)
Hook into Gunicorn to display message after launching. This mimics the behaviour of Django's stock runserver command.
def dag(self) -> Tuple[Dict, Dict]:
    """Construct the DAG of this pipeline.

    Built from the pipeline's operations and their downstream operations.
    """
    from pipelines import dags

    def get_downstream(operation):
        return operation.downstream_operations.values_list('id', flat=True)

    ops = self.operations.all().prefetch_related('downstream_operations')
    return dags.get_dag(ops, get_downstream)
Construct the DAG of this pipeline based on the its operations and their downstream.
def get_supervisor(func: types.AnyFunction) -> types.Supervisor:
    """Return the appropriate supervisor partially applied to *func*.

    Coroutine functions get the async supervisor; everything else the
    sync one.

    Args:
        func: A function.

    Raises:
        TypeError: if *func* is not callable.
    """
    if not callable(func):
        raise TypeError("func is not callable")
    chosen = (_async_supervisor if asyncio.iscoroutinefunction(func)
              else _sync_supervisor)
    return functools.partial(chosen, func)
Get the appropriate supervisor to use and pre-apply the function. Args: func: A function.