code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def calc_L(A):
    """Calculate the Leontief inverse L = (I - A)^-1 from coefficient matrix A.

    Parameters
    ----------
    A : pandas.DataFrame or numpy.ndarray
        Symmetric input-output table (coefficients).

    Returns
    -------
    pandas.DataFrame or numpy.ndarray
        Leontief input-output table L; the type matches the type of A
        (a DataFrame result keeps A's index/columns).
    """
    I = np.eye(A.shape[0])
    # isinstance also accepts DataFrame subclasses, unlike the old
    # `type(A) is pd.DataFrame` identity check.
    if isinstance(A, pd.DataFrame):
        return pd.DataFrame(np.linalg.inv(I - A),
                            index=A.index, columns=A.columns)
    return np.linalg.inv(I - A)
Calculate the Leontief L from A Parameters ---------- A : pandas.DataFrame or numpy.array Symmetric input output table (coefficients) Returns ------- pandas.DataFrame or numpy.array Leontief input output table L The type is determined by the type of A. If DataFrame index/columns as A
def by_owner(cls, session, owner_name):
    """Get packages from a given owner username.

    Delegates to ``cls.find`` with a join on the owners relation,
    filtering on ``User.login`` and ordering by package name.

    :param session: SQLAlchemy session
    :param owner_name: owner username
    :return: package instances (whatever ``cls.find`` yields)
    """
    return cls.find(session, join=(cls.owners), where=(User.login == owner_name,), order_by=cls.name)
Get packages from a given owner username. :param session: SQLAlchemy session :type session: :class:`sqlalchemy.Session` :param owner_name: owner username :type owner_name: unicode :return: package instances :rtype: generator of :class:`pyshop.models.Package`
def _write(self, data):
    """Serialize *data* as block-style YAML and write it to the config file."""
    self.path.write_text(yaml.dump(data, default_flow_style=False))
Write data to config file.
def import_transformer(name):
    """If needed, import transformer *name* and cache it in the global dict.

    Transformer modules are plain Python, so the custom import hook
    (assumed first on ``sys.meta_path``) is removed for the duration of
    the import and restored afterwards — the normal import machinery is
    faster and more reliable.
    """
    if name in transformers:
        return transformers[name]
    hook = sys.meta_path[0]
    sys.meta_path = sys.meta_path[1:]
    try:
        transformers[name] = __import__(name)
        if CONSOLE_ACTIVE:
            if hasattr(transformers[name], "NO_CONSOLE"):
                print(transformers[name].NO_CONSOLE)
                transformers[name] = NullTransformer()
    except ImportError:
        sys.stderr.write("Warning: Import Error in add_transformers: %s not found\n" % name)
        transformers[name] = NullTransformer()
    except Exception as e:
        sys.stderr.write("Unexpected exception in transforms.import_transformer%s\n " % e.__class__.__name__)
        # Bug fix: previously nothing was cached on this path, so the
        # final lookup below raised KeyError after an unexpected failure.
        transformers[name] = NullTransformer()
    finally:
        sys.meta_path.insert(0, hook)
    return transformers[name]
If needed, import a transformer, and adds it to the globally known dict The code inside a module where a transformer is defined should be standard Python code, which does not need any transformation. So, we disable the import hook, and let the normal module import do its job - which is faster and likely more reliable than our custom method.
def destroy(
        self, request, pk=None, parent_lookup_seedteam=None,
        parent_lookup_seedteam__organization=None):
    """Remove a permission from a team.

    Verifies the requester's team permissions (raises on failure) before
    delegating deletion to the parent viewset.
    """
    self.check_team_permissions(
        request, parent_lookup_seedteam,
        parent_lookup_seedteam__organization)
    return super(TeamPermissionViewSet, self).destroy(
        request, pk, parent_lookup_seedteam,
        parent_lookup_seedteam__organization)
Remove a permission from a team.
def reload(self, **params):
    """Reload the datatype from Riak, discarding local modifications.

    Keyword params (r, pr, basic_quorum, notfound_ok, timeout,
    include_context) are passed through to the client fetch.

    :raises ValueError: if bucket or key is unset
    :raises TypeError: if the fetched datatype does not match this type
    :rtype: :class:`Datatype`
    """
    if not self.bucket:
        raise ValueError('bucket property not assigned')
    if not self.key:
        raise ValueError('key property not assigned')
    dtype, value, context = self.bucket._client._fetch_datatype(
        self.bucket, self.key, **params)
    if not dtype == self.type_name:
        # NOTE(review): the message interpolates self.__class__ and
        # TYPES[dtype], which reads oddly, but is kept byte-identical.
        raise TypeError("Expected datatype {} but "
                        "got datatype {}".format(self.__class__,
                                                 TYPES[dtype]))
    # Reset local state before installing the freshly fetched value.
    self.clear()
    self._context = context
    self._set_value(value)
    return self
Reloads the datatype from Riak. .. warning: This clears any local modifications you might have made. :param r: the read quorum :type r: integer, string, None :param pr: the primary read quorum :type pr: integer, string, None :param basic_quorum: whether to use the "basic quorum" policy for not-founds :type basic_quorum: bool :param notfound_ok: whether to treat not-found responses as successful :type notfound_ok: bool :param timeout: a timeout value in milliseconds :type timeout: int :param include_context: whether to return the opaque context as well as the value, which is useful for removal operations on sets and maps :type include_context: bool :rtype: :class:`Datatype`
def applications(self):
    """Access the applications (lazily constructed on first use).

    :returns: twilio.rest.api.v2010.account.application.ApplicationList
    :rtype: twilio.rest.api.v2010.account.application.ApplicationList
    """
    if self._applications is not None:
        return self._applications
    self._applications = ApplicationList(
        self._version, account_sid=self._solution['sid'],
    )
    return self._applications
Access the applications :returns: twilio.rest.api.v2010.account.application.ApplicationList :rtype: twilio.rest.api.v2010.account.application.ApplicationList
def copy(self):
    """Create a copy of the TT-matrix (copies the tt cores and mode sizes)."""
    duplicate = matrix()
    duplicate.tt = self.tt.copy()
    duplicate.n = self.n.copy()
    duplicate.m = self.m.copy()
    return duplicate
Creates a copy of the TT-matrix
def complete_upload(self):
    """Complete the multipart upload; call once all parts are uploaded to S3.

    :returns: the bucket's CompletedMultiPartUpload result object.
    """
    return self.bucket.complete_multipart_upload(
        self.key_name, self.id, self.to_xml())
Complete the MultiPart Upload operation. This method should be called when all parts of the file have been successfully uploaded to S3. :rtype: :class:`boto.s3.multipart.CompletedMultiPartUpload` :returns: An object representing the completed upload.
def parse_region(self, include, region_type, region_end, line):
    """Extract a Shape from a region string and append it to self.shapes.

    :raises DS9RegionParserError: if no coordinate system was declared
        before the first region line.
    """
    if self.coordsys is None:
        raise DS9RegionParserError("No coordinate system specified and a"
                                   " region has been found.")
    else:
        # Delegate the per-line parse to a helper carrying global metadata.
        helper = DS9RegionParser(coordsys=self.coordsys,
                                 include=include,
                                 region_type=region_type,
                                 region_end=region_end,
                                 global_meta=self.global_meta,
                                 line=line)
        helper.parse()
        self.shapes.append(helper.shape)
Extract a Shape from a region string
def selfcheck(tools):
    """Audit the system for missing external tools.

    :param tools: mapping/pairs of (tool_name, check_cli); each CLI is run
        through the shell and a non-zero exit marks the tool unusable.
    :returns: newline-joined problem report, or a ready message.
    """
    problems = []
    for tool_name, check_cli in collections.OrderedDict(tools).items():
        try:
            subprocess.check_output(check_cli, shell=True,
                                    stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError:
            problems.append('%r not found or not usable.' % tool_name)
    if problems:
        return '\n'.join(problems)
    return 'Your system is ready.'
Audit the system for issues. :param tools: Tools description. Use elevation.TOOLS to test elevation.
def _short_ts_regexp():
    """Build the compiled regexp for parsing shortened relative timestamps."""
    parts = [r'(?P<{0}>\d+{0}\s*)?'.format(unit)
             for unit in it.chain(_short_ts_days, _short_ts_s)]
    return re.compile('^' + ''.join(parts), re.I | re.U)
Generates regexp for parsing of shortened relative timestamps, as shown in the table.
def add_connection_throttle(self, loadbalancer, maxConnectionRate=None,
        maxConnections=None, minConnections=None, rateInterval=None):
    """Create or update connection throttling on a load balancer.

    When first creating the throttle all four parameters must be supplied;
    on update at least one must be supplied.

    :returns: the API response body.
    """
    settings = {}
    # Bug fix: compare against None rather than truthiness so an explicit
    # 0 (e.g. minConnections=0) is still sent to the API.
    if maxConnectionRate is not None:
        settings["maxConnectionRate"] = maxConnectionRate
    if maxConnections is not None:
        settings["maxConnections"] = maxConnections
    if minConnections is not None:
        settings["minConnections"] = minConnections
    if rateInterval is not None:
        settings["rateInterval"] = rateInterval
    req_body = {"connectionThrottle": settings}
    uri = "/loadbalancers/%s/connectionthrottle" % utils.get_id(loadbalancer)
    resp, body = self.api.method_put(uri, body=req_body)
    return body
Creates or updates the connection throttling information for the load balancer. When first creating the connection throttle, all 4 parameters must be supplied. When updating an existing connection throttle, at least one of the parameters must be supplied.
def install_unicast_keys(self, client_nonce):
    """Use *client_nonce* to compute and install the pairwise keys.

    Derives the PTK via the PRF and slices it into KCK, KEK, TK and the
    two MIC keys; also resets the client IV counter.
    """
    pmk = self.pmk
    anonce = self.anonce
    snonce = client_nonce
    amac = mac2str(self.mac)        # AP MAC as raw bytes
    smac = mac2str(self.client)     # STA MAC as raw bytes
    self.ptk = customPRF512(pmk, amac, smac, anonce, snonce)
    # PTK layout: KCK | KEK | TK | MIC(AP->STA) | MIC(STA->AP)
    self.kck = self.ptk[:16]
    self.kek = self.ptk[16:32]
    self.tk = self.ptk[32:48]
    self.mic_ap_to_sta = self.ptk[48:56]
    self.mic_sta_to_ap = self.ptk[56:64]
    # Fresh monotonically increasing IV for subsequent frames.
    self.client_iv = count()
Use the client nonce @client_nonce to compute and install PTK, KCK, KEK, TK, MIC (AP -> STA), MIC (STA -> AP)
def remove(self, attendees):
    """Remove the provided attendees from the event.

    :param attendees: a str address, an Attendee, or a list/tuple of either
    :raises ValueError: on any other parameter type
    """
    if isinstance(attendees, (list, tuple)):
        attendees = {a.address if isinstance(a, Attendee) else a
                     for a in attendees}
    elif isinstance(attendees, str):
        attendees = {attendees}
    elif isinstance(attendees, Attendee):
        attendees = {attendees.address}
    else:
        raise ValueError('Incorrect parameter type for attendees')
    # Keep only attendees whose address is not in the removal set.
    self.__attendees = [a for a in self.__attendees
                        if a.address not in attendees]
    self._track_changes()
Remove the provided attendees from the event :param attendees: list of attendees to remove :type attendees: str or tuple(str, str) or Attendee or list[str] or list[tuple(str,str)] or list[Attendee]
def header_proc(self, inputstring, header="file", initial="initial", use_hash=None, **kwargs):
    """Add the header: pre-header, docstring, then the main header.

    The main header is minified when self.minify is set; the pre-header
    is always unpolished.
    """
    pre_header = self.getheader(initial, use_hash=use_hash, polish=False)
    main_header = self.getheader(header, polish=False)
    if self.minify:
        main_header = minify(main_header)
    return pre_header + self.docstring + main_header + inputstring
Add the header.
def reset(self):
    """Reset the device.

    Opens the managed handle, releases claimed resources for the duration
    of the backend reset, then reacquires them.
    """
    # Bug fix: a stray bare `r` token (an orphaned raw-docstring prefix)
    # preceded the body and would raise NameError at runtime.
    self._ctx.managed_open()
    self._ctx.dispose(self, False)
    self._ctx.backend.reset_device(self._ctx.handle)
    self._ctx.dispose(self, True)
Reset the device.
def browse_podcasts(self, podcast_genre_id='JZCpodcasttopchartall'):
    """Get the podcasts for a genre from the Podcasts browse tab.

    Parameters:
        podcast_genre_id (str, Optional): A podcast genre ID as found in
            :meth:`browse_podcasts_genres`.
            Default: ``'JZCpodcasttopchartall'``.

    Returns:
        list: Podcast dicts.
    """
    body = self._call(
        mc_calls.PodcastBrowse,
        podcast_genre_id=podcast_genre_id
    ).body
    return body.get('series', [])
Get the podcasts for a genre from the Podcasts browse tab. Parameters: podcast_genre_id (str, Optional): A podcast genre ID as found in :meth:`browse_podcasts_genres`. Default: ``'JZCpodcasttopchartall'``. Returns: list: Podcast dicts.
def register(filename):
    """Register a VM with VirtualBox from the given file.

    Returns True on success, otherwise the command's stderr output.

    :raises CommandExecutionError: if *filename* does not exist.

    CLI Example:

    .. code-block:: bash

        salt '*' vboxmanage.register my_vm_filename
    """
    if not os.path.isfile(filename):
        raise CommandExecutionError(
            'The specified filename ({0}) does not exist.'.format(filename)
        )
    cmd = '{0} registervm {1}'.format(vboxcmd(), filename)
    ret = salt.modules.cmdmod.run_all(cmd)
    if ret['retcode'] == 0:
        return True
    return ret['stderr']
Register a VM CLI Example: .. code-block:: bash salt '*' vboxmanage.register my_vm_filename
def astensor(array: TensorLike) -> BKTensor:
    """Convert an array-like object to a TensorFlow tensor with dtype CTYPE."""
    return tf.convert_to_tensor(value=array, dtype=CTYPE)
Convert a numpy array to a TensorFlow tensor.
def resolve_rva(self, rva):
    """Map an in-memory RVA to its file offset.

    Finds the section containing *rva*, then applies the delta between
    the section's file position and its virtual address — there is no
    direct formula for an RVA's file offset.
    """
    section = self.get_section_of_rva(rva)
    delta = section.PointerToRawData - section.VirtualAddress
    return rva + delta
RVAs are supposed to be used with the image of the file in memory. There's no direct algorithm to calculate the offset of an RVA in the file. What we do here is to find the section that contains the RVA and then we calculate the offset between the RVA of the section and the offset of the section in the file. With this offset, we can compute the position of the RVA in the file
def reshuffle_batches(self, indices, rng):
    """Randomly permute whole global batches, keeping intra-batch order.

    :param indices: 1-D torch.tensor of batch indices
    :param rng: torch.Generator driving the permutation
    """
    grouped = indices.view(-1, self.global_batch_size)
    permutation = torch.randperm(grouped.shape[0], generator=rng)
    return grouped[permutation, :].view(-1)
Permutes global batches :param indices: torch.tensor with batch indices :param rng: instance of torch.Generator
def sollen(tex, command):
    r"""Measure solution length.

    :param tex: the LaTeX source as a string or file buffer
    :param str command: the command denoting a solution, e.g. for
        '\answer{<answer here>}' the command is 'answer'
    :return int: total character count of all matching commands' contents
    """
    # Bug fix: a stray bare `r` token (an orphaned raw-docstring prefix)
    # preceded the return and would raise NameError at runtime.
    return sum(len(a.string) for a in TexSoup(tex).find_all(command))
Measure solution length :param Union[str,buffer] tex: the LaTeX source as a string or file buffer :param str command: the command denoting a solution i.e., if the tex file uses '\answer{<answer here>}', then the command is 'answer'. :return int: the solution length
def _setup_profiles(self, conversion_profiles):
    """Add the given conversion profiles, skipping invalid ones.

    Each profile is a sequence of format names; every consecutive pair
    must already be present in self.converters, otherwise the profile is
    rejected with a warning.
    """
    for key, path in conversion_profiles.items():
        if isinstance(path, str):
            # A single-step profile may be given as a bare string.
            path = (path, )
        for left, right in pair_looper(path):
            pair = (_format(left), _format(right))
            if pair not in self.converters:
                msg = 'Invalid conversion profile %s, unknown step %s'
                log.warning(msg % (repr(key), repr(pair)))
                break
        else:
            # for/else: register only when no step broke out of the loop.
            self.conversion_profiles[key] = path
Add given conversion profiles checking for invalid profiles
def get_signature_request_file(self, signature_request_id, path_or_file=None,
                               file_type=None, filename=None):
    """Download the PDF (or zip) copy of the signature request documents.

    Args:
        signature_request_id (str): id of the signature request
        path_or_file (str or file): writable file-like object or full
            destination path
        file_type (str): "pdf" (merged, default) or "zip" (individual docs)
        filename (str): [DEPRECATED] destination path; use path_or_file

    Returns:
        True if the file was downloaded and written, False otherwise.
    """
    url = self.SIGNATURE_REQUEST_DOWNLOAD_PDF_URL + signature_request_id
    if file_type:
        url += '?file_type=%s' % file_type
    request = self._get_request()
    destination = path_or_file or filename
    return request.get_file(url, destination)
Download the PDF copy of the current documents Args: signature_request_id (str): Id of the signature request path_or_file (str or file): A writable File-like object or a full path to save the PDF file to. filename (str): [DEPRECATED] Filename to save the PDF file to. This should be a full path. file_type (str): Type of file to return. Either "pdf" for a single merged document or "zip" for a collection of individual documents. Defaults to "pdf" if not specified. Returns: True if file is downloaded and successfully written, False otherwise.
def all_balances(currency, services=None, verbose=False, timeout=None):
    """Get balances for *currency* across all (or the given) exchange services.

    Failures are best-effort: unimplemented or erroring exchanges are
    skipped (reported when verbose) rather than aborting the whole run.
    """
    balances = {}
    if not services:
        services = [
            x(verbose=verbose, timeout=timeout)
            for x in ExchangeUniverse.get_authenticated_services()
        ]
    for e in services:
        try:
            balances[e] = e.get_exchange_balance(currency)
        except NotImplementedError:
            if verbose:
                print(e.name, "balance not implemented")
        except Exception as exc:
            # Deliberately broad: one failing exchange must not break the rest.
            if verbose:
                print(e.name, "failed:", exc.__class__.__name__, str(exc))
    return balances
Get balances for passed in currency for all exchanges.
def get_smooth_step_function(min_val, max_val, switch_point, smooth_factor):
    """Return f(x) = min_val + (max_val - min_val) * tanh((x - switch_point) / smooth_factor).

    f(switch_point) == min_val and f(x) -> max_val as x -> infinity; a
    larger smooth_factor gives a gentler, less cliff-like transition.
    """
    span = max_val - min_val

    def _smooth_step(x):
        return min_val + span * tanh((x - switch_point) / smooth_factor)

    return _smooth_step
Returns a function that moves smoothly between a minimal value and a maximal one as its argument increases from a given switch point towards infinity. Arguments --------- min_val: float The value the function returns when x=switch_point. max_val: float The value the function converges to as x -> infinity. switch_point: float The point at which the function's value is min_val. Smaller x values return values smaller than min_val. smooth_factor: float The bigger it is, the smoother and less cliff-like the function. Returns ------- function The desired smooth function.
def minimum(self):
    """Find the minimum reaction energy E_min and its mixing ratio x_min.

    Returns:
        Tuple (x_min, E_min) taken over all kinks.
    """
    candidates = ((x, energy) for _, x, energy, _, _ in self.get_kinks())
    return min(candidates, key=lambda pair: pair[1])
Finds the minimum reaction energy E_min and corresponding mixing ratio x_min. Returns: Tuple (x_min, E_min).
def _output_function_label(self):
    """Decide whether to output the function label in assembly.

    The label is emitted unless the first instruction of the block at the
    function's own address already carries a label.

    :return: True if we should output the function label, False otherwise.
    :rtype: bool
    """
    if self.asm_code:
        return True
    if not self.blocks:
        return True
    entry = next((b for b in self.blocks if b.addr == self.addr), None)
    if entry is None or not entry.instructions:
        return True
    # Suppress our label only when the original first instruction has one.
    return not entry.instructions[0].labels
Determines if we want to output the function label in assembly. We output the function label only when the original instruction does not output the function label. :return: True if we should output the function label, False otherwise. :rtype: bool
def import_lsdinst(self, struct_data):
    """Populate this instrument from an lsdinst struct dict."""
    payload = struct_data['data']
    self.name = struct_data['name']
    self.automate = payload['automate']
    self.pan = payload['pan']
    if self.table is not None:
        self.table.import_lsdinst(struct_data)
import from an lsdinst struct
def search_track(self, artist, album=None, track=None,
                 full_album_art_uri=False):
    """Search for an artist, an artist's albums, or a specific track.

    Args:
        artist (str): an artist's name.
        album (str, optional): an album name. Default `None`.
        track (str, optional): a track name. Default `None`.
        full_album_art_uri (bool): whether the album art URI should be
            absolute (i.e. including the IP address). Default `False`.

    Returns:
        A `SearchResult` instance tagged with search_type 'search_track'.
    """
    subcategories = [artist, album or '']
    result = self.get_album_artists(
        full_album_art_uri=full_album_art_uri,
        subcategories=subcategories,
        search_term=track,
        complete_result=True)
    result._metadata['search_type'] = 'search_track'
    return result
Search for an artist, an artist's albums, or specific track. Args: artist (str): an artist's name. album (str, optional): an album name. Default `None`. track (str, optional): a track name. Default `None`. full_album_art_uri (bool): whether the album art URI should be absolute (i.e. including the IP address). Default `False`. Returns: A `SearchResult` instance.
def delete_datastore(self):
    """Delete this resource from the HDX datastore.

    Failures are logged at debug level rather than raised.

    Returns:
        None
    """
    success, result = self._read_from_hdx('datastore', self.data['id'], 'resource_id', self.actions()['datastore_delete'], force=True)
    if not success:
        logger.debug(result)
Delete a resource from the HDX datastore Returns: None
def resend_welcome_message(self, user, base_url):
    """Regenerate the email-confirmation link and resend the welcome message.

    The refreshed confirmation state is persisted before sending.
    """
    user.require_email_confirmation()
    self.save(user)
    self.send_welcome_message(user, base_url)
Regenerate email link and resend welcome
def _set_rc(self):
    """Set the rcparams and defaultParams for this plotter.

    Builds regex patterns from the formatoption keys and uses them to
    slice the plotter-specific and user sub-dicts out of rcParams.
    """
    base_str = self._get_rc_strings()
    # Fix: use a raw string for the regex escape — a plain '\.' is a
    # deprecated escape sequence (SyntaxWarning on modern Python).
    pattern_base = map(lambda s: s.replace('.', r'\.'), base_str)
    pattern = '(%s)(?=$)' % '|'.join(self._get_formatoptions())
    self._rc = rcParams.find_and_replace(base_str, pattern=pattern,
                                         pattern_base=pattern_base)
    user_rc = SubDict(rcParams['plotter.user'], base_str, pattern=pattern,
                      pattern_base=pattern_base)
    self._rc.update(user_rc.data)
    self._defaultParams = SubDict(rcParams.defaultParams, base_str,
                                  pattern=pattern,
                                  pattern_base=pattern_base)
Method to set the rcparams and defaultParams for this plotter
def auto_generate_missing_tabs(self):
    """Auto-generate a default tab for every registered model lacking one."""
    for config in models_config.get_all_configs():
        model_alias = '{}.{}'.format(config.app_label, config.model_name)
        if model_alias not in self.tabs:
            # The decorator registers the layout immediately per iteration,
            # so the closure has no late-binding dependence on loop variables.
            @self.register(model_alias)
            def general_layout(obj):
                return Layout(
                    Column12(
                        Panel(
                            'info',
                            DescriptionList(*[f.name for f in obj.get_fields()])
                        )
                    )
                )
Auto generate tabs for models with no tabs
def dmxData(self, data: tuple):
    """Store DMX data normalized to exactly 512 slots.

    For legacy devices and to prevent errors: extra channels beyond 512
    are dropped and missing ones are zero-filled.
    """
    normalized = list(data[:512])
    normalized.extend([0] * (512 - len(normalized)))
    self._dmxData = tuple(normalized)
    self.length = 126 + len(self._dmxData)
For legacy devices and to prevent errors, the length of the DMX data is normalized to 512
def hexdump(stream):
    """Display stream contents in hexadecimal and ASCII, 16 bytes per row.

    :param stream: a file-like object whose ``read`` returns bytes, or an
        in-memory str/bytes value to dump directly.
    """
    if isinstance(stream, (str, bytes)):
        # Normalize in-memory input to a readable byte stream; str input
        # previously broke on Python 3 (BytesIO requires bytes).
        if isinstance(stream, str):
            stream = stream.encode('latin-1')
        stream = BytesIO(stream)
    row = 0
    while True:
        data = stream.read(16)
        if not data:
            break
        # bytes.hex() replaces the Python-2-only data.encode('hex') codec.
        hextets = data.hex().ljust(32)
        canonical = printable(data)
        print('%08x %s %s |%s|' % (
            row * 16,
            ' '.join(hextets[x:x + 2] for x in range(0x00, 0x10, 2)),
            ' '.join(hextets[x:x + 2] for x in range(0x10, 0x20, 2)),
            canonical,
        ))
        row += 1
Display stream contents in hexadecimal and ASCII format. The ``stream`` specified must either be a file-like object that supports the ``read`` method to receive bytes, or it can be a string. To dump a file:: >>> hexdump(open(filename, 'rb')) # doctest: +SKIP Or to dump stdin:: >>> import sys >>> hexdump(sys.stdin) # doctest: +SKIP :param stream: stream input
def _get_gpu_pci_devices(self):
    """Return the PCI devices whose class/subclass codes mark them as GPUs."""
    items = self._get_pci_devices()['Items']
    return [dev for dev in items
            if dev['ClassCode'] in CLASSCODE_FOR_GPU_DEVICES
            and dev['SubclassCode'] in SUBCLASSCODE_FOR_GPU_DEVICES]
Returns the list of gpu devices.
def get_filtering_contenthandler(element):
    """Build a `FilteringLIGOLWContentHandler` to exclude this element.

    Parameters
    ----------
    element : `type`, subclass of :class:`~ligo.lw.ligolw.Element`
        the element to exclude (and its children)

    Returns
    -------
    contenthandler : `type`
        a handler subclass that excludes the element and its children
    """
    from ligo.lw.ligolw import FilteringLIGOLWContentHandler
    from ligo.lw.table import Table
    if issubclass(element, Table):
        def _element_filter(name, attrs):
            # NOTE(review): `~` bitwise-inverts CheckProperties' result;
            # on a plain bool that yields -1/-2, which still acts truthy/
            # falsy as intended here — confirm against the upstream API.
            return ~element.CheckProperties(name, attrs)
    else:
        def _element_filter(name, _):
            return name != element.tagName
    return build_content_handler(FilteringLIGOLWContentHandler, _element_filter)
Build a `FilteringLIGOLWContentHandler` to exclude this element Parameters ---------- element : `type`, subclass of :class:`~ligo.lw.ligolw.Element` the element to exclude (and its children) Returns ------- contenthandler : `type` a subclass of :class:`~ligo.lw.ligolw.FilteringLIGOLWContentHandler` to exclude an element and its children
def update(self, **kwargs):
    """Update the Account resource with the given content.

    Args:
        name (str): human-readable name for the account

    Returns:
        a new Account object wrapping the updated resource.
    """
    updated_resource = self.resource.update(kwargs)
    return self.__class__(updated_resource, self.client, wallet=self.wallet)
Update the Account resource with specified content. Args: name (str): Human-readable name for the account Returns: the updated Account object.
def search(self):
    """Apply the stored filter expression to the dataframe.

    Builds a restricted eval environment containing only the helper
    search functions and the dataframe's columns, then evaluates
    ``self._filterString`` in it.

    Returns:
        tuple: (indexes, success) — the boolean index produced by the
        filter and whether evaluation succeeded.
    """
    env = {
        'freeSearch': self.freeSearch,
        'extentSearch': self.extentSearch,
        'indexSearch': self.indexSearch,
    }
    for col in self._dataFrame.columns:
        env[col] = self._dataFrame[col]
    try:
        # NOTE(review): eval of a user-supplied string is inherently
        # risky; '__builtins__': None limits but does not prevent abuse.
        searchIndex = eval(self._filterString, {'__builtins__': None}, env)
    except (NameError, SyntaxError, ValueError, TypeError):
        # Four previously separate, identical handlers collapsed into one:
        # any malformed/unresolvable filter simply reports failure.
        return [], False
    return searchIndex, True
Applies the filter to the stored dataframe. A safe environment dictionary will be created, which stores all allowed functions and attributes, which may be used for the filter. If any object in the given `filterString` could not be found in the dictionary, the filter does not apply and returns `False`. Returns: tuple: A (indexes, success)-tuple, which indicates identified objects by applying the filter and if the operation was successful in general.
def name(self, address):
    """Look up the name that *address* points to via reverse lookup.

    Reverse lookup is opt-in for name owners.

    :param address: hex-string address
    """
    return self.resolve(address_to_reverse_domain(address), get='name')
Look up the name that the address points to, using a reverse lookup. Reverse lookup is opt-in for name owners. :param address: :type address: hex-string
def delivery_note_pdf(self, delivery_note_id):
    """Open a pdf of a delivery note.

    :param delivery_note_id: the delivery note id
    :return: dict
    """
    return self._create_get_request(
        resource=DELIVERY_NOTES,
        billomat_id=delivery_note_id,
        command=PDF,
    )
Opens a pdf of a delivery note :param delivery_note_id: the delivery note id :return: dict
def sigma_clip(data, sigma=3, sigma_lower=None, sigma_upper=None, maxiters=5, cenfunc='median', stdfunc='std', axis=None, masked=True, return_bounds=False, copy=True):
    """Perform sigma-clipping on the provided data.

    Thin functional wrapper: the clipping parameters configure a
    :class:`SigmaClip` instance, and *axis*, *masked*, *return_bounds*
    and *copy* are forwarded to its call.
    """
    sigclip = SigmaClip(sigma=sigma, sigma_lower=sigma_lower, sigma_upper=sigma_upper, maxiters=maxiters, cenfunc=cenfunc, stdfunc=stdfunc)
    return sigclip(data, axis=axis, masked=masked, return_bounds=return_bounds, copy=copy)
Perform sigma-clipping on the provided data. The data will be iterated over, each time rejecting values that are less or more than a specified number of standard deviations from a center value. Clipped (rejected) pixels are those where:: data < cenfunc(data [,axis=int]) - (sigma_lower * stdfunc(data [,axis=int])) data > cenfunc(data [,axis=int]) + (sigma_upper * stdfunc(data [,axis=int])) Invalid data values (i.e. NaN or inf) are automatically clipped. For an object-oriented interface to sigma clipping, see :class:`SigmaClip`. .. note:: `scipy.stats.sigmaclip <https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.sigmaclip.html>`_ provides a subset of the functionality in this class. Also, its input data cannot be a masked array and it does not handle data that contains invalid values (i.e. NaN or inf). Also note that it uses the mean as the centering function. If your data is a `~numpy.ndarray` with no invalid values and you want to use the mean as the centering function with ``axis=None`` and iterate to convergence, then `scipy.stats.sigmaclip` is ~25-30% faster than the equivalent settings here (``sigma_clip(data, cenfunc='mean', maxiters=None, axis=None)``). Parameters ---------- data : array-like or `~numpy.ma.MaskedArray` The data to be sigma clipped. sigma : float, optional The number of standard deviations to use for both the lower and upper clipping limit. These limits are overridden by ``sigma_lower`` and ``sigma_upper``, if input. The default is 3. sigma_lower : float or `None`, optional The number of standard deviations to use as the lower bound for the clipping limit. If `None` then the value of ``sigma`` is used. The default is `None`. sigma_upper : float or `None`, optional The number of standard deviations to use as the upper bound for the clipping limit. If `None` then the value of ``sigma`` is used. The default is `None`. 
maxiters : int or `None`, optional The maximum number of sigma-clipping iterations to perform or `None` to clip until convergence is achieved (i.e., iterate until the last iteration clips nothing). If convergence is achieved prior to ``maxiters`` iterations, the clipping iterations will stop. The default is 5. cenfunc : {'median', 'mean'} or callable, optional The statistic or callable function/object used to compute the center value for the clipping. If set to ``'median'`` or ``'mean'`` then having the optional `bottleneck`_ package installed will result in the best performance. If using a callable function/object and the ``axis`` keyword is used, then it must be callable that can ignore NaNs (e.g. `numpy.nanmean`) and has an ``axis`` keyword to return an array with axis dimension(s) removed. The default is ``'median'``. .. _bottleneck: https://github.com/kwgoodman/bottleneck stdfunc : {'std'} or callable, optional The statistic or callable function/object used to compute the standard deviation about the center value. If set to ``'std'`` then having the optional `bottleneck`_ package installed will result in the best performance. If using a callable function/object and the ``axis`` keyword is used, then it must be callable that can ignore NaNs (e.g. `numpy.nanstd`) and has an ``axis`` keyword to return an array with axis dimension(s) removed. The default is ``'std'``. axis : `None` or int or tuple of int, optional The axis or axes along which to sigma clip the data. If `None`, then the flattened data will be used. ``axis`` is passed to the ``cenfunc`` and ``stdfunc``. The default is `None`. masked : bool, optional If `True`, then a `~numpy.ma.MaskedArray` is returned, where the mask is `True` for clipped values. If `False`, then a `~numpy.ndarray` and the minimum and maximum clipping thresholds are returned. The default is `True`. return_bounds : bool, optional If `True`, then the minimum and maximum clipping bounds are also returned. 
copy : bool, optional If `True`, then the ``data`` array will be copied. If `False` and ``masked=True``, then the returned masked array data will contain the same array as the input ``data`` (if ``data`` is a `~numpy.ndarray` or `~numpy.ma.MaskedArray`). The default is `True`. Returns ------- result : flexible If ``masked=True``, then a `~numpy.ma.MaskedArray` is returned, where the mask is `True` for clipped values. If ``masked=False``, then a `~numpy.ndarray` is returned. If ``return_bounds=True``, then in addition to the (masked) array above, the minimum and maximum clipping bounds are returned. If ``masked=False`` and ``axis=None``, then the output array is a flattened 1D `~numpy.ndarray` where the clipped values have been removed. If ``return_bounds=True`` then the returned minimum and maximum thresholds are scalars. If ``masked=False`` and ``axis`` is specified, then the output `~numpy.ndarray` will have the same shape as the input ``data`` and contain ``np.nan`` where values were clipped. If ``return_bounds=True`` then the returned minimum and maximum clipping thresholds will be be `~numpy.ndarray`\\s. See Also -------- SigmaClip, sigma_clipped_stats Examples -------- This example uses a data array of random variates from a Gaussian distribution. We clip all points that are more than 2 sample standard deviations from the median. The result is a masked array, where the mask is `True` for clipped data:: >>> from astropy.stats import sigma_clip >>> from numpy.random import randn >>> randvar = randn(10000) >>> filtered_data = sigma_clip(randvar, sigma=2, maxiters=5) This example clips all points that are more than 3 sigma relative to the sample *mean*, clips until convergence, returns an unmasked `~numpy.ndarray`, and does not copy the data:: >>> from astropy.stats import sigma_clip >>> from numpy.random import randn >>> from numpy import mean >>> randvar = randn(10000) >>> filtered_data = sigma_clip(randvar, sigma=3, maxiters=None, ... 
cenfunc=mean, masked=False, copy=False) This example sigma clips along one axis:: >>> from astropy.stats import sigma_clip >>> from numpy.random import normal >>> from numpy import arange, diag, ones >>> data = arange(5) + normal(0., 0.05, (5, 5)) + diag(ones(5)) >>> filtered_data = sigma_clip(data, sigma=2.3, axis=0) Note that along the other axis, no points would be clipped, as the standard deviation is higher.
def basename(self, suffix=''):
    """The basename of the template file, with *suffix* stripped if present.

    Returns None when no file is associated.
    """
    if not self._file:
        return None
    # Bug fix: os.path.basename() takes a single argument; the old call
    # passed `suffix` as a second positional arg and raised TypeError.
    # Suffix stripping (basename(1)-style) is applied here instead —
    # presumed intent of the `suffix` parameter.
    base = os.path.basename(self._file)
    if suffix and base.endswith(suffix):
        base = base[:-len(suffix)]
    return base
The basename of the template file.
def get_my_subscribed_partitions(self, groupid, topic):
    """List the partitions of a topic that a consumer group is subscribed to.

    :param groupid: the consumer group ID for the consumer
    :param topic: the topic name
    :returns: list of partitions
    :rtype: list
    """
    offsets_path = "/consumers/{group_id}/offsets/{topic}".format(
        group_id=groupid,
        topic=topic,
    )
    return self.get_children(offsets_path)
Get the list of partitions of a topic that a consumer is subscribed to :param: groupid: The consumer group ID for the consumer :param: topic: The topic name :returns list of partitions :rtype: list
def pick_event_handler(self, event):
    """Handle matplotlib pick events on the axis label artists.

    Fires an 'axis_click' Event with axis_num 0 for the x label and 1
    for the y label.
    """
    info = {'options': self.get_available_channels(),
            'guiEvent': event.mouseevent.guiEvent,
            }
    # hasattr guards: the label artists may not have been created yet.
    if hasattr(self, 'xlabel_artist') and (event.artist == self.xlabel_artist):
        info['axis_num'] = 0
        self.callback(Event('axis_click', info))
    if hasattr(self, 'ylabel_artist') and (event.artist == self.ylabel_artist):
        info['axis_num'] = 1
        self.callback(Event('axis_click', info))
Handles pick events
def rooms(request, template="rooms.html"):
    """Homepage view — render a list of all chat rooms."""
    return render(request, template, {"rooms": ChatRoom.objects.all()})
Homepage - lists all rooms.
def get_pycons3rt_home_dir():
    """Return the pycons3rt home directory based on the OS.

    :return: (str) full path to pycons3rt home
    :raises: OSError on an unsupported operating system
    """
    system = platform.system()
    if system == 'Linux':
        return os.path.join(os.path.sep, 'etc', 'pycons3rt')
    if system == 'Windows':
        return os.path.join('C:', os.path.sep, 'pycons3rt')
    if system == 'Darwin':
        return os.path.join(os.path.expanduser('~'), '.pycons3rt')
    raise OSError('Unsupported Operating System')
Returns the pycons3rt home directory based on OS :return: (str) Full path to pycons3rt home :raises: OSError
def _unpickle_collection(self, collection):
    """Unpickle every member of the dictionary (values may be items or lists)."""
    for member in collection.values():
        if isinstance(member, list):
            for entry in member:
                entry.unpickle(self)
        else:
            member.unpickle(self)
Unpickles all members of the specified dictionary.
def _to_ned(self):
    """Return (tensor, tensor_sigma) transformed to the NED reference frame.

    :raises ValueError: if the current reference frame is not USE or NED.
    """
    # Bug fix: compare strings with `==`, not identity (`is`), which only
    # worked by accident of CPython string interning.
    if self.ref_frame == 'USE':
        return utils.use_to_ned(self.tensor), \
            utils.use_to_ned(self.tensor_sigma)
    elif self.ref_frame == 'NED':
        return self.tensor, self.tensor_sigma
    else:
        raise ValueError('Reference frame %s not recognised - cannot '
                         'transform to NED!' % self.ref_frame)
Switches the reference frame to NED
def get_available_references(self, datas):
    """Get available manifest reference names.

    Every key starting with the ``nomenclature.RULE_REFERENCE`` prefix
    declares a reference; only name validation is performed on them.

    Arguments:
        datas (dict): data to scan for reference declarations.

    Returns:
        list: every available reference name, unprefixed.
    """
    prefix_len = len(RULE_REFERENCE) + 1
    return [key[prefix_len:] for key in datas
            if key.startswith(RULE_REFERENCE)]
Get available manifest reference names. Every rules starting with prefix from ``nomenclature.RULE_REFERENCE`` are available references. Only name validation is performed on these references. Arguments: datas (dict): Data where to search for reference declarations. Returns: list: List of every available reference names. This is the real name unprefixed.
def metadata(self):
    """Retrieve the metadata info for this prefix (lazily loaded).

    Returns:
        dict: metadata info; empty when the metadata file is unreadable.
    """
    if self._metadata is None:
        try:
            with open(self.paths.metadata()) as metadata_fd:
                self._metadata = json.load(metadata_fd)
        except IOError:
            # A missing/unreadable metadata file degrades to an empty dict.
            self._metadata = {}
    return self._metadata
Retrieve the metadata info for this prefix Returns: dict: metadata info
def CreateGallery():
    """Create a Gallery on the server.

    Returns a Gallery object carrying the editor_id and reader_id.
    """
    response = _dopost('http://min.us/api/CreateGallery')
    return Gallery(response["reader_id"], editor_id=response["editor_id"])
Creates a Gallery on the server. Returns a Gallery object with the editor_id and reader_id.
def get_indelcaller(d_or_c):
    """Retrieve the indelcaller name to use, or "" if not specified.

    Accepts either a config dict directly, or a data dict that wraps the
    config under a "config" key.
    """
    # Bug fix: the original conditional returned `d_or_c` on BOTH
    # branches, so the embedded "config" dict was never dereferenced.
    if isinstance(d_or_c, dict) and "config" in d_or_c:
        config = d_or_c["config"]
    else:
        config = d_or_c
    indelcaller = config["algorithm"].get("indelcaller", "")
    if not indelcaller:
        return ""
    if isinstance(indelcaller, (list, tuple)):
        return indelcaller[0] if indelcaller else ""
    return indelcaller
Retrieve string for indelcaller to use, or empty string if not specified.
def to_header(self):
    """Convert the etags set into a HTTP header string."""
    if self.star_tag:
        return "*"
    tags = ['"%s"' % etag for etag in self._strong]
    tags += ['W/"%s"' % etag for etag in self._weak]
    return ", ".join(tags)
Convert the etags set into a HTTP header string.
def populate_csv_headers(rows, partial_headers, column_headers_count=1):
    """Fill empty CSV header cells from earlier ("upper") headers, then join.

    Each entry of *partial_headers* is one header split into layers; an
    empty layer inherits the nearest non-empty value from a preceding
    entry at the same layer index.  Returns one joined string per entry.
    """
    result = [''] * (len(rows) - column_headers_count)
    for i_index in range(0, len(partial_headers)):
        for k_index in range(0, len(partial_headers[i_index])):
            if not partial_headers[i_index][k_index] and i_index - 1 >= 0:
                # Walk backwards to the closest filled header cell.
                for t_index in range(i_index - 1, -1, -1):
                    partial_value = partial_headers[t_index][k_index]
                    if partial_value:
                        partial_headers[i_index][k_index] = partial_value
                        break
        result[i_index] = " ".join(map(str, partial_headers[i_index]))
    return result
Populate csv rows headers when are empty, extending the superior or upper headers.
def LogHttpFrontendAccess(self, request, source=None, message_count=None):
    """Write a log entry for a Frontend or UI Request.

    Args:
        request: A HttpRequest protobuf.
        source: Client id of the client initiating the request. Optional.
        message_count: Number of messages received from the client. Optional.
    """
    event_id = self.GetNewEventId()
    log_msg = "%s-%s [%s]: %s %s %s %s (%d)" % (
        event_id, request.source_ip, source or "<unknown>", request.method,
        request.url, request.user_agent, request.user, message_count or 0)
    logging.info(log_msg)
Write a log entry for a Frontend or UI Request. Args: request: A HttpRequest protobuf. source: Client id of the client initiating the request. Optional. message_count: Number of messages received from the client. Optional.
def use_plenary_repository_view(self):
    """Pass through to provider AssetRepositorySession.use_plenary_repository_view.

    Provider sessions that do not support the view are silently skipped.
    """
    self._repository_view = PLENARY
    for session in self._get_provider_sessions():
        try:
            session.use_plenary_repository_view()
        except AttributeError:
            pass
Pass through to provider AssetRepositorySession.use_plenary_repository_view
def transcriptions(self):
    """Access the transcriptions (lazily constructed on first use).

    :returns: twilio.rest.api.v2010.account.transcription.TranscriptionList
    :rtype: twilio.rest.api.v2010.account.transcription.TranscriptionList
    """
    if self._transcriptions is not None:
        return self._transcriptions
    self._transcriptions = TranscriptionList(
        self._version, account_sid=self._solution['sid'],
    )
    return self._transcriptions
Access the transcriptions :returns: twilio.rest.api.v2010.account.transcription.TranscriptionList :rtype: twilio.rest.api.v2010.account.transcription.TranscriptionList
def set_rows(self, ids):
    """Set the rows of the table.

    ``ids`` must be a list of integer row ids; each id is rendered via
    ``self._get_row`` and the resulting items are pushed to the JS
    table widget, after which the active sort is re-applied.
    """
    assert all(isinstance(i, int) for i in ids)
    # Fall back to the default sort when no explicit sort is active;
    # direction defaults to descending.
    sort_col, sort_dir = self.current_sort
    default_sort_col, default_sort_dir = self.default_sort
    sort_col = sort_col or default_sort_col
    sort_dir = sort_dir or default_sort_dir or 'desc'
    logger.log(5, "Set %d rows in the table.", len(ids))
    items = [self._get_row(id) for id in ids]
    data = _create_json_dict(items=items, cols=self.column_names, )
    self.eval_js('table.setData({});'.format(data))
    # Reloading the data resets ordering, so sort again.
    if sort_col:
        self.sort_by(sort_col, sort_dir)
Set the rows of the table.
def _history_buffer_pos_changed(self, _):
    """Synchronize the default buffer when the history cursor moves.

    Only acts while the history buffer has focus and its cursor sits on
    a line that is part of the current selection.
    """
    if self.app.current_buffer == self.history_buffer:
        line_no = self.history_buffer.document.cursor_position_row
        if line_no in self.history_mapping.selected_lines:
            # Selected lines are compacted in the default buffer, so the
            # target line is the rank of this line among the selection,
            # shifted by the mapping's result offset.
            default_lineno = sorted(self.history_mapping.selected_lines).index(line_no) + \
                self.history_mapping.result_line_offset
            self.default_buffer.cursor_position = \
                self.default_buffer.document.translate_row_col_to_index(default_lineno, 0)
When the cursor changes in the history buffer. Synchronize.
def generate_schema_file(config_file):
    """Generate a basic schema file skeleton from a configuration file.

    Every section/option pair found in the ini file receives a
    placeholder ``description`` entry in the resulting schema.
    """
    config = utils.load_config_from_ini_file(config_file)
    schema = {}
    for section in config:
        for option in config[section]:
            entry = schema.setdefault(section, {}).setdefault(option, {})
            entry['description'] = 'No description provided.'
    return utils.dump_schema_file(schema)
Generates a basic confirm schema file from a configuration file.
def tickets(self, extra_params=None):
    """Return all Tickets in this Space.

    :param extra_params: optional dict of extra query parameters;
        values override the defaults below.
    """
    params = {
        'per_page': settings.MAX_PER_PAGE,
        'report': 0,  # NOTE(review): presumably 0 = plain listing, not a saved report — confirm with API docs
    }
    if extra_params:
        params.update(extra_params)
    return self.api._get_json(
        Ticket,
        space=self,
        rel_path=self._build_rel_path('tickets'),
        extra_params=params,
        get_all=True,  # follow pagination and collect every page
    )
All Tickets in this Space
def get_version():
    """Return the formatted project version string.

    Returns:
        str: "version + updated" via ``FORMAT_STRING`` when both are
        available, the bare version, the localized update date, or ""
        when nothing usable is set.
    """
    updated_is_date = isinstance(UPDATED, (date, datetime))
    if VERSION and UPDATED and updated_is_date:
        return FORMAT_STRING.format(version=VERSION, updated=UPDATED)
    if VERSION:
        return VERSION
    if UPDATED:
        return localize(UPDATED) if updated_is_date else ""
    return ""
Return formatted version string. Returns: str: string with project version or empty string.
def _initVirtualOutputs(self): self.virtualOutputs = {} for product in self.outputNames: self.virtualOutputs[product] = None
Sets up the structure to hold all the output data arrays for this image in memory.
def update_vlan(self, name, vid, vni):
    """Map a vlan id to a vni on the named VXLAN interface.

    Args:
        name (str): interface to configure.
        vid (str, int): vlan id to map to the vni.
        vni (str, int): vni value to use.

    Returns:
        True if the command completes successfully.
    """
    command = 'vxlan vlan %s vni %s' % (vid, vni)
    return self.configure_interface(name, command)
Adds a new vlan to vni mapping for the interface

EosVersion:
    4.13.7M

Args:
    name (str): The name of the interface to configure
    vid (str, int): The vlan id to map to the vni
    vni (str, int): The vni value to use

Returns:
    True if the command completes successfully
def initialize(self, app: Flask, app_config):
    """Prepare the server to run and determine the port.

    :param app: The Flask application.
    :param app_config: configuration mapping; this method uses the
        ``debug`` (bool) and ``http.port`` (int) keys.
    """
    debug = app_config["debug"]
    port = app_config["http.port"]
    if debug:
        # Debug mode: run Flask's built-in server (blocking call).
        self.started_on_port = port
        app.run(host="0.0.0.0", debug=True, port=port)
    else:
        # Production: probe up to 50 consecutive ports until one binds.
        for port in range(port, port + 50):
            self.http_server = WSGIServer(('0.0.0.0', port), app)
            try:
                self.http_server.start()
            except OSError:
                # Port already in use; try the next one.
                continue
            self.started_on_port = port
            break
Prepare the server to run and determine the port. :param app: The Flask Application. :param app_config: Configuration dictionary. This module uses the `debug` (`True`/`False`) and `http.port` attributes.
def count_protein_group_hits(lineproteins, groups):
    """Count, per protein group, how many of ``lineproteins`` it contains.

    Takes a list of protein accessions and a list of protein groups;
    returns one stringified count per group, in group order.
    """
    return [
        str(sum(1 for protein in lineproteins if protein in group))
        for group in groups
    ]
Takes a list of protein accessions and a list of protein groups content from DB. Counts for each group in list how many proteins are found in lineproteins. Returns list of str amounts.
def set_monitoring_transaction_name(name, group=None, priority=None):
    """Set the transaction name for monitoring.

    This is not cached, and only supports reporting to New Relic.
    Silently does nothing when the ``newrelic`` package is unavailable
    (module-level ``newrelic`` is falsy in that case).
    """
    if not newrelic:
        return
    newrelic.agent.set_transaction_name(name, group, priority)
Sets the transaction name for monitoring. This is not cached, and only support reporting to New Relic.
def parse_list_objects_v2(data, bucket_name):
    """Parse a ListObjectsV2 XML response.

    :param data: raw XML response body for list objects v2.
    :param bucket_name: bucket the listing belongs to.
    :return: three components: the list of objects plus common-prefix
        "directories", the truncation flag, and the continuation token
        for the next request (may be None on the last page).
    """
    root = S3Element.fromstring('ListObjectV2Result', data)

    is_truncated = root.get_child_text('IsTruncated').lower() == 'true'
    # The token is absent on the final page; non-strict lookup avoids raising.
    continuation_token = root.get_child_text('NextContinuationToken', strict=False)
    objects, object_dirs = _parse_objects_from_xml_elts(
        bucket_name,
        root.findall('Contents'),
        root.findall('CommonPrefixes')
    )
    return objects + object_dirs, is_truncated, continuation_token
Parser for list objects version 2 response. :param data: Response data for list objects. :param bucket_name: Response for the bucket. :return: Returns three distinct components: - List of :class:`Object <Object>` - True if list is truncated, False otherwise. - Continuation Token for the next request.
def verify(ctx):
    """CLI command: run verification across all configured sections.

    Exits with status 0 when every configuration verified successfully,
    1 otherwise.
    """
    oks = run_configurations(
        skipper(verify_environments),
        read_sections,
    )
    # Any single False result fails the whole run.
    ctx.exit(0 if False not in oks else 1)
Verify the configured environments; exit with status 0 when all configurations verify, 1 otherwise.
def line_intersects_itself(lons, lats, closed_shape=False):
    """Return ``True`` if the line of points intersects itself.

    A line whose last point repeats the first is considered
    self-intersecting. The line is defined by sequences of longitudes
    and latitudes (depth is not taken into account).

    :param closed_shape:
        If ``True`` the check is repeated with the point sequence rolled
        by one, so the closing segment is also tested; useful for
        validating polygons.
    """
    assert len(lons) == len(lats)

    if len(lons) <= 3:
        # Up to 3 points are treated as non-self-intersecting.
        return False

    west, east, north, south = get_spherical_bounding_box(lons, lats)
    proj = OrthographicProjection(west, east, north, south)

    xx, yy = proj(lons, lats)
    if not shapely.geometry.LineString(list(zip(xx, yy))).is_simple:
        return True

    if closed_shape:
        # Shift the sequence by one point and test again.
        xx, yy = proj(numpy.roll(lons, 1), numpy.roll(lats, 1))
        if not shapely.geometry.LineString(list(zip(xx, yy))).is_simple:
            return True

    return False
Return ``True`` if line of points intersects itself. Line with the last point repeating the first one considered intersecting itself. The line is defined by lists (or numpy arrays) of points' longitudes and latitudes (depth is not taken into account). :param closed_shape: If ``True`` the line will be checked twice: first time with its original shape and second time with the points sequence being shifted by one point (the last point becomes first, the first turns second and so on). This is useful for checking that the sequence of points defines a valid :class:`~openquake.hazardlib.geo.polygon.Polygon`.
def set_approvers(self, approver_ids=None, approver_group_ids=None, **kwargs):
    """Change MR-level allowed approvers and approver groups.

    Args:
        approver_ids (list): User IDs that can approve MRs.
        approver_group_ids (list): Group IDs whose members can approve MRs.

    Raises:
        GitlabAuthenticationError: If authentication is not correct.
        GitlabUpdateError: If the server failed to perform the request.
    """
    # BUG FIX: the previous defaults were mutable lists (`=[]`) shared
    # across calls; use None sentinels instead. Caller behavior is
    # unchanged: omitting an argument still sends an empty list.
    if approver_ids is None:
        approver_ids = []
    if approver_group_ids is None:
        approver_group_ids = []
    path = '%s/%s/approvers' % (self._parent.manager.path,
                                self._parent.get_id())
    data = {'approver_ids': approver_ids,
            'approver_group_ids': approver_group_ids}
    self.gitlab.http_put(path, post_data=data, **kwargs)
Change MR-level allowed approvers and approver groups. Args: approver_ids (list): User IDs that can approve MRs approver_group_ids (list): Group IDs whose members can approve MRs Raises: GitlabAuthenticationError: If authentication is not correct GitlabUpdateError: If the server failed to perform the request
def get_type(type_name):
    """Resolve an importable dotted name to the object it refers to.

    Parameters
    ----------
    type_name : `str`
        Fully-qualified name, such as ``mypackage.MyClass``.

    Returns
    -------
    object
        The resolved object.
    """
    parts = type_name.split('.')
    if len(parts) < 2:
        raise SphinxError(
            'Type must be fully-qualified, '
            'of the form ``module.MyClass``. Got: {}'.format(type_name)
        )
    module_name = ".".join(parts[:-1])
    attr_name = parts[-1]
    return getattr(import_module(module_name), attr_name)
Get a type given its importable name. Parameters ---------- task_name : `str` Name of the Python type, such as ``mypackage.MyClass``. Returns ------- object The object.
def ignore_reports(self):
    """Ignore future reports on this object.

    This prevents future reports from causing notifications or
    appearing in the various moderation listings. The report count
    will still increment.
    """
    url = self.reddit_session.config['ignore_reports']
    data = {'id': self.fullname}
    return self.reddit_session.request_json(url, data=data)
Ignore future reports on this object. This prevents future reports from causing notifications or appearing in the various moderation listing. The report count will still increment.
def _check_tagmode_and_tmos_version(self, **kwargs):
    """Raise if 'tagMode' is used against a TMOS version below 11.6.0.

    :param kwargs: dict -- keyword arguments for the request
    :raises: TagModeDisallowedForTMOSVersion
    """
    tmos_version = self._meta_data['bigip']._meta_data['tmos_version']
    if LooseVersion(tmos_version) < LooseVersion('11.6.0'):
        msg = "The parameter, 'tagMode', is not allowed against the " \
              "following version of TMOS: %s" % (tmos_version)
        # 'tagMode' may arrive as a request kwarg or already be set on self.
        if 'tagMode' in kwargs or hasattr(self, 'tagMode'):
            raise TagModeDisallowedForTMOSVersion(msg)
Raise an exception if tagMode in kwargs and tmos version < 11.6.0 :param kwargs: dict -- keyword arguments for request :raises: TagModeDisallowedForTMOSVersion
def create_domain(self, domain_name):
    """Create a SimpleDB domain.

    :type domain_name: string
    :param domain_name: The name of the new domain

    :rtype: :class:`boto.sdb.domain.Domain` object
    :return: The newly created domain
    """
    params = {'DomainName': domain_name}
    domain = self.get_object('CreateDomain', params, Domain)
    domain.name = domain_name
    return domain
Create a SimpleDB domain. :type domain_name: string :param domain_name: The name of the new domain :rtype: :class:`boto.sdb.domain.Domain` object :return: The newly created domain
def add_predicate(self, key, value, predicate_type='equals'):
    """Add a key/value/type combination as a predicate.

    :param key: query KEY parameter
    :param value: the value used in the predicate
    :param predicate_type: the type of predicate (e.g. ``equals``);
        aliases are resolved through the operator lookup table.
    """
    if predicate_type not in operators:
        # Try to resolve an alias into a canonical operator name.
        predicate_type = operator_lkup.get(predicate_type)
    if not predicate_type:
        raise Exception("predicate type not a valid operator")
    self.predicates.append({'type': predicate_type,
                            'key': key,
                            'value': value})
add key, value, type combination of a predicate :param key: query KEY parameter :param value: the value used in the predicate :param predicate_type: the type of predicate (e.g. ``equals``)
def positional(max_positional_args):
    """A decorator that declares only the first N arguments may be positional.

    Makes it easy to support Python 3 style keyword-only parameters.
    May be used with an integer count (``@positional(1)``) or bare
    (``@positional``); in the bare form every argument with a default
    value becomes keyword-only.

    Args:
        max_positional_args: Maximum number of positional arguments, or
            (bare usage) the function being decorated, in which case the
            count is derived from the number of non-defaulted arguments.

    Returns:
        A decorator that raises ``TypeError`` when more than
        ``max_positional_args`` positional parameters are passed.

    Raises:
        TypeError: when a keyword-only argument is provided positionally.
        ValueError: in bare usage, when the function has no arguments
            with default values.
    """
    def positional_decorator(wrapped):
        @functools.wraps(wrapped)
        def positional_wrapper(*args, **kwargs):
            if len(args) > max_positional_args:
                plural_s = '' if max_positional_args == 1 else 's'
                raise TypeError('%s() takes at most %d positional argument%s '
                                '(%d given)' % (wrapped.__name__,
                                                max_positional_args,
                                                plural_s, len(args)))
            return wrapped(*args, **kwargs)
        return positional_wrapper

    # Integer form: return the decorator itself. (Previously checked
    # six.integer_types; plain int is equivalent on Python 3 and drops
    # the third-party dependency.)
    if isinstance(max_positional_args, int):
        return positional_decorator

    # Bare usage: derive the count from non-defaulted arguments.
    # BUG FIX: inspect.getargspec was deprecated and removed in
    # Python 3.11; getfullargspec (available since 3.0) replaces it.
    spec = inspect.getfullargspec(max_positional_args)
    if spec.defaults is None:
        raise ValueError(
            'Functions with no keyword arguments must specify '
            'max_positional_args')
    return positional(len(spec.args) - len(spec.defaults))(max_positional_args)
A decorator that declares only the first N arguments may be positional. This decorator makes it easy to support Python 3 style keyword-only parameters. For example, in Python 3 it is possible to write: def fn(pos1, *, kwonly1=None, kwonly2=None): ... All named parameters after * must be a keyword: fn(10, 'kw1', 'kw2') # Raises exception. fn(10, kwonly1='kw1') # Ok. Example: To define a function like above, do: @positional(1) def fn(pos1, kwonly1=None, kwonly2=None): ... If no default value is provided to a keyword argument, it becomes a required keyword argument: @positional(0) def fn(required_kw): ... This must be called with the keyword parameter: fn() # Raises exception. fn(10) # Raises exception. fn(required_kw=10) # Ok. When defining instance or class methods always remember to account for 'self' and 'cls': class MyClass(object): @positional(2) def my_method(self, pos1, kwonly1=None): ... @classmethod @positional(2) def my_class_method(cls, pos1, kwonly1=None): ... One can omit the argument to 'positional' altogether, and then no arguments with default values may be passed positionally. This would be equivalent to placing a '*' before the first argument with a default value in Python 3. If there are no arguments with default values, and no argument is given to 'positional', an error is raised. @positional def fn(arg1, arg2, required_kw1=None, required_kw2=0): ... fn(1, 3, 5) # Raises exception. fn(1, 3) # Ok. fn(1, 3, required_kw1=5) # Ok. Args: max_positional_args: Maximum number of positional arguments. All parameters after this index must be keyword only. Returns: A decorator that prevents using arguments after max_positional_args from being used as positional parameters. Raises: TypeError if a keyword-only argument is provided as a positional parameter. ValueError if no maximum number of arguments is provided and the function has no arguments with default values.
def _validate(self, data, owner=None):
    """Validate ``data`` against this schema.

    :param data: data to validate (a dict or an object with attributes).
    :param Schema owner: owning schema, if any.
    :raises ValueError: if data is null while not nullable, or if a
        required property is missing.
    """
    # Resolve lazily-computed values before validating.
    if isinstance(data, DynamicValue):
        data = data()

    if data is None and not self.nullable:
        raise ValueError('Value can not be null')

    elif data is not None:
        # Dicts are inspected by key; other objects by attribute.
        isdict = isinstance(data, dict)

        for name, schema in iteritems(self.getschemas()):
            if name == 'default':
                continue

            if name in self.required:
                if (
                    (isdict and name not in data)
                    or (not isdict and not hasattr(data, name))
                ):
                    part1 = (
                        'Mandatory property {0} by {1} is missing in {2}.'.
                        format(name, self, data)
                    )
                    part2 = '{0} expected.'.format(schema)
                    error = '{0} {1}'.format(part1, part2)
                    raise ValueError(error)

            elif (isdict and name in data) or hasattr(data, name):
                # Present optional properties are validated recursively.
                value = data[name] if isdict else getattr(data, name)
                schema._validate(data=value, owner=self)
Validate input data in returning an empty list if true. :param data: data to validate with this schema. :param Schema owner: schema owner. :raises: Exception if the data is not validated.
def add_entry_point(self, destination):
    """Add an entry point.

    :param destination: node to route to initially
    :type destination: str
    :return: the set of entry-point destinations (shared with
        ``self.routes``)
    """
    entry_points = self.routes.setdefault('__entry_point', set())
    entry_points.add(destination)
    return entry_points
\ Add an entry point :param destination: node to route to initially :type destination: str
def write(self, session, directory, name, maskMap):
    """Write from database to file.

    *session* = SQLAlchemy session object
    *directory* = directory the file will be written to (e.g. '/example/path')
    *name* = name of the file to write (e.g. 'my_project.ext'); when no
    extension is supplied, ``self.fileExtension`` is appended.
    """
    # Split off an explicit extension, if any.
    name_split = name.split('.')
    name = name_split[0]
    extension = ''
    if len(name_split) >= 2:
        extension = name_split[-1]

    # Name preprocessing is optional; fall back to the raw name on failure.
    # BUG FIX: this was a bare ``except`` with a no-op string statement,
    # which also swallowed KeyboardInterrupt/SystemExit; narrowed to
    # Exception.
    try:
        name = self._namePreprocessor(name)
    except Exception:
        pass

    if extension == '':
        filename = '{0}.{1}'.format(name, self.fileExtension)
    else:
        filename = '{0}.{1}'.format(name, extension)

    filePath = os.path.join(directory, filename)
    with open(filePath, 'w') as openFile:
        self._write(session=session, openFile=openFile, maskMap=maskMap)
Write from database to file. *session* = SQLAlchemy session object\n *directory* = to which directory will the files be written (e.g.: '/example/path')\n *name* = name of file that will be written (e.g.: 'my_project.ext')\n
async def start(self) -> None:
    """Coroutine to run this server: schedule the polling task.

    Raises ``SublemonRuntimeError`` if the server is already running.
    """
    if self._is_running:
        raise SublemonRuntimeError(
            'Attempted to start an already-running `Sublemon` instance')
    # Schedule the poll loop on the running event loop; keep a handle so
    # it can be awaited/cancelled later.
    self._poll_task = asyncio.ensure_future(self._poll())
    self._is_running = True
Coroutine to run this server.
def sca_xsect(scatterer, h_pol=True):
    """Scattering cross section for the current setup, with polarization.

    Args:
        scatterer: a Scatterer instance.
        h_pol: If True (default), use horizontal polarization.
            If False, use vertical polarization.

    Returns:
        The scattering cross section.
    """
    if scatterer.psd_integrator is not None:
        # PSD-integrated quantity is precomputed by the integrator.
        return scatterer.psd_integrator.get_angular_integrated(
            scatterer.psd, scatterer.get_geometry(), "sca_xsect")

    # The integration mutates the scatterer's geometry; remember it.
    old_geom = scatterer.get_geometry()

    def d_xsect(thet, phi):
        # Integrand: scattered intensity weighted by the solid-angle
        # element sin(theta); inputs are radians, scatterer wants degrees.
        (scatterer.phi, scatterer.thet) = (phi*rad_to_deg, thet*rad_to_deg)
        Z = scatterer.get_Z()
        I = sca_intensity(scatterer, h_pol)
        return I * np.sin(thet)

    try:
        # Integrate over the full sphere: phi in [0, 2*pi], theta in [0, pi].
        xsect = dblquad(d_xsect, 0.0, 2*np.pi, lambda x: 0.0,
                        lambda x: np.pi)[0]
    finally:
        # Always restore the original geometry, even if integration fails.
        scatterer.set_geometry(old_geom)

    return xsect
Scattering cross section for the current setup, with polarization. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The scattering cross section.
def init(cls, path):
    """Set up an SPV client.

    If the locally-stored headers file does not exist, create a stub
    headers file containing the genesis block header.
    """
    if not os.path.exists( path ):
        block_header_serializer = BlockHeaderSerializer()
        genesis_block_header = BlockHeader()

        if USE_MAINNET:
            # Bitcoin mainnet genesis block parameters.
            genesis_block_header.version = 1
            genesis_block_header.prev_block = 0
            genesis_block_header.merkle_root = int(GENESIS_BLOCK_MERKLE_ROOT, 16 )
            genesis_block_header.timestamp = 1231006505
            genesis_block_header.bits = int( "1d00ffff", 16 )
            genesis_block_header.nonce = 2083236893
            genesis_block_header.txns_count = 0

        with open(path, "wb") as f:
            bin_data = block_header_serializer.serialize( genesis_block_header )
            f.write( bin_data )
Set up an SPV client. If the locally-stored headers do not exist, then create a stub headers file with the genesis block information.
def find_syslog_address():
    """Find the most suitable destination for system log messages.

    :returns: The pathname of a log device (a string) or an
        address/port tuple as supported by
        :class:`~logging.handlers.SysLogHandler`. Mac OS X prefers
        ``LOG_DEVICE_MACOSX``; otherwise ``LOG_DEVICE_UNIX`` is used if
        it exists, falling back to localhost UDP.
    """
    if sys.platform == 'darwin' and os.path.exists(LOG_DEVICE_MACOSX):
        return LOG_DEVICE_MACOSX
    if os.path.exists(LOG_DEVICE_UNIX):
        return LOG_DEVICE_UNIX
    return 'localhost', logging.handlers.SYSLOG_UDP_PORT
Find the most suitable destination for system log messages. :returns: The pathname of a log device (a string) or an address/port tuple as supported by :class:`~logging.handlers.SysLogHandler`. On Mac OS X this prefers :data:`LOG_DEVICE_MACOSX`, after that :data:`LOG_DEVICE_UNIX` is checked for existence. If both of these device files don't exist the default used by :class:`~logging.handlers.SysLogHandler` is returned.
def string2identifier(s):
    """Turn a string into a valid python identifier.

    Only ASCII letters, digits and underscore survive; any other
    character becomes an underscore, and a leading underscore is added
    when the string is empty or does not start with an ASCII letter.

    Parameters
    ----------
    s : string
        string to convert

    Returns
    -------
    str
        valid python identifier.
    """
    if not s:
        return "_"
    if s[0] not in string.ascii_letters:
        s = "_" + s
    allowed = set(string.ascii_letters + string.digits + "_")
    return "".join(ch if ch in allowed else "_" for ch in s)
Turn a string into a valid python identifier. Currently only allows ASCII letters and underscore. Illegal characters are replaced with underscore. This is slightly more opinionated than python 3 itself, and may be refactored in future (see PEP 3131). Parameters ---------- s : string string to convert Returns ------- str valid python identifier.
def samples(self):
    """Yield samples as dictionaries, keyed by dimension name plus "ts".

    Each sample's timestamp is ``self.ts`` shifted by the series offset
    (offsets are in milliseconds).
    """
    dimension_names = self.series.dimensions
    for index, offset in enumerate(self.series.offsets):
        sample = {
            "ts": self.ts + datetime.timedelta(microseconds=offset * 1000),
        }
        for dimension in dimension_names:
            sample[dimension] = getattr(self.series, dimension)[index]
        yield sample
Yield samples as dictionaries, keyed by dimensions.
def parse_date(table_data):
    """Parse the 'Added on <date>' text of a table cell into a date.

    :param table_data: element whose ``text`` may contain an
        'Added on ' marker followed by a date in
        ``Parser.DATE_STRPTIME_FORMAT``.
    :return: the parsed date, or today's date when no marker is present.
    :rtype: datetime.date
    """
    text = table_data.text.split('Added on ')
    if len(text) < 2:
        # No 'Added on ' marker: fall back to today.
        return date.today()
    return datetime.strptime(text[1], Parser.DATE_STRPTIME_FORMAT).date()
Static method that parses a given table data element with `Url.DATE_STRPTIME_FORMAT` and creates a `date` object from the td's text content. :param lxml.HtmlElement table_data: table_data tag to parse :return: date object from td's text date :rtype: datetime.date
def end_segment(self, end_time=None):
    """End the current active segment.

    :param int end_time: epoch in seconds. If not specified the current
        system time will be used.
    """
    entity = self.get_trace_entity()
    if not entity:
        log.warning("No segment to end")
        return
    if self._is_subsegment(entity):
        # Active entity is a subsegment: close its parent segment.
        entity.parent_segment.close(end_time)
    else:
        entity.close(end_time)
End the current active segment. :param int end_time: epoch in seconds. If not specified the current system time will be used.
def make_action_list(self, item_list, **kwargs):
    """Generate a list of bulk actions for sending to Elasticsearch.

    Keyword Args:
        es_index: target index (defaults to ``self.es_index``).
        action_type: bulk op type; defaults to 'index'.
        doc_type: document type (defaults to ``self.doc_type``, or
            'unk' when unset).
        id_field: field to use as the document id, if any.
    """
    action_list = []
    es_index = get2(kwargs, "es_index", self.es_index)
    action_type = kwargs.get("action_type","index")
    action_settings = {'_op_type': action_type,
                       '_index': es_index}
    doc_type = kwargs.get("doc_type", self.doc_type)
    if not doc_type:
        doc_type = "unk"
    id_field = kwargs.get("id_field")
    for item in item_list:
        action = get_es_action_item(item, action_settings, doc_type, id_field)
        action_list.append(action)
    return action_list
Generates a list of actions for sending to Elasticsearch
def position(self):
    """Return the cached target position with upper-cased axis names.

    Values come from ``self._position``, which tracks commanded targets
    (movement/home commands are blocking and assumed to land where
    requested) instead of querying the hardware with M114.2.
    """
    upper_cased = {}
    for axis, value in self._position.items():
        upper_cased[axis.upper()] = value
    return upper_cased
Instead of sending M114.2 we are storing target values in self._position since movement and home commands are blocking and assumed to go the correct place. Cases where Smoothie would not be in the correct place (such as if a belt slips) would not be corrected by getting position with M114.2 because Smoothie would also not be aware of slippage.
def send(self, data):
    """Send `data` to the remote end.

    The payload is pickled into a Message addressed to
    ``self.dst_handle`` and dispatched via the context.
    """
    # Verbose-level trace; repr is truncated to keep log lines short.
    _vv and IOLOG.debug('%r.send(%r..)', self, repr(data)[:100])
    self.context.send(Message.pickled(data, handle=self.dst_handle))
Send `data` to the remote end.
def write_multi(self, frames, encoded_frames=None):
    """Write multiple video frames, optionally paired with encodings.

    When ``encoded_frames`` is omitted, each frame is written with a
    ``None`` encoded counterpart.
    """
    if encoded_frames is None:
        # Endless stream of None placeholders (the lambda never returns
        # the sentinel 1, so this iterator never stops).
        encoded_frames = iter(lambda: None, 1)
    for frame, encoded in zip(frames, encoded_frames):
        self.write(frame, encoded)
Writes multiple video frames.
def serialize(self):
    """Serialize this action into a plain dict.

    Returns a dict with the serialized ``commands`` list and the device
    URL under the ``deviceURL`` key.
    """
    commands = []
    for cmd in self.commands:
        commands.append(cmd.serialize())
    # NOTE: ``__device_url`` is name-mangled; it is set by the enclosing
    # class (not visible here).
    out = {'commands': commands, 'deviceURL': self.__device_url}
    return out
Serialize action.
def build_flags(library, type_, path):
    """Return separated build flags from pkg-config output.

    :param library: pkg-config package name to query.
    :param type_: one of 'I' (include dirs), 'L' (lib dirs), 'l' (libs).
    :param path: extra directory prepended to PKG_CONFIG_PATH.
    :return: list of flag values with the ``-I``/``-L``/``-l`` prefix
        removed.
    """
    pkg_config_path = [path]
    if "PKG_CONFIG_PATH" in os.environ:
        pkg_config_path.append(os.environ['PKG_CONFIG_PATH'])
    if "LIB_DIR" in os.environ:
        pkg_config_path.append(os.environ['LIB_DIR'])
        pkg_config_path.append(os.path.join(os.environ['LIB_DIR'], "pkgconfig"))

    options = [
        "--static",
        {
            'I': "--cflags-only-I",
            'L': "--libs-only-L",
            'l': "--libs-only-l"
        }[type_]
    ]
    output = subprocess.check_output(
        ["pkg-config"] + options + [library],
        env=dict(os.environ, PKG_CONFIG_PATH=":".join(pkg_config_path))
    ).decode("UTF-8")

    # BUG FIX: the previous ``flag.strip("-" + type_)`` stripped *any*
    # leading/trailing '-'/<type_> characters, corrupting values such as
    # "-lssl" -> "ss". Remove only the literal prefix instead.
    prefix = "-" + type_
    flags = []
    for flag in output.split():
        if flag.startswith(prefix):
            flag = flag[len(prefix):]
        flags.append(flag)
    return flags
Return separated build flags from pkg-config output
def update(self):
    """Update this `~photutils.isophote.EllipseSample` instance.

    Re-extracts the sample values that match the current ``geometry``
    attribute, then recomputes the mean intensity and the local
    gradient with its error estimates.
    """
    step = self.geometry.astep

    # Re-extract sample values for the current geometry.
    s = self.extract()
    self.mean = np.mean(s[2])

    # Local gradient at the current semi-major axis step.
    gradient, gradient_error = self._get_gradient(step)

    previous_gradient = self.gradient
    if not previous_gradient:
        # Seed value used when no previous gradient is available.
        previous_gradient = -0.05

    # A gradient that is too shallow relative to the previous one is
    # retried with a doubled step; if still shallow, relax the previous
    # gradient slightly and discard the error estimate.
    if gradient >= (previous_gradient / 3.):
        gradient, gradient_error = self._get_gradient(2 * step)

    if gradient >= (previous_gradient / 3.):
        gradient = previous_gradient * 0.8
        gradient_error = None

    self.gradient = gradient
    self.gradient_error = gradient_error
    if gradient_error:
        self.gradient_relative_error = gradient_error / np.abs(gradient)
    else:
        self.gradient_relative_error = None
Update this `~photutils.isophote.EllipseSample` instance. This method calls the :meth:`~photutils.isophote.EllipseSample.extract` method to get the values that match the current ``geometry`` attribute, and then computes the the mean intensity, local gradient, and other associated quantities.
def list(self):
    """Return all backups as WorkspaceBackup objects, sorted descending
    by last-modification time."""
    backups = [
        WorkspaceBackup.from_path(path)
        for path in glob(join(self.backup_directory, '*'))
    ]
    return sorted(backups, key=lambda backup: backup.lastmod, reverse=True)
List all backups as WorkspaceBackup objects, sorted descending by lastmod.
def cases(arg, case_result_pairs, default=None):
    """Build a case expression in one shot.

    Each (case, result) pair becomes a ``when`` clause; ``default``, if
    given, becomes the ``else_`` clause.

    Returns
    -------
    case_expr : SimpleCase
    """
    expr = arg.case()
    for condition, outcome in case_result_pairs:
        expr = expr.when(condition, outcome)
    if default is not None:
        expr = expr.else_(default)
    return expr.end()
Create a case expression in one shot. Returns ------- case_expr : SimpleCase