code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def dispatch_hook(cls, _pkt=b"", *args, **kargs):
    """Depending on the payload content, the frame type we should interpretate."""
    if _pkt and len(_pkt) >= 1:
        first_byte = orb(_pkt[0])
        if first_byte == 0x41:
            return LoWPANUncompressedIPv6
        if first_byte == 0x42:
            return LoWPAN_HC1
        if first_byte >> 3 == 0x18:
            return LoWPANFragmentationFirst
        elif first_byte >> 3 == 0x1C:
            return LoWPANFragmentationSubsequent
        elif first_byte >> 6 == 0x02:
            return LoWPANMesh
        elif first_byte >> 6 == 0x01:
            return LoWPAN_IPHC
    return cls
def add_taxes(self, taxes):
    """Append the given entries to the 'taxes' key of the request object.

    `taxes` should be in format: [("tax_name", "tax_amount")],
    e.g. [("Other TAX", 700), ("VAT", 5000)].
    """
    offset = len(self.taxes)
    for position, tax in enumerate(taxes):
        key = "tax_" + str(position + offset)
        self.taxes[key] = {"name": tax[0], "amount": tax[1]}
def get_items_of_media_type(self, media_type):
    """Return all items of the specified media type.

    :Args:
      - media_type: Media type for items we are searching for.
    :Returns:
      A generator over the matching items.
    """
    matches = (entry for entry in self.items
               if entry.media_type == media_type)
    return matches
def discover_config_path(self, config_filename: str) -> str:
    """Search for the config file in a number of places.

    :param config_filename: Config file name, or a custom path to it.
    :return: Path to the discovered config file, or None if not found.
    """
    # A direct (possibly custom) path wins over the common places.
    if config_filename and os.path.isfile(config_filename):
        return config_filename
    for candidate_dir in _common_places:
        candidate = os.path.join(candidate_dir, config_filename)
        if os.path.isfile(candidate):
            return candidate
    return None
def set_operation_voltage_level(self):
    """Set operation voltage level.

    Reads 'mv_station_v_level_operation' from the ding0 config section
    'mv_routing_tech_constraints' and scales it by the grid's voltage level.
    """
    factor = float(cfg_ding0.get('mv_routing_tech_constraints',
                                 'mv_station_v_level_operation'))
    self.v_level_operation = factor * self.grid.v_level
def readRGBA(self):
    """Read a RGBA color and return it packed as one int (0xAARRGGBB)."""
    self.reset_bits_pending()
    red = self.readUI8()
    green = self.readUI8()
    blue = self.readUI8()
    alpha = self.readUI8()
    return (alpha << 24) | (red << 16) | (green << 8) | blue
def _combine_sets(self, sets, final_set):
    """Intersect the given redis sets into *final_set* and return its key.

    The combined set is stored server-side via SINTERSTORE and used for
    the final redis call.
    """
    connection = self.cls.get_connection()
    connection.sinterstore(final_set, list(sets))
    return final_set
def ResourcePath(package_name, filepath):
    """Computes a path to the specified package resource.

    Args:
      package_name: A name of the package where the resource is located.
      filepath: A path to the resource relative to the package location.

    Returns:
      A path to the resource or `None` if the resource cannot be found.
    """
    if not getattr(sys, "frozen", None):
        candidate = _GetPkgResources(package_name, filepath)
        if candidate and os.access(candidate, os.R_OK):
            return candidate
    # Fallback (also used for frozen binaries): look under sys.prefix.
    candidate = os.path.join(sys.prefix, filepath)
    if candidate and os.access(candidate, os.R_OK):
        return candidate
    return None
def _parse_header(self, header_string): header_content = header_string.strip().split('\t') if len(header_content) != self._snv_enum.HEADER_LEN.value: raise MTBParserException( "Only {} header columns found, {} expected!" .format(len(header_content), self._snv_enum.HEADER_LEN.value)) counter = 0 for column in header_content: for enum_type in self._snv_enum: if column == enum_type.value: self._header_to_column_mapping[enum_type.name] = counter continue counter+=1 if len(self._header_to_column_mapping) != self._snv_enum.HEADER_LEN.value: debug_string = self._header_to_column_mapping.keys() raise MTBParserException("Parsing incomplete: Not all columns have been " "matched to speficied column types. Identified {} columns, but expected {}. {}" .format(len(self._header_to_column_mapping), self._snv_enum.HEADER_LEN.value, debug_string))
Parses the header and determines the column type and its column index.
def get_collection(self, **kwargs):
    """Establish a connection with the database.

    Returns:
        The MongoDB collection named by ``self.collection``.
    """
    from pymongo import MongoClient
    if self.host and self.port:
        # FIX: previously connected with ``config.host``/``config.port``
        # (an unrelated global) instead of the instance's own settings.
        client = MongoClient(host=self.host, port=self.port)
    else:
        client = MongoClient()
    db = client[self.dbname]
    if self.user and self.password:
        # FIX: 'autenticate' was a typo -- it raised AttributeError
        # whenever credentials were configured.
        db.authenticate(self.user, password=self.password)
    return db[self.collection]
def save(self, *args, **kwargs):
    """Set the description field on save."""
    if self.gen_description:
        generated = self.description_from_content()
        self.description = strip_tags(generated)
    super(MetaData, self).save(*args, **kwargs)
def timezone(self, lat, lon, datetime, language=None, sensor=None):
    """Get time offset data for given location.

    :param lat: Latitude of queried point
    :param lon: Longitude of queried point
    :param datetime: Desired time; used to determine whether Daylight
        Savings applies. Should be timezone aware; UTC assumed otherwise.
    :type datetime: datetime.datetime
    :param language: The language in which to return results
    :param sensor: Override default client sensor parameter
    """
    request_params = dict(
        location="%f,%f" % (lat, lon),
        timestamp=unixtimestamp(datetime),
        language=language,
        sensor=sensor,
    )
    return self._make_request(self.TIMEZONE_URL, request_params, None)
def get_option(self, optionname, default=0):
    """Return the first value matching 'optionname'.

    Global command-line options take precedence; otherwise the plugin's own
    option list is searched in order. *default* is returned when no enabled
    value is found.
    """
    global_options = ('verify', 'all_logs', 'log_size', 'plugin_timeout')
    if optionname in global_options:
        return getattr(self.commons['cmdlineopts'], optionname)
    for candidate, parms in zip(self.opt_names, self.opt_parms):
        if candidate != optionname:
            continue
        enabled = parms['enabled']
        if enabled is not None:
            return enabled
    return default
def get_user_id(username):
    """Return the user id for a given user name, or ``None`` if the user
    does not exist.

    :param username: User name.
    :type username: unicode
    :return: User id.
    :rtype: int
    """
    output = single_line_stdout('id -u {0}'.format(username),
                                expected_errors=(1,), shell=False)
    return check_int(output)
def structural_imbalance(S, sampler=None, **sampler_args):
    """Return an approximate set of frustrated edges and a bicoloring.

    A signed social network is balanced if it can be divided into two
    factions with friendly relations inside each faction and hostile ones
    between them; the imbalance is the minimum number of edges violating
    this rule.

    Parameters
    ----------
    S : NetworkX graph
        Social graph whose edges carry a numeric 'sign' attribute.
    sampler
        Binary quadratic model sampler exposing 'sample_ising', returning
        samples in order of increasing energy. If not given, one must be
        provided via `set_default_sampler`.
    sampler_args
        Additional keyword parameters passed to the sampler.

    Returns
    -------
    frustrated_edges : dict
        Edges that violate their sign; its length is the imbalance.
    colors : dict
        A bicoloring of the nodes into two factions.

    Raises
    ------
    ValueError
        If any edge does not have a 'sign' attribute.
    """
    h, J = structural_imbalance_ising(S)
    response = sampler.sample_ising(h, J, **sampler_args)
    # Samples arrive lowest-energy first; take the best one.
    best_sample = next(iter(response))
    colors = {node: (spin + 1) // 2 for node, spin in iteritems(best_sample)}
    frustrated_edges = {}
    for u, v, data in S.edges(data=True):
        sign = data['sign']
        # Friendly edges are frustrated across factions; hostile edges are
        # frustrated within a faction.
        if sign > 0 and colors[u] != colors[v]:
            frustrated_edges[(u, v)] = data
        elif sign < 0 and colors[u] == colors[v]:
            frustrated_edges[(u, v)] = data
    return frustrated_edges, colors
def consume(self, callback, bindings=None, queues=None, exchanges=None):
    """Consume messages from a message queue. This call blocks.

    The callback signature should accept a single positional argument, an
    instance of :class:`Message` (or a sub-class of it).

    Args:
        callback (callable): The callable (or class implementing __call__)
            to pass each delivered message to.
        bindings (list of dict): Queue bindings; see :ref:`conf-bindings`.
        queues (dict): Queues to ensure exist; see :ref:`conf-queues`.
        exchanges (dict): Exchanges to ensure exist; see
            :ref:`conf-exchanges`.

    Raises:
        HaltConsumer: Raised when the consumer halts.
        ValueError: If the callback isn't a callable object or a class
            with __call__ defined.
    """
    self._bindings = bindings or config.conf["bindings"]
    self._queues = queues or config.conf["queues"]
    self._exchanges = exchanges or config.conf["exchanges"]

    bad_callback = ("Callback must be a class that implements __call__"
                    " or a function.")
    if inspect.isclass(callback):
        instance = callback()
        if not callable(instance):
            raise ValueError(bad_callback)
        self._consumer_callback = instance
    elif callable(callback):
        self._consumer_callback = callback
    else:
        raise ValueError(bad_callback)

    self._running = True
    self.connect()
    self._connection.ioloop.start()
def get_invalid_txn_info(self, batch_id):
    """Fetch info about the Transactions that failed within a Batch.

    Args:
        batch_id (str): The id of the Batch containing an invalid txn

    Returns:
        list of dict: Dicts with up to three keys: 'id' (header_signature
        of the invalid Transaction), 'message' (error message sent by the
        TP), and 'extended_data' (any additional data sent by the TP).
    """
    with self._lock:
        infos = self._invalid.get(batch_id, [])
        # Return copies so callers cannot mutate the internal records.
        return [entry.copy() for entry in infos]
def list_file_extensions(path: str, reportevery: int = 1) -> List[str]:
    """Return a sorted list of every file extension found in a directory
    and its subdirectories.

    Args:
        path: path to scan
        reportevery: report directory progress after every *n* directories

    Returns:
        sorted list of every file extension found
    """
    extensions = set()
    count = 0
    for root, _dirs, files in os.walk(path):
        count += 1
        if count % reportevery == 0:
            log.debug("Walking directory {}: {!r}", count, root)
        for filename in files:
            _stem, ext = os.path.splitext(filename)
            extensions.add(ext)
    # FIX: sorted() accepts any iterable; the intermediate list() was
    # redundant.
    return sorted(extensions)
def message_channel(self, message):
    """Log outgoing channel messages manually (we won't receive our own)."""
    self.log(None, message)
    super(BaseBot, self).message_channel(message)
def apply_analysis_request_partition_interface(portal):
    """Walk through all AR-like partitions registered in the system and
    apply the IAnalysisRequestPartition marker interface to them.
    """
    logger.info("Applying 'IAnalysisRequestPartition' marker interface ...")
    query = dict(portal_type="AnalysisRequest", isRootAncestor=False)
    brains = api.search(query, CATALOG_ANALYSIS_REQUEST_LISTING)
    total = len(brains)
    for index, brain in enumerate(brains):
        # Progress log every 100 items.
        if index % 100 == 0:
            logger.info("Applying 'IAnalysisRequestPartition' interface: {}/{}"
                        .format(index, total))
        ar = api.get_object(brain)
        if IAnalysisRequestPartition.providedBy(ar):
            # Marker already present.
            continue
        if ar.getParentAnalysisRequest():
            alsoProvides(ar, IAnalysisRequestPartition)
    commit_transaction(portal)
def page(self, email=values.unset, status=values.unset,
         page_token=values.unset, page_number=values.unset,
         page_size=values.unset):
    """Retrieve a single page of AuthorizationDocumentInstance records
    from the API. Request is executed immediately.

    :param unicode email: Email.
    :param AuthorizationDocumentInstance.Status status: Status of this
        AuthorizationDocument.
    :param str page_token: PageToken provided by the API
    :param int page_number: Page Number, this value is simply for client state
    :param int page_size: Number of records to return, defaults to 50

    :returns: Page of AuthorizationDocumentInstance
    :rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentPage
    """
    data = values.of({
        'Email': email,
        'Status': status,
        'PageToken': page_token,
        'Page': page_number,
        'PageSize': page_size,
    })
    response = self._version.page('GET', self._uri, params=data)
    return AuthorizationDocumentPage(self._version, response, self._solution)
def get_api_connector(cls):
    """Initialize (once) and return the api connector for future use."""
    if cls._api is None:
        cls.load_config()
        cls.debug('initialize connection to remote server')
        host = cls.get('api.host')
        if not host:
            raise MissingConfiguration()
        # A known environment alias overrides the configured host.
        env = cls.get('api.env')
        if env and env in cls.apienvs:
            host = cls.apienvs[env]
        cls._api = XMLRPCClient(host=host, debug=cls.verbose)
    return cls._api
def top_games(self, limit=10, offset=0):
    """Return the current top games.

    :param limit: the maximum amount of top games to query
    :type limit: :class:`int`
    :param offset: the offset in the top games
    :type offset: :class:`int`
    :returns: a list of top games
    :rtype: :class:`list` of :class:`models.Game`
    :raises: None
    """
    response = self.kraken_request(
        'GET', 'games/top', params={'limit': limit, 'offset': offset})
    return models.Game.wrap_topgames(response)
def nuclear_norm(data):
    r"""Nuclear norm.

    This method computes the nuclear (or trace) norm of the input data,
    i.e. the sum of its singular values.

    Parameters
    ----------
    data : np.ndarray
        Input data array

    Returns
    -------
    float
        nuclear norm value

    Examples
    --------
    >>> import numpy as np
    >>> a = np.arange(9).reshape(3, 3)
    >>> nuclear_norm(a)
    15.49193338482967

    Notes
    -----
    Implements the following equation:

    .. math::
        \|\mathbf{A}\|_* = \sum_{i=1}^{\min\{m,n\}} \sigma_i (\mathbf{A})
    """
    # FIX: a stray raw-string prefix 'r' was left as a bare expression when
    # the docstring was detached; reattached here as a proper r-docstring.
    _u, singular_values, _v = np.linalg.svd(data)
    return np.sum(singular_values)
def delete(self, request, uri):
    """Delete versioned uri and return empty text response on success."""
    decoded = self.decode_uri(uri)
    deleted_uris = cio.delete(decoded)
    if decoded not in deleted_uris:
        raise Http404
    return self.render_to_response()
def columns(self, *args) -> List[List[Well]]:
    """Accessor function used to navigate through a labware by column.

    With indexing one can treat it as a typical python nested list; e.g.
    ``labware.columns()[0]`` outputs ['A1', 'B1', 'C1', 'D1'...].

    Args are taken for backward-compatibility only (deprecated) and must
    all be the same type: ints (``columns(1, 4, 8)``) or strings
    (``columns('1', '2')``); mixing them is invalid.

    :return: A list of column lists
    """
    indexed = self._create_indexed_dictionary(group=2)
    ordered_keys = sorted(indexed, key=int)
    if not args:
        return [indexed[key] for key in ordered_keys]
    if isinstance(args[0], int):
        return [indexed[ordered_keys[pos]] for pos in args]
    if isinstance(args[0], str):
        return [indexed[name] for name in args]
    raise TypeError
async def stop_pages(self):
    """Stop the interactive pagination session and delete its message."""
    await self.bot.delete_message(self.message)
    self.paginating = False
def get_revisions(page, page_num=1):
    """Return a paginated queryset of PageRevision instances for the
    specified Page instance.

    :param page: the page instance.
    :param page_num: the pagination page number.
    :rtype: django.db.models.query.QuerySet.
    """
    revisions = page.revisions.order_by('-created_at')
    current = page.get_latest_revision()
    if current:
        # FIX: QuerySet.exclude returns a NEW queryset; the result was
        # previously discarded, so the current revision was never excluded.
        revisions = revisions.exclude(id=current.id)
    paginator = Paginator(revisions, 5)
    try:
        revisions = paginator.page(page_num)
    except PageNotAnInteger:
        revisions = paginator.page(1)
    except EmptyPage:
        revisions = paginator.page(paginator.num_pages)
    return revisions
def get_min_row_num(mention):
    """Return the lowest row number that a Mention occupies.

    :param mention: The Mention to evaluate. If a candidate is given,
        default to its first Mention.
    :rtype: integer or None
    """
    span = _to_span(mention)
    sentence = span.sentence
    if not sentence.is_tabular():
        return None
    return sentence.cell.row_start
def send_updates(self):
    """Send updates (time/date/datetime/day-night) to the KNX bus."""
    now = datetime.now()
    if self.timeaddr:
        self.tunnel.group_write(self.timeaddr, time_to_knx(now))
    if self.dateaddr:
        self.tunnel.group_write(self.dateaddr, date_to_knx(now))
    if self.datetimeaddr:
        self.tunnel.group_write(self.datetimeaddr, datetime_to_knx(now))
    if self.daynightaddr:
        from pysolar.solar import get_altitude
        altitude = get_altitude(self.lat, self.long, now)
        # Day when the sun is above the horizon, night otherwise.
        self.tunnel.group_write(self.daynightaddr, 1 if altitude > 0 else 0)
def selectin(table, field, value, complement=False):
    """Select rows where the given field is a member of the given value."""
    return select(table, field, lambda member: member in value,
                  complement=complement)
def _RoundTowardZero(value, divider): result = value // divider remainder = value % divider if result < 0 and remainder > 0: return result + 1 else: return result
Truncates the remainder part after division.
def time_emd(emd_type, data):
    """Time an EMD command with the given data as arguments."""
    emd_functions = {
        'cause': _CAUSE_EMD,
        'effect': pyphi.subsystem.effect_emd,
        'hamming': pyphi.utils.hamming_emd,
    }
    emd = emd_functions[emd_type]

    def statement():
        for first, second in data:
            emd(first, second)

    timings = timeit.repeat(statement, number=NUMBER, repeat=REPEAT)
    return min(timings)
def clean_upload(self, query='/content/uploads/'):
    """Tell pulp to clean up after an upload request.

    Pulp leaves droppings unless it is specifically told to clean up
    after itself; use this to do so.
    """
    target = query + self.uid + '/'
    _r = self.connector.delete(target)
    if _r.status_code == Constants.PULP_DELETE_OK:
        juicer.utils.Log.log_info("Cleaned up after upload request.")
    else:
        _r.raise_for_status()
def get_latlon(self, use_cached=True):
    """Get a tuple with device latitude and longitude; entries may be None."""
    device_json = self.get_device_json(use_cached)
    raw_lat = device_json.get("dpMapLat")
    raw_lon = device_json.get("dpMapLong")
    lat = float(raw_lat) if raw_lat else None
    lon = float(raw_lon) if raw_lon else None
    return (lat, lon)
def _op(self, _, obj, app):
    """Convert integer status codes in Responses to strings."""
    # FIX: identity comparison with None ('is None'), not '== None'.
    if obj.responses is None:
        return
    converted = {}
    for code, response in six.iteritems(obj.responses):
        if isinstance(code, six.integer_types):
            converted[str(code)] = response
        else:
            converted[code] = response
    obj.update_field('responses', converted)
def post(self, request):
    """Create a client, store it in the user's session and redirect the
    user to the API provider to authorize our app and permissions.
    """
    request.session['next'] = self.get_next(request)
    client = self.get_client()()
    request.session[self.get_client().get_session_key()] = client
    url = client.get_redirect_url(request=request)
    logger.debug("Redirecting to %s", url)
    try:
        return HttpResponseRedirect(url)
    # FIX: use the 'except ... as ...' form; the old comma syntax
    # ('except OAuthError, error') is a SyntaxError on Python 3.
    except OAuthError as error:
        return self.error_to_response(request, {'error': error})
    except socket.timeout:
        return self.error_to_response(
            request, {'error': _('Could not connect to service (timed out)')})
def start(self):
    """Start the installation wizard."""
    self.log.debug('Starting the installation process')
    self.browser.open(self.url)
    self.system_check()
def info(message, domain):
    """Log simple info, unless *domain* is in the ignored domains."""
    if domain in Logger._ignored_domains:
        return
    Logger._log(None, message, INFO, domain)
async def get(
    self,
    stream: str,
    direction: msg.StreamDirection = msg.StreamDirection.Forward,
    from_event: int = 0,
    max_count: int = 100,
    resolve_links: bool = True,
    require_master: bool = False,
    correlation_id: uuid.UUID = None,
):
    """Read a range of events from a stream.

    Args:
        stream: The name of the stream to read.
        direction (optional): Whether to read events forward or backward;
            defaults to Forward.
        from_event (optional): The first event to read; defaults to the
            beginning of the stream when reading forward and the end of
            the stream when reading backward.
        max_count (optional): The maximum number of events to return.
        resolve_links (optional): True if eventstore should automatically
            resolve Link Events, otherwise False.
        require_master (optional): True if this command must be sent
            direct to the master node, otherwise False.
        correlation_id (optional): A unique identifier for this command.
            NOTE(review): currently unused -- the old no-op
            `correlation_id = correlation_id` line was removed; the value
            is not forwarded to ReadStreamEvents.

    Examples:
        Read 5 events from a stream:

        >>> async for event in conn.get("my-stream", max_count=5):
        ...     print(event)

        Read events 21 to 30:

        >>> async for event in conn.get("my-stream", max_count=10,
        ...                             from_event=21):
        ...     print(event)
    """
    cmd = convo.ReadStreamEvents(
        stream,
        from_event,
        max_count,
        resolve_links,
        require_master,
        direction=direction,
    )
    result = await self.dispatcher.start_conversation(cmd)
    return await result
def is_link(url, processed, files):
    """Determine whether or not a link should be crawled.

    A url should not be crawled if it is a file (it is then recorded in
    *files*) or has already been crawled.

    Args:
        url: str - url to be processed
        processed: list[str] - urls that have already been crawled
        files: set collecting discovered file urls

    Returns:
        bool: whether `url` should be crawled.
    """
    if url in processed:
        return False
    if url.endswith(BAD_TYPES):
        files.add(url)
        return False
    return True
def stream_interactions(self):
    """Generate a temporal ordered stream of interactions.

    Returns
    -------
    nd_iter : an iterator
        Yields 4-tuples (node, node, op, timestamp) in timestamp order.

    Examples
    --------
    >>> G = dn.DynGraph()
    >>> G.add_path([0,1,2,3], t=0)
    >>> list(G.stream_interactions())
    [(0, 1, '+', 0), (1, 2, '+', 0), (2, 3, '+', 0)]
    """
    for timestamp in sorted(self.time_to_edge):
        for interaction in self.time_to_edge[timestamp]:
            yield (interaction[0], interaction[1], interaction[2], timestamp)
def selecteq(table, field, value, complement=False):
    """Select rows where the given field equals the given value."""
    return selectop(table, field, value, operator.eq,
                    complement=complement)
def right_click_specimen_equalarea(self, event):
    """Toggle between zoom and pan effects for the specimen equal area
    on right click.

    Parameters
    ----------
    event : the wx.MouseEvent that triggered the call of this function

    Alters
    ------
    specimen_EA_setting, toolbar2 setting
    """
    # Ignore events that are actually left-clicks or double-clicks.
    if event.LeftIsDown() or event.ButtonDClick():
        return
    if self.specimen_EA_setting == "Zoom":
        self.specimen_EA_setting = "Pan"
        try:
            self.toolbar2.pan('off')
        except TypeError:
            pass
    elif self.specimen_EA_setting == "Pan":
        self.specimen_EA_setting = "Zoom"
        try:
            self.toolbar2.zoom()
        except TypeError:
            pass
def shrink_file(in_filepath, api_key=None, out_filepath=None):
    """Shrink png file and write it back to a new file.

    The default file path replaces ".png" with ".tiny.png".

    Returns api info (including info['output']['filepath']).
    """
    info = get_shrink_file_info(in_filepath, api_key, out_filepath)
    write_shrunk_file(info)
    return info
def listBlockOrigin(self, origin_site_name="", dataset="", block_name=""):
    """API to list blocks first generated in origin_site_name.

    :param origin_site_name: Origin Site Name (Optional, No wildcards)
    :type origin_site_name: str
    :param dataset: dataset (No wildcards; either dataset or block name
        needed)
    :type dataset: str
    :param block_name: block name
    :type block_name: str
    :returns: list of dicts with keys such as create_by, creation_date,
        open_for_writing, last_modified_by, dataset, block_name,
        file_count, origin_site_name, last_modification_date, block_size
    :rtype: list of dicts
    """
    try:
        return self.dbsBlock.listBlocksOrigin(origin_site_name,
                                              dataset, block_name)
    except dbsException as de:
        dbsExceptionHandler(de.eCode, de.message,
                            self.logger.exception, de.serverError)
    except Exception as ex:
        sError = ("DBSReaderModel/listBlocks. %s\n. Exception trace: \n %s"
                  % (ex, traceback.format_exc()))
        dbsExceptionHandler('dbsException-server-error',
                            dbsExceptionCode['dbsException-server-error'],
                            self.logger.exception, sError)
def getMonitorById(self, monitorId):
    """Return (status, alltimeuptimeratio) for a MonitorId.

    Returns (None, None) when the API request fails.
    """
    url = (self.baseUrl
           + "getMonitors?apiKey=%s&monitors=%s" % (self.apiKey, monitorId)
           + "&noJsonCallback=1&format=json")
    success, response = self.requestApi(url)
    if not success:
        return None, None
    monitor = response.get('monitors').get('monitor')[0]
    return monitor.get('status'), monitor.get('alltimeuptimeratio')
def delete_trigger(self, trigger):
    """Delete from the Alert API the trigger record identified by the ID
    of the provided trigger, along with all related alerts.

    :param trigger: the `pyowm.alertapi30.trigger.Trigger` object to delete
    :type trigger: `pyowm.alertapi30.trigger.Trigger`
    :returns: `None` if deletion is successful, an exception otherwise
    """
    # NOTE: asserts kept as in the original API -- callers may rely on
    # AssertionError for invalid input.
    assert trigger is not None
    assert isinstance(trigger.id, str), "Value must be a string"
    status, _ = self.http_client.delete(
        NAMED_TRIGGER_URI % trigger.id,
        params={'appid': self.API_key},
        headers={'Content-Type': 'application/json'})
def time(hour, minute=0, second=0, microsecond=0):
    """Create a new Time instance."""
    return Time(hour, minute, second, microsecond)
def call_after(lag):
    """Parametrized decorator for calling a function after a time ``lag``.

    The delay is passed straight to ``threading.Timer`` (i.e. seconds --
    NOTE(review): the original doc said milliseconds, which does not match
    ``threading.Timer``'s interval unit). Simultaneous calls cancel any
    pending one.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            wrapper.timer.cancel()
            wrapper.timer = threading.Timer(lag, func,
                                            args=args, kwargs=kwargs)
            wrapper.timer.start()
        # Seed with a dummy timer so the first call can cancel() safely.
        wrapper.timer = threading.Timer(0, lambda: None)
        return wrapper
    return decorator
def flatten(nested, containers=(list, tuple)):
    """Flatten a nested list and return it.

    The input is first shallow-copied into a list, which is then flattened
    in place; empty sub-containers disappear from the result.
    """
    flat = list(nested)
    index = 0
    while index < len(flat):
        while isinstance(flat[index], containers):
            if not flat[index]:
                # Empty container: drop it and re-examine this position.
                flat.pop(index)
                index -= 1
                break
            else:
                # Splice the container's items in place of the container.
                flat[index:index + 1] = flat[index]
        index += 1
    return flat
def merge(self, cluster_ids, to=None):
    """Merge several clusters to a new cluster.

    Parameters
    ----------
    cluster_ids : array-like
        List of clusters to merge.
    to : integer or None
        The id of the new cluster. By default, this is `new_cluster_id()`.

    Returns
    -------
    up : UpdateInfo instance
    """
    if not _is_array_like(cluster_ids):
        raise ValueError("The first argument should be a list or "
                         "an array.")
    cluster_ids = sorted(cluster_ids)
    if not set(cluster_ids) <= set(self.cluster_ids):
        raise ValueError("Some clusters do not exist.")
    # Pick a fresh cluster id when none is given.
    if to is None:
        to = self.new_cluster_id()
    if to < self.new_cluster_id():
        raise ValueError("The new cluster numbers should be higher than "
                         "{0}.".format(self.new_cluster_id()))
    # All spikes of the merged clusters are reassigned to the new cluster.
    spike_ids = _spikes_in_clusters(self.spike_clusters, cluster_ids)
    up = self._do_merge(spike_ids, cluster_ids, to)
    undo_state = self.emit('request_undo_state', up)
    self._undo_stack.add((spike_ids, [to], undo_state))
    self.emit('cluster', up)
    return up
def roughpage(request, url):
    """Public interface to the rough page view."""
    if settings.APPEND_SLASH and not url.endswith('/'):
        return redirect(url + '/', permanent=True)
    filename = url_to_filename(url)
    candidates = get_backend().prepare_filenames(filename, request=request)
    root = settings.ROUGHPAGES_TEMPLATE_DIR
    candidates = [os.path.join(root, name) for name in candidates]
    try:
        template = loader.select_template(candidates)
        return render_roughpage(request, template)
    except TemplateDoesNotExist:
        if settings.ROUGHPAGES_RAISE_TEMPLATE_DOES_NOT_EXISTS:
            raise
        raise Http404
def add_columns(tree_view, df_py_dtypes, list_store):
    """Add columns to a `gtk.TreeView` for the types in `df_py_dtypes`.

    Args:
        tree_view (gtk.TreeView) : Tree view to append columns to.
        df_py_dtypes (pandas.DataFrame) : Data frame containing type
            information for one or more columns in `list_store`.
        list_store (gtk.ListStore) : Model data.

    Returns:
        None
    """
    tree_view.set_model(list_store)
    for column_i, (i, dtype_i) in df_py_dtypes[['i', 'dtype']].iterrows():
        tree_column_i = gtk.TreeViewColumn(column_i)
        tree_column_i.set_name(column_i)
        # Pick a renderer/property pair matching the column dtype.
        if dtype_i in (int, long) or dtype_i == float:
            property_name = 'text'
            cell_renderer_i = gtk.CellRendererSpin()
        elif dtype_i in (bool, ):
            property_name = 'active'
            cell_renderer_i = gtk.CellRendererToggle()
        elif dtype_i in (str, ):
            property_name = 'text'
            cell_renderer_i = gtk.CellRendererText()
        else:
            raise ValueError('No cell renderer for dtype: %s' % dtype_i)

        cell_renderer_i.set_data('column_i', i)
        cell_renderer_i.set_data('column', tree_column_i)
        tree_column_i.pack_start(cell_renderer_i, True)
        tree_column_i.add_attribute(cell_renderer_i, property_name, i)
        tree_view.append_column(tree_column_i)
def erase_devices():
    """Erase all the drives on this server.

    Performs sanitize erase on all the supported physical drives (this
    erase cannot be performed on logical drives).

    :returns: a dictionary of controllers with drives and the erase status.
    :raises exception.HPSSAException: if none of the drives support
        sanitize erase.
    """
    server = objects.Server()

    for controller in server.controllers:
        erasable = [drive for drive in controller.unassigned_physical_drives
                    if (drive.get_physical_drive_dict()
                        .get('erase_status', '') == 'OK')]
        if erasable:
            controller.erase_devices(erasable)

    # Poll every 5 minutes until every erase operation has finished.
    while not has_erase_completed():
        time.sleep(300)

    server.refresh()
    status = {}
    for controller in server.controllers:
        drive_status = {drive.id: drive.erase_status
                        for drive in controller.unassigned_physical_drives}
        sanitize_supported = controller.properties.get(
            'Sanitize Erase Supported', 'False')
        if sanitize_supported == 'False':
            msg = ("Drives overwritten with zeros because sanitize erase "
                   "is not supported on the controller.")
        else:
            msg = ("Sanitize Erase performed on the disks attached to "
                   "the controller.")
        drive_status.update({'Summary': msg})
        status[controller.id] = drive_status
    return status
def subset_bed_by_chrom(in_file, chrom, data, out_dir=None):
    """Subset a BED file to only have items from the specified chromosome."""
    if out_dir is None:
        out_dir = os.path.dirname(in_file)
    base, ext = os.path.splitext(os.path.basename(in_file))
    out_file = os.path.join(out_dir, "%s-%s%s" % (base, chrom, ext))
    # Skip the rewrite when the subset file is already newer than the input.
    if utils.file_uptodate(out_file, in_file):
        return out_file
    with file_transaction(data, out_file) as tx_out_file:
        _rewrite_bed_with_chrom(in_file, tx_out_file, chrom)
    return out_file
def change_password(self, previous, new_password):
    """Change the user's password and save to the database.

    :param previous: plain text previous password
    :param new_password: plain text new password
    :raises: ValidationError
    """
    if not self.verify_password(previous):
        raise exceptions.Unauthorized('Incorrect password')
    if len(new_password) < options.min_length_password:
        raise exceptions.ValidationError(
            'Passwords must be at least {} characters'
            .format(options.min_length_password))
    if len(new_password) > options.max_length_password:
        raise exceptions.ValidationError(
            'Passwords must be at no more than {} characters'
            .format(options.max_length_password))
    self.password = self.hash_password(new_password)
    yield self._save()
def add_parser(subparsers, parent_parser):
    """Set up the argument parser for ``dvc init``."""
    INIT_HELP = "Initialize DVC in the current directory."
    INIT_DESCRIPTION = (
        "Initialize DVC in the current directory. Expects directory\n"
        "to be a Git repository unless --no-scm option is specified."
    )
    init_parser = subparsers.add_parser(
        "init",
        parents=[parent_parser],
        description=append_doc_link(INIT_DESCRIPTION, "init"),
        help=INIT_HELP,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    no_scm_help = ("Initiate dvc in directory that is "
                   "not tracked by any scm tool (e.g. git).")
    init_parser.add_argument("--no-scm", action="store_true", default=False,
                             help=no_scm_help)
    force_help = ("Overwrite existing '.dvc' directory. "
                  "This operation removes local cache.")
    init_parser.add_argument("-f", "--force", action="store_true",
                             default=False, help=force_help)
    init_parser.set_defaults(func=CmdInit)
Setup parser for `dvc init`.
def _isinstance(self, model, raise_error=True):
    """Check that *model* is an instance of ``self.__model__``.

    Args:
        model (Model): The instance to be type checked.
        raise_error (bool): Whether to raise on a failed check.

    Returns:
        bool: True when the type matches, False otherwise.

    Raises:
        ValueError: If the check fails and ``raise_error`` is True.
    """
    matches = isinstance(model, self.__model__)
    if not matches and raise_error:
        raise ValueError('%s is not of type %s' % (model, self.__model__))
    return matches
Checks if the specified model instance matches the class model. By default this method will raise a `ValueError` if the model is not of expected type. Args: model (Model) : The instance to be type checked raise_error (bool) : Flag to specify whether to raise error on type check failure Raises: ValueError: If `model` is not an instance of the respective Model class
def get_json_log_data(data):
    """Return a copy of *data* with sensitive params masked for logging.

    The input is only deep-copied when at least one hidden parameter is
    actually present, otherwise the original dict is returned unchanged.
    """
    sanitized = data
    for param in LOG_HIDDEN_JSON_PARAMS:
        if param not in data['params']:
            continue
        # Copy lazily, the first time a hidden parameter is found.
        if sanitized is data:
            sanitized = copy.deepcopy(data)
        sanitized['params'][param] = "**********"
    return sanitized
Returns a new `data` dictionary with hidden params for log purpose.
def sort_by_name(self):
    """Sort the list in place by each element's ``name`` attribute."""
    by_name = lambda element: element.name
    super(JSSObjectList, self).sort(key=by_name)
Sort list elements by name.
def _reconnect_delay(self):
    """Return the delay (in seconds) before the next reconnect attempt.

    Returns 0 when reconnection delays are disabled; otherwise walks
    the RECONNECT_DELAYS schedule, staying on the final entry once the
    attempt count runs past the end.
    """
    if not (self.RECONNECT_ON_ERROR and self.RECONNECT_DELAYED):
        return 0
    delays = self.RECONNECT_DELAYS
    attempt = self._reconnect_attempts
    if attempt >= len(delays):
        # Past the end of the schedule: keep using the longest delay.
        return delays[-1]
    return delays[attempt]
Calculate reconnection delay.
def write_device_config(self, device_config):
    """Write a DEVICE_CONFIG to the YubiKey NEO.

    Raises YubiKeyVersionError when the requested USB mode is not
    supported by this key's capabilities.
    """
    mode = device_config._mode
    if not self.capabilities.have_usb_mode(mode):
        raise yubikey_base.YubiKeyVersionError("USB mode: %02x not supported for %s" % (mode, self))
    return self._device._write_config(device_config, SLOT.DEVICE_CONFIG)
Write a DEVICE_CONFIG to the YubiKey NEO.
def _configure_nve_member(self, vni, device_id, mcast_group, host_id):
    """Add "member vni" configuration to the NVE interface.

    Called during the update postcommit port event.

    :param vni: VXLAN network identifier to add as an NVE member
    :param device_id: device owning the port (unused here; kept for the
        driver's event signature)
    :param mcast_group: multicast group for the new NVE member
    :param host_id: host whose switch/NVE connections are configured
    """
    host_nve_connections = self._get_switch_nve_info(host_id)
    for switch_ip in host_nve_connections:
        if cfg.CONF.ml2_cisco.vxlan_global_config:
            nve_bindings = nxos_db.get_nve_switch_bindings(switch_ip)
            # Only the first VNI binding for this switch triggers
            # creation of the NVE interface itself.
            if len(nve_bindings) == 1:
                LOG.debug("Nexus: create NVE interface")
                loopback = self.get_nve_loopback(switch_ip)
                self.driver.enable_vxlan_feature(switch_ip, const.NVE_INT_NUM, loopback)
        member_bindings = nxos_db.get_nve_vni_switch_bindings(vni, switch_ip)
        # Add the "member vni" only once per (vni, switch) pair.
        if len(member_bindings) == 1:
            LOG.debug("Nexus: add member")
            self.driver.create_nve_member(switch_ip, const.NVE_INT_NUM, vni, mcast_group)
Add "member vni" configuration to the NVE interface. Called during update postcommit port event.
def perform_service_validate(self, ticket=None, service_url=None, headers=None):
    """Fetch a response from the remote CAS ``serviceValidate`` endpoint."""
    validate_url = self._get_service_validate_url(ticket, service_url=service_url)
    logging.debug('[CAS] ServiceValidate URL: {}'.format(validate_url))
    return self._perform_cas_call(validate_url, ticket=ticket, headers=headers)
Fetch a response from the remote CAS `serviceValidate` endpoint.
def get_segment_length(
    linestring: LineString, p: Point, q: Optional[Point] = None
) -> float:
    """Return the distance along *linestring* between the projections
    of *p* and *q* onto it.

    When ``q`` is None, return the distance from the start of the
    linestring to the projection of ``p``.  Distances are in the
    linestring's native coordinates.
    """
    distance_p = linestring.project(p)
    if q is None:
        return distance_p
    return abs(distance_p - linestring.project(q))
Given a Shapely linestring and two Shapely points, project the points onto the linestring, and return the distance along the linestring between the two points. If ``q is None``, then return the distance from the start of the linestring to the projection of ``p``. The distance is measured in the native coordinates of the linestring.
def unbind(self, format, *args):
    """Unbind a socket from a formatted endpoint.

    Returns 0 if OK, -1 if the endpoint was invalid or the function
    isn't supported.
    """
    # Thin delegation to the native zsock_unbind; the printf-style
    # format string and its varargs are forwarded unchanged.
    # NOTE(review): assumes ``lib`` is the loaded czmq FFI binding --
    # confirm against the module's imports.
    return lib.zsock_unbind(self._as_parameter_, format, *args)
Unbind a socket from a formatted endpoint. Returns 0 if OK, -1 if the endpoint was invalid or the function isn't supported.
def addSkip(self, test, reason):
    """Register *test* as skipped.

    :param test: test to register
    :param reason: reason why the test was skipped
    """
    # Record the skip with the base result class first...
    super().addSkip(test, reason)
    # ...then log the test and fan the event out to any attached
    # secondary result objects.
    self.test_info(test)
    self._call_test_results('addSkip', test, reason)
registers a test as skipped :param test: test to register :param reason: reason why the test was skipped
def template_exists(template_name):
    """Return True when *template_name* can be loaded by the template
    system, so callers can fall back to a default template otherwise.
    """
    try:
        template.loader.get_template(template_name)
    except template.TemplateDoesNotExist:
        return False
    return True
Determine if a given template exists so that it can be loaded if so, or a default alternative can be used if not.
def select_star_cb(self, widget, res_dict):
    """Callback invoked when the user picks a star row in the table.

    An empty selection clears the current selection and redraws;
    otherwise the first selected row's star is marked (unless a
    selection is already in progress).
    """
    keys = list(res_dict.keys())
    if not keys:
        # Selection cleared: forget it and redraw the plot.
        self.selected = []
        self.replot_stars()
        return
    star = self.starlist[int(keys[0])]
    if not self._select_flag:
        self.mark_selection(star, fromtable=True)
    return True
This method is called when the user selects a star from the table.
def _assert_equal_channels(axis):
    """Check that all the trials have the same channels, in the same order.

    Parameters
    ----------
    axis : ndarray of ndarray
        one of the data axes; each element holds the channel labels of
        one trial.

    Raises
    ------
    ValueError
        if any trial's channel labels differ from the others.
    """
    # Comparing every trial against the first is O(n) and equivalent to
    # the previous all-pairs O(n^2) check, since label equality is
    # transitive.
    trials = iter(axis)
    try:
        reference = next(trials)
    except StopIteration:
        return
    for chan in trials:
        if not all(reference == chan):
            raise ValueError('The channels for all the trials should have '
                             'the same labels, in the same order.')
Check that all the trials have the same channel labels, in the same order.

Parameters
----------
axis : ndarray of ndarray
    one of the data axes

Raises
------
ValueError
    if the channel labels differ between any two trials or are not in
    the same order.
def _wake(self):
    """Wake the multiplexer by writing a byte.

    If Broker is midway through teardown, the FD may already be closed,
    so ignore EBADF.
    """
    try:
        self.transmit_side.write(b(' '))
    except OSError:
        # Python 2 compatible spelling of ``except OSError as e``.
        e = sys.exc_info()[1]
        # Only EBADF (already-closed descriptor) is expected and safe to
        # swallow; anything else is a real error.
        if e.args[0] != errno.EBADF:
            raise
Wake the multiplexer by writing a byte. If Broker is midway through teardown, the FD may already be closed, so ignore EBADF.
def nearly_unique(arr, rel_tol=1e-4, verbose=0):
    """Heuristic method to return the unique values of a numpy array
    within some precision.

    A value is appended only when it differs from every value kept so
    far by more than ``rel_tol``.  Note: despite its name, ``rel_tol``
    is applied as an absolute difference.  ``verbose`` is accepted for
    API compatibility but unused.
    """
    uniques = np.array([arr[0]])
    for value in arr:
        if np.min(np.abs(uniques - value)) > rel_tol:
            uniques = np.append(uniques, value)
    return uniques
Heuristic method to return the uniques within some precision in a numpy array
def _ParseVSSProcessingOptions(self, options):
    """Parses the VSS processing options.

    Args:
        options (argparse.Namespace): command line arguments.

    Raises:
        BadConfigOption: if the options are invalid.
    """
    self._process_vss = not getattr(options, 'no_vss', False)
    vss_only = False
    vss_stores = None
    if self._process_vss:
        vss_only = getattr(options, 'vss_only', False)
        vss_stores = getattr(options, 'vss_stores', None)
        if vss_stores:
            # Validate the identifier string before accepting it.
            try:
                self._ParseVolumeIdentifiersString(vss_stores, prefix='vss')
            except ValueError:
                raise errors.BadConfigOption('Unsupported VSS stores')
    self._vss_only = vss_only
    self._vss_stores = vss_stores
Parses the VSS processing options. Args: options (argparse.Namespace): command line arguments. Raises: BadConfigOption: if the options are invalid.
def podcast_episodes_iter(self, *, device_id=None, page_size=250):
    """Get a paged iterator of podcast episodes for all subscribed podcasts.

    Parameters:
        device_id (str, Optional): A mobile device ID.
            Default: use the ``device_id`` of this client instance.
        page_size (int, Optional): Maximum number of results per page.
            Default: ``250``.

    Yields:
        list: Podcast episode dicts, one list per page.
    """
    if device_id is None:
        device_id = self.device_id

    start_token = None
    prev_items = None

    while True:
        response = self._call(
            mc_calls.PodcastEpisode,
            device_id,
            max_results=page_size,
            start_token=start_token
        )
        items = response.body.get('data', {}).get('items', [])

        # Guard against the server repeating the last page: stop as soon
        # as a page is identical to the previous one.
        if items != prev_items:
            yield items
            prev_items = items
        else:
            break

        start_token = response.body.get('nextPageToken')
        if start_token is None:
            break
Get a paged iterator of podcast episode for all subscribed podcasts. Parameters: device_id (str, Optional): A mobile device ID. Default: Use ``device_id`` of the :class:`MobileClient` instance. page_size (int, Optional): The maximum number of results per returned page. Max allowed is ``49995``. Default: ``250`` Yields: list: Podcast episode dicts.
def state_entry(self, args=None, **kwargs):
    """Create an entry state.

    :param args: List of SootArgument values (optional).
    """
    state = self.state_blank(**kwargs)
    # If no arguments were supplied and the entry point is a standard
    # ``main(String[])``, synthesize a symbolic command-line argument
    # array so that argv contents stay unconstrained.
    if not args and state.addr.method.name == 'main' and \
            state.addr.method.params[0] == 'java.lang.String[]':
        cmd_line_args = SimSootExpr_NewArray.new_array(state, "java.lang.String", BVS('argc', 32))
        cmd_line_args.add_default_value_generator(self.generate_symbolic_cmd_line_arg)
        args = [SootArgument(cmd_line_args, "java.lang.String[]")]
        # Keep a reference so the argv array stays accessible from the state.
        state.globals['cmd_line_args'] = cmd_line_args
    SimEngineSoot.setup_arguments(state, args)
    return state
Create an entry state. :param args: List of SootArgument values (optional).
def start_sikuli_process(self, port=None):
    """Start the sikuli java process.

    If the library was initialized with mode "OLD" the process starts
    automatically; with mode "NEW" this keyword must be called.

    :param port: port for the sikuli java process; if None or 0, a
        random free port is used
    :return: None
    :raises RuntimeError: if the process fails to start after 5 attempts
    """
    if port is None or int(port) == 0:
        port = self._get_free_tcp_port()
    self.port = port
    start_retries = 0
    started = False
    # Retry up to 5 times, picking a fresh free port after each failure
    # (the previous port may be the reason the start failed).
    while start_retries < 5:
        try:
            self._start_sikuli_java_process()
        except RuntimeError as err:
            print('error........%s' % err)
            if self.process:
                self.process.terminate_process()
            self.port = self._get_free_tcp_port()
            start_retries += 1
            continue
        started = True
        break
    if not started:
        raise RuntimeError('Start sikuli java process failed!')
    self.remote = self._connect_remote_library()
This keyword is used to start sikuli java process. If library is inited with mode "OLD", sikuli java process is started automatically. If library is inited with mode "NEW", this keyword should be used. :param port: port of sikuli java process, if value is None or 0, a random free port will be used :return: None
def get_queryset(self):
    """Return the Team queryset, optionally restricted by filters built
    from the URL keyword arguments and query parameters.
    """
    queryset_filters = self.get_db_filters(self.kwargs,
                                           self.request.query_params)
    base_queryset = Team.objects.prefetch_related('players').all()
    return base_queryset.filter(
        **queryset_filters['db_filters']
    ).exclude(
        **queryset_filters['db_excludes']
    )
Optionally restricts the queryset by filtering against query parameters in the URL.
def on_rule(self, *args):
    """Re-attach the listener whenever the rule changes.

    Does nothing while no rule is set.
    """
    if self.rule is not None:
        self.rule.connect(self._listen_to_rule)
Make sure to update when the rule changes
def next(self):
    """Return the next row from the Instances object.

    :return: the next Instance object
    :rtype: Instance
    :raises StopIteration: when all rows have been consumed
    """
    if self.row >= self.data.num_instances:
        raise StopIteration()
    current = self.row
    self.row += 1
    return self.data.get_instance(current)
Returns the next row from the Instances object. :return: the next Instance object :rtype: Instance
def is_code(filename):
    """Return True if any non-comment line of the file contains an
    opening brace '{' that is not part of a '${' variable reference.
    """
    with open(filename, "r") as source:
        for line in source:
            if is_comment(line):
                continue
            if '{' in line and '${' not in line:
                return True
    return False
This function returns True if any line of the file contains an opening bracket '{' that is not inside a comment and is not part of a '${' variable reference; otherwise it returns False.
def align_and_build_tree(seqs, moltype, best_tree=False, params=None):
    """Align *seqs* and build a tree from the resulting alignment.

    seqs: a SequenceCollection object, or data that can be used to
        build one.
    moltype: a MolType object for the sequences.
    best_tree: if True (default False), use the slower but more
        accurate tree-building algorithm.
    params: dict of parameters passed to the Muscle app controller.

    Returns a dict with keys 'Align' (Alignment) and 'Tree' (PhyloNode),
    either of which may be None on failure.
    """
    alignment = align_unaligned_seqs(seqs, moltype=moltype, params=params)
    phylo_tree = build_tree_from_alignment(alignment, moltype, best_tree, params)
    return {'Align': alignment, 'Tree': phylo_tree}
Returns an alignment and a tree from Sequences object seqs. seqs: a cogent.core.alignment.SequenceCollection object, or data that can be used to build one. moltype: cogent.core.moltype.MolType object best_tree: if True (default:False), uses a slower but more accurate algorithm to build the tree. params: dict of parameters to pass in to the Muscle app controller. The result will be a tuple containing a cogent.core.alignment.Alignment and a cogent.core.tree.PhyloNode object (or None for the alignment and/or tree if either fails).
def pause(self):
    """Set the execution mode to paused."""
    # Pausing only makes sense while a state machine is active.
    if self.state_machine_manager.active_state_machine_id is None:
        logger.info("'Pause' is not a valid action to initiate state machine execution.")
        return
    # Recursively pause all states of the active state machine before
    # flipping the execution mode.
    if self.state_machine_manager.get_active_state_machine() is not None:
        self.state_machine_manager.get_active_state_machine().root_state.recursively_pause_states()
    logger.debug("Pause execution ...")
    self.set_execution_mode(StateMachineExecutionStatus.PAUSED)
Set the execution mode to paused
def list_projects(page_size=200, page_index=0, sort="", q=""):
    """List all Projects, formatted as JSON.

    Returns None when the raw listing call yields no content.
    """
    content = list_projects_raw(page_size=page_size, page_index=page_index,
                                sort=sort, q=q)
    if not content:
        return None
    return utils.format_json_list(content)
List all Projects
def parse(self, data):
    """Convert a BMX6 b6m JSON topology into a NetworkX Graph object.

    :param data: list of node dicts, each with a 'name' and a 'links'
        list of dicts carrying 'name', 'txRate' and 'rxRate'.
    :returns: the populated graph from ``self._init_graph()``.
    :raises ParserError: if any node is missing the "links" key
        (previously only the first node was validated and later nodes
        raised a bare KeyError).
    """
    graph = self._init_graph()
    for node in data:
        if "links" not in node:
            raise ParserError('Parse error, "links" key not found')
        for link in node['links']:
            # Link cost is the mean of the transmit and receive rates.
            cost = (link['txRate'] + link['rxRate']) / 2.0
            graph.add_edge(node['name'], link['name'],
                           weight=cost,
                           tx_rate=link['txRate'],
                           rx_rate=link['rxRate'])
    return graph
Converts a BMX6 b6m JSON to a NetworkX Graph object which is then returned.
def pop(self, symbol):
    """Delete the current (most recent) metadata of `symbol`.

    Parameters
    ----------
    symbol : `str`
        symbol name to delete

    Returns
    -------
    The deleted metadata document.

    Raises
    ------
    NoDataFoundException
        if no metadata exists for `symbol`.
    """
    # Fetch the most recent metadata entry before removing it.
    last_metadata = self.find_one({'symbol': symbol},
                                  sort=[('start_time', pymongo.DESCENDING)])
    if last_metadata is None:
        raise NoDataFoundException('No metadata found for symbol {}'.format(symbol))

    # Remove the latest entry...
    self.find_one_and_delete({'symbol': symbol},
                             sort=[('start_time', pymongo.DESCENDING)])
    # ...then reopen the now-latest entry by clearing its end_time,
    # retrying on transient mongo errors.
    mongo_retry(self.find_one_and_update)({'symbol': symbol}, {'$unset': {'end_time': ''}},
                                          sort=[('start_time', pymongo.DESCENDING)])

    return last_metadata
Delete current metadata of `symbol` Parameters ---------- symbol : `str` symbol name to delete Returns ------- Deleted metadata
def parse_expmethodresponse(self, tup_tree):
    """This function is not implemented; it always raises
    CIMXMLParseError for the given element.
    """
    raise CIMXMLParseError(
        _format("Internal Error: Parsing support for element {0!A} is not "
                "implemented", name(tup_tree)),
        conn_id=self.conn_id)
This function is deliberately not implemented; calling it always raises CIMXMLParseError.
def bias_correct(params, data, acf=None):
    """Calculate and apply a bias correction to the given fit parameters.

    Parameters
    ----------
    params : lmfit.Parameters
        The model parameters; modified in place.
    data : 2d-array
        The data which was used in the fitting.
    acf : 2d-array
        ACF of the data. Default = None.

    Returns
    -------
    None

    See Also
    --------
    :func:`AegeanTools.fitting.RB_bias`
    """
    bias = RB_bias(data, params, acf=acf)
    # The bias vector only covers varying, non-theta parameters, so the
    # index advances only when a correction is actually applied.
    index = 0
    for param_name in params:
        if 'theta' in param_name:
            continue
        if params[param_name].vary:
            params[param_name].value -= bias[index]
            index += 1
    return
Calculate and apply a bias correction to the given fit parameters Parameters ---------- params : lmfit.Parameters The model parameters. These will be modified. data : 2d-array The data which was used in the fitting acf : 2d-array ACF of the data. Default = None. Returns ------- None See Also -------- :func:`AegeanTools.fitting.RB_bias`
def LocalPathToCanonicalPath(path):
    """Converts a path from the local system's convention to the
    canonical (forward-slash) form.
    """
    result = []
    for component in path.split("/"):
        # Leave Windows device-path prefixes (e.g. "\\\\.\\") untouched;
        # convert backslashes in every other component.
        if not re.match(r"\\\\.\\", component):
            component = component.replace("\\", "/")
        result.append(component)
    return utils.JoinPath(*result)
Converts a path from the local system's convention to the canonical form, replacing backslashes with forward slashes while preserving Windows device-path prefixes.
def receive_pong(self, pong: Pong):
    """Handle a Pong message.

    Resolves the async result registered for the matching ping; logs a
    warning when no matching ping is known.
    """
    message_id = ('ping', pong.nonce, pong.sender)
    async_result = self.messageids_to_asyncresults.get(message_id)
    if async_result is None:
        self.log_healthcheck.warn(
            'Unknown pong received',
            message_id=message_id,
        )
        return
    self.log_healthcheck.debug(
        'Pong received',
        sender=pex(pong.sender),
        message_id=pong.nonce,
    )
    async_result.set(True)
Handles a Pong message.
def getProcList(self, fields=('pid', 'user', 'cmd',), threads=False, **kwargs):
    """Execute ps with a custom output format and return filtered rows.

    The Standard Format Specifiers from the ps man page must be used for
    the fields parameter.

    @param fields:  Fields included in the output.
                    Default: pid, user, cmd
    @param threads: If True, include threads in output.
    @param kwargs:  Filters on column values; each keyword is a field
                    name with an optional suffix:
                    field, field_ic, field_regex, field_ic_regex.
    @return: Dict with 'headers' and 'stats' (rows), or None when ps
             produced no parseable output.
    """
    field_list = list(fields)
    # Make sure every filtered column is also present in the output;
    # strip the filter suffixes (_ic, _regex, _ic_regex) to recover the
    # underlying column name.
    for key in kwargs:
        col = re.sub('(_ic)?(_regex)?$', '', key)
        if not col in field_list:
            field_list.append(col)
    pinfo = self.parseProcCmd(field_list, threads)
    if pinfo:
        if len(kwargs) > 0:
            # Apply the keyword filters to the parsed rows.
            pfilter = util.TableFilter()
            pfilter.registerFilters(**kwargs)
            stats = pfilter.applyFilters(pinfo['headers'], pinfo['stats'])
            return {'headers': pinfo['headers'], 'stats': stats}
        else:
            return pinfo
    else:
        return None
Execute ps command with custom output format with columns columns from fields, select lines using the filters defined by kwargs and return result as a nested list. The Standard Format Specifiers from ps man page must be used for the fields parameter. @param fields: Fields included in the output. Default: pid, user, cmd @param threads: If True, include threads in output. @param **kwargs: Keyword variables are used for filtering the results depending on the values of the columns. Each keyword must correspond to a field name with an optional suffix: field: Field equal to value or in list of values. field_ic: Field equal to value or in list of values, using case insensitive comparison. field_regex: Field matches regex value or matches with any regex in list of values. field_ic_regex: Field matches regex value or matches with any regex in list of values using case insensitive match. @return: List of headers and list of rows and columns.
def wait_until_element_present(self, element, timeout=None):
    """Search for *element* and wait until it is found.

    :param element: PageElement or locator tuple (locator_type, locator_value)
    :param timeout: max time to wait
    :returns: the web element once present
    :raises TimeoutException: if the element is not found in time
    """
    condition = self._expected_condition_find_element
    return self._wait_until(condition, element, timeout)
Search element and wait until it is found :param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found :param timeout: max time to wait :returns: the web element if it is present :rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement :raises TimeoutException: If the element is not found after the timeout
def get(cls, reactor, source='graphite', **options):
    """Look up the Alert class registered for *source* and
    instantiate it with the given reactor and options.
    """
    alert_cls = cls.alerts[source]
    return alert_cls(reactor, **options)
Get Alert Class by source.
def print_help(self, prog_name, subcommand):
    """Print the help of the parser built for this subcommand.

    :param prog_name: vcs main script name
    :param subcommand: command name
    """
    self.get_parser(prog_name, subcommand).print_help()
Prints parser's help. :param prog_name: vcs main script name :param subcommand: command name
def _checkResponseWriteData(payload, writedata):
    """Check that the write data echoed in the response is correct.

    Bytes 2 and 3 (zero based) of the payload hold the write data.

    Args:
        * payload (string): The payload
        * writedata (string): The data to write; must be 2 bytes long.

    Raises:
        TypeError, ValueError
    """
    _checkString(payload, minlength=4, description='payload')
    _checkString(writedata, minlength=2, maxlength=2, description='writedata')

    # Bytes 2-3 of the payload echo the commanded write data.
    receivedWritedata = payload[2:4]
    if receivedWritedata != writedata:
        raise ValueError('Wrong write data in the response: {0!r}, but commanded is {1!r}. The data payload is: {2!r}'.format( \
            receivedWritedata, writedata, payload))
Check that the write data as given in the response is correct. The bytes 2 and 3 (zero based counting) in the payload holds the write data. Args: * payload (string): The payload * writedata (string): The data to write, length should be 2 bytes. Raises: TypeError, ValueError
def combine_pyramid_and_save(g_video, orig_video, enlarge_multiple, fps,
                             save_filename='media/output.avi'):
    """Combine a gaussian-pyramid video representation with the original
    frames and save the result to *save_filename*.

    :param g_video: pyramid-level frames, indexed by frame number
    :param orig_video: original frames, same length as g_video
    :param enlarge_multiple: number of pyrUp upsampling passes per frame
    :param fps: frame rate of the output video
    :param save_filename: output path (MJPG/AVI)
    """
    width, height = get_frame_dimensions(orig_video[0])
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    print("Outputting to %s" % save_filename)
    writer = cv2.VideoWriter(save_filename, fourcc, fps, (width, height), 1)
    try:
        for x in range(0, g_video.shape[0]):
            img = np.ndarray(shape=g_video[x].shape, dtype='float')
            img[:] = g_video[x]
            # Upsample the pyramid level back to (at least) original size.
            for i in range(enlarge_multiple):
                img = cv2.pyrUp(img)
            # Add the amplified signal back onto the original frame.
            img[:height, :width] = img[:height, :width] + orig_video[x]
            res = cv2.convertScaleAbs(img[:height, :width])
            writer.write(res)
    finally:
        # Release the writer so the container is finalized even on error
        # (the original leaked the VideoWriter handle).
        writer.release()
Combine a gaussian video representation with the original and save to file
def read_interfaces(path: str) -> Interfaces:
    """Read the Interfaces JSON file at *path* and return its contents
    as a dictionary.
    """
    with open(path, encoding='utf-8') as handle:
        return json.load(handle)
Reads an Interfaces JSON file at the given path and returns it as a dictionary.
def validate_confusables_email(value):
    """Validator which disallows 'dangerous' email addresses likely to
    represent homograph attacks.

    An address is 'dangerous' if either its local-part or its domain,
    considered on their own, are mixed-script and contain one or more
    characters from the Unicode Visually Confusable Characters file.

    :raises ValidationError: for dangerous addresses.
    """
    if '@' not in value:
        # Not address-shaped; leave rejection to the email validator.
        return
    # rsplit: a quoted local-part may itself contain '@'; a plain
    # split('@') raised ValueError on such addresses.
    local_part, domain = value.rsplit('@', 1)
    if confusables.is_dangerous(local_part) or \
            confusables.is_dangerous(domain):
        raise ValidationError(CONFUSABLE_EMAIL, code='invalid')
Validator which disallows 'dangerous' email addresses likely to represent homograph attacks. An email address is 'dangerous' if either the local-part or the domain, considered on their own, are mixed-script and contain one or more characters appearing in the Unicode Visually Confusable Characters file.
def get_shots(self):
    """Return the shot chart data as a pandas DataFrame."""
    # Parse the JSON body once instead of twice.
    result_set = self.response.json()['resultSets'][0]
    return pd.DataFrame(result_set['rowSet'], columns=result_set['headers'])
Returns the shot chart data as a pandas DataFrame.
def write_to_cache(self, data, filename):
    """Serialize *data* as pretty-printed JSON (sorted keys, 2-space
    indent) and write it to *filename*.

    Uses a ``with`` block so the file handle is closed even when the
    write fails (the original leaked the handle on error).
    """
    json_data = json.dumps(data, sort_keys=True, indent=2)
    with open(filename, 'w') as cache:
        cache.write(json_data)
Writes data in JSON format to a file