code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def all_nbrs(self, node):
    """List of nodes connected to *node* by incoming and outgoing edges.

    Duplicates are removed; first-seen order (incoming first) is preserved.
    """
    combined = []
    for nbr in self.inc_nbrs(node) + self.out_nbrs(node):
        if nbr not in combined:
            combined.append(nbr)
    return combined
List of nodes connected by incoming and outgoing edges
def plot_cylinder(ax, start, end, start_radius, end_radius, color='black', alpha=1., linspace_count=_LINSPACE_COUNT):
    """Plot a 3D (optionally tapered) cylinder on the given matplotlib 3D axes.

    :param ax: matplotlib 3D axes to draw on
    :param start: 3-vector, centre of the starting cap
    :param end: 3-vector, centre of the ending cap
    :param start_radius: radius at *start*
    :param end_radius: radius at *end*
    :param linspace_count: sampling resolution of the generated surface
    """
    # A zero-length cylinder has no definable surface.
    assert not np.all(start == end), 'Cylinder must have length'
    x, y, z = generate_cylindrical_points(start, end, start_radius, end_radius,
                                          linspace_count=linspace_count)
    ax.plot_surface(x, y, z, color=color, alpha=alpha)
plot a 3d cylinder
def rank(self, value):
    """Return the (rank, count) of *value* in the btree.

    ``rank`` is the number of stored items strictly less than *value*;
    ``count`` is the number of occurrences of *value* itself (0 if absent).
    """
    i = 0
    n = len(self._tree)
    rank = 0
    count = 0
    while i < n:
        cur = self._tree[i]
        if value < cur:
            # Descend into the left child (implicit heap layout: 2i+1).
            i = 2 * i + 1
            continue
        elif value > cur:
            # Everything under this node except the right subtree is <= value.
            rank += self._counts[i]
            nexti = 2 * i + 2
            if nexti < n:
                rank -= self._counts[nexti]
                i = nexti
                continue
            else:
                return (rank, count)
        else:
            # Exact match: this subtree's count minus both children's counts.
            count = self._counts[i]
            lefti = 2 * i + 1
            if lefti < n:
                nleft = self._counts[lefti]
                count -= nleft
                rank += nleft
                righti = lefti + 1
                if righti < n:
                    count -= self._counts[righti]
            return (rank, count)
    return (rank, count)
Returns the rank and count of the value in the btree.
def transform(function):
    """Return a processor for a style's "transform" function.

    The processor passes ``Nothing`` results through untouched and re-raises
    any failure inside *function* as a ``StyleFunctionError``.
    """
    def transform_fn(_, result):
        if isinstance(result, Nothing):
            return result
        lgr.debug("Transforming %r with %r", result, function)
        try:
            return function(result)
        # BUG FIX: a bare `except:` also captured SystemExit and
        # KeyboardInterrupt and re-raised them as StyleFunctionError;
        # catch Exception instead.
        except Exception:
            exctype, value, tb = sys.exc_info()
            try:
                new_exc = StyleFunctionError(function, exctype, value)
                # Don't show the original traceback chain twice.
                new_exc.__cause__ = None
                six.reraise(StyleFunctionError, new_exc, tb)
            finally:
                # Avoid a reference cycle through the traceback.
                del tb
    return transform_fn
Return a processor for a style's "transform" function.
def send_explode(self):
    """Send the legacy "explode" packet (opcode 20) and mark this player dead.

    In earlier versions of the game, sending this caused your cells to split
    into lots of small cells and die.
    """
    self.send_struct('<B', 20)
    self.player.own_ids.clear()
    self.player.cells_changed()
    self.ingame = False
    self.subscriber.on_death()
In earlier versions of the game, sending this caused your cells to split into lots of small cells and die.
def plot(self, x, y, color="black"):
    """Draw a single point at (x, y), filled with *color*, using the
    canvas coordinate system."""
    p = Point(x, y)
    p.fill(color)
    p.draw(self)
Uses the coordinate system.
def pwm_min_score(self):
    """Return the minimum PWM score.

    The result is computed once and cached on ``self.min_score``.

    Returns
    -------
    score : float
        Minimum PWM score.
    """
    if self.min_score is None:
        self.min_score = sum(
            log(min(row) / 0.25 + 0.01) for row in self.pwm
        )
    return self.min_score
Return the minimum PWM score. Returns ------- score : float Minimum PWM score.
def subscribe_account(self, username, password, service):
    """Subscribe an account for *service* via a POST to the subscribe endpoint."""
    data = {
        'service': service,
        'username': username,
        'password': password,
    }
    return self._perform_post_request(self.subscribe_account_endpoint, data, self.token_header)
Subscribe an account for a service.
def post(self, resource):
    """Create a new instance of the resource on the API.

    Args:
        resource: gophish.models.Model instance to create.

    Raises:
        Error: when the API responds with a non-OK status.
    """
    response = self.api.execute("POST", self.endpoint, json=(resource.as_dict()))
    if not response.ok:
        raise Error.parse(response.json())
    return self._cls.parse(response.json())
Creates a new instance of the resource. Args: resource - gophish.models.Model - The resource instance
def validate_unset_command(self, line: str, position: int, annotation: str) -> None:
    """Raise an exception when ``UNSET X`` targets an annotation that is not set.

    :raises: MissingAnnotationKeyWarning
    """
    if annotation not in self.annotations:
        raise MissingAnnotationKeyWarning(self.get_line_number(), line, position, annotation)
Raise an exception when trying to ``UNSET X`` if ``X`` is not already set. :raises: MissingAnnotationKeyWarning
def marshall_key(self, key):
    """Marshall a Crash key to be used in the database; results are memoized
    in ``self.__keys``.

    NOTE(review): Python 2 code -- uses the 'hex' codec and ``buffer``.

    @type key: L{Crash} key.
    @param key: Key to convert.
    @rtype: str or buffer
    @return: Converted key.
    """
    if key in self.__keys:
        return self.__keys[key]
    # Protocol 0 keeps the pickle ASCII-friendly.
    skey = pickle.dumps(key, protocol = 0)
    if self.compressKeys:
        skey = zlib.compress(skey, zlib.Z_BEST_COMPRESSION)
    if self.escapeKeys:
        skey = skey.encode('hex')  # Python 2 only
    if self.binaryKeys:
        skey = buffer(skey)  # Python 2 only
    self.__keys[key] = skey
    return skey
Marshalls a Crash key to be used in the database. @see: L{__init__} @type key: L{Crash} key. @param key: Key to convert. @rtype: str or buffer @return: Converted key.
def get_unicodedata():
    """Download the Unicode data files matching this Python's `unicodedata`
    version and return that version string.

    Asserts (rather than raises) on download failure.
    """
    import unicodedata
    fail = False
    uver = unicodedata.unidata_version
    path = os.path.join(os.path.dirname(__file__), 'tools')
    # NOTE(review): `imp` is deprecated since Python 3.4; importlib is the
    # modern route.
    fp, pathname, desc = imp.find_module('unidatadownload', [path])
    try:
        unidatadownload = imp.load_module('unidatadownload', fp, pathname, desc)
        unidatadownload.get_unicodedata(uver, no_zip=True)
    except Exception:
        print(traceback.format_exc())
        fail = True
    finally:
        fp.close()
    assert not fail, "Failed to obtain unicodedata!"
    return uver
Download the `unicodedata` version for the given Python version.
def vm_info(vm_=None):
    """Return detailed information about the VMs.

    If a VM name is passed, return info for just that VM; otherwise return
    info for all VMs.

    CLI Example:

    .. code-block:: bash

        salt '*' virt.vm_info
    """
    with _get_xapi_session() as xapi:

        def _info(vm_):
            # Gather one VM's record and metrics; False when the VM is unknown.
            vm_rec = _get_record_by_label(xapi, 'VM', vm_)
            if vm_rec is False:
                return False
            vm_metrics_rec = _get_metrics_record(xapi, 'VM', vm_rec)
            return {'cpu': vm_metrics_rec['VCPUs_number'],
                    'maxCPU': _get_val(vm_rec, ['VCPUs_max']),
                    'cputime': vm_metrics_rec['VCPUs_utilisation'],
                    'disks': get_disks(vm_),
                    'nics': get_nics(vm_),
                    'maxMem': int(_get_val(vm_rec, ['memory_dynamic_max'])),
                    'mem': int(vm_metrics_rec['memory_actual']),
                    'state': _get_val(vm_rec, ['power_state'])}

        info = {}
        if vm_:
            ret = _info(vm_)
            if ret is not None:
                info[vm_] = ret
        else:
            for vm_ in list_domains():
                ret = _info(vm_)
                if ret is not None:
                    # BUG FIX: previously stored `_info(vm_)` again here,
                    # doubling every XAPI query; reuse the computed result.
                    info[vm_] = ret
        return info
Return detailed information about the vms. If you pass a VM name in as an argument then it will return info for just the named VM, otherwise it will return all VMs. CLI Example: .. code-block:: bash salt '*' virt.vm_info
def load( self, stats ):
    """Build a squaremap-compatible model from a pstats instance.

    Rows that fail to construct are logged and skipped; each row is then
    woven into the call graph and the root row returned.

    NOTE(review): Python 2 code (`iteritems`, `except ValueError, err`).
    """
    rows = self.rows
    for func, raw in stats.iteritems():
        try:
            rows[func] = row = PStatRow( func,raw )
        except ValueError, err:
            log.info( 'Null row: %s', func )
    for row in rows.itervalues():
        row.weave( rows )
    return self.find_root( rows )
Build a squaremap-compatible model from a pstats class
def error_count(self):
    """Total number of validation errors across all fields of this row."""
    return sum(len(errors) for errors in self.error_dict.values())
Returns the total number of validation errors for this row.
def level(self):
    """Return the current scope level: this scope's stack depth plus the
    depths of all ancestor scopes."""
    total = 0
    node = self
    while node is not None:
        total += len(node._scope_stack)
        node = node._parent
    return total
Return the current scope level
def get_postcodedata(self, postcode, nr, addition="", **params):
    """Fetch address information for a Dutch postcode / housenumber.

    Parameters
    ----------
    postcode : str
        The full (Dutch) postcode.
    nr : int
        The housenumber.
    addition : str, optional
        Housenumber extension; validated against the API response.
    params : dict, optional
        Extra parameters to send with the request.

    Raises
    ------
    PostcodeError
        When *addition* is not among the valid housenumber additions.
    """
    endpoint = 'rest/addresses/%s/%s' % (postcode, nr)
    if addition:
        endpoint += '/' + addition
    retValue = self._API__request(endpoint, params=params)
    # Case-insensitive check of the requested addition against valid ones.
    if addition and addition.upper() not in \
        [a.upper() for a in retValue['houseNumberAdditions']]:
        # NOTE(review): the message interpolates retValue['houseNumberAddition'],
        # not the requested *addition* -- confirm that key exists in responses.
        raise PostcodeError(
            "ERRHouseNumberAdditionInvalid",
            {"exceptionId": "ERRHouseNumberAdditionInvalid",
             "exception": "Invalid housenumber addition: '%s'" % retValue['houseNumberAddition'],
             "validHouseNumberAdditions": retValue['houseNumberAdditions']})
    return retValue
get_postcodedata - fetch information for 'postcode'. Parameters ---------- postcode : string The full (dutch) postcode nr : int The housenumber addition : string (optional) the extension to a housenumber params : dict (optional) a list of parameters to send with the request. returns : a response dictionary
def count_vocab_items(self, counter: Dict[str, Dict[str, int]]):
    """Increment counts in *counter* for every vocabulary item in every
    ``Field`` of this ``Instance``."""
    for each_field in self.fields.values():
        each_field.count_vocab_items(counter)
Increments counts in the given ``counter`` for all of the vocabulary items in all of the ``Fields`` in this ``Instance``.
def __pkg_upgrade_flags(self, flags):
    """Manage flags for the package upgrade option.

    Scans arguments after the first three, removing recognised ones from
    ``self.args``. Returns (collected_flags, skip_pattern).
    """
    flag, skip = [], ""
    if flags[0] in self.args:
        # Iterate over a slice copy so removal from self.args is safe.
        for arg in self.args[3:]:
            if arg.startswith(flags[1]):
                # e.g. --skip=<pattern>
                skip = Regex(arg.split("=")[1]).get()
                self.args.remove(arg)
            if arg in flags:
                flag.append(arg)
                if arg in self.args:
                    self.args.remove(arg)
    return flag, skip
Manage flags for package upgrade option
def FromData(cls, stream, json_data, http=None, auto_transfer=None, **kwds):
    """Create a new Download object from a stream and serialized data.

    Raises:
        exceptions.InvalidDataError: when required serialization keys are
            missing from *json_data*.
    """
    info = json.loads(json_data)
    missing_keys = cls._REQUIRED_SERIALIZATION_KEYS - set(info.keys())
    if missing_keys:
        raise exceptions.InvalidDataError(
            'Invalid serialization data, missing keys: %s' % (
                ', '.join(missing_keys)))
    download = cls.FromStream(stream, **kwds)
    if auto_transfer is not None:
        download.auto_transfer = auto_transfer
    else:
        download.auto_transfer = info['auto_transfer']
    # Restore private state via the name-mangled attribute names.
    setattr(download, '_Download__progress', info['progress'])
    setattr(download, '_Download__total_size', info['total_size'])
    download._Initialize(http, info['url'])
    return download
Create a new Download object from a stream and serialized data.
def _set_attributes(self): for parameter, data in self._data.items(): if isinstance(data, dict) or isinstance(data, OrderedDict): field_names, field_values = zip(*data.items()) sorted_indices = np.argsort(field_names) attr = namedtuple( parameter, [field_names[i] for i in sorted_indices] ) setattr( self, parameter, attr(*[field_values[i] for i in sorted_indices]) ) else: setattr(self, parameter, data)
Traverse the internal dictionary and set the getters
def decode_netloc(self):
    """Decode the netloc part (IDNA host, port, unquoted auth) into a string."""
    rv = _decode_idna(self.host or '')
    if ':' in rv:
        # IPv6 literal: wrap in brackets.
        rv = '[%s]' % rv
    port = self.port
    if port is not None:
        rv = '%s:%d' % (rv, port)
    auth = ':'.join(filter(None, [
        _url_unquote_legacy(self.raw_username or '', '/:%@'),
        _url_unquote_legacy(self.raw_password or '', '/:%@'),
    ]))
    if auth:
        rv = '%s@%s' % (auth, rv)
    return rv
Decodes the netloc part into a string.
def current_task(self, args):
    """Name of the current action for progress-bar output.

    The '%pre' placeholder is replaced according to *args*.

    Returns
    -------
    ctask : str
        String representation of this task.
    """
    ctask = self.name if self.nice_name is None else self.nice_name
    if args is not None:
        verb = 'Updating' if args.update else 'Loading'
        ctask = ctask.replace('%pre', verb)
    return ctask
Name of current action for progress-bar output. The specific task string depends on the configuration via `args`. Returns ------- ctask : str String representation of this task.
def convert_feature_layers_to_dict(feature_layers):
    """Convert a list of 'feature_layer' objects into a dict keyed by the
    layer name."""
    return {layer['name']: layer['features'] for layer in feature_layers}
takes a list of 'feature_layer' objects and converts to a dict keyed by the layer name
def register_callback(self, key):
    """Add *key* to the set of pending callbacks.

    Raises:
        KeyReuseError: if *key* is already pending.
    """
    if self.pending_callbacks is None:
        # Lazily initialise the bookkeeping structures.
        self.pending_callbacks = set()
        self.results = {}
    if key in self.pending_callbacks:
        raise KeyReuseError("key %r is already pending" % (key,))
    self.pending_callbacks.add(key)
Adds ``key`` to the list of callbacks.
def log_error(self, callback, error=None):
    """Print the error that occurred while running *callback*."""
    print("Uncaught error during callback: {}".format(callback))
    print("Error: {}".format(error))
Log the error that occurred when running the given callback.
def mute(model: Model):
    """Block a model's views from being notified within the context.

    Changes made inside the ``with`` block are silently dropped; the views
    are never told they happened.

    Parameters:
        model: The model whose change events will be blocked.
    """
    if not isinstance(model, Model):
        raise TypeError("Expected a Model, not %r." % model)
    # Remember any instance-level override of _notify_model_views so it can
    # be restored afterwards (class attribute otherwise).
    restore = model.__dict__.get("_notify_model_views")
    model._notify_model_views = lambda e: None
    try:
        yield
    finally:
        if restore is None:
            # No instance-level override existed; fall back to the class attr.
            del model._notify_model_views
        else:
            model._notify_model_views = restore
Block a model's views from being notified. All changes within a "mute" context will be blocked. No content is yielded to the user as in :func:`hold`, and the views of the model are never notified that changes took place. Parameters: model: The model whose change events will be blocked. Examples: The view is never called due to the :func:`mute` context: .. code-block:: python from spectate import mvc l = mvc.List() @mvc.view(l) def raises(events): raise ValueError("Events occurred!") with mvc.mute(l): l.append(1)
def show_busy(self):
    """Lock the wizard buttons and enable the busy cursor while working."""
    self.progress_bar.show()
    self.parent.pbnNext.setEnabled(False)
    self.parent.pbnBack.setEnabled(False)
    self.parent.pbnCancel.setEnabled(False)
    self.parent.repaint()
    enable_busy_cursor()
    QgsApplication.processEvents()
Lock buttons and enable the busy cursor.
def findport(self, port):
    """Find and return the port tuple for *port*, creating and registering
    one when not found.

    @param port: A port.
    @type port: I{service.Port}
    @return: A port tuple.
    @rtype: (port, [method])
    """
    for p in self.ports:
        # BUG FIX: previously compared `p[0] == p` (always False), so a new
        # tuple was appended on every call; compare against `port` instead.
        if p[0] == port:
            return p
    p = (port, [])
    self.ports.append(p)
    return p
Find and return a port tuple for the specified port. Created and added when not found. @param port: A port. @type port: I{service.Port} @return: A port tuple. @rtype: (port, [method])
async def builds(self, *args, **kwargs):
    """List builds that have been run in Taskcluster (paginated, filterable
    on git-specific fields).

    Output schema: ``v1/build-list.json#``. This method is ``experimental``.
    """
    return await self._makeApiCall(self.funcinfo["builds"], *args, **kwargs)
List of Builds A paginated list of builds that have been run in Taskcluster. Can be filtered on various git-specific fields. This method gives output: ``v1/build-list.json#`` This method is ``experimental``
def config_dict(config):
    """Given a Sphinx config object, return a plain dict of its values."""
    return {key: getattr(config, key) for key in config.values}
Given a Sphinx config object, return a dictionary of config values.
def bundle_attacks_with_goal(sess, model, x, y, adv_x, attack_configs, run_counts, goal, report, report_path, attack_batch_size=BATCH_SIZE, eval_batch_size=BATCH_SIZE):
    """Run attack bundling for one specific AttackGoal (mostly called by
    `bundle_attacks`).

    Repeatedly runs attack batches until *goal* reports its criteria are
    satisfied, then marks the report complete and saves it.

    :param sess: tf.Session
    :param model: cleverhans.model.Model
    :param x: clean example inputs to attack
    :param y: true labels
    :param adv_x: adversarial examples made so far
    :param attack_configs: list of AttackConfigs to run
    :param run_counts: dict mapping AttackConfig -> per-example run counts
    :param goal: AttackGoal to run
    :param report: ConfidenceReport
    :param report_path: str, path the report will be saved to
    :param attack_batch_size: int, batch size for generating examples
    :param eval_batch_size: int, batch size for evaluating the model
    """
    goal.start(run_counts)
    _logger.info("Running criteria for new goal...")
    criteria = goal.get_criteria(sess, model, adv_x, y, batch_size=eval_batch_size)
    assert 'correctness' in criteria
    _logger.info("Accuracy: " + str(criteria['correctness'].mean()))
    assert 'confidence' in criteria
    while not goal.is_satisfied(criteria, run_counts):
        run_batch_with_goal(sess, model, x, y, adv_x, criteria, attack_configs,
                            run_counts, goal, report, report_path,
                            attack_batch_size=attack_batch_size)
    # Goal satisfied: finalize and persist the report.
    report.completed = True
    save(criteria, report, report_path, adv_x)
Runs attack bundling, working on one specific AttackGoal. This function is mostly intended to be called by `bundle_attacks`. Reference: https://openreview.net/forum?id=H1g0piA9tQ :param sess: tf.session.Session :param model: cleverhans.model.Model :param x: numpy array containing clean example inputs to attack :param y: numpy array containing true labels :param adv_x: numpy array containing the adversarial examples made so far by earlier work in the bundling process :param attack_configs: list of AttackConfigs to run :param run_counts: dict mapping AttackConfigs to numpy arrays specifying how many times they have been run on each example :param goal: AttackGoal to run :param report: ConfidenceReport :param report_path: str, the path the report will be saved to :param attack_batch_size: int, batch size for generating adversarial examples :param eval_batch_size: int, batch size for evaluating the model on adversarial examples
def _repr_mimebundle_(self, *args, **kwargs): try: if self.logo: p = pn.Row( self.logo_panel, self.panel, margin=0) return p._repr_mimebundle_(*args, **kwargs) else: return self.panel._repr_mimebundle_(*args, **kwargs) except: raise RuntimeError("Panel does not seem to be set up properly")
Display in a notebook or a server
def general_setting(key, default=None, expected_type=None, qsettings=None):
    """Get a value from QSettings, optionally coerced to *expected_type*.

    Falls back to an un-typed lookup when the typed read raises TypeError
    (the PyQt/Qt C++ value() APIs differ).

    :param key: Unique key for the setting.
    :param default: Value returned when the key is missing or on error.
    :param expected_type: The type of object expected (PyQt only).
    :param qsettings: Custom QSettings; a default one is created if omitted.
    :returns: The value of the key in the setting.
    :rtype: object
    """
    if qsettings is None:
        qsettings = QSettings()
    try:
        if isinstance(expected_type, type):
            return qsettings.value(key, default, type=expected_type)
        else:
            return qsettings.value(key, default)
    except TypeError as e:
        LOGGER.debug('exception %s' % e)
        LOGGER.debug('%s %s %s' % (key, default, expected_type))
        return qsettings.value(key, default)
Helper function to get a value from settings. :param key: Unique key for setting. :type key: basestring :param default: The default value in case of the key is not found or there is an error. :type default: basestring, None, boolean, int, float :param expected_type: The type of object expected. :type expected_type: type :param qsettings: A custom QSettings to use. If it's not defined, it will use the default one. :type qsettings: qgis.PyQt.QtCore.QSettings :returns: The value of the key in the setting. :rtype: object Note: The API for QSettings to get a value is different for PyQt and Qt C++. In PyQt we can specify the expected type. See: http://pyqt.sourceforge.net/Docs/PyQt4/qsettings.html#value
def buildprior(self, prior, mopt=None, extend=False):
    """Extract the model's parameters (intercept, and slope unless
    marginalised via *mopt*) from *prior*."""
    newprior = {}
    intercept, slope = gv.get_dictkeys(
        prior, [self.intercept, self.slope]
    )
    newprior[intercept] = prior[intercept]
    if mopt is None:
        # The slope parameter is only fit when not marginalising.
        newprior[slope] = prior[slope]
    return newprior
Extract the model's parameters from prior.
def find(self, collection, query):
    """Search *collection* with the given mongo find *query*.

    Just a raw interface to mongo; returns the pymongo Cursor with the
    results.
    """
    target = getattr(self.db, collection)
    return target.find(query)
Search a collection for the query provided. Just a raw interface to mongo to do any query you want. Args: collection: The db collection. See main class documentation. query: A mongo find query. Returns: pymongo Cursor object with the results.
def get(self, request, *args, **kwargs):
    """Retrieve the list of nodes of the specified layer."""
    self.get_layer()
    nodes = self.get_nodes(request, *args, **kwargs)
    return Response(nodes)
Retrieve list of nodes of the specified layer
def kde_peak(self, name, npoints=_npoints, **kwargs):
    """Calculate the peak of the kernel density estimate of column *name*."""
    data = self.get(name, **kwargs)
    # Delegates to the module-level kde_peak helper.
    return kde_peak(data, npoints)
Calculate peak of kernel density estimator
def create(self, permission):
    """Create a single permission for the parent object via POST.

    :param Permission permission: A single Permission object to be set.
    """
    parent_url = self.client.get_url(self.parent_object._manager._URL_KEY, 'GET', 'single',
                                     {'id': self.parent_object.id})
    target_url = parent_url + self.client.get_url_path(self._URL_KEY, 'POST', 'single')
    r = self.client.request('POST', target_url, json=permission._serialize())
    return permission._deserialize(r.json(), self)
Create single permission for the given object. :param Permission permission: A single Permission object to be set.
def get_as_type_with_default(self, key, value_type, default_value):
    """Convert the element at *key* into the type given by *value_type*,
    returning *default_value* when conversion is not possible.

    :param key: an index of the element to get.
    :param value_type: the TypeCode that defines the type of the result.
    :param default_value: the default value.
    :return: element value defined by the typecode, or the default.
    """
    value = self.get(key)
    return TypeConverter.to_type_with_default(value_type, value, default_value)
Converts map element into a value defined by the specified typecode. If conversion is not possible it returns the default value. :param key: an index of element to get. :param value_type: the TypeCode that defines the type of the result :param default_value: the default value :return: element value defined by the typecode or default value if conversion is not supported.
def _get_cputemp_with_lmsensors(self, zone=None): sensors = None command = ["sensors"] if zone: try: sensors = self.py3.command_output(command + [zone]) except self.py3.CommandError: pass if not sensors: sensors = self.py3.command_output(command) m = re.search("(Core 0|CPU Temp).+\+(.+).+\(.+", sensors) if m: cpu_temp = float(m.groups()[1].strip()[:-2]) else: cpu_temp = "?" return cpu_temp
Tries to determine CPU temperature using the 'sensors' command. Searches for the CPU temperature by looking for a value prefixed by either "CPU Temp" or "Core 0" - does not look for or average out temperatures of all cores if more than one.
def preprocess(content, options):
    """Split, filter, and join requirements-file lines; return a line iterator.

    :param content: the content of the requirements file
    :param options: cli options
    """
    lines_enum = enumerate(content.splitlines(), start=1)
    lines_enum = join_lines(lines_enum)
    lines_enum = ignore_comments(lines_enum)
    lines_enum = skip_regex(lines_enum, options)
    lines_enum = expand_env_variables(lines_enum)
    return lines_enum
Split, filter, and join lines, and return a line iterator :param content: the content of the requirements file :param options: cli options
def create_results_dir(self):
    """Ensure an empty results directory and a stable symlink to it exist
    for these versioned targets."""
    self._current_results_dir = self._cache_manager._results_dir_path(self.cache_key, stable=False)
    self._results_dir = self._cache_manager._results_dir_path(self.cache_key, stable=True)
    if not self.valid:
        # Only wipe and relink when the cached results are invalid.
        safe_mkdir(self._current_results_dir, clean=True)
        relative_symlink(self._current_results_dir, self._results_dir)
    self.ensure_legal()
Ensure that the empty results directory and a stable symlink exist for these versioned targets.
def is_installed(self, bug: Bug) -> bool:
    """Return whether the Docker image for *bug* has been installed onto
    this server.

    See: `BuildManager.is_installed`.
    """
    return self.__installation.build.is_installed(bug.image)
Determines whether or not the Docker image for a given bug has been installed onto this server. See: `BuildManager.is_installed`
def get_paginator(self, dataset, per_page, orphans=0,
                  allow_empty_first_page=True, **kwargs):
    """Return an IndefinitePaginator instance over *dataset* for this view."""
    return IndefinitePaginator(
        dataset, per_page, orphans=orphans,
        allow_empty_first_page=allow_empty_first_page, **kwargs)
Return an instance of the paginator for this view.
def guess_format(url):
    """Try to guess the format of a resource, possibly with a HEAD request.

    Returns (mime_type, normalized_format) where the second element is the
    module-level ``mime_map`` lookup of the first.
    """
    import requests
    from requests.exceptions import InvalidSchema
    from rowgenerators import parse_url_to_dict

    parts = parse_url_to_dict(url)

    # FIX: renamed the local `type` (shadowed the builtin) and use .get for
    # 'scheme', which may be absent and previously could raise KeyError.
    if parts.get('path'):
        # Guess from the file extension first.
        mtype, encoding = mimetypes.guess_type(url)
    elif parts.get('scheme') in ('http', 'https'):
        mtype, encoding = 'text/html', None
    else:
        mtype, encoding = None, None

    if mtype is None:
        # Fall back to asking the server.
        try:
            r = requests.head(url, allow_redirects=False)
            mtype = r.headers['Content-Type']
            if ';' in mtype:
                mtype, encoding = [e.strip() for e in mtype.split(';')]
        except InvalidSchema:
            pass

    return mtype, mime_map.get(mtype)
Try to guess the format of a resource, possibly with a HEAD request
async def get_friendly_name(self) -> Text:
    """Return the user's first name as the friendly name, fetching the full
    user object when the cached one is incomplete."""
    if 'first_name' not in self._user:
        user = await self._get_full_user()
    else:
        user = self._user
    return user.get('first_name')
Let's use the first name of the user as friendly name. In some cases the user object is incomplete, and in those cases the full user is fetched.
def wait_for_close(
        raiden: 'RaidenService',
        payment_network_id: PaymentNetworkID,
        token_address: TokenAddress,
        channel_ids: List[ChannelID],
        retry_timeout: float,
) -> None:
    """Wait until all given channels are closed.

    Note:
        This does not time out; use gevent.Timeout if needed.
    """
    return wait_for_channel_in_states(
        raiden=raiden,
        payment_network_id=payment_network_id,
        token_address=token_address,
        channel_ids=channel_ids,
        retry_timeout=retry_timeout,
        target_states=CHANNEL_AFTER_CLOSE_STATES,
    )
Wait until all channels are closed. Note: This does not time out, use gevent.Timeout.
def calcTemperature(self):
    """Calculate the mean planet temperature via equations.MeanPlanetTemp
    using the albedo assumption; returns NaN when inputs are unusable."""
    try:
        return eq.MeanPlanetTemp(self.albedo, self.star.T, self.star.R, self.a).T_p
    except (ValueError, HierarchyError):
        # Missing/invalid stellar parameters: report "unknown", don't raise.
        return np.nan
Calculates the temperature using equations.MeanPlanetTemp, an albedo assumption and potentially equations.starTemperature. issues - you can't get the albedo assumption without the temperature, but you need it to calculate the temperature.
def activate(self, resource=None, timeout=3, wait_for_finish=False):
    """Activate this package on the SMC.

    :param list resource: node hrefs to activate on; only required for
        software upgrades.
    :param int timeout: timeout between queries.
    :raises TaskRunFailed: failure during activation (downloading, etc).
    :rtype: TaskOperationPoller
    """
    return Task.execute(self, 'activate', json={'resource': resource},
                        timeout=timeout, wait_for_finish=wait_for_finish)
Activate this package on the SMC :param list resource: node href's to activate on. Resource is only required for software upgrades :param int timeout: timeout between queries :raises TaskRunFailed: failure during activation (downloading, etc) :rtype: TaskOperationPoller
def lookup(instruction, instructions=None):
    """Look up an instruction, which can be either a function or a string.

    Given a string, return the corresponding function from *instructions*;
    given a callable, return its name (reverse lookup). Anything else raises
    a MachineError.
    """
    if instructions is None:
        instructions = default_instructions

    if isinstance(instruction, str):
        return instructions[instruction]
    # IDIOM FIX: callable() instead of hasattr(x, "__call__"), and a dict
    # comprehension instead of dict(generator-of-tuples).
    elif callable(instruction):
        reverse_map = {func: name for name, func in instructions.items()}
        return reverse_map[instruction]
    else:
        raise errors.MachineError(
            KeyError("Unknown instruction: %s" % str(instruction)))
Looks up instruction, which can either be a function or a string. If it's a string, returns the corresponding method. If it's a function, returns the corresponding name.
def place_items_in_square(items, t):
    """Place *items* (cell indices of a t x t square) into per-row priority
    queues; returns a heapq-compatible list of non-empty rows.

    >>> place_items_in_square([1,5,7], 4)
    [(2, 1, [(1, 5), (3, 7)]), (3, 0, [(1, 1)])]
    >>> place_items_in_square([1,5,7], 3)
    [(2, 0, [(1, 1)]), (2, 1, [(2, 5)]), (2, 2, [(1, 7)])]
    """
    # Each row starts as (t, y, []): the first element is an "inverse length"
    # priority, decremented as items are added so fuller rows sort first.
    rows = [(t, y, []) for y in range(t)]
    for item in items:
        x = item % t
        y = item // t
        inverse_length, _, row_contents = rows[y]
        heapq.heappush(row_contents, (x, item))
        rows[y] = inverse_length - 1, y, row_contents
    # Sanity check: inverse length must mirror each row's fill level.
    assert all(inv_len == t - len(rows) for inv_len, _, rows in rows)
    heapq.heapify(rows)
    return [row for row in rows if row[2]]
Returns a list of rows that are stored as a priority queue to be used with heapq functions. >>> place_items_in_square([1,5,7], 4) [(2, 1, [(1, 5), (3, 7)]), (3, 0, [(1, 1)])] >>> place_items_in_square([1,5,7], 3) [(2, 0, [(1, 1)]), (2, 1, [(2, 5)]), (2, 2, [(1, 7)])]
def oldapi_request(self, method, endpoint, **kwargs):
    """Make a request to one of the old api endpoints.

    The url is built from :data:`TWITCH_APIURL` and the given endpoint.

    :param method: the request method
    :type method: :class:`str`
    :param endpoint: the endpoint of the old api (base url is automatic)
    :type endpoint: :class:`str`
    :param kwargs: keyword arguments of :meth:`requests.Session.request`
    :returns: a response object
    :rtype: :class:`requests.Response`
    :raises: :class:`requests.HTTPError`
    """
    headers = kwargs.setdefault('headers', {})
    headers['Client-ID'] = CLIENT_ID
    url = TWITCH_APIURL + endpoint
    return self.request(method, url, **kwargs)
Make a request to one of the old api endpoints. The url will be constructed of :data:`TWITCH_APIURL` and the given endpoint. :param method: the request method :type method: :class:`str` :param endpoint: the endpoint of the old api. The base url is automatically provided. :type endpoint: :class:`str` :param kwargs: keyword arguments of :meth:`requests.Session.request` :returns: a response object :rtype: :class:`requests.Response` :raises: :class:`requests.HTTPError`
def correct_entry(self, entry):
    """Apply this compatibility's corrections to a single entry in place.

    Args:
        entry: A DefectEntry object.

    Returns:
        The processed entry.

    Raises:
        CompatibilityError if entry is not compatible.
    """
    entry.correction.update(self.get_correction(entry))
    return entry
Corrects a single entry. Args: entry: A DefectEntry object. Returns: An processed entry. Raises: CompatibilityError if entry is not compatible.
def partition(self, ref=None, **kwargs):
    """Return a partition of this dataset by *ref* (matched against name,
    id or vid) or by PartitionNameQuery keyword arguments.

    Raises:
        NotFoundError: when *ref* matches no partition.
    """
    from .exc import NotFoundError
    from six import text_type
    if ref:
        for p in self.partitions:
            if (text_type(ref) == text_type(p.name) or
                    text_type(ref) == text_type(p.id) or
                    text_type(ref) == text_type(p.vid)):
                return p
        raise NotFoundError("Failed to find partition for ref '{}' in dataset '{}'".format(ref, self.name))
    elif kwargs:
        from ..identity import PartitionNameQuery
        pnq = PartitionNameQuery(**kwargs)
        # NOTE(review): this returns the bound method itself and never uses
        # `pnq`; looks like it should be `return self._find_orm(pnq)` -- confirm.
        return self._find_orm
Returns partition by ref.
def _keys(self, pattern): result = [] for client in self.redis_clients: result.extend(list(client.scan_iter(match=pattern))) return result
Execute the KEYS command on all Redis shards. Args: pattern: The KEYS pattern to query. Returns: The concatenated list of results from all shards.
def _decorate_urlconf(urlpatterns, decorator=require_auth, *args, **kwargs):
    """Wrap the view callback of every pattern in *urlpatterns* with
    *decorator*, recursing into included urlconfs."""
    if isinstance(urlpatterns, (list, tuple)):
        for pattern in urlpatterns:
            if getattr(pattern, 'callback', None):
                pattern._callback = decorator(
                    pattern.callback, *args, **kwargs)
            if getattr(pattern, 'url_patterns', []):
                # Included urlconf: recurse into its patterns.
                _decorate_urlconf(
                    pattern.url_patterns, decorator, *args, **kwargs)
    else:
        # A single pattern object was passed.
        if getattr(urlpatterns, 'callback', None):
            urlpatterns._callback = decorator(
                urlpatterns.callback, *args, **kwargs)
Decorate all urlpatterns by specified decorator
def _remove(self, handler, send_event=True):
    """Remove *handler* and detach any of its bound methods from every event.

    :param object handler: handler instance previously added.
    :param bool send_event: also fire on_handler_remove when True.
    """
    for event in self:
        event.remove_handlers_bound_to_instance(handler)
    self.handlers.remove(handler)
    if send_event:
        self.on_handler_remove(handler)
Remove handler instance and detach any methods bound to it from uninhibited. :param object handler: handler instance :return object: The handler you added is given back so this can be used as a decorator.
def _parse_resource(resource):
    """Parse and complete resource information.

    Bare email addresses are qualified under the users resource; known
    resource names pass through unchanged.
    """
    if resource:
        resource = resource.strip()
    if resource in {ME_RESOURCE, USERS_RESOURCE}:
        return resource
    if '@' in resource and not resource.startswith(USERS_RESOURCE):
        # Bare address: qualify it under the users resource.
        return '{}/{}'.format(USERS_RESOURCE, resource)
    return resource
Parses and completes resource information
def register(self, prefix, viewset, basename, factory=None, permission=None):
    """Route a ViewSet's methods on the pyramid configurator.

    Factory and permission are likely only going to exist until a proper
    permissions module exists for PRF.

    :param prefix: the uri route prefix.
    :param viewset: The ViewSet class to route.
    :param basename: Used to name the route in pyramid.
    :param factory: Optional, root factory used as the route's context.
    :param permission: Optional, permission to assign the route.
    """
    lookup = self.get_lookup(viewset)
    routes = self.get_routes(viewset)
    for route in routes:
        mapping = self.get_method_map(viewset, route.mapping)
        if not mapping:
            # The viewset implements none of this route's HTTP methods.
            continue
        url = route.url.format(
            prefix=prefix, lookup=lookup, trailing_slash=self.trailing_slash
        )
        view = viewset.as_view(mapping, **route.initkwargs)
        name = route.name.format(basename=basename)
        if factory:
            self.configurator.add_route(name, url, factory=factory)
        else:
            self.configurator.add_route(name, url)
        self.configurator.add_view(view, route_name=name, permission=permission)
Factory and permission are likely only going to exist until I have enough time to write a permissions module for PRF. :param prefix: the uri route prefix. :param viewset: The ViewSet class to route. :param basename: Used to name the route in pyramid. :param factory: Optional, root factory to be used as the context to the route. :param permission: Optional, permission to assign the route.
def log(verbose=False):
    """Print a log test.

    :param verbose: show more logs
    """
    terminal.log.config(verbose=verbose)
    terminal.log.info('this is a info message')
    terminal.log.verbose.info('this is a verbose message')
print a log test :param verbose: show more logs
def _removeHeaderTag(header, tag): if header.startswith(tag): tagPresent = True header = header[len(tag):] else: tagPresent = False return header, tagPresent
Removes a tag from the beginning of a header string. :param header: str :param tag: str :returns: (str, bool), header without the tag and a bool that indicates whether the tag was present.
def flatten_dict(dict_obj, separator='.', flatten_lists=False):
    """Flatten a possibly nested dict into a single-level dict whose keys are
    original key paths joined with *separator*.

    Parameters
    ----------
    dict_obj : dict
        A possibly nested dict.
    separator : str, optional
        The character placed between joined key parts. Defaults to '.'.
    flatten_lists : bool, optional
        NOTE(review): accepted but never consulted below -- lists are always
        flattened via the AttributeError fallback; confirm intent.

    Example
    -------
    >>> sorted(flatten_dict({'a': 1, 'b': {'g': 4}, 'x': [4, 'd']}).items())
    [('a', 1), ('b.g', 4), ('x.0', 4), ('x.1', 'd')]
    """
    reducer = _get_key_reducer(separator)
    flat = {}

    def _flatten_key_val(key, val, parent):
        flat_key = reducer(parent, key)
        try:
            _flatten(val, flat_key)
        except TypeError:
            # Leaf value (not iterable, or a string/bytes): store as-is.
            flat[flat_key] = val

    def _flatten(d, parent=None):
        try:
            for key, val in d.items():
                _flatten_key_val(key, val, parent)
        except AttributeError:
            # Not a mapping: strings/bytes are leaves; other iterables are
            # treated as index-keyed sequences.
            if isinstance(d, (str, bytes)):
                raise TypeError
            for i, value in enumerate(d):
                _flatten_key_val(str(i), value, parent)

    _flatten(dict_obj)
    return flat
Flattens the given dict into a single-level dict with flattend keys. Parameters ---------- dict_obj : dict A possibly nested dict. separator : str, optional The character to use as a separator between keys. Defaults to '.'. flatten_lists : bool, optional If True, list values are also flattened. False by default. Returns ------- dict A shallow dict, where no value is a dict in itself, and keys are concatenations of original key paths separated with the given separator. Example ------- >>> dicti = {'a': 1, 'b': {'g': 4, 'o': 9}, 'x': [4, 'd']} >>> flat = flatten_dict(dicti) >>> sorted(flat.items()) [('a', 1), ('b.g', 4), ('b.o', 9), ('x.0', 4), ('x.1', 'd')]
def convert_upsample(net, node, module, builder):
    """Convert an UpSampling layer from mxnet to coreml.

    Parameters
    ----------
    net: network
        A mxnet network object.
    node: layer
        Node to convert.
    module: module
        An module for MXNet.
    builder: NeuralNetworkBuilder
        A neural network builder object.
    """
    input_name, output_name = _get_input_output_name(net, node)
    name = node['name']
    param = _get_attr(node)
    inputs = node['inputs']
    args, _ = module.get_params()
    scale = literal_eval(param['scale'])

    # BUG FIX: the second branch compared sample_type against '' (never set
    # by mxnet) and `mode` could be left unbound, producing a NameError at
    # add_upsample; raise a clear error for unsupported/missing types instead.
    if 'sample_type' in param.keys():
        method = param['sample_type']
        if method == 'nearest':
            mode = 'NN'
        elif method == 'bilinear':
            mode = 'BILINEAR'
        else:
            raise ValueError("Unsupported sample_type: %r" % (method,))
    else:
        raise ValueError("UpSampling node %r has no sample_type" % (name,))

    builder.add_upsample(name, scaling_factor_h=scale, scaling_factor_w=scale,
                         input_name=input_name, output_name=output_name,
                         mode=mode)
Convert a UpSampling layer from mxnet to coreml. Parameters ---------- net: network A mxnet network object. node: layer Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object.
def generate(self, **kwargs):
    """Generate the methods-section description per subject and count unique
    patterns.

    Returns
    -------
    counter : :obj:`collections.Counter`
        Unique descriptions across subjects with occurrence counts. With one
        protocol, the most common pattern is most likely the most complete;
        with multiple protocols, inspect each pattern manually.
    """
    descriptions = []
    subjs = self.layout.get_subjects(**kwargs)
    # 'subject' is supplied per-iteration below, so drop it from kwargs.
    kwargs = {k: v for k, v in kwargs.items() if k != 'subject'}
    for sid in subjs:
        descriptions.append(self._report_subject(subject=sid, **kwargs))
    counter = Counter(descriptions)
    print('Number of patterns detected: {0}'.format(len(counter.keys())))
    print(utils.reminder())
    return counter
Generate the methods section. Parameters ---------- task_converter : :obj:`dict`, optional A dictionary with information for converting task names from BIDS filename format to human-readable strings. Returns ------- counter : :obj:`collections.Counter` A dictionary of unique descriptions across subjects in the dataset, along with the number of times each pattern occurred. In cases where all subjects underwent the same protocol, the most common pattern is most likely the most complete. In cases where the dataset contains multiple protocols, each pattern will need to be inspected manually. Examples -------- >>> from os.path import join >>> from bids.layout import BIDSLayout >>> from bids.reports import BIDSReport >>> from bids.tests import get_test_data_path >>> layout = BIDSLayout(join(get_test_data_path(), 'synthetic')) >>> report = BIDSReport(layout) >>> counter = report.generate(session='01') >>> counter.most_common()[0][0]
def add_user(self, name, password=None, read_only=None, db=None, **kwargs):
    """Add a user that can be used for authentication, on the admin database
    by default or on *db* when given."""
    if db is None:
        return self.get_connection().admin.add_user(
            name, password=password, read_only=read_only, **kwargs)
    return self.get_connection()[db].add_user(
        name, password=password, read_only=read_only, **kwargs)
Adds a user that can be used for authentication
def __remove_temp_logging_handler():
    """Remove the temporary stream handler from the logging handlers.

    Runs once logging has been configured; also routes warnings through
    logging on Python >= 2.7.
    """
    if is_logging_configured():
        # Logging is fully configured; remove the null handler instead.
        return __remove_null_logging_handler()

    root_logger = logging.getLogger()
    global LOGGING_TEMP_HANDLER
    for handler in root_logger.handlers:
        if handler is LOGGING_TEMP_HANDLER:
            root_logger.removeHandler(LOGGING_TEMP_HANDLER)
            LOGGING_TEMP_HANDLER = None
            break

    if sys.version_info >= (2, 7):
        # Print warnings through the logging system instead of stderr.
        logging.captureWarnings(True)
This function will run once logging has been configured. It just removes the temporary stream Handler from the logging handlers.
def validate_input(self):
    """Raise ValueError if the gate's vertical bounds are not strictly
    increasing (i.e. the gate was defined incorrectly)."""
    low, high = self.vert[0], self.vert[1]
    if high <= low:
        raise ValueError(u'{} must be larger than {}'.format(high, low))
Raise appropriate exception if gate was defined incorrectly.
def as_partition(self, **kwargs):
    """Return a PartitionName based on this name, with *kwargs* overriding
    the copied fields."""
    return PartitionName(**dict(list(self.dict.items()) + list(kwargs.items())))
Return a PartitionName based on this name.
def output_results(results, metric, options):
    """Write one formatted line per (result, statistic) pair to stdout.

    :param results: iterable of result dicts, each with 'Unit',
        'Timestamp' (a datetime) and one value per statistic name.
    :param metric: dict describing the metric; must contain 'Statistics'
        (a name or list of names) and may contain 'Dimensions'.
    :param options: dict with a 'Formatter' %-style template applied to
        the metric context.
    """
    formatter = options['Formatter']
    context = metric.copy()
    try:
        context['dimension'] = list(metric['Dimensions'].values())[0]
    # Broadened: the original caught only AttributeError, so a metric
    # without a 'Dimensions' key (KeyError) or with an empty Dimensions
    # dict (IndexError) crashed instead of falling back to ''.
    except (AttributeError, KeyError, IndexError):
        context['dimension'] = ''
    for result in results:
        stat_keys = metric['Statistics']
        if not isinstance(stat_keys, list):
            stat_keys = [stat_keys]
        for statistic in stat_keys:
            context['statistic'] = statistic
            context['Unit'] = result['Unit']
            # Graphite-style metric path: no slashes, lower case.
            metric_name = (formatter % context).replace('/', '.').lower()
            line = '{0} {1} {2}\n'.format(
                metric_name,
                result[statistic],
                timegm(result['Timestamp'].timetuple()),
            )
            sys.stdout.write(line)
Output the results to stdout. TODO: add AMQP support for efficiency
def reduce_to_unit(divider):
    """Reduce a repeating divider to its smallest repeating unit.

    :param divider: the divider string
    :return: the smallest prefix that, repeated, reproduces as much of
        the divider as its length allows; the divider itself if no
        shorter unit repeats.  Example: 'XxXxXxX' -> 'Xx'.
    """
    total = len(divider)
    for size in range(1, total // 2 + 1):
        unit = divider[:size]
        repeats = total // size
        if unit * repeats == divider[:size * repeats]:
            return unit
    return divider
Reduce a repeating divider to the smallest repeating unit possible. Note: this function is used by make-div :param divider: the divider :return: smallest repeating unit possible :rtype: str :Example: 'XxXxXxX' -> 'Xx'
def Asyncme(func, n=None, interval=0, default_callback=None, loop=None):
    """Wrap *func* with the ``coros`` decorator so calling it returns a task.

    All keyword arguments are forwarded to ``coros``; their exact
    semantics are defined there (presumably concurrency limit, polling
    interval, callback and event loop -- confirm against ``coros``).
    """
    return coros(n, interval, default_callback, loop)(func)
Wrap a coroutine function into a plain function that, when called, returns a NewTask.
def make_client(zhmc, userid=None, password=None):
    """Create a logged-on zhmcclient.Client for the given HMC.

    If *userid* / *password* are not given and no previous call stored
    them, they are inquired interactively.  Both are cached in the
    module-global USERID / PASSWORD for future calls.

    :param zhmc: hostname or IP of the HMC.
    :returns: a ``zhmcclient.Client`` with an established session.
    """
    global USERID, PASSWORD
    USERID = userid or USERID or \
        six.input('Enter userid for HMC {}: '.format(zhmc))
    PASSWORD = password or PASSWORD or \
        getpass.getpass('Enter password for {}: '.format(USERID))
    session = zhmcclient.Session(zhmc, USERID, PASSWORD)
    session.logon()
    client = zhmcclient.Client(session)
    print('Established logged-on session with HMC {} using userid {}'.
          format(zhmc, USERID))
    return client
Create a `Session` object for the specified HMC and log that on. Create a `Client` object using that `Session` object, and return it. If no userid and password are specified, and if no previous call to this method was made, userid and password are interactively inquired. Userid and password are saved in module-global variables for future calls to this method.
def create_local_arrays(reified_arrays, array_factory=None):
    """Create concrete arrays from the given reified array definitions.

    :param reified_arrays: dict mapping array name to a definition object
        exposing ``shape`` and ``dtype`` attributes.
    :param array_factory: callable ``factory(shape, dtype)`` returning a
        constructed array; defaults to ``numpy.empty``.
    :returns: dict of array objects, keyed on array names.
    """
    if array_factory is None:
        array_factory = np.empty
    # .items() works on both Python 2 and 3; the original .iteritems()
    # raises AttributeError on Python 3.
    return {
        n: array_factory(ra.shape, ra.dtype)
        for n, ra in reified_arrays.items()
    }
Function that creates arrays, given the definitions in the reified_arrays dictionary and the array_factory keyword argument. Arguments --------- reified_arrays : dictionary Dictionary keyed on array name and array definitions. Can be obtained via cube.arrays(reify=True) Keyword Arguments ----------------- array_factory : function A function used to create array objects. It's signature should be array_factory(shape, dtype) and should return a constructed array of the supplied shape and data type. If None, numpy.empty will be used. Returns ------- A dictionary of array objects, keyed on array names
def isglove(filepath):
    """Inspect the first two lines of a file to detect a GloVe vector file.

    Reads a possible header line and the first vector line.  If the
    header has exactly two whitespace-separated fields, the second is
    returned as the dimensionality (word2vec-style header).  Otherwise
    the first vector line is checked: its dimensionality must be a
    multiple of 10 and all components must parse as floats with
    magnitude below 12.

    :returns: the vector dimensionality (int), or False if the file does
        not look like a GloVe file.
    """
    with ensure_open(filepath, 'r') as f:
        header_line = f.readline()
        vector_line = f.readline()
    try:
        num_vectors, num_dim = header_line.split()
        return int(num_dim)
    except (ValueError, TypeError):
        pass
    # No header: treat the second line as "<word> <v1> <v2> ...".
    vector = vector_line.split()[1:]
    if len(vector) % 10:
        # Debug output for unexpected dimensionality -- left as in original.
        print(vector)
        print(len(vector) % 10)
        return False
    try:
        vector = np.array([float(x) for x in vector])
    except (ValueError, TypeError):
        return False
    # Heuristic: GloVe components are typically small in magnitude.
    if np.all(np.abs(vector) < 12.):
        return len(vector)
    return False
Get the first word vector in a GloVE file and return its dimensionality or False if not a vector >>> isglove(os.path.join(DATA_PATH, 'cats_and_dogs.txt')) False
def append_to_circuit(self, circuit, simplify=True):
    """Append this term's Pauli operators to *circuit* as gates.

    :param circuit: target circuit; gates are applied via attribute
        access (``circuit.<gate>[n]``).
    :param simplify: when True, operate on ``self.simplify()`` instead
        of ``self``.

    Operators are applied in reverse order of ``term.ops``; identity
    operators ("i") are skipped.
    """
    if simplify:
        term = self.simplify()
    else:
        term = self
    for op in term.ops[::-1]:
        gate = op.op.lower()
        if gate != "i":
            # Indexing the gate applies it to qubit op.n -- the gate
            # object's __getitem__ presumably appends to the circuit.
            getattr(circuit, gate)[op.n]
Append Pauli gates to `Circuit`.
async def stop(self) -> None:
    """Coroutine to stop execution of this server.

    Blocks until outstanding work completes, then cancels the polling
    task and awaits its cancellation.

    :raises SublemonRuntimeError: if the server is not running.
    """
    if not self._is_running:
        raise SublemonRuntimeError(
            'Attempted to stop an already-stopped `Sublemon` instance')
    await self.block()
    self._poll_task.cancel()
    self._is_running = False
    # Awaiting the cancelled task lets it finish unwinding; the
    # CancelledError it raises is expected and suppressed.
    with suppress(asyncio.CancelledError):
        await self._poll_task
Coroutine to stop execution of this server.
def current_state_str(self):
    """Return a string representation of the sensor's current state.

    Builds a comma-separated summary of whichever of temperature,
    humidity, pressure and light level are available (None values are
    skipped).  Returns "Bad sample" when the last sample was not OK.
    """
    if self.sample_ok:
        msg = ''
        temperature = self._get_value_opc_attr('temperature')
        if temperature is not None:
            msg += 'Temp: %s ºC, ' % temperature
        humidity = self._get_value_opc_attr('humidity')
        if humidity is not None:
            msg += 'Humid: %s %%, ' % humidity
        pressure = self._get_value_opc_attr('pressure')
        if pressure is not None:
            msg += 'Press: %s mb, ' % pressure
        light_level = self._get_value_opc_attr('light_level')
        if light_level is not None:
            msg += 'Light: %s lux, ' % light_level
        # Drop the trailing ", " separator.
        return msg[:-2]
    else:
        return "Bad sample"
Return string representation of the current state of the sensor.
def activate(self, asset):
    """Request activation of the specified asset representation.

    :param asset: an asset dict (as returned by the assets API) whose
        ``_links['activate']`` URL is fetched.
    :returns: the response body of the activation GET request.
    """
    activate_url = asset['_links']['activate']
    return self._get(activate_url, body_type=models.Body).get_body()
Request activation of the specified asset representation. Asset representations are obtained from :py:meth:`get_assets`. :param request dict: An asset representation from the API. :returns: :py:class:`planet.api.models.Body` with no response content :raises planet.api.exceptions.APIException: On API error.
def count_children(obj, type=None):
    """Return the number of children of *obj*, optionally filtered by class.

    :param obj: a container supporting ``len``, iteration over child
        keys, and ``get(key, getclass=True)`` returning the child's class.
    :param type: when given, count only children whose class is exactly
        this class (identity comparison).  Note: shadows the builtin
        ``type``; kept for interface compatibility.
    """
    if type is None:
        return len(obj)
    else:
        return sum(1 for x in obj if obj.get(x, getclass=True) is type)
Return the number of children of obj, optionally restricting by class
def run_gdb_command(message):
    """Websocket endpoint: run a gdb command for the requesting client.

    Looks up the controller bound to the websocket client id and writes
    ``message["cmd"]`` to it without waiting for a response.  Emits
    ``error_running_gdb_command`` only when writing fails or no gdb
    controller is running; success produces no reply.
    """
    controller = _state.get_controller_from_client_id(request.sid)
    if controller is not None:
        try:
            cmd = message["cmd"]
            controller.write(cmd, read_response=False)
        except Exception:
            err = traceback.format_exc()
            logger.error(err)
            emit("error_running_gdb_command", {"message": err})
    else:
        emit("error_running_gdb_command", {"message": "gdb is not running"})
Endpoint for a websocket route. Runs a gdb command. Responds only if an error occurs when trying to write the command to gdb
def get_value_from_set(self, key):
    """Get a value from a previously reserved value set.

    :param key: value-set key; lower-cased before lookup.

    When a remote library is attached, polls it every 0.1 s until a
    truthy value is returned (blocks indefinitely otherwise).  Without
    a remote library, delegates to the local ``_PabotLib`` lookup.
    """
    key = key.lower()
    if self._remotelib:
        while True:
            value = self._remotelib.run_keyword('get_value_from_set',
                                                [key, self._my_id], {})
            if value:
                return value
            time.sleep(0.1)
            logger.debug('waiting for a value')
    else:
        return _PabotLib.get_value_from_set(self, key, self._my_id)
Get a value from previously reserved value set.
def _multiplexed_buffer_helper(self, response):
    """Yield multiplexed data payloads parsed from a buffered response.

    Each frame starts with a STREAM_HEADER_SIZE_BYTES header whose
    big-endian layout '>BxxxL' carries the payload length; the payload
    bytes follow immediately.  Iteration stops when fewer than a full
    header's worth of bytes remain.
    """
    data = self._result(response, binary=True)
    total = len(data)
    pos = 0
    while total - pos >= STREAM_HEADER_SIZE_BYTES:
        header = data[pos:pos + STREAM_HEADER_SIZE_BYTES]
        _, payload_len = struct.unpack_from('>BxxxL', header)
        payload_start = pos + STREAM_HEADER_SIZE_BYTES
        pos = payload_start + payload_len
        yield data[payload_start:pos]
A generator of multiplexed data blocks read from a buffered response.
def from_array(array):
    """Deserialize a PassportElementErrorFiles from a dict.

    :param array: source dict; None or empty returns None.
    :returns: a new PassportElementErrorFiles instance with the original
        dict stored on ``instance._raw``, or None.
    """
    if array is None or not array:
        return None
    assert_type_or_raise(array, dict, parameter_name="array")
    data = {}
    data['source'] = u(array.get('source'))
    data['type'] = u(array.get('type'))
    data['file_hashes'] = PassportElementErrorFiles._builtin_from_array_list(
        required_type=unicode_type, value=array.get('file_hashes'), list_level=1)
    data['message'] = u(array.get('message'))
    instance = PassportElementErrorFiles(**data)
    # Keep the raw input around for debugging / round-tripping.
    instance._raw = array
    return instance
Deserialize a new PassportElementErrorFiles from a given dictionary. :return: new PassportElementErrorFiles instance. :rtype: PassportElementErrorFiles
def start(self, blocking=True):
    """Start the producer.

    Sets up the ZMQ sockets, then serves either in the current green
    thread (``blocking=True``) or in a spawned one, yielding once so the
    spawned server gets a chance to run before control returns.
    """
    self.setup_zmq()
    if blocking:
        self.serve()
    else:
        eventlet.spawn(self.serve)
        # Yield to the hub so the spawned serve() actually starts.
        eventlet.sleep(0)
Start the producer. This will eventually fire the ``server_start`` and ``running`` events in sequence, which signify that the incoming TCP request socket is running and the workers have been forked, respectively. If ``blocking`` is False, the server is spawned in a background green thread and control returns to the caller.
def unzoom(self, full=False, delay_draw=False):
    """Unzoom the display one level, or all the way when *full* is True.

    :param full: discard the entire zoom history.
    :param delay_draw: skip the immediate canvas redraw.
    """
    if full:
        # Reset the zoom history entirely.  (The previous version first
        # assigned ``self.zoom_lims[:1]`` and then immediately overwrote
        # it with ``[]`` -- a dead store with no effect.)
        self.zoom_lims = []
    elif len(self.zoom_lims) > 0:
        self.zoom_lims.pop()
    self.set_viewlimits()
    if not delay_draw:
        self.canvas.draw()
unzoom display 1 level or all the way
def node2bracket(docgraph, node_id, child_str=''):
    """Convert a docgraph node into a PTB-style bracketed string.

    Token nodes render as ``(POS token children)``; non-token nodes as
    ``(label children)``.  Spaces between parts are emitted only when
    both sides are non-empty (the ``bool(...)*' '`` idiom).
    """
    node_attrs = docgraph.node[node_id]
    if istoken(docgraph, node_id):
        pos_str = node_attrs.get(docgraph.ns+':pos', '')
        token_str = node_attrs[docgraph.ns+':token']
        return u"({pos}{space1}{token}{space2}{child})".format(
            pos=pos_str, space1=bool(pos_str)*' ',
            token=token_str, space2=bool(child_str)*' ',
            child=child_str)
    label_str = node_attrs.get('label', '')
    return u"({label}{space}{child})".format(
        label=label_str, space=bool(label_str and child_str)*' ',
        child=child_str)
convert a docgraph node into a PTB-style string.
def compose_object(self, file_list, destination_file, content_type):
    """COMPOSE multiple objects together into *destination_file*.

    Builds a ComposeRequest XML body from *file_list* and issues a PUT
    with the compose flag, merging all components into the destination.

    :param file_list: list of dicts describing each component.
    :param destination_file: path to the destination file.
    :param content_type: Content-Type for the destination, or None.
    :raises: via ``errors.check_status`` when the response is not 200.
    """
    xml_setting_list = ['<ComposeRequest>']
    for meta_data in file_list:
        xml_setting_list.append('<Component>')
        # .items() works on both Python 2 and 3; the original
        # .iteritems() raises AttributeError on Python 3.
        for key, val in meta_data.items():
            xml_setting_list.append('<%s>%s</%s>' % (key, val, key))
        xml_setting_list.append('</Component>')
    xml_setting_list.append('</ComposeRequest>')
    xml = ''.join(xml_setting_list)
    if content_type is not None:
        headers = {'Content-Type': content_type}
    else:
        headers = None
    status, resp_headers, content = self.put_object(
        api_utils._quote_filename(destination_file) + '?compose',
        payload=xml,
        headers=headers)
    errors.check_status(status, [200], destination_file, resp_headers,
                        body=content)
COMPOSE multiple objects together. Using the given list of files, calls the put object with the compose flag. This call merges all the files into the destination file. Args: file_list: list of dicts with the file name. destination_file: Path to the destination file. content_type: Content type for the destination file.
def get_screenshot_as_png(obj, driver=None, timeout=5, **kwargs):
    """Render *obj* to HTML, screenshot it with a webdriver, and crop it.

    :param obj: a Layout (Row/Column), Plot or Widget object or Document.
    :param driver: a selenium webdriver instance; when None a shared one
        is obtained from ``webdriver_control``.
    :param timeout: max seconds to wait for the layout to render.
    :param kwargs: forwarded to ``get_layout_html``.
    :returns: a PIL image cropped to the layout's bounding rect.
    """
    Image = import_required('PIL.Image',
                            'To use bokeh.io.export_png you need pillow ' +
                            '("conda install pillow" or "pip install pillow")')
    with _tmp_html() as tmp:
        html = get_layout_html(obj, **kwargs)
        with io.open(tmp.path, mode="w", encoding="utf-8") as file:
            file.write(decode_utf8(html))
        web_driver = driver if driver is not None else webdriver_control.get()
        web_driver.get("file:///" + tmp.path)
        web_driver.maximize_window()
        # Force full-width layout before measuring/rendering.
        web_driver.execute_script("document.body.style.width = '100%';")
        wait_until_render_complete(web_driver, timeout)
        png = web_driver.get_screenshot_as_png()
        b_rect = web_driver.execute_script(_BOUNDING_RECT_SCRIPT)
    image = Image.open(io.BytesIO(png))
    cropped_image = _crop_image(image, **b_rect)
    return cropped_image
Get a screenshot of a ``LayoutDOM`` object. Args: obj (LayoutDOM or Document) : a Layout (Row/Column), Plot or Widget object or Document to export. driver (selenium.webdriver) : a selenium webdriver instance to use to export the image. timeout (int) : the maximum amount of time to wait for initialization. It will be used as a timeout for loading Bokeh, then when waiting for the layout to be rendered. Returns: cropped_image (PIL.Image.Image) : a pillow image loaded from PNG. .. warning:: Responsive sizing_modes may generate layouts with unexpected size and aspect ratios. It is recommended to use the default ``fixed`` sizing mode.
def to_date(value, ctx):
    """Try to convert any value to a ``datetime.date``.

    Strings are parsed via the context's date parser and re-converted;
    datetimes are truncated to their date; exact ``date`` instances are
    returned unchanged.

    :raises EvaluationError: when no conversion applies.
    """
    if isinstance(value, str):
        parsed = ctx.get_date_parser().auto(value)
        if parsed is not None:
            return to_date(parsed, ctx)
    elif isinstance(value, datetime.datetime):
        return value.date()
    elif type(value) == datetime.date:
        # Exact-type check: datetime subclasses date and is handled above.
        return value
    raise EvaluationError("Can't convert '%s' to a date" % str(value))
Tries conversion of any value to a date
def bitbucket(branch: str):
    """Check that this Bitbucket build should create releases.

    Asserts the pipeline is running on *branch* and is not a pull-request
    build.  NOTE(review): these are plain ``assert`` statements, so the
    checks vanish under ``python -O``; callers presumably catch
    AssertionError.

    :param branch: the branch the environment should be running against.
    """
    assert os.environ.get('BITBUCKET_BRANCH') == branch
    assert not os.environ.get('BITBUCKET_PR_ID')
Performs necessary checks to ensure that the bitbucket build is one that should create releases. :param branch: The branch the environment should be running against.
def get_contract(firma, pravni_forma, sidlo, ic, dic, zastoupen):
    """Compose the license contract from its RST template and render a PDF.

    All string parameters are stripped; *firma* is additionally underlined
    with a row of dashes (RST heading style) before substitution.

    :returns: the PDF produced by ``gen_pdf`` from the filled template and
        the bundled style.json.
    """
    contract_fn = _resource_context(
        "Licencni_smlouva_o_dodavani_elektronickych_publikaci"
        "_a_jejich_uziti.rst"
    )
    with open(contract_fn) as f:
        contract = f.read()
    firma = firma.strip()
    # Underline the company name so it renders as an RST section heading.
    firma = firma + "\n" + ((len(firma) + 1) * "-")
    contract = Template(contract).substitute(
        firma=firma,
        pravni_forma=pravni_forma.strip(),
        sidlo=sidlo.strip(),
        ic=ic.strip(),
        dic=dic.strip(),
        zastoupen=zastoupen.strip(),
        resources_path=RES_PATH
    )
    return gen_pdf(
        contract,
        open(_resource_context("style.json")).read(),
    )
Compose contract and create PDF. Args: firma (str): firma pravni_forma (str): pravni_forma sidlo (str): sidlo ic (str): ic dic (str): dic zastoupen (str): zastoupen Returns: obj: StringIO file instance containing PDF file.
def post(request):
    """Create (or fetch) a Tag by name and return it as JSON.

    Reads ``name`` from form data, or from ``json.loads(request.body)['body']``
    when no form data is present.  Responds with an error result when no
    name is given; otherwise get-or-creates the tag with a lower-cased name.
    """
    res = Result()
    data = request.POST or json.loads(request.body)['body']
    name = data.get('name', None)
    if not name:
        res.isError = True
        res.message = "No name given"
        return JsonResponse(res.asDict())
    tag = Tag.objects.get_or_create(name=name.lower())[0]
    res.append(tag.json())
    return JsonResponse(res.asDict())
Creates a tag object :param name: Name for tag :type name: str :returns: json
def command_u2a(string, vargs):
    """Print the ARPABET ASCII string for the given Unicode IPA string.

    :param string: the Unicode IPA string to convert.
    :param vargs: command-line arguments dict; uses "ignore",
        "single_char_parsing" and "separator".

    Mapping failures (ValueError) are reported via ``print_error``
    instead of propagating.
    """
    try:
        l = ARPABETMapper().map_unicode_string(
            unicode_string=string,
            ignore=vargs["ignore"],
            single_char_parsing=vargs["single_char_parsing"],
            return_as_list=True
        )
        print(vargs["separator"].join(l))
    except ValueError as exc:
        print_error(str(exc))
Print the ARPABET ASCII string corresponding to the given Unicode IPA string. :param str string: the string to act upon :param dict vargs: the command line arguments
def process_contents(self, contents, fname=None):
    """Pre-process file contents; the main internal entry point.

    Resets parser state, tokenizes *contents* into tuples, then drains
    the tuple queue, dispatching each tuple by its first element through
    the per-run copy of the dispatch table (handlers may push more
    tuples, hence the while-loop rather than a for-loop).

    :returns: the finalized result for *fname*.
    """
    self.stack = []
    # Copy so handlers can customize dispatch per-file without
    # mutating the shared default table.
    self.dispatch_table = self.default_table.copy()
    self.current_file = fname
    self.tuples = self.tupleize(contents)
    self.initialize_result(fname)
    while self.tuples:
        t = self.tuples.pop(0)
        self.dispatch_table[t[0]](t)
    return self.finalize_result(fname)
Pre-processes a file contents. This is the main internal entry point.
def _init_optional_attrs(optional_attrs):
    """Create an OboOptionalAttrs for *optional_attrs*, or return None.

    Returns None when *optional_attrs* is None, or (implicitly) when
    ``get_optional_attrs`` yields nothing.
    """
    if optional_attrs is None:
        return None
    opts = OboOptionalAttrs.get_optional_attrs(optional_attrs)
    if opts:
        return OboOptionalAttrs(opts)
Create OboOptionalAttrs or return None.
def print_splits(cliques, next_cliques):
    """Print shifts for new forks.

    For each clique whose parent has more than one successor in
    *next_cliques*, emit a split via ``print_split``; *splits* tracks how
    many columns have already been inserted so later indices shift right.
    """
    splits = 0
    for i, clique in enumerate(cliques):
        parent, _ = clique
        if parent in next_cliques:
            if len(next_cliques[parent]) > 1:
                print_split(i + splits, len(cliques) + splits)
                splits += 1
Print shifts for new forks.
def _findlinestarts(code_object): byte_increments = [c for c in code_object.co_lnotab[0::2]] line_increments = [c for c in code_object.co_lnotab[1::2]] lineno = code_object.co_firstlineno addr = 0 for byte_incr, line_incr in zip(byte_increments, line_increments): if byte_incr: yield (addr, lineno) addr += byte_incr lineno += line_incr yield (addr, lineno)
Find the offsets in a byte code which are the start of source lines. Generate pairs (offset, lineno) as described in Python/compile.c. This is a modified version of dis.findlinestarts. This version allows multiple "line starts" with the same line number. (The dis version conditions its yield on a test "if lineno != lastlineno".) FYI: code.co_lnotab is a byte array with one pair of bytes for each effective source line number in the bytecode. An effective line is one that generates code: not blank or comment lines. The first actual line number, typically the number of the "def" statement, is in code.co_firstlineno. An even byte of co_lnotab is the offset to the bytecode generated from the next effective line number. The following odd byte is an increment on the previous line's number to the next line's number. Thus co_firstlineno+co_lnotab[1] is the first effective line's number, and co_lnotab[0] is the number of bytes it generated. Note that an effective line number generates code by definition, hence the even byte cannot be zero; and as line numbers are monotonically increasing, the odd byte cannot be zero either. But what, the curious reader might ask, does Python do if a source line generates more than 255 bytes of code? In that *highly* unlikely case compile.c generates multiple pairs of (255,0) until it has accounted for all the generated code, then a final pair of (offset%256, lineincr). Oh, but what, the curious reader asks, do they do if there is a gap of more than 255 between effective line numbers? It is not unheard of to find blocks of comments larger than 255 lines (like this one?). Then compile.c generates pairs of (0, 255) until it has accounted for the line number difference and a final pair of (offset,lineincr%256). Uh, but...? Yes, what now, annoying reader? Well, does the following code handle these special cases of (255,0) and (0,255) properly? 
It handles the (0,255) case correctly, because of the "if byte_incr" test which skips the yield() but increments lineno. It does not handle the case of (255,0) correctly; it will yield false pairs (255,0). Fortunately that will only arise e.g. when disassembling some "obfuscated" code where most newlines are replaced with semicolons. Oh, and yes, the to_code() method does properly handle generation of the (255,0) and (0,255) entries correctly.
def htmlSaveFileFormat(self, filename, encoding, format):
    """Dump this HTML document to a file using a given encoding.

    Thin wrapper around ``libxml2mod.htmlSaveFileFormat``; *format*
    controls output formatting as defined by libxml2.

    :returns: the libxml2 return code (bytes written, or negative on error
        -- confirm against libxml2 docs).
    """
    ret = libxml2mod.htmlSaveFileFormat(filename, self._o, encoding, format)
    return ret
Dump an HTML document to a file using a given encoding.
def items(self, start=None, stop=None):
    """Iterate over (key, value) pairs, optionally bounded.

    If *start* is given, iteration begins at the first pair whose key is
    >= *start* (located via ``self._find_lt``); otherwise at the first
    pair.  If *stop* is given, iteration ends before the first pair whose
    key is >= *stop*; otherwise it runs to the end of the list.

    Nodes appear to be [key, value, next, ...] records with ``self._head``
    / ``self._tail`` sentinels -- a skip-list-style layout (confirm
    against the class definition).
    """
    if start is None:
        node = self._head[2]
    else:
        self._find_lt(start)
        # _find_lt leaves the predecessor path in self._path; its level-0
        # entry's successor is the first node with key >= start.
        node = self._path[0][2]
    while node is not self._tail and (stop is None or node[0] < stop):
        yield (node[0], node[1])
        node = node[2]
Return an iterator yielding pairs. If *start* is specified, iteration starts at the first pair with a key that is larger than or equal to *start*. If not specified, iteration starts at the first pair in the list. If *stop* is specified, iteration stops at the last pair that is smaller than *stop*. If not specified, iteration end with the last pair in the list.