code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def show_message(self, message, timeout=0):
    """Display *message* in the main window's status bar.

    ``timeout`` (ms) is forwarded to Qt's ``showMessage``; 0 means the
    message stays until replaced.
    """
    status_bar = self.main.statusBar()
    status_bar.showMessage(message, timeout)
def grabEmails(emails=None, emailsFile=None, nicks=None, nicksFile=None,
               domains=EMAIL_DOMAINS, excludeDomains=None):
    """Generate a list of candidate email addresses.

    Args:
        emails: Any premade list of emails (returned as-is).
        emailsFile: Filepath to an emails file (one per line).
        nicksFile: Filepath to an aliases file (one per line); only read
            when ``nicks`` is not given (mirrors the original precedence).
        nicks: A list of aliases.
        domains: Domains the aliases will be tested against.
        excludeDomains: Domains excluded from the created list.

    Returns:
        list: the list of emails to be verified.
    """
    # Fix: use identity comparison with None and avoid the mutable default
    # argument the original used for excludeDomains.
    excluded = set(excludeDomains or ())
    if emails is not None:
        return emails
    if emailsFile is not None:
        with open(emailsFile, "r") as handle:
            return handle.read().splitlines()
    if nicks is None and nicksFile is not None:
        with open(nicksFile, "r") as handle:
            nicks = handle.read().splitlines()
    if nicks is not None:
        return [nick + "@" + domain
                for nick in nicks
                for domain in domains
                if domain not in excluded]
    return []
def get_en2fr(url='http://www.manythings.org/anki/fra-eng.zip'):
    """Download and parse the English->French translation dataset used in
    the Keras seq2seq example.

    NOTE(review): the table is re-read from ``url`` (not a local path) after
    ``download_unzip`` — presumably pandas fetches the zip itself; confirm.
    """
    download_unzip(url)
    column_names = 'en fr'.split()
    return pd.read_table(url, compression='zip', header=None,
                         skip_blank_lines=True, sep='\t', skiprows=0,
                         names=column_names)
def gff3_to_dataframe(path, attributes=None, region=None, score_fill=-1,
                      phase_fill=-1, attributes_fill='.', tabix='tabix',
                      **kwargs):
    """Load data from a GFF3 file into a pandas DataFrame.

    Parameters mirror ``iter_gff3``; ``attributes`` adds one extra column
    per requested attribute name. Remaining ``kwargs`` are forwarded to
    ``DataFrame.from_records``.
    """
    import pandas
    records = list(iter_gff3(path, attributes=attributes, region=region,
                             score_fill=score_fill, phase_fill=phase_fill,
                             attributes_fill=attributes_fill, tabix=tabix))
    fields = ['seqid', 'source', 'type', 'start', 'end', 'score',
              'strand', 'phase']
    if attributes:
        fields = fields + list(attributes)
    return pandas.DataFrame.from_records(records, columns=fields, **kwargs)
def abbreviate(s, maxlength=25):
    """Abbreviate *s* to at most *maxlength* visible characters.

    Characters inside ANSI escape sequences (``\\033 ... m``) do not count
    toward the length; a truncated result ends with a color reset and '...'.
    """
    assert maxlength >= 4
    in_escape = False
    truncated = None
    visible = 0
    for pos, ch in enumerate(s):
        if ch == '\033':
            in_escape = True
        elif in_escape:
            if ch == 'm':
                in_escape = False
        else:
            visible += 1
            if visible == maxlength - 1:
                # remember a candidate cut point; keep counting to see
                # whether the string actually exceeds maxlength
                truncated = s[:pos] + '\033[0m...'
            elif visible > maxlength:
                break
    return s if visible <= maxlength else truncated
def run(self, *args):
    """Autocomplete gender information.

    Parses command-line *args* and delegates to ``autogender``, returning
    its exit code.
    """
    params = self.parser.parse_args(args)
    return self.autogender(api_token=params.api_token,
                           genderize_all=params.genderize_all)
def pspawn_wrapper(self, sh, escape, cmd, args, env):
    """Wrapper function for handling piped spawns.

    Looks like a normal SPAWN to the caller (Action.py) but routes the call
    through the construction environment's PSPAWN with both output streams
    bound to our log stream.
    """
    log = self.logstream
    return self.pspawn(sh, escape, cmd, args, env, log, log)
def write(self, filename):
    """Save the detector's detx representation to *filename*."""
    with open(filename, 'w') as out:
        out.write(self.ascii)
    self.print("Detector file saved as '{0}'".format(filename))
def is_required(self, name):
    """Return True iff the schema element *name* has REQUIRED repetition."""
    element = self.schema_element(name)
    required = parquet_thrift.FieldRepetitionType.REQUIRED
    return element.repetition_type == required
def sysidpath(ignore_options=False):
    """Return a path holding a unique identifier for this machine.

    Prefers ``/etc/machine-id`` (then ``/tmp/machine-id``) when readable and
    non-empty; otherwise writes a fresh UUID hex to ``/tmp/machine-id`` and
    returns that path.
    """
    failover = Path('/tmp/machine-id')
    if not ignore_options:
        for candidate in (Path('/etc/machine-id'), failover):
            if (candidate.exists()
                    and os.access(candidate, os.R_OK)
                    and candidate.stat().st_size > 0):
                return candidate
    machine_uuid = uuid4()
    with open(failover, 'wt') as handle:
        handle.write(machine_uuid.hex)
    return failover
def get_msgbuf(self):
    """Create a binary message buffer for this message.

    Packs a 3-byte header (0xA3, 0x95, format type) followed by the column
    values packed with the format's struct layout. Values that carry a
    multiplier are divided back to their on-wire representation.
    """
    values = []
    n_mults = len(self.fmt.msg_mults)
    for idx in range(len(self.fmt.columns)):
        if idx >= n_mults:
            continue
        multiplier = self.fmt.msg_mults[idx]
        column = self.fmt.columns[idx]
        # 'Mode' is transmitted as its numeric 'ModeNum' counterpart
        if column == 'Mode' and 'ModeNum' in self.fmt.columns:
            column = 'ModeNum'
        value = self.__getattr__(column)
        if multiplier is not None:
            value /= multiplier
        values.append(value)
    header = struct.pack("BBB", 0xA3, 0x95, self.fmt.type)
    return header + struct.pack(self.fmt.msg_struct, *values)
def get(self, key, *, encoding=_NOTSET):
    """Get the value of *key*, decoded with *encoding* unless left unset."""
    return self.execute(b'GET', key, encoding=encoding)
def get_session(self, app_path, session_id):
    """Return an active session for *app_path* and *session_id*.

    Raises ValueError when *app_path* is not served by this server.
    """
    applications = self._applications
    if app_path not in applications:
        raise ValueError("Application %s does not exist on this server"
                         % app_path)
    return applications[app_path].get_session(session_id)
def _get_kmeans_lookup_table_and_weight(nbits, w, init='k-means++', tol=1e-2,
                                        n_init=1, rand_seed=0):
    """Generate a K-Means lookup table for quantizing weight array *w*.

    :param nbits: Number of bits for quantization
    :param w: Weight as numpy array
    :return: ``(lut, wq)`` — a lookup table of length ``1 << nbits`` and the
        per-element cluster labels.
    """
    if not _HAS_SKLEARN:
        raise Exception('sklearn package required for k-means quantization')
    from sklearn.cluster import KMeans
    n_weights = _np.prod(w.shape)
    lut_len = 1 << nbits
    n_clusters = n_weights if n_weights < lut_len else lut_len
    flattened = w.reshape(-1, 1)
    kmeans = KMeans(n_clusters=n_clusters, init=init, tol=tol,
                    n_init=n_init, random_state=rand_seed).fit(flattened)
    wq = kmeans.labels_[:n_weights]
    lut = _np.zeros(lut_len)
    lut[:n_clusters] = kmeans.cluster_centers_.flatten()
    return lut, wq
def get_response_page(request, return_type, template_location,
                      response_page_type):
    """Return an appropriate rendered response page if one exists.

    Not a view itself: a helper that checks whether an active ResponsePage
    of the given type is configured and, if so, renders it with
    *return_type*; returns None otherwise.
    """
    try:
        page = models.ResponsePage.objects.get(
            is_active=True,
            type=response_page_type,
        )
    except models.ResponsePage.DoesNotExist:
        return None
    template = loader.get_template(template_location)
    body = template.render(
        RequestContext(request, {'request_path': request.path,
                                 'page': page, })
    )
    return return_type(body, content_type=None)
def load_filter_plugins(entrypoint_group: str) -> Iterable[Filter]:
    """Load all blacklist plugins registered with pkg_resources.

    Parameters
    ==========
    entrypoint_group: str
        The entrypoint group name to load plugins from

    Returns
    =======
    List of Blacklist:
        A list of objects derived from the Blacklist class
    """
    global loaded_filter_plugins
    # Check the cache first: the original parsed the config before the
    # cache lookup, doing that work for nothing on every cached call.
    cached_plugins = loaded_filter_plugins.get(entrypoint_group)
    if cached_plugins:
        return cached_plugins

    enabled_plugins: List[str] = []
    config = BandersnatchConfig().config
    try:
        config_blacklist_plugins = config["blacklist"]["plugins"]
    except KeyError:
        pass
    else:
        split_plugins = [p for p in config_blacklist_plugins.split("\n") if p]
        enabled_plugins = ["all"] if "all" in split_plugins else split_plugins

    plugins = []
    for entry_point in pkg_resources.iter_entry_points(group=entrypoint_group):
        plugin_instance = entry_point.load()()
        if "all" in enabled_plugins or plugin_instance.name in enabled_plugins:
            plugins.append(plugin_instance)

    # Fix: the original returned a `set` on the first call but the cached
    # `list` on subsequent calls; always return the cached list.
    loaded_filter_plugins[entrypoint_group] = plugins
    return plugins
def first(self):
    """Return the first row of this Query, or None if there is no result.

    Note: preserves the original ``and``/``or`` idiom's quirk — a falsy
    first row also yields None.
    """
    rows = self.rpc_model.search_read(
        self.domain, None, 1, self._order_by, self.fields,
        context=self.context)
    return rows[0] if rows and rows[0] else None
def load_global_config(config_path):
    """Load the global configuration, querying for required values.

    On first run (no file at *config_path*), prompts the user and writes a
    fresh configuration back to disk.
    """
    config = configparser.RawConfigParser()
    if os.path.exists(config_path):
        logger.debug("Checking and setting global parameters...")
        config.read(config_path)
    else:
        _initial_run()
        logger.info("Unable to find a global sprinter configuration!")
        logger.info("Creating one now. Please answer some questions"
                    " about what you would like sprinter to do.")
        logger.info("")
    if not config.has_section('global'):
        config.add_section('global')
        configure_config(config)
        write_config(config, config_path)
    return config
def check_data_health(self, props=None, element=None):
    r"""Check the health of pore and throat data arrays.

    Parameters
    ----------
    props : list of pore (or throat) properties, optional
        If given, limits the health checks to only the specified
        properties. A single property may be given as a string.
    element : string, optional
        Either 'pore' or 'throat'; limits the checks to those arrays.

    Returns
    -------
    A HealthDict whose ``health`` attribute is True when all entries are
    healthy (empty lists), False otherwise.
    """
    health = HealthDict()
    # Fix: avoid the mutable default argument (props=[]); an empty/None
    # props falls back to all properties, exactly as the original [].
    if not props:
        props = self.props(element)
    elif isinstance(props, str):
        props = [props]
    for item in props:
        health[item] = []
        if self[item].dtype == 'O':
            health[item] = 'No checks on object'
        elif sp.sum(sp.isnan(self[item])) > 0:
            health[item] = 'Has NaNs'
        elif sp.shape(self[item])[0] != self._count(item.split('.')[0]):
            health[item] = 'Wrong Length'
    return health
def material_to_texture(material):
    """Convert a trimesh material into a pyglet-compatible texture.

    Returns None when the material carries no image.
    """
    if hasattr(material, 'image'):
        image = material.image
    else:
        image = material.baseColorTexture
    if image is None:
        return None
    with util.BytesIO() as buffer:
        # round-trip through PNG so pyglet can decode it
        image.save(buffer, format='png')
        buffer.seek(0)
        gl_image = pyglet.image.load(filename='.png', file=buffer)
        texture = gl_image.get_texture()
    return texture
def visit_ExtSlice(self, node: ast.ExtSlice) -> Tuple[Any, ...]: result = tuple(self.visit(node=dim) for dim in node.dims) self.recomputed_values[node] = result return result
Visit each dimension of the advanced slicing and assemble the dimensions in a tuple.
def findSynonyms(self, word, num):
    """Find *num* synonyms of *word*.

    :param word: a word or a vector representation of a word
    :param num: number of synonyms to find
    :return: pairs of (word, cosineSimilarity)

    .. note:: Local use only
    """
    # non-string inputs are treated as vectors (Python 2 `basestring`)
    if not isinstance(word, basestring):
        word = _convert_to_vector(word)
    words, similarity = self.call("findSynonyms", word, num)
    return zip(words, similarity)
def sort_flavor_list(request, flavors, with_menu_label=True):
    """Sort *flavors* per CREATE_INSTANCE_FLAVOR_SORT (default: RAM asc).

    Returns ``(id, name)`` pairs when *with_menu_label* is True, otherwise
    the sorted flavor objects; an empty list on failure.
    """
    def resolve(flavor, attr):
        try:
            return getattr(flavor, attr)
        except AttributeError:
            LOG.warning('Could not find sort key "%s". Using the default '
                        '"ram" instead.', attr)
            return getattr(flavor, 'ram')

    try:
        options = getattr(settings, 'CREATE_INSTANCE_FLAVOR_SORT', {})
        sort_key = options.get('key', 'ram')
        rev = options.get('reverse', False)
        if callable(sort_key):
            key = sort_key
        else:
            def key(flavor):
                return resolve(flavor, sort_key)
        ordered = sorted(flavors, key=key, reverse=rev)
        if with_menu_label:
            return [(flavor.id, '%s' % flavor.name) for flavor in ordered]
        return ordered
    except Exception:
        exceptions.handle(request, _('Unable to sort instance flavors.'))
        return []
def get(self):
    """Get quota-related collections from the cloud provider.

    For each attribute name, calls the matching ``get_<name>()`` method on
    this object and stores its result as ``self.<name>``.
    """
    attrs = ("networks", "security_groups", "floating_ips", "routers",
             "internet_gateways")
    for attr in attrs:
        # Fix: getattr is safer and clearer than building source text
        # for eval().
        setattr(self, attr, getattr(self, "get_{}".format(attr))())
def unsubscribe(self, destination=None, id=None, headers=None,
                **keyword_headers):
    """Unsubscribe from a destination by id or destination name.

    :param str destination: the topic or queue name to unsubscribe from
    :param str id: the unique identifier of the subscription
    :param dict headers: any additional headers the broker requires
    :param keyword_headers: any additional headers the broker requires
    """
    assert id is not None or destination is not None, \
        "'id' or 'destination' is required"
    merged = utils.merge_headers([headers, keyword_headers])
    if id:
        merged[HDR_ID] = id
    if destination:
        merged[HDR_DESTINATION] = destination
    self.send_frame(CMD_UNSUBSCRIBE, merged)
def find_saas_endurance_space_price(package, size, tier_level):
    """Find the SaaS endurance storage-space price for *size* and *tier_level*.

    :param package: The Storage As A Service product package
    :param size: The volume size for which a price is desired
    :param tier_level: The endurance tier for which a price is desired
    :return: the matching price id
    :raises ValueError: when no matching price is found
    """
    # sub-1 tiers keep their fractional form; '.' is not valid in key names
    if tier_level != 0.25:
        tier_level = int(tier_level)
    key_name = 'STORAGE_SPACE_FOR_{0}_IOPS_PER_GB'.format(tier_level)
    key_name = key_name.replace(".", "_")
    for item in package['items']:
        if key_name not in item['keyName']:
            continue
        if 'capacityMinimum' not in item or 'capacityMaximum' not in item:
            continue
        minimum = int(item['capacityMinimum'])
        maximum = int(item['capacityMaximum'])
        if not minimum <= size <= maximum:
            continue
        price_id = _find_price_id(item['prices'], 'performance_storage_space')
        if price_id:
            return price_id
    raise ValueError("Could not find price for endurance storage space")
def get_command_from_result(script, result, debug=False):
    """Return the waf command that reproduces *result*.

    Args:
        script: the ns-3 script name to run.
        result (dict): contains a 'params' mapping of parameter: value.
        debug (bool): include the gdb command template when True.
    """
    param_string = " ".join('--%s=%s' % (param, value)
                            for param, value in result['params'].items())
    if debug:
        return ('python waf --run ' + script +
                ' --command-template="gdb --args %s ' + param_string + '"')
    return 'python waf --run "' + script + ' ' + param_string + '"'
def generate_security_data(self):
    """Generate the security dict used as "initial" form data."""
    timestamp = int(time.time())
    return {
        'content_type': str(self.target_object._meta),
        'object_pk': str(self.target_object._get_pk_val()),
        'timestamp': str(timestamp),
        'security_hash': self.initial_security_hash(timestamp),
    }
def unsticky(self):
    """Unsticky this post.

    :returns: The json response from the server
    """
    session = self.reddit_session
    return session.request_json(session.config['sticky_submission'],
                                data={'id': self.fullname, 'state': False})
def has_access(user, required_roles, match_all=True):
    """Check whether *user* satisfies the role requirements.

    Admins always pass. A string is a single required role; otherwise all
    (or, when *match_all* is False, any) of the roles must be present.
    """
    roles = user.roles
    if ROLE_ADMIN in roles:
        return True
    if isinstance(required_roles, str):
        return required_roles in roles
    check = all if match_all else any
    return check(role in roles for role in required_roles)
def get_settings():
    """Return the deep-merged contents of all loaded configuration files."""
    merged = {}
    for path in config_files():
        contents = load_config(path)
        if contents is not None:
            merged = deep_merge(merged, contents)
    return merged
def wafer_form_helper(context, helper_name):
    """Locate the dotted-path Crispy FormHelper class and instantiate it
    with the current request. Handy for crispyifying other apps' forms."""
    module_name, class_name = helper_name.rsplit('.', 1)
    if module_name not in sys.modules:
        __import__(module_name)
    helper_class = getattr(sys.modules[module_name], class_name)
    return helper_class(request=context.request)
def deprecate_option(key, msg=None, rkey=None, removal_ver=None):
    """Mark option *key* as deprecated.

    Accessing the option will warn with *msg* (or a default message) and,
    when *rkey* is given, reroute to that option. Neither key is checked
    for existence.

    Raises OptionError if *key* was already deprecated.
    """
    key = key.lower()
    if key in _deprecated_options:
        raise OptionError(
            "Option '{key}' has already been defined as deprecated."
            .format(key=key))
    _deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver)
def refresh(self, module=None):
    """Recompute salience values and reorder the Agenda.

    Python equivalent of the CLIPS refresh-agenda command; uses the current
    module when none is given.
    """
    target = module._mdl if module is not None else ffi.NULL
    lib.EnvRefreshAgenda(self._env, target)
def get_tuple_version(name, default=DEFAULT_TUPLE_NOT_FOUND,
                      allow_ambiguous=True):
    """Return the installed version of package *name* as a tuple.

    Numeric components are converted to ints; *default* is returned when
    the package is not installed.
    """
    def _as_int(part):
        try:
            return int(part)
        except ValueError:
            return part

    version = get_string_version(name, default=default,
                                 allow_ambiguous=allow_ambiguous)
    if isinstance(version, tuple):
        # the not-found default is already a tuple
        return version
    return tuple(_as_int(part) for part in version.split('.'))
def fetch_31(self):
    """Fetch roughly the last two months and keep the last 31 days of data."""
    start = datetime.datetime.today() - datetime.timedelta(days=60)
    self.fetch_from(start.year, start.month)
    self.data = self.data[-31:]
    return self.data
def _get_add_trustee_cmd(self, trustee):
    """Build the tmsh command that adds *trustee* as a trusted device.

    :param trustee: ManagementRoot object -- device to add as trusted
    :returns: str -- tmsh command to add trustee
    """
    trustee_info = pollster(get_device_info)(trustee)
    creds = trustee._meta_data
    return 'tmsh::modify cm trust-domain Root ca-devices add ' \
           '\\{ %s \\} name %s username %s password %s' % \
           (trustee_info.managementIp, trustee_info.name,
            creds['username'], creds['password'])
def create_continuous_query(self, name, select, database=None,
                            resample_opts=None):
    r"""Create a continuous query for a database.

    :param name: name of the continuous query to create
    :param select: select statement for the continuous query
    :param database: target database; defaults to the client's database
    :param resample_opts: resample options (e.g. 'EVERY 10s FOR 2m')
    """
    resample = ' RESAMPLE ' + resample_opts if resample_opts else ''
    query_string = (
        "CREATE CONTINUOUS QUERY {0} ON {1}{2} BEGIN {3} END"
    ).format(quote_ident(name),
             quote_ident(database or self._database),
             resample, select)
    self.query(query_string)
def has_autolog(self, user_id):
    """Read auto-connection parameters and return the stored local password
    for *user_id*, or None when absent or unreadable."""
    try:
        with open("local/init", "rb") as handle:
            raw = handle.read()
            decoded = security.protege_data(raw, False)
            self.autolog = json.loads(decoded).get("autolog", {})
    except FileNotFoundError:
        return
    return self.autolog.get(user_id, None)
def convert_celeba_aligned_cropped(directory, output_directory,
                                   output_filename=OUTPUT_FILENAME):
    """Convert the aligned-and-cropped CelebA images to HDF5.

    Expects ``img_align_celeba.zip`` (plus the files consumed by
    ``_initialize_conversion``) under *directory*. Returns a one-element
    tuple containing the output path.
    """
    output_path = os.path.join(output_directory, output_filename)
    h5file = _initialize_conversion(directory, output_path, (218, 178))
    features = h5file['features']
    zip_path = os.path.join(directory, IMAGE_FILE)
    with zipfile.ZipFile(zip_path, 'r') as archive:
        with progress_bar('images', NUM_EXAMPLES) as bar:
            for index in range(NUM_EXAMPLES):
                member = 'img_align_celeba/{:06d}.jpg'.format(index + 1)
                image = numpy.asarray(Image.open(archive.open(member, 'r')))
                # store channel-first (C, H, W)
                features[index] = image.transpose(2, 0, 1)
                bar.update(index + 1)
    h5file.flush()
    h5file.close()
    return (output_path,)
def get_radians(self):
    """Return the angle between this vector and the positive x-axis, in
    radians (between -pi and pi)."""
    if not self:
        raise NullVectorError()
    angle = math.atan2(self.y, self.x)
    return angle
def form_valid(self, form):
    """Save the form; return a JSON response for ajax requests, otherwise
    redirect to the success URL."""
    self.object = form.save(commit=False)
    self.pre_save()
    self.object.save()
    save_m2m = getattr(form, 'save_m2m', None)
    if save_m2m is not None:
        save_m2m()
    self.post_save()
    if self.request.is_ajax():
        return self.render_json_response(self.get_success_result())
    return HttpResponseRedirect(self.get_success_url())
def deliver_message(self, timeout=None):
    """Deliver the next message received from the broker (coroutine).

    Waits up to *timeout* seconds for the next message; raises
    ``asyncio.TimeoutError`` on expiry and re-raises any exception from the
    delivery task. Returns the ApplicationMessage instance.
    """
    deliver_task = asyncio.ensure_future(
        self._handler.mqtt_deliver_next_message(), loop=self._loop)
    self.client_tasks.append(deliver_task)
    self.logger.debug("Waiting message delivery")
    done, _pending = yield from asyncio.wait(
        [deliver_task], loop=self._loop,
        return_when=asyncio.FIRST_EXCEPTION, timeout=timeout)
    if deliver_task not in done:
        deliver_task.cancel()
        raise asyncio.TimeoutError
    if deliver_task.exception() is not None:
        raise deliver_task.exception()
    # NOTE(review): pops the most recently appended task, which may not be
    # this one if deliveries overlap — confirm single-consumer usage.
    self.client_tasks.pop()
    return deliver_task.result()
def get_all_vpn_gateways(self, vpn_gateway_ids=None, filters=None):
    """Retrieve information about your VpnGateways.

    :type vpn_gateway_ids: list
    :param vpn_gateway_ids: A list of desired VpnGateway IDs.

    :type filters: list of tuples
    :param filters: (key, value) filter tuples; keys include *state*,
        *type* and *availabilityZone*.

    :rtype: list
    :return: A list of VpnGateway objects.
    """
    params = {}
    if vpn_gateway_ids:
        self.build_list_params(params, vpn_gateway_ids, 'VpnGatewayId')
    if filters:
        for i, (filter_name, filter_value) in enumerate(filters, start=1):
            params['Filter.%d.Name' % i] = filter_name
            # Fix: the original omitted '% i' on the value key, so every
            # filter value overwrote the literal key 'Filter.%d.Value.1'
            # and no filter value was ever sent correctly.
            params['Filter.%d.Value.1' % i] = filter_value
    return self.get_list('DescribeVpnGateways', params,
                         [('item', VpnGateway)])
def loads(s, filename=None, loader=None, implicit_tuple=True, env=None,
          schema=None):
    """Load and evaluate a GCL expression from a string.

    *env* may be a plain dict of extra bindings (merged over the defaults)
    or a ready-made framework.Environment. The result is validated against
    *schema* before being returned.
    """
    ast = reads(s, filename=filename, loader=loader,
                implicit_tuple=implicit_tuple)
    # Fix: avoid the mutable default argument (env={}); None behaves the
    # same as the old empty-dict default.
    if env is None:
        env = {}
    if not isinstance(env, framework.Environment):
        env = framework.Environment(dict(_default_bindings, **env))
    obj = framework.eval(ast, env)
    return mod_schema.validate(obj, schema)
def showDescription(self):
    """Show the current plugin's description in the interface (clears the
    text when no plugin is selected)."""
    plugin = self.currentPlugin()
    text = plugin.description() if plugin else ''
    self.uiDescriptionTXT.setText(text)
def _update_data(self, *data_dict, **kwargs): self.errors = {} for data in data_dict: if not isinstance(data, dict): raise AssertionError( f'Positional argument "{data}" passed must be a dict.' f'This argument serves as a template for loading common ' f'values.' ) for field_name, val in data.items(): setattr(self, field_name, val) for field_name, val in kwargs.items(): setattr(self, field_name, val) if self.errors: raise ValidationError(self.errors)
A private method to process and update entity values correctly. :param data: A dictionary of values to be updated for the entity :param kwargs: keyword arguments with key-value pairs to be updated
def drag(self, dragFrom=None):
    """Start a dragDrop operation: move the cursor to the drag source and
    press the mouse button in preparation to drag a screen element."""
    if dragFrom is None:
        dragFrom = self._lastMatch or self
    # resolve the source to a Location (Match before Region: a Match may
    # also be a Region, and order matters)
    if isinstance(dragFrom, (Pattern, basestring)):
        source = self.find(dragFrom).getTarget()
    elif isinstance(dragFrom, Match):
        source = dragFrom.getTarget()
    elif isinstance(dragFrom, Region):
        source = dragFrom.getCenter()
    elif isinstance(dragFrom, Location):
        source = dragFrom
    else:
        raise TypeError("drag expected dragFrom to be Pattern, String, Match, Region, or Location object")
    Mouse.moveSpeed(source, Settings.MoveMouseDelay)
    time.sleep(Settings.DelayBeforeMouseDown)
    Mouse.buttonDown()
    Debug.history("Began drag at {}".format(source))
def normalizeBoolean(value):
    """Normalize *value* to a bool.

    Accepts a ``bool`` or an ``int`` equal to 0 or 1; anything else raises
    ValueError.
    """
    if isinstance(value, int) and value in (0, 1):
        value = bool(value)
    if isinstance(value, bool):
        return value
    raise ValueError("Boolean values must be True or False, not '%s'."
                     % value)
def build_config(config, filename=None):
    """Build and return a serializer-compatible configuration.

    Normalizes 'transparent'/'trans' colors to None, drops unset SVG
    class/id options, and — when *filename* is given — removes keywords the
    target extension's serializer does not support.
    """
    for color_key in ('color', 'background'):
        value = config.pop(color_key, None)
        if value in ('transparent', 'trans'):
            config[color_key] = None
        elif value:
            config[color_key] = value
    for option in ('svgid', 'svgclass', 'lineclass'):
        if config.get(option, None) is None:
            config.pop(option, None)
    if config.pop('no_classes', False):
        config['svgclass'] = None
        config['lineclass'] = None
    if filename is not None:
        extension = filename[filename.rfind('.') + 1:].lower()
        if extension == 'svgz':
            extension = 'svg'
        supported = _EXT_TO_KW_MAPPING.get(extension, ())
        for keyword in list(config):
            if keyword not in supported:
                del config[keyword]
    return config
def mpi(value):
    """Serialize a multiprecision integer in GPG (RFC 4880) format: a
    2-byte big-endian bit count followed by the big-endian magnitude."""
    bits = value.bit_length()
    n_bytes = (bits + 7) // 8
    # int.to_bytes produces exactly the big-endian byte string the
    # original built by hand with a reversed bytearray
    return struct.pack('>H', bits) + value.to_bytes(n_bytes, 'big')
def get_iobuf(self, p, x, y):
    """Return the UTF-8-decoded IOBUF text for the core at (x, y, p).

    See :py:meth:`.get_iobuf_bytes` for the raw, undecoded bytes.
    """
    raw = self.get_iobuf_bytes(p, x, y)
    return raw.decode("utf-8")
def applyJSONFilters(actions, source, format=""):
    """Walk a JSON-encoded pandoc document and apply filters.

    Reads the pandoc document from *source*, applies each action in
    *actions* in turn via ``walk``, and returns the transformed document as
    a JSON string. *format* names the output format.
    """
    doc = json.loads(source)
    # metadata location differs between pandoc API versions
    if 'meta' in doc:
        meta = doc['meta']
    elif doc[0]:
        meta = doc[0]['unMeta']
    else:
        meta = {}
    transformed = doc
    for action in actions:
        transformed = walk(transformed, action, format, meta)
    return json.dumps(transformed)
def request(self, tree, **kwargs):
    """Ship *tree* to the OpenProvider API in an authenticated envelope.

    Raises ServiceUnavailable on transport errors and a reply-code-specific
    exception when the API reply code is non-zero; otherwise returns a
    Response built from the parsed reply.
    """
    payload = lxml.etree.tostring(
        E.openXML(
            E.credentials(
                E.username(self.username),
                OE('password', self.password),
                OE('hash', self.password_hash),
            ),
            tree
        ),
        method='c14n'
    )
    try:
        http_response = self.session.post(self.url, data=payload)
        http_response.raise_for_status()
    except requests.RequestException as e:
        raise ServiceUnavailable(str(e))
    reply_tree = lxml.objectify.fromstring(http_response.content)
    reply = reply_tree.reply
    if reply.code == 0:
        return Response(reply_tree)
    klass = from_code(reply.code)
    data = getattr(reply, 'data', '')
    raise klass(u"{0} ({1}) {2}".format(reply.desc, reply.code, data),
                reply.code)
def make_multisig_segwit_wallet( m, n ):
    """Generate ``n`` fresh compressed private keys (as WIF strings) and
    build an m-of-n multisig segwit info bundle from them.

    NOTE(review): uses ``xrange``, so this module is Python-2-only.
    """
    pks = []
    for i in xrange(0, n):
        pk = BitcoinPrivateKey(compressed=True).to_wif()
        pks.append(pk)
    return make_multisig_segwit_info(m, pks)
Create a bundle of information that can be used to generate an m-of-n multisig witness script.
def codepoint_included(self, codepoint):
    """Return True if ``codepoint`` matches any of the defined codepoints.

    A stored codepoint matches when every non-None component equals the
    corresponding component of ``codepoint``; ``None`` components act as
    wildcards.  A ``None`` codepoint list matches everything.
    """
    if self.codepoints is None:  # fixed: identity check instead of == None
        return True
    for cp in self.codepoints:
        # Every non-wildcard component must agree with the candidate.
        if all(expected is None or expected == actual
               for expected, actual in zip(cp, codepoint)):
            return True
    return False
Check if codepoint matches any of the defined codepoints.
def probes(self, **kwargs):
    """Return the list of all Pingdom probe servers.

    Recognized keyword arguments: limit, offset, onlyactive,
    includedeleted.  Any unknown keyword is reported on stderr and the
    call is aborted (returns None).
    """
    valid_args = ['limit', 'offset', 'onlyactive', 'includedeleted']
    for name in kwargs:
        if name not in valid_args:
            sys.stderr.write("'%s'" % name + ' is not a valid argument ' +
                             'of probes()\n')
            return
    return self.request("GET", "probes", kwargs).json()['probes']
Returns a list of all Pingdom probe servers Parameters: * limit -- Limits the number of returned probes to the specified quantity Type: Integer * offset -- Offset for listing (requires limit). Type: Integer Default: 0 * onlyactive -- Return only active probes Type: Boolean Default: False * includedeleted -- Include old probes that are no longer in use Type: Boolean Default: False Returned structure: [ { 'id' : <Integer> Unique probe id 'country' : <String> Country 'city' : <String> City 'name' : <String> Name 'active' : <Boolean> True if probe is active 'hostname' : <String> DNS name 'ip' : <String> IP address 'countryiso': <String> Country ISO code }, ... ]
def register_resource(self, viewset, namespace=None):
    """Register ``viewset`` as the canonical endpoint for its resource.

    Generates and registers the route, and records it in the reverse
    maps so DREST can build the canonical URL for a given resource.

    Arguments:
        viewset -- viewset class; must expose a DREST-compatible
                   ``serializer_class``.
        namespace -- optional URL namespace, e.g. 'v3'.

    Raises:
        Exception -- if the viewset is not DREST-compatible, or if the
                     resource key or resource name is already registered.
    """
    try:
        serializer = viewset.serializer_class()
        resource_key = serializer.get_resource_key()
        resource_name = serializer.get_name()
        path_name = serializer.get_plural_name()
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        import traceback
        traceback.print_exc()
        raise Exception(
            "Failed to extract resource name from viewset: '%s'."
            " It, or its serializer, may not be DREST-compatible." % (
                viewset
            )
        )

    if namespace:
        namespace = namespace.rstrip('/') + '/'
    base_path = namespace or ''
    base_path = r'%s' % base_path + path_name
    self.register(base_path, viewset)

    # Enforce one canonical path per resource key.
    if resource_key in resource_map:
        raise Exception(
            "The resource '%s' has already been mapped to '%s'."
            " Each resource can only be mapped to one canonical"
            " path. " % (
                resource_key,
                resource_map[resource_key]['path']
            )
        )
    resource_map[resource_key] = {
        'path': base_path,
        'viewset': viewset
    }

    # Resource names must also be globally unique.
    if resource_name in resource_name_map:
        resource_key = resource_name_map[resource_name]
        raise Exception(
            "The resource name '%s' has already been mapped to '%s'."
            " A resource name can only be used once." % (
                resource_name,
                resource_map[resource_key]['path']
            )
        )
    resource_name_map[resource_name] = resource_key
Register a viewset that should be considered the canonical endpoint for a particular resource. In addition to generating and registering the route, it adds the route in a reverse map to allow DREST to build the canonical URL for a given resource. Arguments: viewset - viewset class, should have `serializer_class` attr. namespace - (optional) URL namespace, e.g. 'v3'.
def get_threats_lists(self):
    """Retrieve all available threat lists from the service.

    Also records the service's suggested minimum wait duration before
    the next request.
    """
    response = self.service.threatLists().list().execute()
    self.set_wait_duration(response.get('minimumWaitDuration'))
    return response['threatLists']
Retrieve all available threat lists
def _evaluate_expressions(self, expression_engine, step_id, values, context):
    """Recursively evaluate expressions in a dictionary of values.

    String values are stripped and evaluated as inline expressions when
    they use inline syntax, otherwise as block expressions; nested
    dictionaries are processed recursively; all other values pass
    through unchanged.  Returns a new dictionary.

    Raises ExecutionError when an expression fails to evaluate.
    """
    if expression_engine is None:
        # No engine configured: values are used verbatim.
        return values

    processed = {}
    for name, value in values.items():
        if isinstance(value, str):
            value = value.strip()
            try:
                expression = expression_engine.get_inline_expression(value)
                if expression is not None:
                    value = expression_engine.evaluate_inline(expression, context)
                else:
                    # Not inline syntax: treat the whole string as a block.
                    value = expression_engine.evaluate_block(value, context)
            except EvaluationError as error:
                raise ExecutionError('Error while evaluating expression for step "{}":\n{}'.format(
                    step_id, error
                ))
        elif isinstance(value, dict):
            value = self._evaluate_expressions(expression_engine, step_id, value, context)

        processed[name] = value

    return processed
Recursively evaluate expressions in a dictionary of values.
def parse_crop(self, crop, original_size, size):
    """Parse ``crop`` into an (x_offset, y_offset) tuple usable by the
    crop function, or None when no crop is requested.

    :param crop: string with the crop settings.
    :param original_size: (width, height) of the image to be cropped.
    :param size: (width, height) of the wanted image.
    :return: two integer offsets, or None.
    :rtype: tuple or None
    """
    if crop is None:
        return None
    crop = crop.split(' ')
    if len(crop) == 1:
        crop = crop[0]
    # NOTE(review): when the string contains a space, ``crop`` remains a
    # list here and the dict lookups below would raise TypeError
    # (unhashable list) -- confirm whether multi-word crops ever occur.
    x_crop = 50  # defaults: center of the image
    y_crop = 50
    if crop in CROP_ALIASES['x']:
        x_crop = CROP_ALIASES['x'][crop]
    elif crop in CROP_ALIASES['y']:
        y_crop = CROP_ALIASES['y'][crop]
    x_offset = self.calculate_offset(x_crop, original_size[0], size[0])
    y_offset = self.calculate_offset(y_crop, original_size[1], size[1])
    return int(x_offset), int(y_offset)
Parses crop into a tuple usable by the crop function. :param crop: String with the crop settings. :param original_size: A tuple of size of the image that should be cropped. :param size: A tuple of the wanted size. :return: Tuple of two integers with crop settings :rtype: tuple
def delete_color_scheme_stack(self, scheme_name):
    """Remove the stack widget registered under ``scheme_name``."""
    self.set_scheme(scheme_name)
    current = self.stack.currentWidget()
    self.stack.removeWidget(current)
    self.order.remove(scheme_name)
Remove stack widget by 'scheme_name'.
def hexedit(pktlist):
    """Write ``pktlist`` to a temporary pcap file, open it in hexedit for
    manual editing, then read back and return the edited packets.
    """
    f = get_temp_file()
    wrpcap(f, pktlist)
    with ContextManagerSubprocess("hexedit()", conf.prog.hexedit):
        subprocess.call([conf.prog.hexedit, f])
    pktlist = rdpcap(f)
    os.unlink(f)  # clean up the temporary pcap file
    return pktlist
Run hexedit on a list of packets, then return the edited packets.
def from_sqlite(cls, database_path, base_url, version='auto', client_id='ghost-admin'):
    """Create a Ghost API client, reading the secret for ``client_id``
    from the given SQLite database.

    :param database_path: path to the Ghost database file
    :param base_url: base URL of the Ghost server
    :param version: server version to use (default: 'auto')
    :param client_id: client ID to look up in the database
    :raises GhostException: when no secret exists for ``client_id``
    """
    import os
    import sqlite3

    # Open the database read-only by connecting through an O_RDONLY file
    # descriptor.  NOTE(review): the /dev/fd/N path is POSIX-specific and
    # will not work on Windows.
    fd = os.open(database_path, os.O_RDONLY)
    connection = sqlite3.connect('/dev/fd/%d' % fd)
    os.close(fd)

    try:
        row = connection.execute(
            'SELECT secret FROM clients WHERE slug = ?',
            (client_id,)
        ).fetchone()

        if row:
            return cls(
                base_url, version=version,
                client_id=client_id, client_secret=row[0]
            )
        else:
            raise GhostException(401, [{
                'errorType': 'InternalError',
                'message': 'No client_secret found for client_id: %s' % client_id
            }])
    finally:
        connection.close()
Initialize a new Ghost API client, reading the client ID and secret from the SQlite database. :param database_path: The path to the database file. :param base_url: The base url of the server :param version: The server version to use (default: `auto`) :param client_id: The client ID to look for in the database :return: A new Ghost API client instance
def generate_contentinfo_from_channeldir(self, args, options):
    """Append one row to Content.csv for every folder and file found
    under ``args['channeldir']`` (the top-level folder itself is skipped).
    """
    LOGGER.info('Generating Content.csv rows folders and file in channeldir')
    file_path = get_metadata_file_path(self.channeldir, self.contentinfo)
    with open(file_path, 'a') as csv_file:
        csvwriter = csv.DictWriter(csv_file, CONTENT_INFO_HEADER)
        channeldir = args['channeldir']
        if channeldir.endswith(os.path.sep):
            # BUGFIX: str.rstrip returns a new string; the original call
            # discarded the result, leaving the trailing separator on.
            channeldir = channeldir.rstrip(os.path.sep)
        content_folders = sorted(os.walk(channeldir))
        _ = content_folders.pop(0)  # skip the root folder itself
        for rel_path, _subfolders, filenames in content_folders:
            LOGGER.info('processing folder ' + str(rel_path))
            sorted_filenames = sorted(filenames)
            self.generate_contentinfo_from_folder(csvwriter, rel_path, sorted_filenames)
    # typo fixed: "Generted" -> "Generated"
    LOGGER.info('Generated {} row for all folders and files in {}'.format(self.contentinfo, self.channeldir))
Create rows in Content.csv for each folder and file in `self.channeldir`.
def coinbase_withdraw(self, amount, currency, coinbase_account_id):
    """Withdraw funds from the trading account to a Coinbase account.

    Args:
        amount (Decimal): the amount to withdraw.
        currency (str): currency code, e.g. 'BTC'.
        coinbase_account_id (str): ID of the target Coinbase account.

    Returns:
        dict: information about the withdrawal.
    """
    payload = {
        'amount': amount,
        'currency': currency,
        'coinbase_account_id': coinbase_account_id,
    }
    return self._send_message('post',
                              '/withdrawals/coinbase-account',
                              data=json.dumps(payload))
Withdraw funds to a coinbase account. You can move funds between your Coinbase accounts and your cbpro trading accounts within your daily limits. Moving funds between Coinbase and cbpro is instant and free. See AuthenticatedClient.get_coinbase_accounts() to receive information regarding your coinbase_accounts. Args: amount (Decimal): The amount to withdraw. currency (str): The type of currency (eg. 'BTC') coinbase_account_id (str): ID of the coinbase account. Returns: dict: Information about the deposit. Example:: { "id":"593533d2-ff31-46e0-b22e-ca754147a96a", "amount":"10.00", "currency": "BTC", }
def fromordinal(cls, ordinal):
    """Return the week corresponding to the proleptic Gregorian ordinal,
    where January 1 of year 1 starts the week with ordinal 1.

    Raises ValueError for ordinals < 1.
    """
    if ordinal < 1:
        raise ValueError("ordinal must be >= 1")
    # Map the week ordinal to the first day of that week, then build the
    # instance from that date's ISO (year, week) pair.
    return super(Week, cls).__new__(cls, *(date.fromordinal((ordinal-1) * 7 + 1).isocalendar()[:2]))
Return the week corresponding to the proleptic Gregorian ordinal, where January 1 of year 1 starts the week with ordinal 1.
def insert(self, inst):
    """Insert a vdata or a vgroup in the vgroup.

    Args::

      inst   vdata or vgroup instance to add

    Returns::

      index of the inserted vdata or vgroup (0 based)

    C library equivalent : Vinsert
    """
    # Both VD and VG expose their HDF identifier as `_id`; the original
    # had two identical isinstance branches, merged here.
    if isinstance(inst, (VD, VG)):
        id = inst._id
    else:
        raise HDF4Error("insrt: bad argument")
    index = _C.Vinsert(self._id, id)
    _checkErr('insert', index, "cannot insert in vgroup")
    return index
Insert a vdata or a vgroup in the vgroup. Args:: inst vdata or vgroup instance to add Returns:: index of the inserted vdata or vgroup (0 based) C library equivalent : Vinsert
def stack_files(files, hemi, source, target):
    """Stack the 1-D distance vectors read from ``files`` into one array
    and dump it, comma-separated, to a csv file in the current working
    directory.  Returns the path of the written file.
    """
    import csv
    import os
    import numpy as np
    out_name = "sdist_%s_%s_%s.csv" % (hemi, source, target)
    out_path = os.path.join(os.getcwd(), out_name)
    stacked = np.array([np.genfromtxt(dfile, delimiter=',') for dfile in files])
    stacked.tofile(out_path, ",")
    return out_path
This function takes a list of files as input and vstacks them
def insert_text(self, text, at_end=False, error=False, prompt=False):
    """Insert ``text`` at the current cursor position, or append it to
    the shell output when ``at_end`` is set."""
    if not at_end:
        ConsoleBaseWidget.insert_text(self, text)
    else:
        self.append_text_to_shell(text, error, prompt)
Insert text at the current cursor position or at the end of the command line
def _add_extra_fields(gelf_dict, record): skip_list = ( 'args', 'asctime', 'created', 'exc_info', 'exc_text', 'filename', 'funcName', 'id', 'levelname', 'levelno', 'lineno', 'module', 'msecs', 'message', 'msg', 'name', 'pathname', 'process', 'processName', 'relativeCreated', 'thread', 'threadName') for key, value in record.__dict__.items(): if key not in skip_list and not key.startswith('_'): gelf_dict['_%s' % key] = value
Add extra fields to the given ``gelf_dict``. However, this does not add additional fields in to ``message_dict`` that are either duplicated from standard :class:`logging.LogRecord` attributes, duplicated from the python logging module source (e.g. ``exc_text``), or violate the GELF format (i.e. ``id``). .. seealso:: The list of standard :class:`logging.LogRecord` attributes can be found at: http://docs.python.org/library/logging.html#logrecord-attributes :param gelf_dict: dictionary representation of a GELF log. :type gelf_dict: dict :param record: :class:`logging.LogRecord` to extract extra fields from to insert into the given ``gelf_dict``. :type record: logging.LogRecord
def get_names(self):
    """Collect the name variants of this entity.

    Reads all identifiers typed as F12_Name and returns (and caches on
    ``self.names``) a list of (language, name) tuples, e.g.
    ``[('it', 'Sofocle')]``.
    """
    names = [id for id in self.ecrm_P1_is_identified_by if id.uri == surf.ns.EFRBROO['F12_Name']]
    self.names = []
    for name in names:
        # Each rdfs:label variant carries its own language tag.
        for variant in name.rdfs_label:
            self.names.append((variant.language,variant.title()))
    return self.names
Returns a list of (language, name) tuples, one per name variant, and caches it on ``self.names``. Example: [('it', 'Sofocle')]
def get_http(base_url, function, opts):
    """Build a request URL from ``base_url``, ``function`` and the
    query options in ``opts``, perform the HTTP request and return the
    response body.

    Raises ValueError when the server answers with a non-200 code.
    """
    query = urlencode(opts)
    url = os.path.join(base_url, function) + '/?' + query
    response = urlopen(url)
    if response.code != 200:
        raise ValueError("Random.rg returned server code: " + str(response.code))
    return response.read()
HTTP request generator.
def value(self):
    """Binary content of the stored file, or None when no file is set."""
    stored = self.file
    if stored is None:
        return None
    return stored.open("rb").read()
Binary value content.
def preparse(output_format):
    """Do any special processing of a template, and return the result.

    Templates are looked up relative to the configured templates
    directory.  A missing 'tempita' dependency is mapped to a UserError
    with install instructions; I/O failures become a LoggableError.
    """
    try:
        return templating.preparse(output_format, lambda path: os.path.join(config.config_dir, "templates", path))
    except ImportError as exc:
        if "tempita" in str(exc):
            raise error.UserError("To be able to use Tempita templates, install the 'tempita' package (%s)\n" " Possibly USING THE FOLLOWING COMMAND:\n" " %s/easy_install tempita" % (exc, os.path.dirname(sys.executable)))
        # Other import errors are not ours to explain.
        raise
    except IOError as exc:
        raise error.LoggableError("Cannot read template: {}".format(exc))
Do any special processing of a template, and return the result.
def _parse_mut(mut): multiplier = 1 if mut.startswith("-"): mut = mut[1:] multiplier = -1 nt = mut.strip('0123456789') pos = int(mut[:-2]) * multiplier return nt, pos
Parse mutation field to get position and nts.
def _null_ac_sia(transition, direction, alpha=0.0):
    """Return an |AcSystemIrreducibilityAnalysis| with zero |big_alpha|
    (unless overridden via ``alpha``) and empty accounts."""
    return AcSystemIrreducibilityAnalysis(
        transition=transition,
        direction=direction,
        alpha=alpha,
        account=(),
        partitioned_account=()
    )
Return an |AcSystemIrreducibilityAnalysis| with zero |big_alpha| and empty accounts.
def put(self, locator=None, component=None):
    """Put a new reference into this reference map.

    :param locator: a locator to find the reference by.
    :param component: a component reference to be added.

    :raises Exception: if ``component`` is None.
    """
    if component is None:  # fixed: identity check instead of == None
        raise Exception("Component cannot be null")
    self._lock.acquire()
    try:
        self._references.append(Reference(locator, component))
    finally:
        self._lock.release()
Puts a new reference into this reference map. :param locator: a locator to find the reference by. :param component: a component reference to be added.
def convolve_comb_lines(lines_wave, lines_flux, sigma, crpix1, crval1,
                        cdelt1, naxis1):
    """Convolve a set of lines of known wavelengths and fluxes.

    Each line is broadened with a Gaussian of width ``sigma`` (via
    gauss_box_model) on the wavelength grid defined by the
    CRPIX1/CRVAL1/CDELT1/NAXIS1 calibration, and the contributions are
    accumulated.

    Returns the wavelength array and the resulting spectrum.
    """
    pixel_index = np.arange(naxis1) + 1
    xwave = crval1 + (pixel_index - crpix1) * cdelt1
    spectrum = np.zeros(naxis1)
    for center, amplitude in zip(lines_wave, lines_flux):
        spectrum += gauss_box_model(x=xwave, amplitude=amplitude,
                                    mean=center, stddev=sigma)
    return xwave, spectrum
Convolve a set of lines of known wavelengths and flux. Parameters ---------- lines_wave : array like Input array with wavelengths lines_flux : array like Input array with fluxes sigma : float Sigma of the broadening gaussian to be applied. crpix1 : float CRPIX1 of the desired wavelength calibration. crval1 : float CRVAL1 of the desired wavelength calibration. cdelt1 : float CDELT1 of the desired wavelength calibration. naxis1 : integer NAXIS1 of the output spectrum. Returns ------- xwave : array like Array with wavelengths for the output spectrum. spectrum : array like Array with the expected fluxes at each pixel.
def show_config(config):
    """Print the current configuration, one 'key: value' line per entry,
    sorted by key."""
    print("\nCurrent Configuration:\n")
    for key in sorted(config.config):
        print("{0:15}: {1}".format(key, config.config[key]))
Show the current configuration.
def enable_logging(self):
    """Enable logging to the global debug log file.

    Attaches a FileHandler to the root logger whose format includes this
    run's id, so multiple processes on the same machine can be told
    apart.  There is currently no way to disable logging once enabled.
    """
    handler = logging.FileHandler(self.log_fname)
    handler.setLevel(logging.INFO)
    run_id = self.id

    # Filter that stamps every record with this run's id so the
    # formatter below can reference %(run_id)s.
    class WBFilter(logging.Filter):
        def filter(self, record):
            record.run_id = run_id
            return True

    formatter = logging.Formatter(
        '%(asctime)s %(levelname)-7s %(threadName)-10s:%(process)d [%(run_id)s:%(filename)s:%(funcName)s():%(lineno)s] %(message)s')
    handler.setFormatter(formatter)
    handler.addFilter(WBFilter())
    root = logging.getLogger()
    root.addHandler(handler)
Enable logging to the global debug log. This adds a run_id to the log, in case of multiple processes on the same machine. Currently no way to disable logging after it's enabled.
def quantile_binning(data=None, bins=10, *, qrange=(0.0, 1.0), **kwargs) -> StaticBinning:
    """Binning schema based on quantile ranges.

    Finds equally spaced quantiles of ``data`` so that all bins hold
    roughly the same number of entries.  Weights are not (yet) taken
    into account when computing quantiles.

    Parameters
    ----------
    bins: sequence or Optional[int]
        Number of bins, or explicit quantile positions (in percent).
    qrange: Optional[tuple]
        Minimum and maximum quantile to span (default: 0.0, 1.0).

    Returns
    -------
    StaticBinning
    """
    if np.isscalar(bins):
        # An integer bin count: generate bins+1 evenly spaced percentile
        # positions across the requested quantile range.
        bins = np.linspace(qrange[0] * 100, qrange[1] * 100, bins + 1)
    bins = np.percentile(data, bins)
    return static_binning(bins=make_bin_array(bins), includes_right_edge=True)
Binning schema based on quantile ranges. This binning finds equally spaced quantiles. This should lead to all bins having roughly the same frequencies. Note: weights are not (yet) taken into account for calculating quantiles. Parameters ---------- bins: sequence or Optional[int] Number of bins qrange: Optional[tuple] Two floats as minimum and maximum quantile (default: 0.0, 1.0) Returns ------- StaticBinning
def group_default_invalidator(self, obj):
    """Invalidate cached items when the Group changes.

    Returns one invalidation entry per user, since a group change can
    affect any user's cached representation.
    """
    user_pks = User.objects.values_list('pk', flat=True)
    return [('User', pk, False) for pk in user_pks]
Invalidated cached items when the Group changes.
def get_best_match(text_log_error):
    """Return the best TextLogErrorMatch for the given TextLogError, or
    None when no match scores above the cut off.

    Ties on score are broken by the highest classified_failure_id.
    """
    score_cut_off = 0.7
    return (text_log_error.matches.filter(score__gt=score_cut_off)
            .order_by("-score", "-classified_failure_id")
            .select_related('classified_failure')
            .first())
Get the best TextLogErrorMatch for a given TextLogError. Matches are further filtered by the score cut off.
def bypass(*inputs, copy=False):
    """Return the given arguments unchanged.

    A single argument is returned as-is; multiple arguments come back as
    a tuple.  With ``copy=True`` a deep copy of the result is returned.

    Example::

        >>> bypass('a', 'b', 'c')
        ('a', 'b', 'c')
        >>> bypass('a')
        'a'
    """
    result = inputs[0] if len(inputs) == 1 else inputs
    if copy:
        return _copy.deepcopy(result)
    return result
Returns the same arguments. :param inputs: Inputs values. :type inputs: T :param copy: If True, it returns a deepcopy of input values. :type copy: bool, optional :return: Same input values. :rtype: (T, ...), T Example:: >>> bypass('a', 'b', 'c') ('a', 'b', 'c') >>> bypass('a') 'a'
def process_action(self):
    """Publish or unpublish the related object, logging the action when
    a change is actually made.

    Returns True if a change was made, False otherwise.
    """
    unpublishing = self.publish_version == self.UNPUBLISH_CHOICE
    actioned = self._unpublish() if unpublishing else self._publish()
    if actioned:
        self._log_action()
    return actioned
Process the action and update the related object, returns a boolean if a change is made.
def _rnd_date(start, end): return date.fromordinal(random.randint(start.toordinal(), end.toordinal()))
Internal random date generator.
def get_scheduler_info():
    """Return basic info about the APScheduler instance as a JSON
    response: current host, allowed hosts and running state."""
    scheduler = current_app.apscheduler

    d = OrderedDict([
        ('current_host', scheduler.host_name),
        ('allowed_hosts', scheduler.allowed_hosts),
        ('running', scheduler.running)
    ])

    return jsonify(d)
Gets the scheduler info.
def reqRealTimeBars(
        self, contract: Contract, barSize: int,
        whatToShow: str, useRTH: bool,
        realTimeBarsOptions: List[TagValue] = None) -> RealTimeBarList:
    """Request realtime 5 second bars.

    https://interactivebrokers.github.io/tws-api/realtime_bars.html

    Args:
        contract: Contract of interest.
        barSize: Must be 5.
        whatToShow: Source for constructing bars: 'TRADES', 'MIDPOINT',
            'BID' or 'ASK'.
        useRTH: If True, only data from Regular Trading Hours.
        realTimeBarsOptions: Unknown.
    """
    reqId = self.client.getReqId()
    bars = RealTimeBarList()
    bars.reqId = reqId
    bars.contract = contract
    bars.barSize = barSize
    bars.whatToShow = whatToShow
    bars.useRTH = useRTH
    bars.realTimeBarsOptions = realTimeBarsOptions
    # Register the subscription before sending the request so no
    # incoming bar is missed.
    self.wrapper.startSubscription(reqId, bars, contract)
    self.client.reqRealTimeBars(
        reqId, contract, barSize, whatToShow,
        useRTH, realTimeBarsOptions)
    return bars
Request realtime 5 second bars. https://interactivebrokers.github.io/tws-api/realtime_bars.html Args: contract: Contract of interest. barSize: Must be 5. whatToShow: Specifies the source for constructing bars. Can be 'TRADES', 'MIDPOINT', 'BID' or 'ASK'. useRTH: If True then only show data from within Regular Trading Hours, if False then show all data. realTimeBarsOptions: Unknown.
def migrate(ctx,):
    """Migrate an old loqusdb instance to 1.0, logging the elapsed time
    and the number of variants that were updated."""
    adapter = ctx.obj['adapter']
    start_time = datetime.now()
    nr_updated = migrate_database(adapter)
    LOG.info("All variants updated, time to complete migration: {}".format(
        datetime.now() - start_time))
    LOG.info("Nr variants that where updated: %s", nr_updated)
Migrate an old loqusdb instance to 1.0
def get_appliances(self, start=0, count=-1, filter='', fields='', query='', sort='', view=''):
    """List Image Streamer appliance resources, with optional paging,
    filtering, sorting and view selection.

    Args:
        start: 0-based index of the first item to return.
        count: number of resources to return; -1 requests all items.
        filter: general filter/query string to narrow the results.
        fields: which fields should be included in the result set.
        query: general query string to narrow the results.
        sort: sort order of the returned data set.
        view: name of a predefined view to apply to the results.

    Returns:
        list: Image Streamer appliance resources.
    """
    appliances_uri = self.URI + '/image-streamer-appliances'
    return self._client.get_all(start, count, filter=filter, sort=sort,
                                query=query, fields=fields, view=view,
                                uri=appliances_uri)
Gets a list of all the Image Streamer resources based on optional sorting and filtering, and constrained by start and count parameters. Args: start: The first item to return, using 0-based indexing. If not specified, the default is 0 - start with the first available item. count: The number of resources to return. A count of -1 requests all items. The actual number of items in the response might differ from the requested count if the sum of start and count exceeds the total number of items. filter (list or str): A general filter/query string to narrow the list of items returned. The default is no filter; all resources are returned. fields: Specifies which fields should be returned in the result set. query: A general query string to narrow the list of resources returned. The default is no query - all resources are returned. sort: The sort order of the returned data set. By default, the sort order is based on create time with the oldest entry first. view: Return a specific subset of the attributes of the resource or collection, by specifying the name of a predefined view. The default view is expand - show all attributes of the resource and all elements of collections of resources. Returns: list: Image Streamer resources associated with the Deployment Servers.
def create_db_info():
    """Create an empty metadata structure with all expected keys."""
    return {
        'instrument': '',
        'uuid': '',
        'tags': {},
        'type': '',
        'mode': '',
        'observation_date': '',
        'origin': {},
    }
Create metadata structure
def _expanded_sql(self):
    """Get the expanded BigQuery SQL string of this UDF, building and
    caching it on first access.

    Returns
      The expanded SQL string of this UDF.
    """
    if not self._sql:
        self._sql = UDF._build_udf(self._name, self._code, self._return_type, self._params, self._language, self._imports)
    return self._sql
Get the expanded BigQuery SQL string of this UDF Returns The expanded SQL string of this UDF
def set_led(self, colorcode):
    """Set the LED color of the Herkulex servo.

    Args:
        colorcode (int): color code (0x00 OFF, 0x02 BLUE, 0x03 CYAN,
            0x04 RED, 0x05 ORANGE, 0x06 VIOLET, 0x07 WHITE).
    """
    packet = [
        0x0A,  # presumably the packet size -- confirm against protocol docs
        self.servoid,
        RAM_WRITE_REQ,
        LED_CONTROL_RAM,
        0x01,
        colorcode,
    ]
    send_data(packet)
Set the LED Color of Herkulex Args: colorcode (int): The code for colors (0x00-OFF 0x02-BLUE 0x03-CYAN 0x04-RED 0x05-ORANGE 0x06-VIOLET 0x07-WHITE
def execute_phase(self, phase):
    """Execute a phase, honouring its repeat options.

    Re-runs the phase while it produces a REPEAT outcome, up to
    ``phase.options.repeat_limit`` times (effectively unbounded when the
    limit is unset), and returns the final PhaseExecutionOutcome.  If
    the executor is being stopped, returns a null outcome instead.
    """
    repeat_count = 1
    repeat_limit = phase.options.repeat_limit or sys.maxsize
    while not self._stopping.is_set():
        is_last_repeat = repeat_count >= repeat_limit
        phase_execution_outcome = self._execute_phase_once(phase, is_last_repeat)

        if phase_execution_outcome.is_repeat and not is_last_repeat:
            repeat_count += 1
            continue

        return phase_execution_outcome
    # The executor was asked to stop: wrap a None phase result.
    return PhaseExecutionOutcome(None)
Executes a phase or skips it, yielding PhaseExecutionOutcome instances. Args: phase: Phase to execute. Returns: The final PhaseExecutionOutcome that wraps the phase return value (or exception) of the final phase run. All intermediary results, if any, are REPEAT and handled internally. Returning REPEAT here means the phase hit its limit for repetitions.
def _control(self, state):
    """Send a control command switching the device ON or OFF.

    :param state: the raw state byte to switch to.
    :raises S20Exception: when the device does not acknowledge.
    """
    if not self._subscription_is_recent():
        # Refresh the subscription before controlling.
        self._subscribe()

    cmd = MAGIC + CONTROL + self._mac + PADDING_1 + PADDING_2 + state
    _LOGGER.debug("Sending new state to %s: %s", self.host, ord(state))
    ack_state = self._udp_transact(cmd, self._control_resp, state)
    if ack_state is None:
        raise S20Exception(
            "Device didn't acknowledge control request: {}".format(
                self.host))
Control device state. Possible states are ON or OFF. :param state: Switch to this state.
def db_exists(name, user=None, password=None, host=None, port=None, authdb=None):
    """Check whether a database exists in MongoDB.

    CLI Example:

    .. code-block:: bash

        salt '*' mongodb.db_exists <name> <user> <password> <host> <port>
    """
    databases = db_list(user, password, host, port, authdb=authdb)
    # db_list returns an error string on failure; treat that as "absent".
    if isinstance(databases, six.string_types):
        return False
    return name in databases
Checks if a database exists in MongoDB CLI Example: .. code-block:: bash salt '*' mongodb.db_exists <name> <user> <password> <host> <port>
def _get_extra_args(extra_args, arg_keys): single_keys = set(["sam_ref", "config"]) out = [] for i, arg_key in enumerate(arg_keys): vals = [xs[i] for xs in extra_args] if arg_key in single_keys: out.append(vals[-1]) else: out.append(vals) return out
Retrieve extra arguments to pass along to combine function. Special cases like reference files and configuration information are passed as single items, the rest as lists mapping to each data item combined.
def _convert_string_array(data, encoding, errors, itemsize=None):
    """
    we take a string-like that is object dtype and coerce to a fixed size
    string type

    Parameters
    ----------
    data : a numpy array of object dtype
    encoding : None or string-encoding
    errors : handler for encoding errors
    itemsize : integer, optional, defaults to the max length of the strings

    Returns
    -------
    data in a fixed-length string dtype, encoded to bytes if needed
    """
    # encode the flattened values if an encoding was requested, then
    # restore the original shape
    if encoding is not None and len(data):
        data = Series(data.ravel()).str.encode(encoding, errors).values.reshape(data.shape)

    # derive the item size from the longest string when not supplied
    if itemsize is None:
        ensured = ensure_object(data.ravel())
        itemsize = max(1, libwriters.max_len_string_array(ensured))

    data = np.asarray(data, dtype="S{size}".format(size=itemsize))
    return data
we take a string-like that is object dtype and coerce to a fixed size string type Parameters ---------- data : a numpy array of object dtype encoding : None or string-encoding errors : handler for encoding errors itemsize : integer, optional, defaults to the max length of the strings Returns ------- data in a fixed-length string dtype, encoded to bytes if needed
def _prepare_init_params_from_job_description(cls, job_details): init_params = dict() init_params['model_name'] = job_details['ModelName'] init_params['instance_count'] = job_details['TransformResources']['InstanceCount'] init_params['instance_type'] = job_details['TransformResources']['InstanceType'] init_params['volume_kms_key'] = job_details['TransformResources'].get('VolumeKmsKeyId') init_params['strategy'] = job_details.get('BatchStrategy') init_params['assemble_with'] = job_details['TransformOutput'].get('AssembleWith') init_params['output_path'] = job_details['TransformOutput']['S3OutputPath'] init_params['output_kms_key'] = job_details['TransformOutput'].get('KmsKeyId') init_params['accept'] = job_details['TransformOutput'].get('Accept') init_params['max_concurrent_transforms'] = job_details.get('MaxConcurrentTransforms') init_params['max_payload'] = job_details.get('MaxPayloadInMB') init_params['base_transform_job_name'] = job_details['TransformJobName'] return init_params
Convert the transform job description to init params that can be handled by the class constructor Args: job_details (dict): the returned job details from a describe_transform_job API call. Returns: dict: The transformed init_params