code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _parse(self, line):
    """Parse a ``file:line:col:type:text`` diagnostic line into attributes.

    ``self.valid`` is set to True only when every field parses cleanly;
    on any malformed input the method returns silently.
    """
    parts = line.split(':', maxsplit=4)
    if len(parts) != 5:
        return
    filename, line_num_txt, column_txt, message_type, text = parts
    try:
        self.line_num = int(line_num_txt.strip())
        self.column = int(column_txt.strip())
    except ValueError:
        return
    self.filename = filename
    self.message_type = message_type.strip()
    self.text = text.strip()
    self.valid = True
Parse the output line
def content_type(self, data):
    """Set the Content-Type header value for this request.

    The value is coerced to ``str``, cached on the instance, and mirrored
    into the outgoing headers via ``add_header``.
    """
    value = str(data)
    self._content_type = value
    self.add_header('Content-Type', value)
The Content-Type header value for this request.
def _construct_permission(self, function, source_arn=None, source_account=None, suffix="", event_source_token=None):
    """Construct the Lambda Permission resource allowing the source
    service to invoke the function this event source triggers.

    :returns: the permission resource
    :rtype: model.lambda_.LambdaPermission
    """
    permission = LambdaPermission(
        self.logical_id + 'Permission' + suffix,
        attributes=function.get_passthrough_resource_attributes())
    # Prefer the function name; fall back to the ARN for resources that
    # do not implement a runtime "name" attribute.
    try:
        name_or_arn = function.get_runtime_attr("name")
    except NotImplementedError:
        name_or_arn = function.get_runtime_attr("arn")
    permission.Action = 'lambda:invokeFunction'
    permission.FunctionName = name_or_arn
    permission.Principal = self.principal
    permission.SourceArn = source_arn
    permission.SourceAccount = source_account
    permission.EventSourceToken = event_source_token
    return permission
Constructs the Lambda Permission resource allowing the source service to invoke the function this event source triggers. :returns: the permission resource :rtype: model.lambda_.LambdaPermission
def validate_body(schema):
    """Validate the body of incoming requests for a flask view against
    ``schema``.

    Requests routed to the decorated view are matched against the schema;
    a mismatch raises ``DataValidationError`` (its ``error_list``
    attribute holds all errors found).
    """
    location = get_callsite_location()

    def decorator(fn):
        validate_schema(schema)
        wrapped = wrap_request(fn, schema)
        record_schemas(fn, wrapped, location, request_schema=sort_schema(schema))
        return wrapped

    return decorator
Validate the body of incoming requests for a flask view. An example usage might look like this:: from snapstore_schemas import validate_body @validate_body({ 'type': 'array', 'items': { 'type': 'object', 'properties': { 'snap_id': {'type': 'string'}, 'series': {'type': 'string'}, 'name': {'type': 'string'}, 'title': {'type': 'string'}, 'keywords': { 'type': 'array', 'items': {'type': 'string'} }, 'summary': {'type': 'string'}, 'description': {'type': 'string'}, 'created_at': {'type': 'string'}, }, 'required': ['snap_id', 'series'], 'additionalProperties': False } }) def my_flask_view(): # view code here return "Hello World", 200 All incoming request that have been routed to this view will be matched against the specified schema. If the request body does not match the schema an instance of `DataValidationError` will be raised. By default this will cause the flask application to return a 500 response, but this can be customised by telling flask how to handle these exceptions. The exception instance has an 'error_list' attribute that contains a list of all the errors encountered while processing the request body.
def show_dependencies(self, stream=None):
    """Write the ASCII representation of the dependency tree to *stream*.

    Args:
        stream: Writable file-like object. Defaults to the *current*
            ``sys.stdout``, resolved at call time (the previous
            ``stream=sys.stdout`` default was evaluated once at import
            time, so later stdout redirection was ignored).
    """
    if stream is None:
        stream = sys.stdout

    def child_iter(node):
        # A task's dependencies expose the dependent node via .node.
        return [d.node for d in node.deps]

    def text_str(node):
        return colored(str(node), color=node.status.color_opts["color"])

    for task in self.iflat_tasks():
        print(draw_tree(task, child_iter, text_str), file=stream)
Writes to the given stream the ASCII representation of the dependency tree.
def _prepareSubForm(self, liveForm):
    """Turn *liveForm* into a sub form named after this parameter,
    compacting it when the parameter is marked compact.

    @param liveForm: a liveform.
    @type liveForm: L{LiveForm}
    @return: a sub form.
    @rtype: L{LiveForm}
    """
    subForm = liveForm.asSubForm(self.name)
    if self._parameterIsCompact:
        subForm.compact()
    return subForm
Utility for turning liveforms into subforms, and compacting them as necessary. @param liveForm: a liveform. @type liveForm: L{LiveForm} @return: a sub form. @rtype: L{LiveForm}
def srv_event(token, hits, url=RBA_URL):
    """Serve an event to RainbowAlga.

    Accepts hits as a pandas DataFrame or a Table; anything else is
    logged as an error and ignored.
    """
    if url is None:
        log.error("Please provide a valid RainbowAlga URL.")
        return
    ws_url = url + '/message'
    if isinstance(hits, pd.core.frame.DataFrame):
        pos = [tuple(row) for row in hits[['x', 'y', 'z']].values]
        time = list(hits['time'])
        tot = list(hits['tot'])
    elif isinstance(hits, Table):
        pos = list(zip(hits.pos_x, hits.pos_y, hits.pos_z))
        time = list(hits.time)
        tot = list(hits.tot)
    else:
        log.error(
            "No calibration information found in hits (type: {0})".format(
                type(hits)
            )
        )
        return
    event = {"hits": {'pos': pos, 'time': time, 'tot': tot}}
    srv_data(ws_url, token, event, 'event')
Serve event to RainbowAlga
def nmf_tsne(data, k, n_runs=10, init='enhanced', **params):
    """Run tsne-consensus-NMF.

    1. run a bunch of NMFs, get W and H
    2. run t-SNE + k-means on each reconstructed WH matrix
    3. run consensus clustering on all k-means results
    4. use the consensus clustering to initialize a final NMF run
    5. return the W and H from that final run
    """
    nmf = NMF(k)
    tsne = TSNE(2)
    km = KMeans(k)
    all_clusterings = []
    for _ in range(n_runs):
        w = nmf.fit_transform(data)
        h = nmf.components_
        embedding = tsne.fit_transform(w.dot(h).T)
        all_clusterings.append(km.fit_predict(embedding))
    consensus = CE.cluster_ensembles(
        np.vstack(all_clusterings), verbose=False, N_clusters_max=k)
    nmf_final = NMF(k, init='custom')
    init_w, init_h = nmf_init(data, consensus, k, init)
    W = nmf_final.fit_transform(data, W=init_w, H=init_h)
    H = nmf_final.components_
    return W, H
runs tsne-consensus-NMF 1. run a bunch of NMFs, get W and H 2. run tsne + km on all WH matrices 3. run consensus clustering on all km results 4. use consensus clustering as initialization for a new run of NMF 5. return the W and H from the resulting NMF run
def update_redirect(self):
    """Push the current request URL onto the session's page history.

    Call this on your own endpoints to keep back-history navigation
    working; endpoints that bypass it will be skipped by the next
    submit or back.
    """
    history = Stack(session.get("page_history", []))
    history.push(request.url)
    session["page_history"] = history.to_json()
Call it on your own endpoints to update the back-history navigation. If you bypass it, the next submit or back will skip over it.
def update(self, iteration, fobj):
    """Update the booster for one iteration.

    Delegates to ``self.bst.update`` with the stored training data.
    """
    self.bst.update(self.dtrain, iteration, fobj)
Update the boosters for one iteration
def only_passed_and_wait(result):
    """Return *result* only when its verdict is PASS or WAIT; otherwise None."""
    verdict = result.get("verdict", "").strip().lower()
    if verdict not in Verdicts.PASS + Verdicts.WAIT:
        return None
    return result
Returns PASS and WAIT results only, skips everything else.
def queue_bind(self, queue, exchange, routing_key, arguments=None):
    """Bind *queue* to *exchange* using *routing_key*.

    Thin delegation to the underlying channel's ``queue_bind``.
    """
    return self.channel.queue_bind(
        queue=queue,
        exchange=exchange,
        routing_key=routing_key,
        arguments=arguments,
    )
Bind queue to an exchange using a routing key.
def disable_snapshots(self, volume_id, schedule_type):
    """Disable snapshots for a specific block volume at a given schedule.

    :param integer volume_id: The id of the volume
    :param string schedule_type: 'HOURLY'|'DAILY'|'WEEKLY'
    :return: whether snapshots were successfully disabled
    """
    return self.client.call(
        'Network_Storage', 'disableSnapshots', schedule_type, id=volume_id)
Disables snapshots for a specific block volume at a given schedule :param integer volume_id: The id of the volume :param string schedule_type: 'HOURLY'|'DAILY'|'WEEKLY' :return: Returns whether successfully disabled or not
def rollaxis(vari, axis, start=0):
    """Roll ``axis`` backwards until it lies before position ``start``.

    Applies coefficient-wise on chaospy polynomials and falls through to
    ``numpy.rollaxis`` for plain arrays.
    """
    if not isinstance(vari, Poly):
        return numpy.rollaxis(vari, axis, start)
    coefficients = vari.A.copy()
    rolled = {key: rollaxis(coefficients[key], axis, start) for key in vari.keys}
    return Poly(rolled, vari.dim, None, vari.dtype)
Roll the specified axis backwards, until it lies in a given position. Args: vari (chaospy.poly.base.Poly, numpy.ndarray): Input array or polynomial. axis (int): The axis to roll backwards. The positions of the other axes do not change relative to one another. start (int): The axis is rolled until it lies before the position.
def format_endpoint(schema, addr, port, api_version):
    """Return a fully formatted keystone endpoint URL.

    @param schema: http or https
    @param addr: ipv4/ipv6 host of the keystone service
    @param port: port of the keystone service
    @param api_version: 2 or 3
    """
    suffix = get_api_suffix(api_version)
    return '{}://{}:{}/{}/'.format(schema, addr, port, suffix)
Return a formatted keystone endpoint @param schema: http or https @param addr: ipv4/ipv6 host of the keystone service @param port: port of the keystone service @param api_version: 2 or 3 @returns a fully formatted keystone endpoint
def _set_final_freeness(self, flag):
    """Set the freeness of the final chunk.

    No chunk follows the final one, so the heap's trailing size field
    stores the flag as if an additional chunk followed it.
    """
    footer_addr = self.heap_base + self.heap_size - self._chunk_size_t_size
    value = ~CHUNK_P_MASK if flag else CHUNK_P_MASK
    self.state.memory.store(footer_addr, value)
Sets the freedom of the final chunk. Since no proper chunk follows the final chunk, the heap itself manages this. Nonetheless, for now it is implemented as if an additional chunk followed the final chunk.
def _create_filter_dsl(urlkwargs, definitions):
    """Create a filter DSL expression from the request values.

    Returns a ``(filters, urlkwargs)`` tuple, with every used value
    recorded back into *urlkwargs*.
    """
    filters = []
    for name, factory in definitions.items():
        values = request.values.getlist(name, type=text_type)
        if not values:
            continue
        filters.append(factory(values))
        for value in values:
            urlkwargs.add(name, value)
    return (filters, urlkwargs)
Create a filter DSL expression.
def __generate_key(self, config):
    """Generate the ssh key (when 'create' is affirmative) and record the
    ssh config locations back into *config*.
    """
    cwd = config.get('ssh_path', self._install_directory())
    if config.is_affirmative('create', default="yes"):
        if not os.path.exists(cwd):
            os.makedirs(cwd)
        key_file = os.path.join(cwd, config.get('keyname'))
        if not os.path.exists(key_file):
            # NOTE(review): the command ends with "-N " and no passphrase
            # argument -- presumably intended as an empty passphrase; confirm
            # how lib.call tokenizes this.
            command = "ssh-keygen -t %(type)s -f %(keyname)s -N " % config.to_dict()
            lib.call(command, cwd=cwd, output_log_level=logging.DEBUG)
    if not config.has('ssh_path'):
        config.set('ssh_path', cwd)
    config.set('ssh_key_path', os.path.join(config.get('ssh_path'), config.get('keyname')))
Generate the ssh key, and return the ssh config location
def destroy(self):
    """Undo the effects of initdb.

    Stops the DBMS and destroys all evidence of it, including its
    backing files (when any were created).
    """
    self.stop()
    if self.base_pathname is None:
        return
    self._robust_remove(self.base_pathname)
Undo the effects of initdb. Destroy all evidence of this DBMS, including its backing files.
def _extract_image_urls(arg: Message_T) -> List[str]:
    """Extract all image urls from a message-like object."""
    message = Message(arg)
    return [segment.data['url']
            for segment in message
            if segment.type == 'image' and 'url' in segment.data]
Extract all image urls from a message-like object.
def iterpackages(self):
    """Iterate over all packages in the PackageStore, yielding one
    package object per stored content hash.
    """
    pkgdir = os.path.join(self._path, self.PKG_DIR)
    if not os.path.isdir(pkgdir):
        return
    for team in sub_dirs(pkgdir):
        for user in sub_dirs(self.team_path(team)):
            for pkg in sub_dirs(self.user_path(team, user)):
                contents_dir = os.path.join(
                    self.package_path(team, user, pkg),
                    PackageStore.CONTENTS_DIR)
                for hsh in sub_files(contents_dir):
                    yield self.get_package(team, user, pkg, pkghash=hsh)
Return an iterator over all the packages in the PackageStore.
def _handle_aui(self, data):
    """Handle an AUI message: parse it, fire the callback, return it.

    :param data: RF message to parse
    :type data: string
    :returns: :py:class`~alarmdecoder.messages.AUIMessage`
    """
    message = AUIMessage(data)
    self.on_aui_message(message=message)
    return message
Handle AUI messages. :param data: RF message to parse :type data: string :returns: :py:class`~alarmdecoder.messages.AUIMessage`
def set_attributes(self, attr_obj=None, ns_uri=None, **attr_dict):
    """Add or update this element's attributes.

    :param attr_obj: a dictionary or list of attribute name/value pairs,
        or None.
    :param ns_uri: an optional namespace URI for the new attributes.
    :param attr_dict: attribute names and values given as keyword
        arguments.
    """
    self._set_element_attributes(
        self.impl_node, attr_obj=attr_obj, ns_uri=ns_uri, **attr_dict)
Add or update this element's attributes, where attributes can be specified in a number of ways. :param attr_obj: a dictionary or list of attribute name/value pairs. :type attr_obj: dict, list, tuple, or None :param ns_uri: a URI defining a namespace for the new attributes. :type ns_uri: string or None :param dict attr_dict: attribute name and values specified as keyword arguments.
def filter_content_types(self, content_type_qs):
    """Restrict *content_type_qs* to content types whose model is an
    EventBase subclass.
    """
    valid_ids = []
    for content_type in content_type_qs:
        model = content_type.model_class()
        # model_class() may return None for stale content types.
        if model and issubclass(model, EventBase):
            valid_ids.append(content_type.id)
    return content_type_qs.filter(pk__in=valid_ids)
Filter the content types selectable to only event subclasses
def instances_changed(self):
    """True if any instance has changed; reading the value resets the flag."""
    changed = bool(lib.EnvGetInstancesChanged(self._env))
    lib.EnvSetInstancesChanged(self._env, int(False))
    return changed
True if any instance has changed.
def on(self, event):
    """Return the registration wrapper for *event*.

    Usage::

        @dispatch.on("my_event")
        def handle_my_event(foo, bar, baz):
            ...

    Raises ValueError for unknown events.
    """
    handler = self._handlers.get(event, None)
    if not handler:
        raise ValueError("Unknown event '{}'".format(event))
    return handler.register
Returns a wrapper for the given event. Usage: @dispatch.on("my_event") def handle_my_event(foo, bar, baz): ...
def get_matrix_from_list(self, rows, columns, matrix_list, rowBased=True):
    """Create a new Matrix instance from a flat value list.

    :param integer rows: the height of the Matrix.
    :param integer columns: the width of the Matrix.
    :param matrix_list: a one-dimensional list of values.
    :param rowBased: True when the flat list concatenates rows,
        False when it concatenates columns.
    """
    return Matrix(columns, rows, matrix_list, rowBased)
Create a new Matrix instance from a matrix_list. :note: This method is used to create a Matrix instance using cpython. :param integer rows: The height of the Matrix. :param integer columns: The width of the Matrix. :param matrix_list: A one dimensional list containing the values for Matrix. Depending on the rowBased parameter, either the rows are combined or the columns. :param rowBased Boolean: Only necessary if the oneDimArray is given. Indicates whether the oneDimArray combines rows together (rowBased=True) or columns (rowBased=False).
def set_location(self, obj, cursor):
    """Copy the cursor's (file name, line) pair onto *obj*.

    Location is also used for code-generation ordering; cursors without
    a usable location are ignored.
    """
    location = getattr(cursor, 'location', None)
    if location is None or location.file is None:
        return
    obj.location = (location.file.name, location.line)
Location is also used for codegeneration ordering.
def render_template(process, template_string, context):
    """Render *template_string* with the process's expression engine, or
    return it unchanged when no engine is configured.
    """
    from resolwe.flow.managers import manager
    engine_name = process.requirements.get('expression-engine', None)
    if not engine_name:
        return template_string
    engine = manager.get_expression_engine(engine_name)
    return engine.evaluate_block(template_string, context)
Render template using the specified expression engine.
def instance(cls, *args, **kwgs):
    """Return the class's single shared instance, creating it on first use."""
    try:
        return cls._instance
    except AttributeError:
        cls._instance = cls(*args, **kwgs)
        return cls._instance
Will be the only instance
def findNestedNamespaces(self, lst):
    """Recursive helper for finding nested namespaces.

    Appends this node to ``lst`` when it is a namespace node, then asks
    every child to do the same with the same list.

    :Parameters:
        ``lst`` (list)
            The list each namespace node is to be appended to.
    """
    if self.kind == "namespace":
        lst.append(self)
    for child in self.children:
        child.findNestedNamespaces(lst)
Recursive helper function for finding nested namespaces. If this node is a namespace node, it is appended to ``lst``. Each node also calls each of its child ``findNestedNamespaces`` with the same list. :Parameters: ``lst`` (list) The list each namespace node is to be appended to.
def haversine(point1, point2, unit='km'):
    """Great-circle distance between two points on the Earth's surface.

    :param point1: (latitude, longitude) 2-tuple in decimal degrees.
    :param point2: (latitude, longitude) 2-tuple in decimal degrees.
    :param unit: 'km' (default), 'm', 'mi', 'nmi', 'ft' or 'in'.
    :return: the distance in the requested unit.
    """
    AVG_EARTH_RADIUS_KM = 6371.0088
    conversions = {'km': 1, 'm': 1000, 'mi': 0.621371192,
                   'nmi': 0.539956803, 'ft': 3280.839895013,
                   'in': 39370.078740158}
    radius = AVG_EARTH_RADIUS_KM * conversions[unit]
    lat1, lng1 = (radians(c) for c in point1)
    lat2, lng2 = (radians(c) for c in point2)
    dlat = lat2 - lat1
    dlng = lng2 - lng1
    h = sin(dlat * 0.5) ** 2 + cos(lat1) * cos(lat2) * sin(dlng * 0.5) ** 2
    return 2 * radius * asin(sqrt(h))
Calculate the great-circle distance between two points on the Earth surface. :input: two 2-tuples, containing the latitude and longitude of each point in decimal degrees. Keyword arguments: unit -- a string containing the initials of a unit of measurement (i.e. miles = mi) default 'km' (kilometers). Example: haversine((45.7597, 4.8422), (48.8567, 2.3508)) :output: Returns the distance between the two points. The default returned unit is kilometers. The default unit can be changed by setting the unit parameter to a string containing the initials of the desired unit. Other available units are miles (mi), nautical miles (nmi), meters (m), feet (ft) and inches (in).
def get_urlhash(self, url, fmt):
    """Return the formatted url and sha256 hash of the file backing an
    internal url.
    """
    basename = os.path.basename(url)
    with self.open(basename) as f:
        return {'url': fmt(url), 'sha256': filehash(f, 'sha256')}
Returns the hash of the file of an internal url
def trigger_info(self, trigger=None, dump=False):
    """Get information about a trigger.

    Looks up the raw *trigger* in the syntax tracking tree and returns a
    list of dicts with ``category``, ``topic``, ``trigger``, ``filename``
    and ``line`` keys, or ``None`` when nothing matches. With
    ``dump=True`` the entire syntax tracking tree is returned instead.

    :param str trigger: The raw trigger text to look up.
    :param bool dump: Whether to dump the entire syntax tracking tree.
    :return: A list of matching triggers or ``None`` if no matches.
    """
    if dump:
        return self._syntax
    matches = []
    for category, topics in self._syntax.items():
        for topic, triggers in topics.items():
            if trigger not in triggers:
                continue
            filename, lineno = triggers[trigger]['trigger']
            matches.append(dict(
                category=category,
                topic=topic,
                trigger=trigger,
                filename=filename,
                line=lineno,
            ))
    return matches or None
Get information about a trigger. Pass in a raw trigger to find out what file name and line number it appeared at. This is useful for e.g. tracking down the location of the trigger last matched by the user via ``last_match()``. Returns a list of matching triggers, containing their topics, filenames and line numbers. Returns ``None`` if there weren't any matches found. The keys in the trigger info is as follows: * ``category``: Either 'topic' (for normal) or 'thats' (for %Previous triggers) * ``topic``: The topic name * ``trigger``: The raw trigger text * ``filename``: The filename the trigger was found in. * ``lineno``: The line number the trigger was found on. Pass in a true value for ``dump``, and the entire syntax tracking tree is returned. :param str trigger: The raw trigger text to look up. :param bool dump: Whether to dump the entire syntax tracking tree. :return: A list of matching triggers or ``None`` if no matches.
def get_locator(self, dmin, dmax):
    'Pick the best locator based on a distance.'
    _check_implicitly_registered()
    delta = relativedelta(dmax, dmin)
    num_days = (delta.years * 12.0 + delta.months) * 31.0 + delta.days
    num_sec = (delta.hours * 60.0 + delta.minutes) * 60.0 + delta.seconds
    tot_sec = num_days * 86400. + num_sec
    if abs(tot_sec) >= self.minticks:
        return dates.AutoDateLocator.get_locator(self, dmin, dmax)
    # Span too small for the date-locator heuristics: fall back to
    # millisecond resolution.
    self._freq = -1
    locator = MilliSecondLocator(self.tz)
    locator.set_axis(self.axis)
    locator.set_view_interval(*self.axis.get_view_interval())
    locator.set_data_interval(*self.axis.get_data_interval())
    return locator
Pick the best locator based on a distance.
def endings(self):
    """The list of word endings, tagging the analysis layer on demand.

    Ambiguous cases are separated with the pipe character by default;
    use ``get_analysis_element`` directly for a custom separator.
    """
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    return self.get_analysis_element(ENDING)
The list of word endings. Ambiguous cases are separated with pipe character by default. Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries.
def update_is_start(self):
    """Update the `is_start` property of the state.

    A state is a start state if it is the root state, has no parent, the
    parent is a LibraryState, or its state_id equals the parent
    ContainerState's start_state_id.
    """
    self.is_start = (
        self.state.is_root_state
        or self.parent is None
        or isinstance(self.parent.state, LibraryState)
        or self.state.state_id == self.state.parent.start_state_id
    )
Updates the `is_start` property of the state A state is a start state, if it is the root state, it has no parent, the parent is a LibraryState or the state's state_id is identical with the ContainerState.start_state_id of the ContainerState it is within.
def probably_wkt(text):
    """Quick heuristic check that *text* looks like a WKT geometry."""
    valid_types = {
        'POINT', 'LINESTRING', 'POLYGON', 'MULTIPOINT',
        'MULTILINESTRING', 'MULTIPOLYGON', 'GEOMETRYCOLLECTION',
    }
    matched = re.match(r'(\w+)\s*\([^)]+\)', text.strip())
    if not matched:
        return False
    return matched.group(1).upper() in valid_types
Quick check to determine if the provided text looks like WKT
def show_config():
    '''
    Return a dict of CPAN configuration values

    CLI Example:

    .. code-block:: bash

        salt '*' cpan.show_config
    '''
    config = {}
    for line in __salt__['cmd.run']('cpan -J').splitlines():
        if '=>' not in line:
            continue
        parts = line.split('=>')
        key = parts[0].replace("'", '').strip()
        value = parts[1].replace("',", '').replace("'", '').strip()
        config[key] = value
    return config
Return a dict of CPAN configuration values CLI Example: .. code-block:: bash salt '*' cpan.show_config
def load_or_create_client_key(pem_path):
    """Load the client key from a directory, creating it if missing.

    .. note:: A newly created key is a 2048-bit RSA key stored as
       ``client.key``.

    :type pem_path: ``twisted.python.filepath.FilePath``
    :param pem_path: The certificate directory to use, as with the endpoint.
    """
    key_file = pem_path.asTextMode().child(u'client.key')
    if key_file.exists():
        key = serialization.load_pem_private_key(
            key_file.getContent(),
            password=None,
            backend=default_backend())
        return JWKRSA(key=key)
    key = generate_private_key(u'rsa')
    key_file.setContent(
        key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.TraditionalOpenSSL,
            encryption_algorithm=serialization.NoEncryption()))
    return JWKRSA(key=key)
Load the client key from a directory, creating it if it does not exist. .. note:: The client key that will be created will be a 2048-bit RSA key. :type pem_path: ``twisted.python.filepath.FilePath`` :param pem_path: The certificate directory to use, as with the endpoint.
def get_data():
    """Synthetic binary classification dataset (1000 samples, 45 features).

    Returns a ``(data, targets)`` tuple.
    """
    return make_classification(
        n_samples=1000,
        n_features=45,
        n_informative=12,
        n_redundant=7,
        random_state=134985745,
    )
Synthetic binary classification dataset.
def _make_xml(self, endpoints):
    """Convert EndpointDescription beans into an indented XML element tree.

    :param endpoints: A list of EndpointDescription beans
    :return: the root XML Element
    """
    root = ElementTree.Element(TAG_ENDPOINT_DESCRIPTIONS)
    for bean in endpoints:
        self._make_endpoint(root, bean)
    self._indent(root)
    return root
Converts the given endpoint description beans into an XML Element :param endpoints: A list of EndpointDescription beans :return: A string containing an XML document
def is_free(self):
    """Return a concrete determination as to whether the chunk is free.

    Abstract here: concrete chunk implementations must override it.
    """
    message = "%s not implemented for %s" % (
        self.is_free.__func__.__name__,
        self.__class__.__name__,
    )
    raise NotImplementedError(message)
Returns a concrete determination as to whether the chunk is free.
def run_fn(self, name, *args, **kwds):
    """Run pre-built functionality that used Broad tools by name.

    See the picardrun module for available functions.
    """
    to_check = [picardrun]
    fn = None
    for namespace in to_check:
        try:
            fn = getattr(namespace, name)
            break
        except AttributeError:
            pass
    assert fn is not None, "Could not find function %s in %s" % (name, to_check)
    return fn(self, *args, **kwds)
Run pre-built functionality that used Broad tools by name. See the gatkrun, picardrun module for available functions.
def legacy_requests_view(request, rtype):
    """View legacy requests of *rtype* ('food' or 'maintenance'),
    paginated 50 per page; any other type raises Http404.
    """
    if rtype not in ['food', 'maintenance']:
        raise Http404
    requests = TeacherRequest.objects.filter(request_type=rtype)
    request_count = requests.count()
    paginator = Paginator(requests, 50)
    page = request.GET.get('page')
    try:
        requests = paginator.page(page)
    except PageNotAnInteger:
        requests = paginator.page(1)
    except EmptyPage:
        requests = paginator.page(paginator.num_pages)
    requests_dict = [
        (req, TeacherResponse.objects.filter(request=req))
        for req in requests
    ]
    return render_to_response(
        'teacher_requests.html',
        {
            'page_name': "Legacy {rtype} Requests".format(rtype=rtype.title()),
            'requests_dict': requests_dict,
            'requests': requests,
            'request_type': rtype.title(),
            'request_count': request_count,
        },
        context_instance=RequestContext(request),
    )
View to see legacy requests of rtype request type, which should be either 'food' or 'maintenance'.
def initialize_variables(sess, saver, logdir, checkpoint=None, resume=None):
    """Initialize or restore variables from a checkpoint if available.

    Args:
        sess: Session to initialize variables in.
        saver: Saver to restore variables.
        logdir: Directory to search for checkpoints.
        checkpoint: Checkpoint name to use; defaults to the most recent.
        resume: Whether to expect recovering a checkpoint.

    Raises:
        ValueError: If resume expected but no log directory specified.
        RuntimeError: If no resume expected but a checkpoint was found.
    """
    sess.run(tf.group(
        tf.local_variables_initializer(),
        tf.global_variables_initializer()))
    if resume and not (logdir or checkpoint):
        raise ValueError('Need to specify logdir to resume a checkpoint.')
    if logdir:
        state = tf.train.get_checkpoint_state(logdir)
        if checkpoint:
            checkpoint = os.path.join(logdir, checkpoint)
        if not checkpoint and state and state.model_checkpoint_path:
            checkpoint = state.model_checkpoint_path
    if checkpoint and resume is False:
        raise RuntimeError('Found unexpected checkpoint when starting a new run.')
    if checkpoint:
        saver.restore(sess, checkpoint)
Initialize or restore variables from a checkpoint if available. Args: sess: Session to initialize variables in. saver: Saver to restore variables. logdir: Directory to search for checkpoints. checkpoint: Specify what checkpoint name to use; defaults to most recent. resume: Whether to expect recovering a checkpoint or starting a new run. Raises: ValueError: If resume expected but no log directory specified. RuntimeError: If no resume expected but a checkpoint was found.
def upload_cart(cart, collection):
    """Connect to mongo and store *cart* in the specified collection.

    Returns the saved cart id.
    """
    carts = cart_db()
    cart_json = read_json_document(cart.cart_file())
    try:
        return carts[collection].save(cart_json)
    except MongoErrors.AutoReconnect:
        raise JuicerConfigError(
            "Error saving cart to `cart_host`. Ensure that this node is the master.")
Connect to mongo and store your cart in the specified collection.
def underlying_variable_ref(t):
    """Find the variable ref underlying *t*.

    Traverses through Identity, ReadVariableOp and Enter ops; returns
    the tensor whose op type mentions Variable or VarHandle, or None.
    """
    while t.op.type in ["Identity", "ReadVariableOp", "Enter"]:
        t = t.op.inputs[0]
    op_type = t.op.type
    if "Variable" not in op_type and "VarHandle" not in op_type:
        return None
    return t
Find the underlying variable ref. Traverses through Identity, ReadVariableOp, and Enter ops. Stops when op type has Variable or VarHandle in name. Args: t: a Tensor Returns: a Tensor that is a variable ref, or None on error.
def AddKeyByPath(self, key_path, registry_key):
    """Add a Windows Registry key for a specific key path, creating
    intermediate fake keys as needed.

    Args:
        key_path (str): Windows Registry key path to add the key.
        registry_key (WinRegistryKey): Windows Registry key.

    Raises:
        KeyError: if the subkey already exists.
        ValueError: if the key path is not absolute.
    """
    if not key_path.startswith(definitions.KEY_PATH_SEPARATOR):
        raise ValueError('Key path does not start with: {0:s}'.format(
            definitions.KEY_PATH_SEPARATOR))
    if not self._root_key:
        self._root_key = FakeWinRegistryKey(self._key_path_prefix)
    parent_key = self._root_key
    for path_segment in key_paths.SplitKeyPath(key_path):
        # Create each intermediate key; when AddSubkey reports it is
        # already present (KeyError) reuse the existing one.
        try:
            subkey = FakeWinRegistryKey(path_segment)
            parent_key.AddSubkey(subkey)
        except KeyError:
            subkey = parent_key.GetSubkeyByName(path_segment)
        parent_key = subkey
    parent_key.AddSubkey(registry_key)
Adds a Windows Registry key for a specific key path. Args: key_path (str): Windows Registry key path to add the key. registry_key (WinRegistryKey): Windows Registry key. Raises: KeyError: if the subkey already exists. ValueError: if the Windows Registry key cannot be added.
def stack_plot(self, *args, **kwargs):
    """Plot timeseries stacks of existing data.

    See pyam.plotting.stack_plot() for all available options.
    """
    df = self.as_pandas(with_metadata=True)
    return plotting.stack_plot(df, *args, **kwargs)
Plot timeseries stacks of existing data see pyam.plotting.stack_plot() for all available options
def AUC_analysis(AUC):
    """Interpret an AUC value against the standard interpretation table.

    :param AUC: area under the ROC curve
    :return: interpretation result as str ("None" for unusable input)
    """
    try:
        if AUC == "None":
            return "None"
        # Threshold table: AUC below the bound maps to the verdict.
        for bound, verdict in ((0.6, "Poor"), (0.7, "Fair"),
                               (0.8, "Good"), (0.9, "Very Good")):
            if AUC < bound:
                return verdict
        return "Excellent"
    except Exception:
        return "None"
Analyze AUC against the interpretation table. :param AUC: area under the ROC curve :type AUC : float :return: interpretation result as str
def normpath(path):
    """Normalize ``path``, collapsing redundant separators and up-level
    refs while preserving its scheme and netloc.
    """
    scheme, netloc, raw_path = parse(path)
    return unparse(scheme, netloc, os.path.normpath(raw_path))
Normalize ``path``, collapsing redundant separators and up-level refs.
def get_program_args(self):
    """Get the program args to run this JVM with.

    These are the arguments passed to main() and are program-specific.
    """
    return [token
            for arg in self.get_options().program_args
            for token in safe_shlex_split(arg)]
Get the program args to run this JVM with. These are the arguments passed to main() and are program-specific.
def _iter_bitmasks(eid):
    """Iterate all bitmasks in a given enum.

    ``DEFMASK`` marks the end of iteration but is itself a valid bitmask
    value, so it is always yielded once before stopping.
    """
    bitmask = idaapi.get_first_bmask(eid)
    while True:
        yield bitmask
        if bitmask == DEFMASK:
            return
        bitmask = idaapi.get_next_bmask(eid, bitmask)
Iterate all bitmasks in a given enum. Note that while `DEFMASK` indicates no-more-bitmasks, it is also a valid bitmask value. The only way to tell if it exists is when iterating the serials.
def verify_module(self, filename, module, verify_signature):
    """Verify a downloaded kernel module's checksum and, when gpg
    verification is enabled, its signature.

    NOTE(review): the ``verify_signature`` parameter is unused here; the
    signature check is driven by ``self.gpg_verify`` — confirm intent.

    :param filename: downloaded kernel module path
    :param module: kernel module metadata dict
    :param verify_signature: enable/disable signature verification
    """
    with open(filename, 'rb') as f:
        module_data = f.read()
    self.verify_checksum(module_data, module['checksum'], module['location'])
    if self.gpg_verify:
        signature_url = "{0}/{1}".format(self.url, module['signature'])
        file_url = "{0}/{1}".format(self.url, module['location'])
        self.verify_file_signature(signature_url, file_url, filename)
Verify kernel module checksum and signature :type filename: str :param filename: downloaded kernel module path :type module: dict :param module: kernel module metadata :type verify_signature: bool :param verify_signature: enable/disable signature verification
def get_highlights(self, user: Union[int, Profile]) -> Iterator[Highlight]:
    """Get all highlights from a user. Requires being logged in.

    .. versionadded:: 4.1

    :param user: ID or Profile of the user whose highlights should get fetched.
    """
    userid = user if isinstance(user, int) else user.userid
    profile = user if isinstance(user, Profile) else None
    data = self.context.graphql_query(
        "7c16654f22c819fb63d1183034a5162f",
        {"user_id": userid,
         "include_chaining": False,
         "include_reel": False,
         "include_suggested_users": False,
         "include_logged_out_extras": False,
         "include_highlight_reels": True})["data"]["user"]['edge_highlight_reels']
    if data is None:
        raise BadResponseException('Bad highlights reel JSON.')
    for edge in data['edges']:
        yield Highlight(self.context, edge['node'], profile)
Get all highlights from a user. To use this, one needs to be logged in. .. versionadded:: 4.1 :param user: ID or Profile of the user whose highlights should get fetched.
def __get_indexer(in_fns, selected_type=None):
    """Determine which indexer to use from the explicit type option or,
    failing that, from the (single) file extension of the inputs.
    """
    if selected_type is not None:
        indexer = get_indexer_by_filetype(selected_type)
    else:
        if len(in_fns) == 0:
            raise IndexError("reading from stdin, unable to guess input file " +
                             "type, use -t option to set manually.\n")
        extensions = set([os.path.splitext(fn)[1] for fn in in_fns])
        assert(len(extensions) >= 1)
        if len(extensions) > 1:
            raise IndexError("more than one file extension present, unable " +
                             "to get input type, use -t option to set manually.\n")
        indexer = get_indexer_by_file_extension(list(extensions)[0])
    assert(indexer is not None)
    return indexer
Determine which indexer to use based on input files and type option.
def forum_topic_list(self, title_matches=None, title=None, category_id=None):
    """Get forum topics.

    Parameters:
        title_matches (str): Search body for the given terms.
        title (str): Exact title match.
        category_id (int): 0, 1 or 2 (General, Tags, Bugs & Features).
    """
    params = {
        'search[title_matches]': title_matches,
        'search[title]': title,
        'search[category_id]': category_id,
    }
    return self._get('forum_topics.json', params)
Function to get forum topics. Parameters: title_matches (str): Search body for the given terms. title (str): Exact title match. category_id (int): Can be: 0, 1, 2 (General, Tags, Bugs & Features respectively).
def parse_time_step(time_step, target='s', units_ref=None):
    """Read in a time step string (e.g. "1 day") and convert it to
    *target* units (seconds by default).

    Returns:
        tuple: (converted value as float, raw numeric value, period name)

    Raises:
        HydraPluginError: when the numeric part of time_step cannot be
            converted to a float.
    """
    log.info("Parsing time step %s", time_step)
    value = re.findall(r'\d+', time_step)[0]
    valuelen = len(value)
    try:
        value = float(value)
    except ValueError:
        # BUG FIX: the original code built this exception without raising
        # it (and used a bare except), silently continuing with an
        # unconverted value.
        raise HydraPluginError(
            "Unable to extract number of time steps (%s) from time step %s"
            % (value, time_step))
    unit = time_step[valuelen:].strip()
    period = get_time_period(unit)
    log.info("Time period is %s", period)
    converted_time_step = units_ref.convert(value, period, target)
    log.info("Time period is %s %s", converted_time_step, period)
    return float(converted_time_step), value, period
Read in the time step and convert it to seconds.
def is_file_url(url):
    """Returns true if the given url (or object with a ``url`` attribute)
    is a file url.
    """
    from .misc import to_text
    if not url:
        return False
    if not isinstance(url, six.string_types):
        try:
            url = url.url
        except AttributeError:
            raise ValueError("Cannot parse url from unknown type: {0!r}".format(url))
    url = to_text(url, encoding="utf-8")
    return urllib_parse.urlparse(url.lower()).scheme == "file"
Returns true if the given url is a file url
def setup_logging(verbosity, formats=None):
    """Configure the root logger based on verbosity and optional formats.

    Args:
        verbosity (int): 0, 1, 2.
        formats (dict): optional `info`, `color` and `debug` keys that
            override the associated default log formats.
    """
    if formats is None:
        formats = {}
    level = logging.INFO
    fmt = formats.get("info", INFO_FORMAT)
    if sys.stdout.isatty():
        fmt = formats.get("color", COLOR_FORMAT)
    if verbosity > 0:
        level = logging.DEBUG
        fmt = formats.get("debug", DEBUG_FORMAT)
    if verbosity < 2:
        # botocore is very chatty; silence it unless -vv was given.
        logging.getLogger("botocore").setLevel(logging.CRITICAL)
    handler = logging.StreamHandler()
    handler.setFormatter(ColorFormatter(fmt, ISO_8601))
    logging.root.addHandler(handler)
    logging.root.setLevel(level)
Configure a proper logger based on verbosity and optional log formats. Args: verbosity (int): 0, 1, 2 formats (dict): Optional, looks for `info`, `color`, and `debug` keys which may override the associated default log formats.
def run_step(context):
    """pypyr step: save the current UTC datetime to context['nowUtc'].

    Optional context key 'nowUtcIn' supplies a strftime formatting
    expression (pypyr formatting expressions supported); without it the
    timestamp is ISO8601.

    Args:
        context: pypyr.context.Context. Mandatory.

    Returns:
        None. Updates the context arg.
    """
    logger.debug("started")
    format_expression = context.get('nowUtcIn', None)
    if format_expression:
        fmt = context.get_formatted_string(format_expression)
        context['nowUtc'] = datetime.now(timezone.utc).strftime(fmt)
    else:
        context['nowUtc'] = datetime.now(timezone.utc).isoformat()
    logger.info(f"timestamp {context['nowUtc']} saved to context nowUtc")
    logger.debug("done")
pypyr step saves current utc datetime to context. Args: context: pypyr.context.Context. Mandatory. The following context key is optional: - nowUtcIn. str. Datetime formatting expression. For full list of possible expressions, check here: https://docs.python.org/3.7/library/datetime.html#strftime-and-strptime-behavior All inputs support pypyr formatting expressions. This step creates now in context, containing a string representation of the timestamp. If input formatting not specified, defaults to ISO8601. Default is: YYYY-MM-DDTHH:MM:SS.ffffff+00:00, or, if microsecond is 0, YYYY-MM-DDTHH:MM:SS Returns: None. updates context arg.
def encrypt_file(self, path, output_path=None, overwrite=False, enable_verbose=True):
    """Encrypt a file using rsa.

    RSA is very slow on big files; for those, prefer symmetric
    encryption of the payload and RSA only for the password.
    """
    path, output_path = files.process_dst_overwrite_args(
        src=path,
        dst=output_path,
        overwrite=overwrite,
        src_to_dst_func=files.get_encrpyted_path,
    )
    with open(path, "rb") as infile, open(output_path, "wb") as outfile:
        encrypt_bigfile(infile, outfile, self.his_pubkey)
Encrypt a file using rsa. RSA for big file encryption is very slow. For big file, I recommend to use symmetric encryption and use RSA to encrypt the password.
def get_all_preordered_namespace_hashes(self):
    """Get all outstanding namespace preorder hashes that have not
    expired. Used for testing.
    """
    cursor = self.db.cursor()
    return namedb_get_all_preordered_namespace_hashes(cursor, self.lastblock)
Get all oustanding namespace preorder hashes that have not expired. Used for testing
def set_asset_dir(self):
    """Set ``self.asset_dir`` from the ASSET_DIR environment variable.

    Logs a warning and leaves the attribute unset when the variable is
    missing.

    :return: None
    """
    # BUG FIX: the logger suffix previously said '.get_asset_dir'
    # (copy-paste from a sibling method).
    log = logging.getLogger(self.cls_logger + '.set_asset_dir')
    try:
        self.asset_dir = os.environ['ASSET_DIR']
    except KeyError:
        # log.warn is a deprecated alias; use log.warning.
        log.warning('Environment variable ASSET_DIR is not set!')
    else:
        log.info('Found environment variable ASSET_DIR: {a}'.format(a=self.asset_dir))
Sets the asset_dir attribute from the ASSET_DIR environment variable This method reads the ASSET_DIR environment variable for the current asset install and stores it on the instance when set; when the variable is missing a warning is logged and the attribute is left unset. :return: None
def remove_column(conn, table, column_name, schema=None):
    """Remove the given key from the audit `activity` jsonb data columns.

    Useful in schema migrations that drop an audited column.

    :param conn: SQLAlchemy Connection, Engine or Alembic Operations object
    :param table: the table to remove the column from
    :param column_name: name of the column to remove
    :param schema: optional name of schema to use
    """
    activity_table = get_activity_table(schema=schema)
    key = sa.cast(column_name, sa.Text)
    query = (
        activity_table
        .update()
        .values(
            old_data=activity_table.c.old_data - key,
            changed_data=activity_table.c.changed_data - key,
        )
        .where(activity_table.c.table_name == table)
    )
    return conn.execute(query)
Removes given `activity` jsonb data column key. This function is useful when you are doing schema changes that require removing a column. Let's say you've been using PostgreSQL-Audit for a while for a table called article. Now you want to remove one audited column called 'created_at' from this table. :: from alembic import op from postgresql_audit import remove_column def upgrade(): op.remove_column('article', 'created_at') remove_column(op, 'article', 'created_at') :param conn: An object that is able to execute SQL (either SQLAlchemy Connection, Engine or Alembic Operations object) :param table: The table to remove the column from :param column_name: Name of the column to remove :param schema: Optional name of schema to use.
def export(self, timestamp=None):
    """Get the current notebook data and export it.

    Builds a JavaScript snippet that serializes the live notebook to
    JSON inside the browser, pushes that JSON back into the Python
    kernel (as ``<name>._notebook_data``), and then triggers
    ``_export_with_html()`` kernel-side.  The actual export therefore
    completes asynchronously after the Javascript round-trip.
    """
    if self._timestamp is None:
        raise Exception("No timestamp set. Has the archive been initialized?")
    if self.skip_notebook_export:
        # Plain export without the notebook snapshot.
        super(NotebookArchive, self).export(timestamp=self._timestamp,
                                            info={'notebook':self.notebook_name})
        return
    # Reset status; set later by the kernel-side callback.
    self.export_success = None
    name = self.get_namespace()
    # JS that stores the notebook JSON into a Python raw triple-quoted string.
    capture_cmd = ((r"var capture = '%s._notebook_data=r\"\"\"'" % name)
                   + r"+json_string+'\"\"\"'; ")
    cmd = (r'var kernel = IPython.notebook.kernel; '
           + r'var json_data = IPython.notebook.toJSON(); '
           + r'var json_string = JSON.stringify(json_data); '
           + capture_cmd
           + "var pycmd = capture + ';%s._export_with_html()'; " % name
           + r"kernel.execute(pycmd)")
    tstamp = time.strftime(self.timestamp_format, self._timestamp)
    export_name = self._format(self.export_name,
                               {'timestamp':tstamp, 'notebook':self.notebook_name})
    print(('Export name: %r\nDirectory %r'
           % (export_name, os.path.join(os.path.abspath(self.root))))
          + '\n\nIf no output appears, please check holoviews.archive.last_export_status()')
    # Kick off the browser-side serialization.
    display(Javascript(cmd))
Get the current notebook data and export.
def get_scheme_dirs():
    """Return a set of all directories that contain scheme YAML files."""
    pattern = rel_to_cwd('schemes', '**', '*.yaml')
    return {get_parent_dir(path) for path in glob(pattern)}
Return a set of all scheme directories.
def walk(cls, top=".", ext=".abo"):
    """Scan the directory tree below ``top`` and parse timing data.

    Collects every file whose name ends with ``ext`` and feeds the whole
    list to a freshly constructed parser.

    Return:
        (parser, paths, okfiles) where ``parser`` is the new object,
        ``paths`` is the list of files found and ``okfiles`` is the list
        of files that have been parsed successfully (okfiles == paths if
        everything parsed).
    """
    paths = [
        os.path.join(dirpath, fname)
        for dirpath, _dirnames, fnames in os.walk(top)
        for fname in fnames
        if fname.endswith(ext)
    ]
    parser = cls()
    okfiles = parser.parse(paths)
    return parser, paths, okfiles
Scan directory tree starting from top, look for files with extension `ext` and parse timing data. Return: (parser, paths, okfiles) where `parser` is the new object, `paths` is the list of files found and `okfiles` is the list of files that have been parsed successfully. (okfiles == paths) if all files have been parsed.
def _as_log_entry(self, name, now):
    """Build a ``LogEntry`` message from this instance.

    Args:
        name: the log name to record on the entry
        now (datetime.datetime): the current time, used as timestamp

    Returns:
        sc_messages.LogEntry: an entry whose struct payload is assembled
        from this instance's fields.
    """
    # Base payload: response code plus a POSIX timestamp.
    d = {
        u'http_response_code': self.response_code,
        u'timestamp': time.mktime(now.timetuple())
    }
    # 4xx/5xx responses are logged at ERROR severity and carry the cause.
    severity = _SEVERITY.INFO
    if self.response_code >= 400:
        severity = _SEVERITY.ERROR
        d[u'error_cause'] = self.error_cause.name
    # Only include size/method/latency fields when they carry information.
    if self.request_size > 0:
        d[u'request_size'] = self.request_size
    if self.response_size > 0:
        d[u'response_size'] = self.response_size
    if self.method:
        d[u'http_method'] = self.method
    if self.request_time:
        d[u'request_latency_in_ms'] = self.request_time.total_seconds() * 1000
    # Copy any other whitelisted, truthy fields verbatim.
    for key in self.COPYABLE_LOG_FIELDS:
        value = getattr(self, key, None)
        if value:
            d[key] = value
    return sc_messages.LogEntry(
        name=name,
        timestamp=timestamp.to_rfc3339(now),
        severity=severity,
        structPayload=_struct_payload_from(d))
Makes a `LogEntry` from this instance for the given log name. Args: name (string): the log name to record on the entry now (:class:`datetime.DateTime`): the current time Return: a ``LogEntry`` generated from this instance with the given name and timestamp Raises: ValueError: if the fields in this instance are insufficient to create a valid ``ServicecontrolServicesReportRequest``
def wrap_embedded_keyvalue(self, data):
    """Wrap each keyvalue-embedded variable in ``data`` in double quotes.

    Args:
        data (string): The data possibly containing embedded variables.

    Returns:
        (string): The data with every embedded variable rewritten as a
        quoted value.
    """
    if data is not None:
        try:
            data = u'{}'.format(data)
        except UnicodeEncodeError:
            # Already a unicode string; leave it alone.
            pass
        # De-duplicate matches before substituting, as each replace()
        # rewrites every occurrence at once.
        found = {m.group(0) for m in re.finditer(self._vars_keyvalue_embedded, data)}
        for token in found:
            variable_string = re.search(self._variable_parse, token).group(0)
            data = data.replace(token, '": "{}"'.format(variable_string))
    return data
Wrap each keyvalue-embedded variable in double quotes. Args: data (string): The data with embedded variables. Returns: (string): The data with every embedded variable wrapped in double quotes.
def get_query_with_new_limit(self, new_limit):
    """Return ``self.sql`` rewritten so its LIMIT clause equals ``new_limit``.

    If the parsed statement has no LIMIT, one is simply appended.
    Otherwise the token two positions after the LIMIT keyword (past the
    intervening whitespace token) is mutated in the sqlparse token
    stream and the statement is re-serialized.
    """
    if not self._limit:
        return self.sql + ' LIMIT ' + str(new_limit)
    limit_pos = None
    tokens = self._parsed[0].tokens
    # Locate the LIMIT keyword in the token stream.
    for pos, item in enumerate(tokens):
        if item.ttype in Keyword and item.value.lower() == 'limit':
            limit_pos = pos
            break
    # tokens[limit_pos + 1] is whitespace; the limit expression follows.
    limit = tokens[limit_pos + 2]
    if limit.ttype == sqlparse.tokens.Literal.Number.Integer:
        tokens[limit_pos + 2].value = new_limit
    elif limit.is_group:
        # 'LIMIT offset, count' form: keep the offset, replace the count.
        tokens[limit_pos + 2].value = (
            '{}, {}'.format(next(limit.get_identifiers()), new_limit)
        )
    str_res = ''
    for i in tokens:
        str_res += str(i.value)
    return str_res
returns the query with the specified limit
def items(self):
    """Return the filtered items, computing and caching the raw items once."""
    if not hasattr(self, '_items'):
        self._items = self.get_items()
    return self.filter_items(self._items)
Accessor for the filtered items; the raw items are computed once and cached, then filtered on every access.
def replace_coord(self, i):
    """Replace the last-dimension coordinate of the data array at position ``i``.

    Parameters
    ----------
    i: int
        The number of the data array in the raw data (if the raw data is
        not an interactive list, use 0)

    Returns
    -------
    xarray.DataArray
        The data array with the replaced coordinate
    """
    # Pull the i-th array out of the iterator without materializing it all.
    da = next(islice(self.data_iterator, i, i+1))
    name, coord = self.get_alternative_coord(da, i)
    # Preserve auxiliary (non-dimension) coordinates across the rename.
    other_coords = {key: da.coords[key]
                    for key in set(da.coords).difference(da.dims)}
    ret = da.rename({da.dims[-1]: name}).assign_coords(
        **{name: coord}).assign_coords(**other_coords)
    return ret
Replace the coordinate for the data array at the given position Parameters ---------- i: int The number of the data array in the raw data (if the raw data is not an interactive list, use 0) Returns ------- xarray.DataArray The data array with the replaced coordinate
def _error_result_to_exception(error_result):
    """Map a BigQuery error reason to a ``google.cloud`` exception.

    :type error_result: Mapping[str, str]
    :param error_result: The error result from BigQuery.

    :rtype: google.cloud.exceptions.GoogleCloudError
    :returns: The mapped exception; unknown reasons fall back to a
        generic 500 (internal server error).
    """
    reason = error_result.get("reason")
    status_code = _ERROR_REASON_TO_EXCEPTION.get(
        reason, http_client.INTERNAL_SERVER_ERROR
    )
    # Keep the raw error result attached for callers that inspect .errors.
    return exceptions.from_http_status(
        status_code, error_result.get("message", ""), errors=[error_result]
    )
Maps BigQuery error reasons to an exception. The reasons and their matching HTTP status codes are documented on the `troubleshooting errors`_ page. .. _troubleshooting errors: https://cloud.google.com/bigquery\ /troubleshooting-errors :type error_result: Mapping[str, str] :param error_result: The error result from BigQuery. :rtype google.cloud.exceptions.GoogleCloudError: :returns: The mapped exception.
def install(self):
    """Install the service on the local machine.

    Deploys the systemd unit file and its environment file to their
    destinations, enables the unit, and reloads the systemd daemon so
    the new unit definition is picked up.
    """
    super(SystemD, self).install()
    self.deploy_service_file(self.svc_file_path, self.svc_file_dest)
    self.deploy_service_file(self.env_file_path, self.env_file_dest)
    sh.systemctl.enable(self.name)
    # Make systemd re-read unit files so the new service is visible.
    sh.systemctl('daemon-reload')
Install the service on the local machine This is where we deploy the service files to their relevant locations and perform any other required actions to configure the service and make it ready to be `start`ed.
def reset(self):
    """Reset the slpkg.conf file to its default values.

    Restores the config file from its pristine ``.orig`` copy, then
    compares the two files to confirm (and report) whether the restore
    succeeded.
    """
    shutil.copy2(self.config_file + ".orig", self.config_file)
    if filecmp.cmp(self.config_file + ".orig", self.config_file):
        print("{0}The reset was done{1}".format(
            self.meta.color["GREEN"], self.meta.color["ENDC"]))
    else:
        print("{0}Reset failed{1}".format(self.meta.color["RED"],
                                          self.meta.color["ENDC"]))
Reset slpkg.conf file with default values
def xmlrpc_reschedule(self):
    """Reschedule all running tasks.

    Moves every entry from ``scheduled_tasks`` into the ``reschedule``
    list and clears the schedule.

    :return: True if there were tasks to reschedule, otherwise None
        (falls off the end, preserving the original behavior).
    """
    # Idiom fix: truthiness instead of 'not len(x) == 0'.
    if self.scheduled_tasks:
        self.reschedule = list(self.scheduled_tasks.items())
        self.scheduled_tasks = {}
        return True
Reschedule all running tasks.
def transport_jabsorbrpc(self):
    """Install the JABSORB-RPC transport bundle and instantiate its components.

    Starts the ``pelix.remote.transport.jabsorb_rpc`` bundle, then
    queues instantiation of the exporter and importer components on the
    iPOPO waiting list.
    """
    self.context.install_bundle(
        "pelix.remote.transport.jabsorb_rpc"
    ).start()
    with use_waiting_list(self.context) as ipopo:
        ipopo.add(
            rs.FACTORY_TRANSPORT_JABSORBRPC_EXPORTER,
            "pelix-jabsorbrpc-exporter",
        )
        ipopo.add(
            rs.FACTORY_TRANSPORT_JABSORBRPC_IMPORTER,
            "pelix-jabsorbrpc-importer",
        )
Installs the JABSORB-RPC transport bundles and instantiates components
def get(self):
    """Return the rendered progress bar string.

    Fills ``percent`` of ``size`` cells with block characters from
    ``curses_bars`` (full blocks for the whole part, one partial block
    for the fractional remainder), pads the rest with the empty
    character, and optionally appends the percentage text.
    """
    # Split the filled width into whole cells and a fractional remainder.
    frac, whole = modf(self.size * self.percent / 100.0)
    ret = curses_bars[8] * int(whole)
    if frac > 0:
        # One partial-block glyph (index 1..7) for the fractional cell.
        ret += curses_bars[int(frac * 8)]
        whole += 1
    ret += self.__empty_char * int(self.size - whole)
    if self.__with_text:
        ret = '{}{:5.1f}%'.format(ret, self.percent)
    return ret
Return the bars.
def keys(self, name_start, name_end, limit=10):
    """Return up to ``limit`` keys in the range (``name_start``, ``name_end``].

    Similar to **Redis.KEYS**.  An empty string means -inf for the lower
    bound and +inf for the upper bound.

    :param string name_start: lower bound (not included); '' means -inf
    :param string name_end: upper bound (included); '' means +inf
    :param int limit: maximum number of keys returned
    :return: a list of keys
    :rtype: list
    """
    # Validate/normalize the limit before sending the command.
    limit = get_positive_integer('limit', limit)
    return self.execute_command('keys', name_start, name_end, limit)
Return a list of the top ``limit`` keys between ``name_start`` and ``name_end`` Similar to **Redis.KEYS** .. note:: The range is (``name_start``, ``name_end``]. ``name_start`` isn't in the range, but ``name_end`` is. :param string name_start: The lower bound(not included) of keys to be returned, empty string ``''`` means -inf :param string name_end: The upper bound(included) of keys to be returned, empty string ``''`` means +inf :param int limit: number of elements will be returned. :return: a list of keys :rtype: list >>> ssdb.keys('set_x1', 'set_x3', 10) ['set_x2', 'set_x3'] >>> ssdb.keys('set_x ', 'set_xx', 3) ['set_x1', 'set_x2', 'set_x3'] >>> ssdb.keys('set_x ', '', 3) ['set_x1', 'set_x2', 'set_x3', 'set_x4'] >>> ssdb.keys('set_zzzzz ', '', ) []
def items_to_extract(self, offset=0, length=None):
    """Return an iterable of the specific items to extract.

    As a side effect, sets ``self.items_to_extract_length`` to the
    number of items in the returned slice.

    :param offset: where to start extracting
    :param length: how many to extract (None means "to the end")
    :return: the sliced result of ``self.origin_data()``
    """
    # NOTE: when length is falsy, endoffset is length itself -- i.e.
    # open-ended for None, and an empty slice for 0.
    endoffset = length and offset + length
    qs = self.origin_data()[offset:endoffset]
    # Presumably a queryset-like object -- it exposes .count().
    self.items_to_extract_length = qs.count()
    return qs
Return an iterable of specific items to extract. As a side-effect, set self.items_to_extract_length. :param offset: where to start extracting :param length: how many to extract :return: An iterable of the specific items to extract
def createCleanup(self, varBind, **context):
    """Finalize Managed Object Instance creation (SET cleanup phase).

    Seals the newly created instance: drops its entry from the *create*
    bookkeeping in ``context['instances']`` and hands the cleanup off to
    the underlying column object.  After this step no rollback to the
    previous instance state is possible.
    """
    name, val = varBind
    (debug.logger & debug.FLAG_INS and
     debug.logger('%s: createCleanup(%s, %r)' % (self, name, val)))
    instances = context['instances'].setdefault(
        self.name, {self.ST_CREATE: {}, self.ST_DESTROY: {}})
    idx = context['idx']
    # Bump the branch version so dependent MIB views are invalidated.
    self.branchVersionId += 1
    # -idx - 1 appears to encode the pending-create slot for this
    # request index -- confirm against the matching createCommit code.
    instances[self.ST_CREATE].pop(-idx - 1, None)
    self._vars[name].writeCleanup(varBind, **context)
Finalize Managed Object Instance creation. Implements the successful third step of the multi-step workflow similar to the SNMP SET command processing (:RFC:`1905#section-4.2.5`). The goal of the third (successful) phase is to seal the new Managed Object Instance. Once the system transitions into the *cleanup* state, no roll back to the previous Managed Object Instance state is possible. The role of this object in the MIB tree is non-terminal. It does not access the actual Managed Object Instance, but just traverses one level down the MIB tree and hands off the query to the underlying objects. Parameters ---------- varBind: :py:class:`~pysnmp.smi.rfc1902.ObjectType` object representing new Managed Object Instance value to create Other Parameters ---------------- \*\*context: Query parameters: * `cbFun` (callable) - user-supplied callable that is invoked to pass the new value of the Managed Object Instance or an error. * `instances` (dict): user-supplied dict for temporarily holding Managed Objects Instances being created. Notes ----- The callback functions (e.g. `cbFun`) have the same signature as this method where `varBind` contains the new Managed Object Instance value. In case of an error, the `error` key in the `context` dict will contain an exception object.
def move_camera(action, action_space, minimap):
    """Move the camera: write the minimap target into the action proto."""
    minimap.assign_to(spatial(action, action_space).camera_move.center_minimap)
Move the camera.
def check_ttl_max_tries(tries, enqueued_at, max_tries, ttl):
    """Check an item's retry budget and time-to-live.

    :param tries: number of attempts made so far
    :param enqueued_at: datetime at which the item was enqueued
    :param max_tries: maximum allowed tries; 0 disables the check
    :param ttl: time-to-live in seconds; 0 disables the check
    :raises FSQMaxTriesError: when tries >= max_tries (and max_tries > 0)
    :raises FSQTTLExpiredError: when the TTL check trips (and ttl > 0)
    """
    if max_tries > 0 and tries >= max_tries:
        # Typo fix in the user-facing message: 'exceded' -> 'exceeded'.
        raise FSQMaxTriesError(errno.EINTR, u'Max tries exceeded:'
                               u' {0} ({1})'.format(max_tries, tries))
    # NOTE(review): this raises while now is *before* enqueued_at + ttl,
    # which reads inverted for an expiry check -- confirm the intended
    # semantics upstream before changing the comparison direction.
    if ttl > 0 and datetime.datetime.now() < enqueued_at + datetime.timedelta(
            seconds=ttl):
        raise FSQTTLExpiredError(errno.EINTR, u'TTL Expired:'
                                 u' {0}'.format(ttl))
Check that the ttl for an item has not expired, and that the item has not exceeded its maximum allotted tries
def _build_toctree_node(parent=None, entries=None, includefiles=None, caption=None):
    """Factory for a Sphinx toctree node with fixed display defaults.

    :param parent: docname of the document containing the toctree
    :param entries: list of (title, docname) entries for the toctree
    :param includefiles: docnames pulled in by this toctree
    :param caption: optional caption for the toctree
    :return: a configured ``sphinx.addnodes.toctree`` node
    """
    subnode = sphinx.addnodes.toctree()
    subnode['parent'] = parent
    subnode['entries'] = entries
    subnode['includefiles'] = includefiles
    subnode['caption'] = caption
    subnode['maxdepth'] = 1
    # Bug fix: 'hidden' was assigned twice; set it exactly once.
    subnode['hidden'] = False
    subnode['glob'] = None
    subnode['includehidden'] = False
    subnode['numbered'] = 0
    subnode['titlesonly'] = False
    return subnode
Factory for a toctree node.
def create(graph, kmin=0, kmax=10, verbose=True):
    """Compute the K-core decomposition of ``graph``.

    Core ids outside [kmin, kmax] are clamped to those bounds.

    :param graph: SGraph on which to compute the decomposition
    :param kmin: minimum core id
    :param kmax: maximum core id
    :param verbose: if True, print progress updates
    :return: a KcoreModel wrapping the toolkit result
    :raises TypeError: if ``graph`` is not an SGraph
    """
    from turicreate._cython.cy_server import QuietProgress

    if not isinstance(graph, _SGraph):
        raise TypeError('graph input must be a SGraph object.')

    opts = {'graph': graph.__proxy__, 'kmin': kmin, 'kmax': kmax}
    # Suppress toolkit progress output unless verbose was requested.
    with QuietProgress(verbose):
        params = _tc.extensions._toolkits.graph.kcore.create(opts)

    return KcoreModel(params['model'])
Compute the K-core decomposition of the graph. Return a model object with total number of cores as well as the core id for each vertex in the graph. Parameters ---------- graph : SGraph The graph on which to compute the k-core decomposition. kmin : int, optional Minimum core id. Vertices having smaller core id than `kmin` will be assigned with core_id = `kmin`. kmax : int, optional Maximum core id. Vertices having larger core id than `kmax` will be assigned with core_id=`kmax`. verbose : bool, optional If True, print progress updates. Returns ------- out : KcoreModel References ---------- - Alvarez-Hamelin, J.I., et al. (2005) `K-Core Decomposition: A Tool for the Visualization of Large Networks <http://arxiv.org/abs/cs/0504107>`_. Examples -------- If given an :class:`~turicreate.SGraph` ``g``, we can create a :class:`~turicreate.kcore.KcoreModel` as follows: >>> g = turicreate.load_sgraph('http://snap.stanford.edu/data/email-Enron.txt.gz', format='snap') >>> kc = turicreate.kcore.create(g) We can obtain the ``core id`` corresponding to each vertex in the graph ``g`` using: >>> kcore_id = kc['core_id'] # SFrame We can add the new core id field to the original graph g using: >>> g.vertices['core_id'] = kc['graph'].vertices['core_id'] Note that the task above does not require a join because the vertex ordering is preserved through ``create()``. See Also -------- KcoreModel
def latex_name(self):
    """Return the name used for this class in LaTeX.

    Uses the explicit ``_latex_name`` when set, otherwise the lowercased
    class name; a trailing '*' is appended when ``_star_latex_name`` is
    true.
    """
    suffix = '*' if self._star_latex_name else ''
    base = self._latex_name
    if base is None:
        base = self.__class__.__name__.lower()
    return base + suffix
Return the name of the class used in LaTeX. It can be `None` when the class doesn't have a name.
def subscribe(self, *, create_task=True, **params):
    """Subscribe to the Docker events channel.

    Unless ``create_task=False`` is passed, a background task running
    ``self.run(**params)`` is spawned the first time this is called.

    :return: a ChannelSubscriber for the events channel
    """
    should_spawn = create_task and not self.task
    if should_spawn:
        self.task = asyncio.ensure_future(self.run(**params))
    return self.channel.subscribe()
Subscribes to the Docker events channel. Use the keyword argument create_task=False to prevent automatically spawning the background tasks that listen to the events. This function returns a ChannelSubscriber object.
def is_working_day(self, day, extra_working_days=None, extra_holidays=None):
    """Return True if ``day`` is a working day.

    In addition to the regular calendar, callers may force specific
    dates: ``extra_working_days`` marks dates as working days and
    ``extra_holidays`` marks dates as holidays.  The
    ``extra_working_days`` list has priority over ``extra_holidays``.
    """
    day = cleaned_date(day)
    # Normalize the exception lists only when provided.
    extras_work = (tuple(map(cleaned_date, extra_working_days))
                   if extra_working_days else extra_working_days)
    extras_hol = (tuple(map(cleaned_date, extra_holidays))
                  if extra_holidays else extra_holidays)
    # Forced working days win over everything else.
    if extras_work and day in extras_work:
        return True
    if day.weekday() in self.get_weekend_days():
        return False
    return not self.is_holiday(day, extra_holidays=extras_hol)
Return True if it's a working day. In addition to the regular holidays, you can add exceptions. By providing ``extra_working_days``, you'll state that these dates **are** working days. By providing ``extra_holidays``, you'll state that these dates **are** holidays, even if not in the regular calendar holidays (or weekends). Please note that the ``extra_working_days`` list has priority over the ``extra_holidays`` list.
def import_tf_tensor(self, x, tf_x):
    """Import a tf.Tensor, producing a LaidOutTensor.

    Args:
        x: a Tensor (supplies the shape used to compute the slices)
        tf_x: a tf.Tensor holding the values to slice

    Returns:
        a LaidOutTensor wrapping the slices of tf_x
    """
    return self.LaidOutTensor(self.make_slices(tf_x, x.shape))
Import a tf.Tensor, producing a LaidOutTensor. Args: x: a Tensor tf_x: a tf.Tensor Returns: a LaidOutTensor
def to_string(self, other):
    """String representation with additional TTL information.

    Renders the original TTL, the TTL remaining at the current moment,
    and ``other`` as the record's argument string, then delegates to the
    base DNSEntry formatter.
    """
    arg = "%s/%s,%s" % (
        self.ttl, self.get_remaining_ttl(current_time_millis()), other)
    return DNSEntry.to_string(self, "record", arg)
String representation with additional TTL information
def _terms(self): res = [] for sign, terms in self.terms.items(): for ID, lon in terms.items(): res.append(self.T(ID, sign)) return res
Returns a list with the objects as terms.
def merge_parts(self, version_id=None, **kwargs):
    """Merge the uploaded parts into a single object version.

    Finalizes the multipart file's checksum, creates an ObjectVersion
    pointing at the assembled file inside a nested DB transaction, and
    deletes this multipart record.

    :param version_id: optional version id for the created ObjectVersion
    :param kwargs: forwarded to ``update_checksum``
    :return: the newly created ObjectVersion
    """
    self.file.update_checksum(**kwargs)
    with db.session.begin_nested():
        obj = ObjectVersion.create(
            self.bucket,
            self.key,
            _file_id=self.file_id,
            version_id=version_id
        )
        # Remove the multipart record now that the object exists.
        self.delete()
    return obj
Merge parts into object version.
def parse_type_signature(sig):
    """Parse a type signature into its components.

    :param sig: the raw signature string
    :return: tuple ``(type name, list of generic type params, is_array)``
    :raises RuntimeError: if the signature does not match TYPE_SIG_RE
    """
    match = TYPE_SIG_RE.match(sig.strip())
    if not match:
        raise RuntimeError('Type signature invalid, got ' + sig)
    groups = match.groups()
    typ = groups[0]
    generic_types = groups[1]
    if not generic_types:
        generic_types = []
    else:
        # Strip the surrounding brackets before splitting the params.
        generic_types = split_sig(generic_types[1:-1])
    # Third group captures the array suffix, if present.
    is_array = (groups[2] is not None)
    return typ, generic_types, is_array
Parse a type signature
def vbd_list(name=None, call=None):
    """Get a list of VBDs on a VM.

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vbd_list xenvm01
    """
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Typo fix in the user-facing message: 'rquired' -> 'required'.
        return 'A name kwarg is required'
    ret = {}
    session = _get_session()
    vms = session.xenapi.VM.get_by_name_label(name)
    # Only proceed when the label resolves to exactly one VM.
    if len(vms) == 1:
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            data = {}
            # enumerate() replaces the manual 'x' counter.
            for x, vbd in enumerate(vbds):
                data['vbd-{}'.format(x)] = session.xenapi.VBD.get_record(vbd)
            ret = data
    return ret
Get a list of VBDs on a VM **requires**: the name of the vm with the vbd definition .. code-block:: bash salt-cloud -a vbd_list xenvm01
def apply_formatting_dict(obj: Any, formatting: Dict[str, Any]) -> Any:
    """Recursively apply a formatting dict to all strings in a configuration.

    Strings containing "$" (assumed to be latex) are left untouched,
    since the formatting fails on nested brackets.  Dicts and lists are
    processed recursively; numbers, None and enums pass through
    unchanged.

    Args:
        obj: Some configuration object to recursively apply the formatting to.
        formatting (dict): String formatting options to apply to each
            configuration field.

    Returns:
        dict: Configuration with formatting applied to every field.
    """
    if isinstance(obj, str):
        # Skip anything that looks like latex.
        if "$" in obj:
            return obj
        return string.Formatter().vformat(obj, (), formatting_dict(**formatting))
    if isinstance(obj, dict):
        return {key: apply_formatting_dict(val, formatting)
                for key, val in obj.items()}
    if isinstance(obj, list):
        return [apply_formatting_dict(entry, formatting) for entry in obj]
    if isinstance(obj, (int, float)) or obj is None:
        return obj
    if isinstance(obj, enum.Enum):
        return obj
    logger.debug(f"Unrecognized obj '{obj}' of type '{type(obj)}'")
    return obj
Recursively apply a formatting dict to all strings in a configuration. Note that it skips applying the formatting if the string appears to contain latex (specifically, if it contains an "$"), since the formatting fails on nested brackets. Args: obj: Some configuration object to recursively applying the formatting to. formatting (dict): String formatting options to apply to each configuration field. Returns: dict: Configuration with formatting applied to every field.
def add_head(self, head):
    """Add a head node to this node's list of heads.

    :param head: the DependencyNode to record as a head
    :raises TypeError: if ``head`` is not a DependencyNode
    """
    if not isinstance(head, DependencyNode):
        raise TypeError('"head" must be a DependencyNode')
    self._heads.append(head)
Add head Node
def add_method(info, target_cls, virtual=False, dont_replace=False):
    """Add a method to the target class.

    :param info: introspection info describing the method (has ``.name``)
    :param target_cls: class to attach the method attribute to
    :param virtual: if True, attach a virtual-method attribute named
        ``do_<name>`` instead of a plain method attribute
    :param dont_replace: if True, keep any existing attribute of the
        same name instead of overwriting it
    """
    name = escape_identifier(info.name)
    if virtual:
        name = "do_" + name
        attr = VirtualMethodAttribute(info, target_cls, name)
    else:
        attr = MethodAttribute(info, target_cls, name)
    if dont_replace and hasattr(target_cls, name):
        return
    setattr(target_cls, name, attr)
Add a method to the target class
def set_(filename, section, parameter, value):
    """Set a value in an OpenStack configuration file via openstack-config.

    :param filename: full path to the configuration file
    :param section: section in which the parameter will be set
    :param parameter: the parameter to change
    :param value: the value to set
    :return: stdout of the openstack-config command on success
    :raises salt.exceptions.CommandExecutionError: on non-zero exit

    CLI Example:

    .. code-block:: bash

        salt-call openstack_config.set /etc/keystone/keystone.conf sql connection foo
    """
    # Shell-quote each argument before interpolating it into the command.
    filename = _quote(filename)
    section = _quote(section)
    parameter = _quote(parameter)
    value = _quote(six.text_type(value))

    result = __salt__['cmd.run_all'](
        'openstack-config --set {0} {1} {2} {3}'.format(
            filename, section, parameter, value
        ),
        python_shell=False,
    )

    if result['retcode'] == 0:
        return result['stdout']
    else:
        raise salt.exceptions.CommandExecutionError(result['stderr'])
Set a value in an OpenStack configuration file. filename The full path to the configuration file section The section in which the parameter will be set parameter The parameter to change value The value to set CLI Example: .. code-block:: bash salt-call openstack_config.set /etc/keystone/keystone.conf sql connection foo