code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def get_density(self):
    """Compute the mass density from the output structure.

    Returns:
        Property: a single scalar density, in g/(cm^3).
    """
    strc = self.get_output_structure()
    # 1.660539040 converts amu/angstrom^3 to g/cm^3 (1 u = 1.660539040e-24 g)
    # -- assumes masses are in amu and volume in A^3; TODO confirm.
    density = sum(strc.get_masses()) / strc.get_volume() * 1.660539040
    return Property(scalars=[Scalar(value=density)], units="g/(cm^3)")
def _get_end_index(self):
    """Return the index where both the source and target windows end."""
    source_end = self.index + self.source_window
    target_end = self._get_target_index() + self.target_window
    return max(source_end, target_end)
def config_settings_loader(request=None):
    """Load the pysaml2 configuration from Django settings.

    This is also the default config loader.

    :param request: current HTTP request; unused, kept for the pysaml2
        config-loader call signature
    :return: a loaded ``SPConfig`` instance
    """
    conf = SPConfig()
    # Deep-copy so pysaml2 cannot mutate the global Django settings dict.
    conf.load(copy.deepcopy(settings.SAML_CONFIG))
    return conf
def Pop(self, key):
    """Remove the object stored under `key` from the cache completely.

    Returns the cached data, or None when the key is absent.
    """
    node = self._hash.get(key)
    if not node:
        return None
    del self._hash[key]
    # Detach from the age (LRU) list as well so it cannot be revived.
    self._age.Unlink(node)
    return node.data
def get_delegates(self):
    """Get all pledged delegates for any candidate in this election.

    Returns:
        A combined queryset of delegates across every CandidateElection
        belonging to this election, or None when there are no
        candidate elections.
    """
    candidate_elections = CandidateElection.objects.filter(election=self)

    delegates = None
    for ce in candidate_elections:
        # Bug fix: the original did `delegates | ce.delegates.all()` with
        # delegates still None on the first iteration, raising TypeError.
        if delegates is None:
            delegates = ce.delegates.all()
        else:
            # `|` keeps this a single lazy Django queryset union.
            delegates = delegates | ce.delegates.all()

    return delegates
def loop(self):
    """Run the transformation loop until stopped.

    Repeatedly calls step() while should_loop is set; exits early when
    the readable side becomes inactive.
    """
    logger.debug("Node loop starts for {!r}.".format(self))
    while self.should_loop:
        try:
            self.step()
        except InactiveReadableError:
            # Upstream is closed and drained - nothing more to process.
            break
    logger.debug("Node loop ends for {!r}.".format(self))
def get_existing_keys(self, events):
    """Return the set of key values from `events` already present in the DB.

    :param events: iterable of dict-like events containing self.key
    :return: set of key values already stored in self.table
    """
    data = [e[self.key] for e in events]
    if not data:
        # Bug fix: an empty batch used to produce invalid SQL ("IN ()").
        return set()
    placeholders = ','.join(['%s'] * len(data))
    # Identifiers (table/column) cannot be bound parameters; they come
    # from trusted configuration. The values themselves are bound.
    query = 'SELECT %s FROM %s WHERE %s IN (%s)' % (
        self.key, self.table, self.key, placeholders)
    cursor = self.conn.conn.cursor()
    cursor.execute(query, data)
    LOG.info("%s (data: %s)", query, data)
    existing = [row[0] for row in cursor.fetchall()]
    # Lazy logging args instead of eager %-formatting.
    LOG.info("Existing IDs: %s", existing)
    return set(existing)
def addreadergroup(self, newgroup):
    """Add a reader group.

    Establishes a temporary PC/SC context, introduces `newgroup` to the
    resource manager, then records it locally; the context is always
    released, even on failure.

    :param newgroup: name of the reader group to add
    :raises error: when the context cannot be established/released or
        the group cannot be introduced
    """
    hresult, hcontext = SCardEstablishContext(SCARD_SCOPE_USER)
    if 0 != hresult:
        raise error(
            'Failed to establish context: ' + \
            SCardGetErrorMessage(hresult))
    try:
        hresult = SCardIntroduceReaderGroup(hcontext, newgroup)
        if 0 != hresult:
            raise error(
                'Unable to introduce reader group: ' + \
                SCardGetErrorMessage(hresult))
        else:
            # Mirror the change in the in-process group list too.
            innerreadergroups.addreadergroup(self, newgroup)
    finally:
        hresult = SCardReleaseContext(hcontext)
        if 0 != hresult:
            raise error(
                'Failed to release context: ' + \
                SCardGetErrorMessage(hresult))
def content(self):
    """Lazily return the content of this FileNode.

    Content containing a NUL byte is treated as binary and returned
    as-is; otherwise it is decoded to unicode (UTF-8 when possible).
    """
    raw = self._get_content()
    if raw and '\0' in raw:
        # Looks binary - do not attempt to decode.
        return raw
    return safe_unicode(raw)
def prior_from_config(cp, variable_params, prior_section, constraint_section):
    """Build the prior distribution from a config file.

    Parameters
    ----------
    cp : WorkflowConfigParser
        Config file parser to read.
    variable_params : list
        List of model parameter names.
    prior_section : str
        Section to read prior(s) from.
    constraint_section : str
        Section to read constraint(s) from.

    Returns
    -------
    pycbc.distributions.JointDistribution
        The prior.
    """
    logging.info("Setting up priors for each parameter")
    dists = distributions.read_distributions_from_config(cp, prior_section)
    constraints = distributions.read_constraints_from_config(
        cp, constraint_section)
    return distributions.JointDistribution(variable_params, *dists,
                                           constraints=constraints)
def prefetch_docker_image_on_private_agents(
        image,
        timeout=timedelta(minutes=5).total_seconds()):
    """Prefetch a docker image onto every private agent.

    Deploys a trivial app using `image` with one instance per private
    agent so the image gets pulled on all nodes, then removes the app.

    :param image: docker image name
    :type image: str
    :param timeout: timeout for deployment wait in secs (default: 5m)
    :type timeout: int
    """
    agents = len(shakedown.get_private_agents())
    app = {
        "id": "/prefetch",
        "instances": agents,
        "container": {
            "type": "DOCKER",
            "docker": {"image": image}
        },
        "cpus": 0.1,
        "mem": 128
    }
    client = marathon.create_client()
    client.add_app(app)
    shakedown.deployment_wait(timeout)
    # Clean up: the app only exists to force the image pull.
    shakedown.delete_all_apps()
    shakedown.deployment_wait(timeout)
def select(self, *column_or_columns):
    """Return a new table with only the columns in ``column_or_columns``.

    Args:
        ``column_or_columns``: Columns to select from the ``Table`` as
            either column labels (``str``) or column indices (``int``).

    Returns:
        A new instance of ``Table`` containing only selected columns,
        in the order given in ``column_or_columns``. The original table
        is unchanged.

    Raises:
        ``KeyError`` if any of ``column_or_columns`` are not in the
        table.
    """
    labels = self._varargs_as_labels(column_or_columns)
    table = type(self)()
    for label in labels:
        # np.copy so mutations on the new table never alias the original.
        self._add_column_and_format(table, label, np.copy(self[label]))
    return table
def serialize(self, value, entity=None, request=None):
    """Validate and serialize `value` (default implementation).

    :param value: the python value to serialize
    :param entity: unused here; part of the serializer interface
    :param request: unused here; part of the serializer interface
    :return: the serialized representation
    """
    ret = self.from_python(value)
    self.validate(ret)
    # NOTE(review): validate() sees the converted `ret` while the
    # validators run on the raw `value` - confirm this asymmetry is
    # intentional.
    self.run_validators(value)
    return ret
def _async_open(self, session_id, proto_version):
    """Perform the steps needed to open a connection to a Bokeh session.

    Coordinates getting (or creating) the session for ``session_id``,
    creating a protocol receiver and handler, opening a new
    ServerConnection, and sending the client an ACK.

    Args:
        session_id (str): session ID to connect to (created if missing)
        proto_version (str): protocol version requested by the client

    Returns:
        None
    """
    try:
        yield self.application_context.create_session_if_needed(session_id, self.request)
        session = self.application_context.get_session(session_id)
        protocol = Protocol(proto_version)
        self.receiver = Receiver(protocol)
        log.debug("Receiver created for %r", protocol)
        self.handler = ProtocolHandler()
        log.debug("ProtocolHandler created for %r", protocol)
        self.connection = self.application.new_connection(protocol, self, self.application_context, session)
        log.info("ServerConnection created")
    except ProtocolError as e:
        log.error("Could not create new server session, reason: %s", e)
        self.close()
        raise e
    # Acknowledge the connection only after everything is wired up.
    msg = self.connection.protocol.create('ACK')
    yield self.send_message(msg)
    # Tornado-style coroutine return.
    raise gen.Return(None)
def isDocumentCollection(cls, name):
    """Return True if `name` is the name of a document collection."""
    try:
        collection_cls = cls.getCollectionClass(name)
        return issubclass(collection_cls, Collection)
    except KeyError:
        # Unknown collection name.
        return False
def process_request(self, request, client_address):
    """Start a new thread to process `request`.

    The thread name embeds the listening port and the client address so
    concurrent shells are easy to tell apart in thread listings.
    """
    thread = threading.Thread(
        name="RemoteShell-{0}-Client-{1}".format(
            self.server_address[1], client_address[:2]
        ),
        target=self.process_request_thread,
        args=(request, client_address),
    )
    # Inherit the server's daemon-thread policy.
    thread.daemon = self.daemon_threads
    thread.start()
def format_for_columns(pkgs, options):
    """Convert package data into rows for columnar output.

    :param pkgs: installed distributions to list
    :param options: parsed CLI options (uses .outdated and .verbose)
    :return: tuple (data, header) usable by
        output_package_listing_columns
    """
    running_outdated = options.outdated
    if running_outdated:
        header = ["Package", "Version", "Latest", "Type"]
    else:
        header = ["Package", "Version"]

    data = []
    # Location column appears when verbose, or when any editable install
    # is present (location is what identifies an editable install).
    if options.verbose >= 1 or any(dist_is_editable(x) for x in pkgs):
        header.append("Location")
    if options.verbose >= 1:
        header.append("Installer")

    for proj in pkgs:
        row = [proj.project_name, proj.version]

        if running_outdated:
            row.append(proj.latest_version)
            row.append(proj.latest_filetype)

        if options.verbose >= 1 or dist_is_editable(proj):
            row.append(proj.location)
        if options.verbose >= 1:
            row.append(get_installer(proj))

        data.append(row)

    return data, header
def index():
    """Render the dashboard page.

    Resets every counter in the global current_index before rendering.

    :return: rendered crystal_dashboard.html template (handled by flask)
    """
    for key in current_index:
        current_index[key] = 0
    logging.info("Dashboard refreshed")
    return render_template("crystal_dashboard.html")
def time_delta(self, end_datetime=None):
    """Get a random timedelta between now and `end_datetime`.

    :param end_datetime: bound for the random span, parsed by
        _parse_end_datetime; may be before now
    :return: datetime.timedelta of a random whole number of seconds
    """
    start_datetime = self._parse_start_datetime('now')
    end_datetime = self._parse_end_datetime(end_datetime)
    seconds = end_datetime - start_datetime
    # sorted() keeps randint's (low, high) ordering valid even when the
    # end is before the start (negative span).
    ts = self.generator.random.randint(*sorted([0, seconds]))
    return timedelta(seconds=ts)
def remove_router_interface(self, context, router_id, interface_info):
    """Remove a subnet of a network from an existing router.

    Removes the interface via the parent plugin first, then mirrors the
    removal on the Arista hardware driver.

    :return: the deleted-interface info dict from the parent plugin, or
        None when the hardware update failed (the error is logged).
    """
    router_to_del = (
        super(AristaL3ServicePlugin, self).remove_router_interface(
            context, router_id, interface_info)
    )
    core = directory.get_plugin()

    # Get network information of the subnet that is being removed.
    subnet = core.get_subnet(context, router_to_del['subnet_id'])
    network_id = subnet['network_id']

    # The hardware driver needs the segmentation id of the network.
    ml2_db = NetworkContext(self, context, {'id': network_id})
    seg_id = ml2_db.network_segments[0]['segmentation_id']

    router = self.get_router(context, router_id)
    router_info = copy.deepcopy(router_to_del)
    router_info['seg_id'] = seg_id
    router_info['name'] = router['name']

    try:
        self.driver.remove_router_interface(context, router_info)
        return router_to_del
    except Exception as exc:
        # Bug fix: the original format string read
        # "...on Arista HW""Exception =(exc)s", which neither separated
        # the sentences nor interpolated the exception.
        LOG.error(_LE("Error removing interface %(interface)s from "
                      "router %(router_id)s on Arista HW. "
                      "Exception: %(exc)s"),
                  {'interface': interface_info,
                   'router_id': router_id,
                   'exc': exc})
def associate_health_monitor(self, pool, body):
    """Associate the given load balancer health monitor with a pool."""
    path = self.associate_pool_health_monitors_path % pool
    return self.post(path, body=body)
def run_send_nologin(*args):
    """Send a reminder email to every user who has not logged in recently.

    :param args: unused; present for the task-runner call signature
    """
    for user_rec in MUser.query_nologin():
        email_add = user_rec.user_email
        print(email_add)
        send_mail([email_add],
                  "{0}|{1}".format(SMTP_CFG['name'], email_cfg['title']),
                  email_cfg['content'])
        # Record the send time so the user is not re-mailed immediately.
        MUser.set_sendemail_time(user_rec.uid)
def add(self, item, safe=None):
    """Add an item to the queue of things to be inserted.

    Does not flush unless the session is in autoflush mode.

    :param item: document to save
    :param safe: per-call write concern; falls back to the session's
        default when None
    """
    item._set_session(self)
    if safe is None:
        safe = self.safe
    self.queue.append(SaveOp(self.transaction_id, self, item, safe))
    # Keep the read cache consistent with the pending write.
    self.cache_write(item)
    if self.autoflush:
        return self.flush()
def flatMap(self, f, preservesPartitioning=False):
    """Apply function `f` to every element and flatten the results.

    :param f: mapping function returning an iterable per element
    :rtype: DStream
    """
    def flatten_partition(partition):
        # Lazily expand each element's mapped iterable.
        for element in partition:
            for mapped in f(element):
                yield mapped

    return self.mapPartitions(flatten_partition, preservesPartitioning)
def clear(self):
    """Reset the chessboard to an empty state and restart the round count."""
    size = self.board_size
    # Build two independent size x size grids of zeros.
    self.pos = [[0] * size for _ in range(size)]
    self.graph = [[0] * size for _ in range(size)]
    self._game_round = 1
def do_mute(self, sender, body, args):
    """Temporarily mute the chatroom for `sender`.

    Messages for a muted user are queued instead of delivered.
    """
    if sender.get('MUTED'):
        self.send_message('you are already muted', sender)
        return
    self.broadcast('%s has muted this chatroom' % (sender['NICK'],))
    sender['QUEUED_MESSAGES'] = []
    sender['MUTED'] = True
def requirement_spec(package_name, *args):
    """Build the identifier used when specifying a requirement to pip
    or setuptools.

    Each extra argument is either a bare version string (implying ==)
    or an (operator, version) pair.
    """
    if not args or args == (None,):
        return package_name

    specs = []
    for spec in args:
        if isinstance(spec, (list, tuple)):
            op, version = spec
            specs.append("%s%s" % (op, version))
        else:
            assert isinstance(spec, str)
            specs.append("==%s" % spec)
    return "%s%s" % (package_name, ",".join(specs))
def humanize_timedelta(td):
    """Pretty-print a timedelta as '2h 5m', '12m', or '42s'."""
    total = int(td.total_seconds())
    hours = total // 3600
    mins = (total % 3600) // 60
    secs = total % 60
    if hours:
        return '%dh %dm' % (hours, mins)
    return '%dm' % mins if mins else '%ds' % secs
def add_group_members(self, members):
    """Add one or more members to this group's member list.

    :param members: a member name or a list of member names
    :type members: str or list
    :return: None
    """
    if not isinstance(members, list):
        members = [members]
    existing = getattr(self, 'group_members', None)
    if existing:
        existing.extend(members)
    else:
        # No (non-empty) list yet: adopt the new list directly.
        self.group_members = members
def _insert_uncompressed(collection_name, docs, check_keys,
                         safe, last_error_args, continue_on_error, opts):
    """Internal insert message helper (uncompressed wire format).

    Builds an OP_INSERT (opcode 2002) message and, when `safe`, appends
    a getLastError query so the server acknowledges the write.

    :return: (request_id, message_bytes, max_bson_size)
    """
    op_insert, max_bson_size = _insert(
        collection_name, docs, check_keys, continue_on_error, opts)
    rid, msg = __pack_message(2002, op_insert)
    if safe:
        # The getLastError request id replaces the insert's, because
        # that is the reply the driver will wait on.
        rid, gle, _ = __last_error(collection_name, last_error_args)
        return rid, msg + gle, max_bson_size
    return rid, msg, max_bson_size
def RGB_to_HSV(cobj, *args, **kwargs):
    """Convert an RGB color object to HSV.

    H values are in degrees, 0 to 360. S and V values are fractions,
    0.0 to 1.0.

    :param cobj: RGB color object (rgb_r/rgb_g/rgb_b)
    :return: HSVColor
    """
    var_R = cobj.rgb_r
    var_G = cobj.rgb_g
    var_B = cobj.rgb_b

    var_max = max(var_R, var_G, var_B)
    var_min = min(var_R, var_G, var_B)

    var_H = __RGB_to_Hue(var_R, var_G, var_B, var_min, var_max)

    if var_max == 0:
        # Pure black: saturation is undefined; use 0 to avoid div-by-zero.
        var_S = 0
    else:
        var_S = 1.0 - (var_min / var_max)

    var_V = var_max

    return HSVColor(
        var_H, var_S, var_V)
def create_subgroup_global(self, id, title, description=None, vendor_guid=None):
    """Create a new empty subgroup under a global outcome group.

    :param id: ID of the parent outcome group (path parameter)
    :param title: title for the new subgroup
    :param description: optional description
    :param vendor_guid: optional vendor GUID
    :return: the created subgroup (single item)
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - id
    path["id"] = id

    # REQUIRED - form - title
    data["title"] = title

    # OPTIONAL - form - description
    if description is not None:
        data["description"] = description

    # OPTIONAL - form - vendor_guid
    if vendor_guid is not None:
        data["vendor_guid"] = vendor_guid

    self.logger.debug("POST /api/v1/global/outcome_groups/{id}/subgroups with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/global/outcome_groups/{id}/subgroups".format(**path), data=data, params=params, single_item=True)
def rfft2d_freqs(h, w):
    """Compute the 2D spectrum frequency magnitudes for an h-by-w image.

    Returns an array shaped (h, w // 2 + 2) for odd w and
    (h, w // 2 + 1) for even w, matching the rfft2d output layout used
    elsewhere.
    """
    fy = np.fft.fftfreq(h)[:, None]
    # Odd widths get one extra column so shapes line up with rfft output.
    cols = w // 2 + 2 if w % 2 == 1 else w // 2 + 1
    fx = np.fft.fftfreq(w)[:cols]
    return np.sqrt(fx * fx + fy * fy)
def purge_archives(self):
    """Delete archived versions beyond the newest NUM_KEEP_ARCHIVED.

    Use the class attribute NUM_KEEP_ARCHIVED to control how many
    archived items are kept per object.
    """
    klass = self.get_version_class()
    # Newest first; everything past the keep-count gets deleted.
    qs = klass.normal.filter(object_id=self.object_id, state=self.ARCHIVED).order_by('-last_save')[self.NUM_KEEP_ARCHIVED:]
    for obj in qs:
        # Clean up reverse relations before removing the version row.
        obj._delete_reverses()
        klass.normal.filter(vid=obj.vid).delete()
def length_limits(max_length_limit, length_limit_step):
    """Generate zero-padded length-limit strings.

    E.g. (100, 25) -> ['025', '050', '075', '100'].

    :param max_length_limit: largest limit to generate
    :param length_limit_step: spacing between successive limits
    :return: list of limit strings zero-padded to the width of
        max_length_limit
    """
    width = len(str(max_length_limit))
    # Bug fix: `range` instead of the Python 2-only `xrange`, so this
    # runs on Python 3 (same values either way).
    return [
        str(value).zfill(width)
        for value in range(
            length_limit_step,
            max_length_limit + length_limit_step - 1,
            length_limit_step
        )
    ]
def schema(self):
    """Return the DQL fragment for constructing this global index."""
    if self.status == "DELETING":
        # An index being deleted has no constructable schema.
        return ""
    pieces = ["GLOBAL", self.index_type, "INDEX",
              "('%s', %s," % (self.name, self.hash_key.name)]
    if self.range_key:
        pieces.append("%s," % self.range_key.name)
    if self.includes:
        include_list = ", ".join("'%s'" % name for name in self.includes)
        pieces.append("[%s]," % include_list)
    pieces.append(
        "THROUGHPUT (%d, %d))" % (self.read_throughput, self.write_throughput)
    )
    return " ".join(pieces)
def cmd_dhcp_discover(iface, timeout, verbose):
    """Send a DHCP discover and print which devices replied.

    With `verbose`, the full packets (including options such as DNS
    servers) are shown instead of one-line summaries.

    :param iface: network interface to send from (scapy default if falsy)
    :param timeout: seconds to wait for answers
    :param verbose: show full packet dumps instead of summaries
    """
    conf.verb = False

    if iface:
        conf.iface = iface
    # DHCP replies are addressed to the offered IP, not ours yet.
    conf.checkIPaddr = False

    hw = get_if_raw_hwaddr(conf.iface)

    ether = Ether(dst="ff:ff:ff:ff:ff:ff")
    ip = IP(src="0.0.0.0", dst="255.255.255.255")
    udp = UDP(sport=68, dport=67)
    bootp = BOOTP(chaddr=hw)
    dhcp = DHCP(options=[("message-type", "discover"), "end"])

    dhcp_discover = ether / ip / udp / bootp / dhcp

    # Bug fix: honor the caller's timeout (it was hard-coded to 5).
    ans, unans = srp(dhcp_discover, multi=True, timeout=timeout)

    for _, pkt in ans:
        if verbose:
            print(pkt.show())
        else:
            print(pkt.summary())
def latent_prediction_model(inputs,
                            ed_attention_bias,
                            latents_discrete,
                            latents_dense,
                            hparams,
                            vocab_size=None,
                            name=None):
    """Transformer-based latent prediction model.

    An autoregressive decoder over latents_discrete given inputs.

    Args:
        inputs: Tensor of shape [batch, length_kv, hparams.hidden_size].
            Inputs to attend to for the decoder on latents.
        ed_attention_bias: Tensor broadcasting with
            [batch, hparams.num_heads, length_q, length_kv];
            encoder-decoder attention bias.
        latents_discrete: Tensor of shape [batch, length_q, vocab_size].
            One-hot latents to compute log-probability of given inputs.
        latents_dense: Tensor of shape [batch, length_q,
            hparams.hidden_size].
        hparams: HParams.
        vocab_size: int or None. If None, it is 2**hparams.bottleneck_bits.
        name: string, variable scope.

    Returns:
        latents_pred: Tensor of shape [batch, length_q, hparams.hidden_size].
        latent_pred_loss: Tensor of shape [batch, length_q].
    """
    with tf.variable_scope(name, default_name="latent_prediction"):
        # NOTE(review): latents_pred / latent_pred_loss are only assigned
        # outside PREDICT mode; calling this in PREDICT mode would raise
        # UnboundLocalError at the return - confirm callers never do.
        if hparams.mode != tf.estimator.ModeKeys.PREDICT:
            # stop_gradient: the predictor must not shape the latents.
            latents_pred = transformer_latent_decoder(tf.stop_gradient(latents_dense),
                                                      inputs, ed_attention_bias,
                                                      hparams, name)
            if vocab_size is None:
                vocab_size = 2**hparams.bottleneck_bits
            if not hparams.soft_em:
                # Soft EM already yields dense targets; otherwise one-hot.
                latents_discrete = tf.one_hot(latents_discrete, depth=vocab_size)
            _, latent_pred_loss = ae_latent_softmax(
                latents_pred, tf.stop_gradient(latents_discrete), vocab_size, hparams)
    return latents_pred, latent_pred_loss
def context_id(ctx: Context_T, *, mode: str = 'default', use_hash: bool = False) -> str:
    """Calculate a unique id representing the given context.

    mode 'default': one id per (group/discuss, user) pair;
    mode 'group': one id per group or discuss (falling back to user);
    mode 'user': one id per user.

    :param ctx: the context dict
    :param mode: unique id mode: 'default', 'group', or 'user'
    :param use_hash: return the md5 hex digest of the id instead
    """
    parts = []
    group = ctx.get('group_id')
    discuss = ctx.get('discuss_id')
    user = ctx.get('user_id')

    if mode == 'default':
        if group:
            parts.append(f'/group/{group}')
        elif discuss:
            parts.append(f'/discuss/{discuss}')
        if user:
            parts.append(f'/user/{user}')
    elif mode == 'group':
        if group:
            parts.append(f'/group/{group}')
        elif discuss:
            parts.append(f'/discuss/{discuss}')
        elif user:
            parts.append(f'/user/{user}')
    elif mode == 'user':
        if user:
            parts.append(f'/user/{user}')

    ctx_id = ''.join(parts)
    if ctx_id and use_hash:
        # The id only contains digits and slashes, so ascii is safe.
        ctx_id = hashlib.md5(ctx_id.encode('ascii')).hexdigest()
    return ctx_id
def get_names(self):
    """Return the do_-prefixed command names for autocompletion."""
    names = []
    # Shell commands first, then forth commands, matching readline order.
    for category in ('shell', 'forth'):
        names.extend('do_' + cmd for cmd in self.commands[category])
    return names
def _use_last_dir_name(self, path, prefix=''):
    """Return the name of the last matching dir in `path`, or ''.

    Parameters
    ----------
    path: str
        Directory whose subdirectories are searched.
    prefix: str
        Only consider subdirectories starting with this prefix.
    """
    for dir_name in reversed(os.listdir(path)):
        candidate = os.path.join(path, dir_name)
        if os.path.isdir(candidate) and dir_name.startswith(prefix):
            return dir_name
    return ''
def check_element_by_selector(self, selector):
    """Assert that at least one element matches the given jQuery selector.

    :param selector: jQuery selector string
    :raises AssertionError: when no elements match
    """
    elems = find_elements_by_jquery(world.browser, selector)
    if not elems:
        raise AssertionError("Expected matching elements, none found.")
def _write_critic_model_stats(self, iteration:int)->None:
    "Writes gradient statistics for critic to Tensorboard."
    critic = self.learn.gan_trainer.critic
    self.stats_writer.write(model=critic, iteration=iteration,
                            tbwriter=self.tbwriter, name='crit_model_stats')
    # Flag read elsewhere to know critic stats are fresh this iteration.
    self.crit_stats_updated = True
def to_packets(pages, strict=False):
    """Construct a list of packet data from a list of Ogg pages.

    If strict is true, the first page must start a new packet, and the
    last page must end the last packet.
    """
    serial = pages[0].serial
    sequence = pages[0].sequence
    packets = []

    if strict:
        if pages[0].continued:
            raise ValueError("first packet is continued")
        if not pages[-1].complete:
            raise ValueError("last packet does not complete")
    elif pages and pages[0].continued:
        # Non-strict: synthesize an empty packet head so the continued
        # data has something to attach to.
        packets.append([b""])

    for page in pages:
        if serial != page.serial:
            raise ValueError("invalid serial number in %r" % page)
        elif sequence != page.sequence:
            raise ValueError("bad sequence number in %r" % page)
        else:
            sequence += 1
        if page.continued:
            # First segment continues the previous page's last packet.
            packets[-1].append(page.packets[0])
        else:
            packets.append([page.packets[0]])
        # Remaining segments each start a packet of their own.
        packets.extend([p] for p in page.packets[1:])

    return [b"".join(p) for p in packets]
def registerJavaFunction(self, name, javaClassName, returnType=None):
    """Register a Java user-defined function as a SQL function.

    When the return type is not specified it is inferred via
    reflection.

    :param name: name of the user-defined function
    :param javaClassName: fully qualified name of the Java class
    :param returnType: optional return type of the registered Java
        function; either a :class:`pyspark.sql.types.DataType` object
        or a DDL-formatted type string

    >>> spark.udf.registerJavaFunction(
    ...     "javaStringLength", "test.org.apache.spark.sql.JavaStringLength", IntegerType())
    >>> spark.sql("SELECT javaStringLength('test')").collect()
    [Row(UDF:javaStringLength(test)=4)]
    """
    jdt = None
    if returnType is not None:
        if not isinstance(returnType, DataType):
            # Accept DDL strings like "integer" as a convenience.
            returnType = _parse_datatype_string(returnType)
        jdt = self.sparkSession._jsparkSession.parseDataType(returnType.json())
    self.sparkSession._jsparkSession.udf().registerJava(name, javaClassName, jdt)
async def on_capability_sasl_enabled(self):
    """Start SASL authentication.

    Chooses a mechanism (honoring self.sasl_mechanism when it is set
    and advertised by the server) and begins the SASL exchange.

    :return: a cap.* negotiation status constant
    """
    if self.sasl_mechanism:
        if self._sasl_mechanisms and self.sasl_mechanism not in self._sasl_mechanisms:
            self.logger.warning('Requested SASL mechanism is not in server mechanism list: aborting SASL authentication.')
            # Bug fix: was `cap.failed`; the constant used elsewhere in
            # this function is `cap.FAILED`, so the lowercase attribute
            # would have raised AttributeError here.
            return cap.FAILED
        mechanisms = [self.sasl_mechanism]
    else:
        mechanisms = self._sasl_mechanisms or ['PLAIN']

    if mechanisms == ['EXTERNAL']:
        # EXTERNAL authenticates via the transport (e.g. client cert);
        # no credential object is needed.
        mechanism = 'EXTERNAL'
    else:
        self._sasl_client = puresasl.client.SASLClient(self.connection.hostname, 'irc',
            username=self.sasl_username,
            password=self.sasl_password,
            identity=self.sasl_identity
        )

        try:
            self._sasl_client.choose_mechanism(mechanisms, allow_anonymous=False)
        except puresasl.SASLError:
            self.logger.exception('SASL mechanism choice failed: aborting SASL authentication.')
            return cap.FAILED
        mechanism = self._sasl_client.mechanism.upper()

    await self._sasl_start(mechanism)
    return cap.NEGOTIATING
def enclosing_frame(frame=None, level=2):
    """Get an enclosing frame that skips frames from this module.

    Walks outward from `frame` (or the caller at `level`) until the
    frame's module is no longer this one - useful for decorator code
    that wants its caller's context.
    """
    current = frame or sys._getframe(level)
    while current.f_globals.get('__name__') == __name__:
        current = current.f_back
    return current
def extend(self):
    """Extend the OAuth token via Facebook's fb_exchange_token flow.

    Updates `token` and `expires_at` in place and saves the model.
    """
    graph = GraphAPI()

    response = graph.get('oauth/access_token',
        client_id = FACEBOOK_APPLICATION_ID,
        client_secret = FACEBOOK_APPLICATION_SECRET_KEY,
        grant_type = 'fb_exchange_token',
        fb_exchange_token = self.token
    )

    # The response is a URL-encoded query string, not JSON.
    components = parse_qs(response)

    self.token = components['access_token'][0]
    self.expires_at = now() + timedelta(seconds = int(components['expires'][0]))

    self.save()
def external_system_identifiers(endpoint):
    """Build a rule populating the ``external_system_identifiers`` key.

    Also populates the ``new_record`` key through side effects when the
    source field carries a redirect record id.

    :param endpoint: endpoint used to build the ``new_record`` reference
    """
    @utils.flatten
    @utils.for_each_value
    def _external_system_identifiers(self, key, value):
        new_recid = maybe_int(value.get('d'))
        if new_recid:
            # Side effect: record where this entry was redirected to.
            self['new_record'] = get_record_ref(new_recid, endpoint)

        return [
            {
                'schema': 'SPIRES',
                'value': ext_sys_id,
            } for ext_sys_id in force_list(value.get('a'))
        ]

    return _external_system_identifiers
def convert_sbml_model(model):
    """Convert raw SBML model to extended model, mutating it in place.

    Translates reaction flux bounds into model limits, detects the
    biomass reaction from objective coefficients, normalizes model
    entries, detects the extracellular compartment, and converts
    exchange reactions to exchange compounds.

    Args:
        model: :class:`NativeModel` obtained from :class:`SBMLReader`.
    """
    biomass_reactions = set()
    for reaction in model.reactions:
        # Extract limits from flux bounds, unless already set.
        if reaction.id not in model.limits:
            lower, upper = parse_flux_bounds(reaction)
            if lower is not None or upper is not None:
                model.limits[reaction.id] = reaction.id, lower, upper

        # Any reaction with a non-zero objective is a biomass candidate.
        objective = parse_objective_coefficient(reaction)
        if objective is not None and objective != 0:
            biomass_reactions.add(reaction.id)

    # Only pick a biomass reaction when it is unambiguous.
    if len(biomass_reactions) == 1:
        model.biomass_reaction = next(iter(biomass_reactions))

    convert_model_entries(model)

    if model.extracellular_compartment is None:
        extracellular = detect_extracellular_compartment(model)
        model.extracellular_compartment = extracellular

    convert_exchange_to_compounds(model)
def add_resources(self, resources):
    """Add new resources to the event.

    :param resources: list of email addresses or
        :class:`ExchangeEventAttendee` objects
    """
    self._resources.update(self._build_resource_dictionary(resources))
    # Mark the field dirty so the change is sent on save.
    self._dirty_attributes.add(u'resources')
def rescue(f, on_success, on_error=reraise, on_complete=nop):
    """Functional try-except-finally.

    :param function f: guarded function
    :param function on_success: called with f's result when it succeeds
    :param function on_error: called with the exception when f failed
    :param function on_complete: always called, like a finally block
    :returns function: wrapper whose call signature equals f's
    """
    def guarded(*args, **kwargs):
        try:
            # Note: exceptions raised by on_success are also routed to
            # on_error (the whole expression sits inside the try).
            return on_success(f(*args, **kwargs))
        except Exception as exc:
            return on_error(exc)
        finally:
            on_complete()
    return guarded
def call_next(self):
    """Schedule the next hop in the loop, then run the task."""
    if self.handle is not None:
        # Never leave two pending callbacks in flight.
        self.handle.cancel()
    next_time = self.get_next()
    self.handle = self.loop.call_at(next_time, self.call_next)
    self.call_func()
def initialize(self, name, reuse=False):
    """Create an empty Sorting Hat registry.

    Creates a new database including the Sorting Hat schema. Creating a
    registry over an existing instance is an error, unless reuse=True;
    in that case the database is reused, assuming its schema is already
    correct (it won't be created again).

    :param name: name of the database
    :param reuse: reuse database if it already exists
    :return: CMD_SUCCESS or an error code
    """
    user = self._kwargs['user']
    password = self._kwargs['password']
    host = self._kwargs['host']
    port = self._kwargs['port']

    if '-' in name:
        # NOTE(review): "dabase" typo in the user-facing message kept
        # byte-identical here; fix separately if desired.
        self.error("dabase name '%s' cannot contain '-' characters" % name)
        return CODE_VALUE_ERROR

    try:
        Database.create(user, password, name, host, port)
        db = Database(user, password, name, host, port)
        self.__load_countries(db)
    except DatabaseExists as e:
        if not reuse:
            self.error(str(e))
            return CODE_DATABASE_EXISTS
    except DatabaseError as e:
        self.error(str(e))
        return CODE_DATABASE_ERROR
    except LoadError as e:
        # Loading countries failed: roll back the half-created database.
        Database.drop(user, password, name, host, port)
        self.error(str(e))
        return CODE_LOAD_ERROR

    return CMD_SUCCESS
def get_nested_blocks_spec(self):
    """Return allowed_nested_blocks normalized to NestedXBlockSpec items."""
    specs = []
    for block_spec in self.allowed_nested_blocks:
        if not isinstance(block_spec, NestedXBlockSpec):
            # Wrap bare block classes so callers get a uniform interface.
            block_spec = NestedXBlockSpec(block_spec)
        specs.append(block_spec)
    return specs
def batch_delete_entities(self,
                          parent,
                          entity_values,
                          language_code=None,
                          retry=google.api_core.gapic_v1.method.DEFAULT,
                          timeout=google.api_core.gapic_v1.method.DEFAULT,
                          metadata=None):
    """Delete entities in the specified entity type.

    Operation <response: ``google.protobuf.Empty``,
    metadata: ``google.protobuf.Struct``>.

    Args:
        parent (str): Required. The name of the entity type to delete
            entries for. Format:
            ``projects/<Project ID>/agent/entityTypes/<Entity Type ID>``.
        entity_values (list[str]): Required. The canonical ``values`` of
            the entities to delete. These are not fully-qualified names,
            i.e. they don't start with ``projects/<Project ID>``.
        language_code (str): Optional. The language of entity synonyms;
            the agent's default language is used when unspecified.
        retry (Optional[google.api_core.retry.Retry]): Retry policy;
            ``None`` disables retries.
        timeout (Optional[float]): Seconds to wait; applies per attempt
            when ``retry`` is given.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional
            metadata for the call.

    Returns:
        A :class:`~google.cloud.dialogflow_v2.types._OperationFuture`.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: the request
            failed for any reason.
        google.api_core.exceptions.RetryError: retry attempts failed.
        ValueError: the parameters are invalid.
    """
    # Wrap the transport method once and cache it so the configured
    # retry/timeout defaults are applied consistently on later calls.
    if 'batch_delete_entities' not in self._inner_api_calls:
        self._inner_api_calls[
            'batch_delete_entities'] = google.api_core.gapic_v1.method.wrap_method(
                self.transport.batch_delete_entities,
                default_retry=self._method_configs[
                    'BatchDeleteEntities'].retry,
                default_timeout=self._method_configs['BatchDeleteEntities']
                .timeout,
                client_info=self._client_info,
            )

    request = entity_type_pb2.BatchDeleteEntitiesRequest(
        parent=parent,
        entity_values=entity_values,
        language_code=language_code,
    )
    operation = self._inner_api_calls['batch_delete_entities'](
        request, retry=retry, timeout=timeout, metadata=metadata)
    # Wrap the raw long-running operation so callers get a typed future.
    return google.api_core.operation.from_gapic(
        operation,
        self.transport._operations_client,
        empty_pb2.Empty,
        metadata_type=struct_pb2.Struct,
    )
async def get_participant(self, p_id: int, force_update=False) -> Participant:
    """Get a participant by its id.

    |methcoro|

    Args:
        p_id: participant id
        force_update (default=False): True to force an update to the
            Challonge API

    Returns:
        Participant: None if not found

    Raises:
        APIException
    """
    found_p = self._find_participant(p_id)
    if force_update or found_p is None:
        # Cache miss (or forced): refresh the full participant list.
        await self.get_participants()
        found_p = self._find_participant(p_id)
    return found_p
def _get_result_paths(self, output_dir):
    """Return a dict of output files.

    Writes the properties file first so its path is valid, then maps
    'properties' to a ResultPath for it.
    """
    self._write_properties_file()
    properties_fp = os.path.join(self.ModelDir, self.PropertiesFile)
    result_paths = {
        'properties': ResultPath(properties_fp, IsWritten=True,)
    }
    return result_paths
def smartos():
    """Provide grains for SmartOS.

    Returns zone-specific grains inside a SmartOS zone, compute-node
    grains in the global zone, and an empty dict elsewhere.
    """
    grains = {}
    if salt.utils.platform.is_smartos_zone():
        grains = salt.utils.dictupdate.update(grains, _smartos_zone_data(), merge_lists=True)
        grains = salt.utils.dictupdate.update(grains, _smartos_zone_pkgsrc_data(), merge_lists=True)
        grains = salt.utils.dictupdate.update(grains, _smartos_zone_pkgin_data(), merge_lists=True)
    elif salt.utils.platform.is_smartos_globalzone():
        grains = salt.utils.dictupdate.update(grains, _smartos_computenode_data(), merge_lists=True)
    return grains
def _setTag(self, tag):
    """INTERNAL - associate an AdvancedTag with this attributes dict.

    Stores a weak reference to `tag` when truthy, otherwise clears the
    current association.

    @param tag <AdvancedTag/None> - tag to associate, or None to clear
    """
    # A weakref avoids a reference cycle between tag and its attributes.
    self._tagRef = weakref.ref(tag) if tag else None
def list_env(saltenv='base'):
    '''
    Return all of the file paths found in an environment.

    Builds a nested dict per configured pillar root: directories map to
    sub-dicts mirroring the tree, and files map to the marker string 'f'.
    Returns an empty dict when ``saltenv`` is not configured.
    '''
    ret = {}
    if saltenv not in __opts__['pillar_roots']:
        return ret
    for f_root in __opts__['pillar_roots'][saltenv]:
        ret[f_root] = {}
        for root, dirs, files in salt.utils.path.os_walk(f_root):
            # Navigate to the sub-dict corresponding to this directory.
            sub = ret[f_root]
            if root != f_root:
                # Rebuild the path components from root up to f_root.
                # os.path.samefile (rather than string comparison) makes
                # this robust to symlinks and trailing separators.
                sroot = root
                above = []
                while not os.path.samefile(sroot, f_root):
                    base = os.path.basename(sroot)
                    if base:
                        above.insert(0, base)
                    sroot = os.path.dirname(sroot)
                # Descend through the already-created intermediate dicts.
                for aroot in above:
                    sub = sub[aroot]
            for dir_ in dirs:
                sub[dir_] = {}
            for fn_ in files:
                # 'f' marks a file leaf, distinguishing it from dir dicts.
                sub[fn_] = 'f'
    return ret
Return all of the file paths found in an environment
def linestrings_to_path(multi):
    """
    Load shapely LineString objects into a trimesh.path.Path2D object

    Parameters
    -------------
    multi : shapely.geometry.LineString or MultiLineString
      Input 2D geometry

    Returns
    -------------
    kwargs : dict
      Keyword arguments for Path2D constructor
    """
    if not util.is_sequence(multi):
        multi = [multi]

    entities = []
    vertices = []
    for line in multi:
        # Skip anything that is not coordinate-bearing geometry.
        if not hasattr(line, 'coords'):
            continue
        coords = np.array(line.coords)
        count = len(coords)
        if count < 2:
            # Degenerate linestring: nothing to draw.
            continue
        entities.append(Line(np.arange(count) + len(vertices)))
        vertices.extend(coords)

    return {'entities': np.array(entities),
            'vertices': np.array(vertices)}
Load shapely LineString objects into a trimesh.path.Path2D object Parameters ------------- multi : shapely.geometry.LineString or MultiLineString Input 2D geometry Returns ------------- kwargs : dict Keyword arguments for Path2D constructor
def parameterized_send(self, request, parameter_list):
    """Send batched requests for a list of parameters.

    Args:
        request (str): printf-style request template, like "%s.*?\n"
        parameter_list (list): values substituted into the template,
            like ["TTLIN", "TTLOUT"]

    Returns:
        OrderedDict: {parameter: response_queue} in request order
    """
    return OrderedDict(
        (parameter, self.send(request % parameter))
        for parameter in parameter_list)
Send batched requests for a list of parameters Args: request (str): Request to send, like "%s.*?\n" parameter_list (list): parameters to format with, like ["TTLIN", "TTLOUT"] Returns: dict: {parameter: response_queue}
def away_two_point_field_goal_percentage(self):
    """
    Returns a ``float`` of the number of two point field goals made
    divided by the number of two point field goal attempts by the away
    team, rounded to 3 decimal places. Percentage ranges from 0-1.
    Returns None when no attempts were made, instead of raising
    ZeroDivisionError.
    """
    attempts = float(self.away_two_point_field_goal_attempts)
    if attempts == 0:
        # No attempts: percentage is undefined, not an error.
        return None
    result = float(self.away_two_point_field_goals) / attempts
    return round(result, 3)
Returns a ``float`` of the number of two point field goals made divided by the number of two point field goal attempts by the away team. Percentage ranges from 0-1.
def get(self, request):
    """Return the authenticated user's info, including their permissions."""
    serializer = PermissionsUserSerializer(
        instance=request.user,
        context={'request': request},
    )
    return Response(data=serializer.data)
Get user information, with a list of permissions for that user.
def rolling_upgrade(self, upgrade_from_cdh_version, upgrade_to_cdh_version,
                    upgrade_service_names, slave_batch_size=None,
                    slave_fail_count_threshold=None, sleep_seconds=None):
    """Perform a rolling upgrade of the given services in this cluster.

    Only services that support rolling upgrades may be requested; the
    command errors out otherwise. This does not upgrade the full CDH
    cluster (use the upgradeCDH command for that) and does not change
    any binaries on the hosts — they must already be present and
    activated.

    @param upgrade_from_cdh_version: Current CDH version, e.g. "5.1.0"
    @param upgrade_to_cdh_version: Target CDH version, e.g. "5.4.0"
    @param upgrade_service_names: List of services to upgrade and restart
    @param slave_batch_size: Slave hosts restarted per batch (> 0, default 1)
    @param slave_fail_count_threshold: Failed slave batches tolerated
        before the command is considered failed (>= 0, default 0)
    @param sleep_seconds: Seconds to sleep between slave batches (>= 0,
        default 0)
    @return: Reference to the submitted command.
    @since: API v10
    """
    args = {
        'upgradeFromCdhVersion': upgrade_from_cdh_version,
        'upgradeToCdhVersion': upgrade_to_cdh_version,
        'upgradeServiceNames': upgrade_service_names,
    }
    # Only include optional tuning knobs when explicitly provided.
    optional = (('slaveBatchSize', slave_batch_size),
                ('slaveFailCountThreshold', slave_fail_count_threshold),
                ('sleepSeconds', sleep_seconds))
    for key, value in optional:
        if value:
            args[key] = value
    return self._cmd('rollingUpgrade', data=args, api_version=10)
Command to do a rolling upgrade of services in the given cluster This command does not handle any services that don't support rolling upgrades. The command will throw an error and not start if upgrade of any such service is requested. This command does not upgrade the full CDH Cluster. You should normally use the upgradeCDH Command for upgrading the cluster. This is primarily helpful if you need to recover from an upgrade failure or for advanced users to script an alternative to the upgradeCdhCommand. This command expects the binaries to be available on hosts and activated. It does not change any binaries on the hosts. @param upgrade_from_cdh_version: Current CDH Version of the services. Example versions are: "5.1.0", "5.2.2" or "5.4.0" @param upgrade_to_cdh_version: Target CDH Version for the services. The CDH version should already be present and activated on the nodes. Example versions are: "5.1.0", "5.2.2" or "5.4.0" @param upgrade_service_names: List of specific services to be upgraded and restarted. @param slave_batch_size: Number of hosts with slave roles to restart at a time. Must be greater than 0. Default is 1. @param slave_fail_count_threshold: The threshold for number of slave host batches that are allowed to fail to restart before the entire command is considered failed. Must be >= 0. Default is 0. @param sleep_seconds: Number of seconds to sleep between restarts of slave host batches. Must be >= 0. Default is 0. @return: Reference to the submitted command. @since: API v10
def _static(self, target, value):
    """Render a PHP ``static`` variable declaration from an assignment."""
    assignment = ast.Assign(targets=[target], value=value)
    return 'static ' + self.__p(assignment)
PHP's "static"
def elapsed_time_from(start_time):
    """Calculate the time delta from a latched time to the current UTC time.

    Returns None when ``start_time`` cannot be converted by make_time.
    """
    latched = make_time(start_time)
    if latched is None:
        return None
    now = datetime.utcnow().replace(microsecond=0)
    return now - latched
calculate time delta from latched time and current time
def assert_regex(text, regex, msg_fmt="{msg}"):
    """Fail if text does not match the regular expression.

    regex can be either a regular expression string or a compiled
    regular expression object.

    >>> assert_regex("Hello World!", r"llo.*rld!$")
    >>> assert_regex("Hello World!", r"\\d")
    Traceback (most recent call last):
        ...
    AssertionError: 'Hello World!' does not match '\\\\d'

    The following msg_fmt arguments are supported:
    * msg - the default error message
    * text - text that is matched
    * pattern - regular expression pattern as string
    """
    compiled = re.compile(regex)
    if compiled.search(text):
        return
    msg = "{!r} does not match {!r}".format(text, compiled.pattern)
    fail(msg_fmt.format(msg=msg, text=text, pattern=compiled.pattern))
Fail if text does not match the regular expression. regex can be either a regular expression string or a compiled regular expression object. >>> assert_regex("Hello World!", r"llo.*rld!$") >>> assert_regex("Hello World!", r"\\d") Traceback (most recent call last): ... AssertionError: 'Hello World!' does not match '\\\\d' The following msg_fmt arguments are supported: * msg - the default error message * text - text that is matched * pattern - regular expression pattern as string
def get_compliance_expansion(self):
    """
    Gets a compliance tensor expansion from the elastic tensor expansion.

    The first-order term is the ordinary compliance tensor (inverse of
    the second-order elastic tensor). Higher-order compliance terms are
    built by Einstein-summation contractions of lower-order compliances
    with the elastic expansion coefficients.

    Returns:
        TensorCollection of compliance tensors, one per expansion order.

    Raises:
        ValueError: if the expansion order is greater than 4.
    """
    # Only fourth-order and lower expansions are supported.
    if not self.order <= 4:
        raise ValueError("Compliance tensor expansion only "
                         "supported for fourth-order and lower")
    # Second-order term: plain compliance tensor of the elastic tensor.
    ce_exp = [ElasticTensor(self[0]).compliance_tensor]
    # Third-order term: contract the third-order elastic coefficients
    # with three copies of the second-order compliance.
    einstring = "ijpq,pqrsuv,rskl,uvmn->ijklmn"
    ce_exp.append(np.einsum(einstring, -ce_exp[-1], self[1],
                            ce_exp[-1], ce_exp[-1]))
    if self.order == 4:
        # Fourth-order term: one contraction with the fourth-order
        # elastic coefficients plus three mixed contractions with the
        # third-order coefficients and third-order compliance.
        einstring_1 = "pqab,cdij,efkl,ghmn,abcdefgh"
        tensors_1 = [ce_exp[0]]*4 + [self[-1]]
        temp = -np.einsum(einstring_1, *tensors_1)
        einstring_2 = "pqab,abcdef,cdijmn,efkl"
        einstring_3 = "pqab,abcdef,efklmn,cdij"
        einstring_4 = "pqab,abcdef,cdijkl,efmn"
        for es in [einstring_2, einstring_3, einstring_4]:
            temp -= np.einsum(es, ce_exp[0], self[-2], ce_exp[1], ce_exp[0])
        ce_exp.append(temp)
    return TensorCollection(ce_exp)
Gets a compliance tensor expansion from the elastic tensor expansion.
def get_url(self, version=None):
    """Return the URL of the bundle file.

    A fixed bundle URL, when configured, takes precedence; otherwise
    the URL is assembled from the root, filename, version (current
    version when not given) and bundle type.
    """
    if self.fixed_bundle_url:
        return self.fixed_bundle_url
    base = os.path.join(self.bundle_url_root, self.bundle_filename)
    return '{}.{}.{}'.format(
        base, version or self.get_version(), self.bundle_type)
Return the URL of the bundle file
def trace(f, *args, **kwargs):
    """Trace a function call for debugging purposes.

    Prints the function name and arguments, then calls the function and
    returns its result.
    """
    # print() with a single pre-formatted argument is valid under both
    # Python 2 and Python 3; the original print statement was Python 2 only.
    print('Calling %s() with args %s, %s ' % (f.__name__, args, kwargs))
    return f(*args, **kwargs)
Wrapper used to trace function calls for debugging purposes: prints the call, then delegates to the wrapped function.
def _connect(self, sock, addr, timeout):
    """Start watching the socket until it becomes writable (connected).

    Raises:
        SocketClientConnectedError: a connection is already established.
        SocketClientConnectingError: a connect is already in progress.

    Returns:
        Deferred that fires on connection success or failure.
    """
    if self.connection:
        raise SocketClientConnectedError()
    if self.connector:
        raise SocketClientConnectingError()

    self.sock = sock
    self.addr = addr
    self.connect_deferred = Deferred(self.loop)

    connector = Connector(self.loop, sock, addr, timeout)
    connector.deferred.add_callback(self._connected)
    connector.deferred.add_errback(self._connect_failed)
    self.connector = connector
    connector.start()

    return self.connect_deferred
Start watching the socket for it to be writtable.
def charge(self, user, vault_id=None):
    """Look up the user's vault record.

    When ``vault_id`` is omitted, this assumes there is exactly one
    (user, vault_id) row in the database for the user.
    """
    assert self.is_in_vault(user)
    lookup = {'user': user}
    if vault_id:
        lookup['vault_id'] = vault_id
    # NOTE(review): user_vault is fetched but never used or returned in
    # the visible implementation — confirm whether this is intentional.
    user_vault = self.get(**lookup)
If vault_id is not passed, this will assume that there is only one instance of user and vault_id in the db.
def valid(names):
    """Return True iff every element of ``names`` is a valid feature.

    A single string is treated as a one-element list.
    """
    if isinstance(names, str):
        names = [names]
    assert is_iterable_typed(names, basestring)
    for name in names:
        if name not in __all_features:
            return False
    return True
Returns true iff all elements of names are valid features.
def insert(self, item, low_value):
    """
    Create a new node and insert it into a sorted list. Calls the item
    duplicator, if any, on the item. If low_value is true, starts
    searching from the start of the list, otherwise searches from the
    end. Uses the item comparator, if any, to find where to place the
    new node. Returns a handle to the new node, or NULL if memory was
    exhausted. Resets the cursor to the list head.
    """
    handle = lib.zlistx_insert(self._as_parameter_, item, low_value)
    return c_void_p(handle)
Create a new node and insert it into a sorted list. Calls the item duplicator, if any, on the item. If low_value is true, starts searching from the start of the list, otherwise searches from the end. Use the item comparator, if any, to find where to place the new node. Returns a handle to the new node, or NULL if memory was exhausted. Resets the cursor to the list head.
def task_log(self):
    """Get task log.

    :rtype: str
    :returns: The task log as a string.
    :raises ValueError: if ``task_id`` has not been set.
    """
    task = self.task_id
    if task is None:
        raise ValueError('task_id is None')
    return self.get_task_log(task, self.session, self.request_kwargs)
Get task log. :rtype: str :returns: The task log as a string.
def parse_protobuf(self, proto_type):
    """Parse the request data into an instance of ``proto_type``.

    Raises BadRequest when the content type is not protobuf, the
    payload cannot be parsed, or (when initialization checking is
    enabled) the message is only partially initialized.
    """
    content_type = self.environ.get('CONTENT_TYPE', '')
    if 'protobuf' not in content_type:
        raise BadRequest('Not a Protobuf request')

    message = proto_type()
    try:
        message.ParseFromString(self.data)
    except Exception:
        raise BadRequest("Unable to parse Protobuf request")

    incomplete = (self.protobuf_check_initialization
                  and not message.IsInitialized())
    if incomplete:
        raise BadRequest("Partial Protobuf request")

    return message
Parse the data into an instance of proto_type.
def get_topic_set(file_path):
    """Open a topic set resource file and return a set of topics.

    Each row's first comma-separated field is taken as a topic name.

    - Input:  - file_path: The path pointing to the topic set resource file.
    - Output: - topic_set: A python set of strings.
    """
    rows = get_file_row_generator(file_path, ",")
    return {row[0] for row in rows}
Opens one of the topic set resource files and returns a set of topics. - Input: - file_path: The path pointing to the topic set resource file. - Output: - topic_set: A python set of strings.
def get_group(self, group_id):
    """Return a Command that fetches the specified group.

    The command's result processor wraps the raw payload in a Group.
    """
    return Command(
        'get',
        [ROOT_GROUPS, group_id],
        process_result=lambda result: Group(self, result),
    )
Return specified group. Returns a Command.
def ways_in_bbox(lat_min, lng_min, lat_max, lng_max, network_type,
                 timeout=180, memory=None,
                 max_query_area_size=50*1000*50*1000,
                 custom_osm_filter=None):
    """Get DataFrames of OSM way/node data within a bounding box.

    Parameters
    ----------
    lat_min, lng_min, lat_max, lng_max : float
        Bounding box extents.
    network_type : {'walk', 'drive'}
        'walk' includes pedestrian-accessible roadways and pathways;
        'drive' includes driveable roadways.
    timeout : int
        Timeout interval passed to the Overpass API.
    memory : int, optional
        Server memory allocation in bytes; server default when None.
    max_query_area_size : float
        Maximum area per sub-query; larger geometries are subdivided.
    custom_osm_filter : str, optional
        Custom Overpass-schema filter for the way["highway"] query.

    Returns
    -------
    nodes, ways, waynodes : pandas.DataFrame
    """
    raw_osm = osm_net_download(
        lat_max=lat_max, lat_min=lat_min,
        lng_min=lng_min, lng_max=lng_max,
        network_type=network_type,
        timeout=timeout, memory=memory,
        max_query_area_size=max_query_area_size,
        custom_osm_filter=custom_osm_filter)
    return parse_network_osm_query(raw_osm)
Get DataFrames of OSM data in a bounding box. Parameters ---------- lat_min : float southern latitude of bounding box lng_min : float eastern longitude of bounding box lat_max : float northern latitude of bounding box lng_max : float western longitude of bounding box network_type : {'walk', 'drive'}, optional Specify the network type where value of 'walk' includes roadways where pedestrians are allowed and pedestrian pathways and 'drive' includes driveable roadways. timeout : int the timeout interval for requests and to pass to Overpass API memory : int server memory allocation size for the query, in bytes. If none, server will use its default allocation size max_query_area_size : float max area for any part of the geometry, in the units the geometry is in: any polygon bigger will get divided up for multiple queries to Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in area, if units are meters)) custom_osm_filter : string, optional specify custom arguments for the way["highway"] query to OSM. Must follow Overpass API schema. For example to request highway ways that are service roads use: '["highway"="service"]' Returns ------- nodes, ways, waynodes : pandas.DataFrame
def cmd_condition_yaw(self, args):
    """yaw ANGLE ANGULAR_SPEED MODE

    Send a MAV_CMD_CONDITION_YAW command. MODE is 0 for an absolute
    heading or 1 for a heading relative to the current yaw.
    """
    if len(args) != 3:
        print("Usage: yaw ANGLE ANGULAR_SPEED MODE:[0 absolute / 1 relative]")
        return
    # The redundant re-check of len(args) after the guard was removed.
    angle = float(args[0])
    angular_speed = float(args[1])
    angle_mode = float(args[2])
    print("ANGLE %s" % (str(angle)))
    self.master.mav.command_long_send(
        self.settings.target_system,
        mavutil.mavlink.MAV_COMP_ID_SYSTEM_CONTROL,
        mavutil.mavlink.MAV_CMD_CONDITION_YAW,
        0,              # confirmation
        angle,          # param 1: target angle
        angular_speed,  # param 2: angular speed
        0,              # param 3: unused here
        angle_mode,     # param 4: 0 absolute / 1 relative
        0, 0, 0)        # params 5-7: unused
yaw angle angular_speed angle_mode
def to_python_index(self, length, check_bounds=True, circular=False):
    """Return a plain Python integer appropriate for indexing a sequence
    of the given length.

    The stored value is treated as a 1-based index: positive values
    shift down by one, negative values index from the end as in Python.
    With ``circular=True`` the result is wrapped modulo ``length``.

    Raises ValueError if the value has units, is not an integer, is
    zero, or (when ``check_bounds`` is set and ``circular`` is not)
    exceeds +/- length.
    """
    if not self.is_unitless:
        raise ValueError("Index cannot have units: {0!r}".format(self))
    index = int(self.value)
    if index != self.value:
        raise ValueError("Index must be an integer: {0!r}".format(index))
    if index == 0:
        raise ValueError("Index cannot be zero")
    if check_bounds and not circular and abs(index) > length:
        raise ValueError(
            "Index {0!r} out of bounds for length {1}".format(index, length))
    # 1-based positive indices shift down; negatives already fit Python.
    if index > 0:
        index -= 1
    return index % length if circular else index
Return a plain Python integer appropriate for indexing a sequence of the given length. Raise if this is impossible for any reason whatsoever.
def find(s):
    """
    Find an amino acid whose name or abbreviation is s.

    @param s: A C{str} amino acid specifier. This may be a full name,
        a 3-letter abbreviation or a 1-letter abbreviation. Case is
        ignored. A codon is also accepted.
    @return: An L{AminoAcid} instance or C{None} if no matching amino
        acid can be located.
    """
    original = s
    # Normalize case: title-case the first word and lower-case the rest,
    # e.g. 'aspartic ACID' becomes 'Aspartic acid'.
    if ' ' in s:
        first, rest = s.split(' ', 1)
        normalized = first.title() + ' ' + rest.lower()
    else:
        normalized = s.title()

    if normalized in NAMES:
        abbrev1 = normalized
    elif normalized in ABBREV3_TO_ABBREV1:
        abbrev1 = ABBREV3_TO_ABBREV1[normalized]
    elif normalized in NAMES_TO_ABBREV1:
        abbrev1 = NAMES_TO_ABBREV1[normalized]
    else:
        # Fall back to a codon lookup on the raw (uppercased) input.
        target = original.upper()
        abbrev1 = None
        for candidate, codons in CODONS.items():
            if target in codons:
                abbrev1 = candidate
                break

    if abbrev1:
        return AminoAcid(
            NAMES[abbrev1],
            ABBREV3[abbrev1],
            abbrev1,
            CODONS[abbrev1],
            PROPERTIES[abbrev1],
            PROPERTY_DETAILS[abbrev1],
            PROPERTY_CLUSTERS[abbrev1])
Find an amino acid whose name or abbreviation is s. @param s: A C{str} amino acid specifier. This may be a full name, a 3-letter abbreviation or a 1-letter abbreviation. Case is ignored. return: An L{AminoAcid} instance or C{None} if no matching amino acid can be located.
def Q(name):
    """
    Gets a variable from the current sketch.

    Processing has a number of methods and variables with the same name
    ('mousePressed' for example); this disambiguates by reading the
    declared field directly. Numeric values are cast to float to make it
    easier to translate code from pde to python.
    """
    value = PApplet.getDeclaredField(name).get(Sketch.get_instance())
    if isinstance(value, (long, int)):
        return float(value)
    return value
Gets a variable from the current sketch. Processing has a number of methods and variables with the same name, 'mousePressed' for example. This allows us to disambiguate. Also casts numeric values as floats to make it easier to translate code from pde to python.
def _check_align(self): if not hasattr(self, "_align"): self._align = ["l"]*self._row_size if not hasattr(self, "_valign"): self._valign = ["t"]*self._row_size
Check if alignment has been specified, set default one if not
def get_default_if():
    """Return the name of the default network interface.

    Scans /proc/net/route for the entry whose destination field is 0
    (the default route). Returns None when no default route exists
    (the original raised NameError in that case); the file handle is
    now closed via a context manager instead of being leaked.
    """
    with open('/proc/net/route', 'r') as route_file:
        for line in route_file:
            words = line.split()
            try:
                # words[1] is the destination; the header line and
                # hex destinations with letters fail int() and are
                # skipped by the ValueError handler, matching the
                # original parsing behavior.
                if int(words[1]) == 0:
                    return words[0]
            except ValueError:
                pass
    return None
Returns the default interface
def create():
    """Create the working directory at options.path if it is missing."""
    if os.path.isdir(options.path):
        return
    logger.info('creating working directory: ' + options.path)
    os.makedirs(options.path)
Create workdir.options.path
def _initialize(self, boto_session, sagemaker_client, sagemaker_runtime_client):
    """Initialize this SageMaker Session.

    Creates or reuses a boto session plus SageMaker service and runtime
    clients, tags each client's user agent, records the region, and
    disables local mode.

    Raises:
        ValueError: if the boto session has no region configured.
    """
    self.boto_session = boto_session or boto3.Session()

    self._region_name = self.boto_session.region_name
    if self._region_name is None:
        raise ValueError('Must setup local AWS configuration with a region supported by SageMaker.')

    self.sagemaker_client = sagemaker_client or self.boto_session.client('sagemaker')
    prepend_user_agent(self.sagemaker_client)

    if sagemaker_runtime_client is None:
        # Endpoint invocations can be slow; use a generous read timeout.
        runtime_config = botocore.config.Config(read_timeout=80)
        sagemaker_runtime_client = self.boto_session.client(
            'runtime.sagemaker', config=runtime_config)
    self.sagemaker_runtime_client = sagemaker_runtime_client
    prepend_user_agent(self.sagemaker_runtime_client)

    self.local_mode = False
Initialize this SageMaker Session. Creates or uses a boto_session, sagemaker_client and sagemaker_runtime_client. Sets the region_name.
def max_intensity(self, time):
    """Calculate the maximum intensity found at a timestep.

    ``time`` must be present in ``self.times``; the matching entry of
    ``self.timesteps`` is reduced with max().
    """
    step_index = np.where(self.times == time)[0][0]
    return self.timesteps[step_index].max()
Calculate the maximum intensity found at a timestep.
def getheader(self, field, default=''):
    """Return the HTTP response header field, case insensitively.

    Returns ``default`` when the field is absent or headers are unset.
    """
    if not self.headers:
        return default
    wanted = field.lower()
    for name in self.headers:
        if name.lower() == wanted:
            return self.headers[name]
    return default
Returns the HTTP response header field, case insensitively
def render_template(self, template_file, target_file, template_vars=None):
    """Render a Jinja2 template for the backend.

    The template file is expected in the directory
    templates/BACKEND_NAME (the class name, lower-cased); the rendered
    output is written to ``target_file`` inside the work root.

    :param template_file: template filename relative to the backend dir
    :param target_file: output filename relative to the work root
    :param template_vars: optional dict of variables for the template
    """
    # A dict default argument is shared across calls; use None instead.
    if template_vars is None:
        template_vars = {}
    template_dir = type(self).__name__.lower()
    template = self.jinja_env.get_template(
        os.path.join(template_dir, template_file))
    file_path = os.path.join(self.work_root, target_file)
    with open(file_path, 'w') as f:
        f.write(template.render(template_vars))
Render a Jinja2 template for the backend The template file is expected in the directory templates/BACKEND_NAME.
def render_django_response(self, **kwargs):
    """Render the graph as SVG and wrap it in a Django HttpResponse."""
    from django.http import HttpResponse
    svg = self.render(**kwargs)
    return HttpResponse(svg, content_type='image/svg+xml')
Render the graph, and return a Django response
def create_relation(self, event, content_object, distinction=''):
    """Create an EventRelation linking ``event`` to ``content_object``.

    See EventRelation for help on ``distinction``.
    """
    return EventRelation.objects.create(
        event=event,
        content_object=content_object,
        distinction=distinction,
    )
Creates a relation between event and content_object. See EventRelation for help on distinction.
def find_first_fit(unoccupied_columns, row, row_length):
    """Find the first column offset at which the row's items can fit.

    Tries each free column as an anchor for the row's first item and
    returns the resulting offset once every item fits.

    Raises:
        ValueError: if no offset lets the row fit.
    """
    # Loop-invariant: the x position of the row's first item.
    first_item_x = row[0][0]
    for free_col in unoccupied_columns:
        offset = free_col - first_item_x
        if check_columns_fit(unoccupied_columns, row, offset, row_length):
            return offset
    # "bossily" in the original message was a typo for "possibly".
    raise ValueError("Row cannot possibly fit in %r: %r"
                     % (list(unoccupied_columns.keys()), row))
Finds the first index that the row's items can fit.
def remove_port_channel(self, **kwargs):
    """Remove a port channel interface.

    Args:
        port_int (str): port-channel number (1, 2, 3, etc).
        callback (function): executed with the generated ``ElementTree``
            config; defaults to the device callback.

    Returns:
        Return value of ``callback``.

    Raises:
        KeyError: if ``port_int`` is not passed.
        ValueError: if ``port_int`` is invalid.
    """
    port_int = kwargs.pop('port_int')
    callback = kwargs.pop('callback', self._callback)
    # Port-channel names are plain numbers of one to four digits.
    if re.search('^[0-9]{1,4}$', port_int) is None:
        raise ValueError('%s must be in the format of x for port channel '
                         'interfaces.' % repr(port_int))
    builder = getattr(self._interface, 'interface_port_channel_name')
    config = builder(name=port_int)
    # Mark the port-channel element for deletion in the generated config.
    config.find('.//*port-channel').set('operation', 'delete')
    return callback(config)
Remove a port channel interface. Args: port_int (str): port-channel number (1, 2, 3, etc). callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if `port_int` is not passed. ValueError: if `port_int` is invalid. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.203'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.interface.channel_group(name='225/0/20', ... int_type='tengigabitethernet', ... port_int='1', channel_type='standard', mode='active') ... output = dev.interface.remove_port_channel( ... port_int='1')
def to_networkx(self):
    """Return a NetworkX DiGraph of the single linkage tree.

    Edge weights are the distances at which child nodes merge to form
    the parent cluster; each internal node carries a ``size`` attribute
    giving the number of points in its cluster.
    """
    try:
        from networkx import DiGraph, set_node_attributes
    except ImportError:
        raise ImportError('You must have networkx installed to export networkx graphs')

    n_merges = self._linkage.shape[0]
    max_node = 2 * n_merges
    num_points = max_node - (n_merges - 1)

    graph = DiGraph()
    sizes = {}
    for parent, merge_row in enumerate(self._linkage, num_points):
        distance = merge_row[2]
        graph.add_edge(parent, merge_row[0], weight=distance)
        graph.add_edge(parent, merge_row[1], weight=distance)
        sizes[parent] = merge_row[3]
    set_node_attributes(graph, sizes, 'size')

    return graph
Return a NetworkX DiGraph object representing the single linkage tree. Edge weights in the graph are the distance values at which child nodes merge to form the parent cluster. Nodes have a `size` attribute attached giving the number of points that are in the cluster.
def write_case_data(self, file):
    """Write the case name and base MVA as two CSV rows."""
    writer = self._get_writer(file)
    for row in (["Name", "base_mva"],
                [self.case.name, self.case.base_mva]):
        writer.writerow(row)
Writes the case data as CSV.
def get_vertex(self, key):
    """Return the Vertex mapped to ``key``, creating one if needed.

    Args:
        key: A string reference for a vertex. Unseen keys get a fresh
            Vertex created and registered under that key.

    Returns:
        The Vertex mapped to by key.
    """
    try:
        return self.vertex_map[key]
    except KeyError:
        vertex = self.new_vertex()
        self.vertex_map[key] = vertex
        return vertex
Returns or Creates a Vertex mapped by key. Args: key: A string reference for a vertex. May refer to a new Vertex in which case it will be created. Returns: A the Vertex mapped to by key.
def subtract_column_median(df, prefix='Intensity '): df = df.copy() df.replace([np.inf, -np.inf], np.nan, inplace=True) mask = [l.startswith(prefix) for l in df.columns.values] df.iloc[:, mask] = df.iloc[:, mask] - df.iloc[:, mask].median(axis=0) return df
Apply column-wise normalisation to expression columns. Default is median transform to expression columns beginning with Intensity :param df: :param prefix: The column prefix for expression columns :return: