code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def get_fun_prop(f, k):
    """Return the value of verification property `k` annotated on function `f`.

    Properties are annotations added to a function throughout the process
    of defining it for verification (e.g. the argument types), stored under
    the ``_FUN_PROPS`` attribute. Users should never call this directly.

    Raises:
        InternalError: if `f` has no property named `k`.
    """
    if not has_fun_prop(f, k):
        raise InternalError("Function %s has no property %s" % (str(f), k))
    return getattr(f, _FUN_PROPS)[k]
def create_request(self, reset_gpd_iterator=False):
    """Build the list of DownloadRequests for all images under the given
    property of the Geopedia Vector layer.

    :param reset_gpd_iterator: when re-running, reset (True) or keep (False)
        the existing ``gpd_iterator`` (a ``GeopediaFeatureIterator``);
        keeping it avoids repeating the service call, but tiles and dates
        stay the same.
    :type reset_gpd_iterator: bool
    """
    if reset_gpd_iterator:
        self.gpd_iterator = None
    gpd_service = GeopediaImageService()
    self.download_list = gpd_service.get_request(self)
    self.gpd_iterator = gpd_service.get_gpd_iterator()
def _boto_conn_kwargs(self):
    """Generate keyword arguments for boto3 connection functions.

    Credential resolution order:
    1. ``self.account_id`` set -> assume role via STS (``_get_sts_token``)
       and use the temporary credentials.
    2. ``self.profile_name`` set -> read credentials from that boto3 profile.
    3. otherwise -> only ``region_name`` is passed; boto3 falls back to its
       default credential chain.

    :return: keyword arguments for boto3 connection functions
    :rtype: dict
    """
    kwargs = {'region_name': self.region}
    if self.account_id is not None:
        logger.debug("Connecting for account %s role '%s' with STS "
                     "(region: %s)",
                     self.account_id, self.account_role, self.region)
        credentials = self._get_sts_token()
        kwargs['aws_access_key_id'] = credentials.access_key
        kwargs['aws_secret_access_key'] = credentials.secret_key
        kwargs['aws_session_token'] = credentials.session_token
    elif self.profile_name is not None:
        logger.debug("Using credentials profile: %s", self.profile_name)
        # NOTE(review): reaches into the private botocore session
        # (``_session``); may break across boto3 versions -- confirm.
        session = boto3.Session(profile_name=self.profile_name)
        credentials = session._session.get_credentials()
        kwargs['aws_access_key_id'] = credentials.access_key
        kwargs['aws_secret_access_key'] = credentials.secret_key
        # why: static profile credentials expose the token as ``.token``
        kwargs['aws_session_token'] = credentials.token
    else:
        logger.debug("Connecting to region %s", self.region)
    return kwargs
def _parse_output_for_errors(data, command, **kwargs): if re.search('% Invalid', data): raise CommandExecutionError({ 'rejected_input': command, 'message': 'CLI excution error', 'code': '400', 'cli_error': data.lstrip(), }) if kwargs.get('error_pattern') is not None: for re_line in kwargs.get('error_pattern'): if re.search(re_line, data): raise CommandExecutionError({ 'rejected_input': command, 'message': 'CLI excution error', 'code': '400', 'cli_error': data.lstrip(), })
Helper method to parse command output for error information
def tx2genefile(gtf, out_file=None):
    """Write out a CSV file of transcript->gene mappings for `gtf`.

    Uses the installed ``tx2gene.csv`` next to the GTF if it exists, or
    the pre-existing `out_file`, otherwise writes a new one.
    """
    # why: a pre-built tx2gene.csv shipped alongside the GTF wins
    installed_tx2gene = os.path.join(os.path.dirname(gtf), "tx2gene.csv")
    if file_exists(installed_tx2gene):
        return installed_tx2gene
    # NOTE(review): with the default out_file=None this passes None to
    # file_exists/file_transaction -- presumably callers always supply a
    # path; confirm against call sites.
    if file_exists(out_file):
        return out_file
    with file_transaction(out_file) as tx_out_file:
        with open(tx_out_file, "w") as out_handle:
            for k, v in transcript_to_gene(gtf).items():
                out_handle.write(",".join([k, v]) + "\n")
    return out_file
def one_of(*generators):
    """Generate an arbitrary value from one of the specified generators.

    Class factory: returns a class that is a closure around `generators`,
    whose ``arbitrary`` picks one of them at random.
    """
    class OneOfGenerators(ArbitraryInterface):
        @classmethod
        def arbitrary(cls):
            return arbitrary(random.choice(generators))
    # give the class a descriptive name listing its generators
    OneOfGenerators.__name__ = ''.join([
        'one_of(',
        ', '.join(generator.__name__ for generator in generators),
        ')'
    ])
    return OneOfGenerators
def from_str(cls, version_str: str):
    """Alternate constructor building an instance from a SemVer string."""
    instance = cls()
    instance.version = version_str
    return instance
def set(self, folder: str, subscribed: bool) -> None:
    """Set the subscribed status of a folder.

    Adds the folder when `subscribed` is true, removes it otherwise.
    """
    action = self.add if subscribed else self.remove
    action(folder)
def reaction_formula(reaction, compound_formula):
    """Calculate formula compositions for both sides of the reaction.

    If every compound in the reaction has a formula, return a
    ``(left_formula, right_formula)`` tuple; otherwise return None.

    Args:
        reaction: :class:`psamm.reaction.Reaction`.
        compound_formula: a map from compound id to formula.
    """
    def multiply_formula(compound_list):
        # why: scale each compound's formula by its stoichiometric count
        for compound, count in compound_list:
            yield count * compound_formula[compound.name]

    for compound, _ in reaction.compounds:
        if compound.name not in compound_formula:
            return None
    else:
        # for/else: only reached when no compound was missing a formula
        left_form = reduce(
            operator.or_, multiply_formula(reaction.left), Formula())
        right_form = reduce(
            operator.or_, multiply_formula(reaction.right), Formula())
        return left_form, right_form
def upload_marcxml(self, marcxml, mode):
    """Upload a record to the server via the robotupload batchuploader.

    :param marcxml: the XML to upload.
    :param mode: upload mode:
        - "-i" insert new records
        - "-r" replace existing records
        - "-c" correct fields of records
        - "-a" append fields to records
        - "-ir" insert record or replace if it exists

    Raises:
        NameError: on an unrecognized mode.
    """
    if mode not in ["-i", "-r", "-c", "-a", "-ir"]:
        raise NameError("Incorrect mode " + str(mode))
    return requests.post(self.server_url + "/batchuploader/robotupload",
                         data={'file': marcxml, 'mode': mode},
                         headers={'User-Agent': CFG_USER_AGENT})
def message(self, message, source, point, ln):
    """Create a SyntaxError-like message.

    Combines the base `message`, the line number `ln`, and an excerpt of
    `source` with a caret positioned under index `point` of `source`.
    """
    if message is None:
        message = "parsing failed"
    if ln is not None:
        message += " (line " + str(ln) + ")"
    if source:
        if point is None:
            message += "\n" + " " * taberrfmt + clean(source)
        else:
            # show only the offending line, stripped of leading whitespace
            part = clean(source.splitlines()[lineno(point, source) - 1],
                         False).lstrip()
            # why: shift the caret left by however much was stripped
            point -= len(source) - len(part)
            part = part.rstrip()
            message += "\n" + " " * taberrfmt + part
            if point > 0:
                # clamp the caret to the last character of the excerpt
                if point >= len(part):
                    point = len(part) - 1
                message += "\n" + " " * (taberrfmt + point) + "^"
    return message
def freeze(self, freeze: bool = True):
    """Freeze the hub: each topic must already be assigned, and no new
    topics can be created after this point.

    :param freeze: new frozen state to record on the hub
    """
    # NOTE(review): every topic is frozen unconditionally; when
    # freeze=False only self._frozen is toggled back while topics stay
    # frozen -- confirm this asymmetry is intended.
    for topic in self._topics.values():
        topic.freeze()
    self._frozen = freeze
def load_credential_file(self, path):
    """Load a credential file laid out like the Java utilities'.

    Rewrites the Java-style keys (AWSAccessKeyId/AWSSecretKey) to the
    ini keys this config parser expects, prepends a [Credentials]
    section header, and feeds the result to ``self.readfp``.

    Bug fix: the source file was opened without ever being closed;
    a context manager now closes it deterministically.
    """
    c_data = StringIO.StringIO()
    c_data.write("[Credentials]\n")
    with open(path, "r") as cred_file:
        for line in cred_file:
            c_data.write(
                line.replace("AWSAccessKeyId", "aws_access_key_id")
                    .replace("AWSSecretKey", "aws_secret_access_key"))
    c_data.seek(0)
    self.readfp(c_data)
def RemoveObject(self, identifier):
    """Remove a cached object based on the identifier.

    This method ignores the cache value reference count.

    Args:
        identifier (str): VFS object identifier.

    Raises:
        KeyError: if the VFS object is not found in the cache.
    """
    if identifier in self._values:
        del self._values[identifier]
        return
    raise KeyError('Missing cached object for identifier: {0:s}'.format(
        identifier))
def get_custom_query(self):
    """Extract custom query keys from the request.

    Parameters extracted: `q` -> ``SearchableText``; `path` -> path query
    with depth; `recent_created`/`recent_modified` -> min-range date
    queries.

    :returns: Catalog query
    :rtype: dict
    """
    query = {}
    q = req.get_query()
    if q:
        query["SearchableText"] = q
    path = req.get_path()
    if path:
        query["path"] = {'query': path, 'depth': req.get_depth()}
    recent_created = req.get_recent_created()
    if recent_created:
        # why: a relative delta is converted to an absolute minimum date
        date = api.calculate_delta_date(recent_created)
        query["created"] = {'query': date, 'range': 'min'}
    recent_modified = req.get_recent_modified()
    if recent_modified:
        date = api.calculate_delta_date(recent_modified)
        query["modified"] = {'query': date, 'range': 'min'}
    return query
def rmdir(path, dir_fd=None):
    """Remove a directory. Equivalent to "os.rmdir".

    Args:
        path (path-like object): Path or URL.
        dir_fd: directory descriptor; accepted for os.rmdir() signature
            compatibility but not supported on cloud storage objects.
    """
    system = get_instance(path)
    # why: normalize to a directory-style path before removing
    system.remove(system.ensure_dir_path(path))
def get_dvcs_info():
    """Get current repository info from git.

    Returns a dict with the current HEAD commit hash and the total
    commit count on HEAD.
    """
    cmd = "git rev-list --count HEAD"
    # why: the int() round-trip validates git's output before stringifying
    commit_count = str(
        int(subprocess.check_output(shlex.split(cmd)).decode("utf8").strip())
    )
    cmd = "git rev-parse HEAD"
    commit = str(subprocess.check_output(shlex.split(cmd)).decode("utf8").strip())
    return {Constants.COMMIT_FIELD: commit,
            Constants.COMMIT_COUNT_FIELD: commit_count}
def generate_metadata_entry(self, entry_point, toolchain, spec):
    """Generate the artifact export entry for persistence into the
    metadata file, after the toolchain and spec have been executed.
    """
    export_target = spec['export_target']
    toolchain_bases = trace_toolchain(toolchain)
    toolchain_bin_path = spec.get(TOOLCHAIN_BIN_PATH)
    # record (name, version) of the toolchain binary when one is set
    toolchain_bin = ([
        basename(toolchain_bin_path),
        get_bin_version_str(toolchain_bin_path),
    ] if toolchain_bin_path else [])
    return {basename(export_target): {
        'toolchain_bases': toolchain_bases,
        'toolchain_bin': toolchain_bin,
        'builder': '%s:%s' % (
            entry_point.module_name, '.'.join(entry_point.attrs)),
    }}
def child(self, fragment):
    """Return the path of a child item represented by `fragment`.

    `fragment` is normalized through ``FS(...)`` before being joined
    onto this object's path.
    """
    return os.path.join(self.path, FS(fragment).path)
def build_self_reference(filename, clean_wcs=False):
    """Create a reference, undistorted WCS that can be used to apply a
    correction to the WCS of the input file itself.

    Parameters
    ----------
    filename : str
        Image whose field of view the undistorted WCS must cover.
    clean_wcs : bool
        If True, strip all distortion info and history by rebuilding a
        pristine `~stwcs.wcsutils.HSTWCS` from the linear terms.

    Returns
    -------
    customwcs : `stwcs.wcsutils.HSTWCS`
        Undistorted WCS covering the input image's field of view.
    """
    # why: SIP-converted files store the science WCS under 'sipwcs'
    if 'sipwcs' in filename:
        sciname = 'sipwcs'
    else:
        sciname = 'sci'
    wcslin = build_reference_wcs([filename], sciname=sciname)
    if clean_wcs:
        wcsbase = wcslin.wcs
        customwcs = build_hstwcs(wcsbase.crval[0], wcsbase.crval[1],
                                 wcsbase.crpix[0], wcsbase.crpix[1],
                                 wcslin._naxis1, wcslin._naxis2,
                                 wcslin.pscale, wcslin.orientat)
    else:
        customwcs = wcslin
    return customwcs
def _setup_core_modules(self):
    """Set up core modules: IR emulator, SMT solver and SMT translator.

    All three stay None when no architecture info is available; the
    translator additionally requires a working solver.
    """
    self.ir_emulator = None
    self.smt_solver = None
    self.smt_translator = None
    if self.arch_info:
        self.ir_emulator = ReilEmulator(self.arch_info)
        self.smt_solver = None
        if SMT_SOLVER not in ("Z3", "CVC4"):
            raise Exception("{} SMT solver not supported.".format(SMT_SOLVER))
        try:
            if SMT_SOLVER == "Z3":
                self.smt_solver = Z3Solver()
            elif SMT_SOLVER == "CVC4":
                self.smt_solver = CVC4Solver()
        except SmtSolverNotFound:
            # why: a missing solver is tolerated -- translation is simply
            # left disabled below
            logger.warn("{} Solver is not installed. Run 'barf-install-solvers.sh' to install it.".format(SMT_SOLVER))
        self.smt_translator = None
        if self.smt_solver:
            self.smt_translator = SmtTranslator(self.smt_solver, self.arch_info.address_size)
            self.smt_translator.set_arch_alias_mapper(self.arch_info.alias_mapper)
            self.smt_translator.set_arch_registers_size(self.arch_info.registers_size)
def create_prefetch(self, addresses):
    """Create futures needed before starting the process of reading the
    addresses' values from the merkle tree.

    Args:
        addresses (list of str): addresses in the txn's inputs that
            aren't in any base context (or any in the chain).
    """
    with self._lock:
        for add in addresses:
            self._state[add] = _ContextFuture(address=add,
                                              wait_for_tree=True)
def is_user_in_group(self, user, group):
    """Test whether `user` is a member of LDAP `group`.

    Args:
        user: String username.
        group: String group name.

    Returns bool.
    """
    search_url = "%s/%s/%s/%s/%s" % (self.url, "group", group, "user", user)
    response = self.jss.get(search_url)
    length = len(response)
    result = False
    if length == 1:
        # single child element: no membership information returned
        pass
    elif length == 2:
        if response.findtext("ldap_user/username") == user:
            if response.findtext("ldap_user/is_member") == "Yes":
                result = True
    elif len(response) >= 2:
        # NOTE(review): only reachable for length > 2 (length == 2 is
        # handled above); guards against an unexpectedly shaped response.
        raise JSSGetError("Unexpected response.")
    return result
def add_perm(self, subj_str, perm_str):
    """Add a permission for a subject.

    Args:
        subj_str : str
            Subject for which to add the permission.
        perm_str : str
            Permission to add. Implicitly adds all lower permissions,
            e.g. ``write`` also adds ``read``.
    """
    self._assert_valid_permission(perm_str)
    self._perm_dict.setdefault(perm_str, set()).add(subj_str)
def _process_args_as_rows_or_columns(self, arg, unpack=False):
    """Interpret `arg` as column name(s) or row number(s).

    Accepts a scalar string (column name), scalar number (row), sequences
    of either (lists, tuples, numpy arrays), and slices, e.g. 'field',
    35, [35, 55, 86], ['f1', 'f2', ...].

    Returns:
        (result, flags): `result` is the normalized arg; `flags` is a set
        drawn from {'isrows', 'isslice', 'isscalar'}.
    """
    flags = set()
    if isinstance(arg, (tuple, list, numpy.ndarray)):
        # a sequence: strings mean column names, otherwise row numbers
        if isstring(arg[0]):
            result = arg
        else:
            result = arg
            flags.add('isrows')
    elif isstring(arg):
        # a single column name
        result = arg
    elif isinstance(arg, slice):
        if unpack:
            flags.add('isrows')
            result = self._slice2rows(arg.start, arg.stop, arg.step)
        else:
            # keep as a (processed) slice for lazy handling downstream
            flags.add('isrows')
            flags.add('isslice')
            result = self._process_slice(arg)
    else:
        # assume a scalar row number
        result = arg
        flags.add('isrows')
        if numpy.ndim(arg) == 0:
            flags.add('isscalar')
    return result, flags
def _sort_column(self, column, reverse):
    """Sort a treeview column by its values.

    No-op while the widget is disabled. Re-binds the heading command so
    the next click toggles the sort order.
    """
    if tk.DISABLED in self.state():
        return
    l = [(self.set(child, column), child) for child in self.get_children('')]
    # why: convert cell text with the column's declared type before sorting
    l.sort(reverse=reverse, key=lambda x: self._column_types[column](x[0]))
    for index, (val, child) in enumerate(l):
        self.move(child, "", index)
    self.heading(column, command=lambda: self._sort_column(column, not reverse))
def num(string):
    """Extract the first number in *string* and return it as a float.

    Characters other than letters, digits, '.' and '-' are stripped
    before matching. Returns None (and logs at debug level) when no
    number can be found.

    Raises:
        ValueError: if *string* is not a str.
    """
    if not isinstance(string, str):
        raise ValueError(str)
    # why: raw strings avoid invalid-escape-sequence warnings
    cleaned = re.sub(r'[^a-zA-Z0-9.\-]', '', string)
    try:
        # first match of a decimal or integer, with optional sign
        number = re.findall(r"[-+]?\d*\.\d+|[-+]?\d+", cleaned)[0]
        return float(number)
    except IndexError as e:
        # why: narrow except -- only "no match found" is a soft failure
        logger = logging.getLogger('tradingAPI.utils.num')
        logger.debug("number not found in %s" % string)
        logger.debug(e)
        return None
def reversed_blocks(handle, blocksize=4096):
    """Generate blocks of the file's contents in reverse order.

    Each yielded block is at most `blocksize` bytes; concatenating the
    blocks in reverse yield order reproduces the file.
    """
    handle.seek(0, os.SEEK_END)
    position = handle.tell()
    while position > 0:
        step = min(blocksize, position)
        position -= step
        handle.seek(position, os.SEEK_SET)
        yield handle.read(step)
def handle_pubcomp(self):
    """Handle incoming PUBCOMP packet.

    Reads the message id, pushes an EventPubcomp, and returns
    NC.ERR_SUCCESS (or the uint16 read error code).
    """
    self.logger.info("PUBCOMP received")
    ret, mid = self.in_packet.read_uint16()
    if ret != NC.ERR_SUCCESS:
        return ret
    evt = event.EventPubcomp(mid)
    self.push_event(evt)
    return NC.ERR_SUCCESS
def auto(self, enabled=True, **kwargs):
    """Enable or disable automatic capture, optionally setting instance
    parameters at the same time.
    """
    self.namespace = self.get_namespace()
    self.notebook_name = "{notebook}"
    self._timestamp = tuple(time.localtime())
    # why: ask the running notebook for its name via a JS -> kernel
    # round trip (executed asynchronously in the frontend)
    kernel = r'var kernel = IPython.notebook.kernel; '
    nbname = r"var nbname = IPython.notebook.get_notebook_name(); "
    nbcmd = (r"var name_cmd = '%s.notebook_name = \"' + nbname + '\"'; " % self.namespace)
    cmd = (kernel + nbname + nbcmd + "kernel.execute(name_cmd); ")
    display(Javascript(cmd))
    # give the asynchronous JS call time to set notebook_name
    time.sleep(0.5)
    self._auto=enabled
    self.param.set_param(**kwargs)
    tstamp = time.strftime(" [%Y-%m-%d %H:%M:%S]", self._timestamp)
    print("Automatic capture is now %s.%s"
          % ('enabled' if enabled else 'disabled',
             tstamp if enabled else ''))
def _copytoscratch(self, maps):
    """Copy the data in `maps` to the scratch space.

    If the arrays in `maps` are not the same shape as the scratch
    space, a new scratch space is created first.
    """
    try:
        for p in self.inputs:
            self._scratch[p][:] = maps[p]
    except ValueError:
        # why: shape mismatch -- rebuild scratch to match the inputs,
        # then retry the copy
        invals = maps[list(self.inputs)[0]]
        if isinstance(invals, numpy.ndarray):
            shape = invals.shape
        else:
            shape = len(invals)
        self._createscratch(shape)
        for p in self.inputs:
            self._scratch[p][:] = maps[p]
def create_adjusted_model_for_percentages(model_src, model_use):
    """Write a copy of `model_src` to `model_use` with the ``logreg``
    layer replaced by ``sigmoid`` so the model outputs probabilities.
    """
    # why: the previous shutil.copyfile was redundant -- the destination
    # is fully rewritten below anyway
    with open(model_src) as f:
        content = f.read()
    with open(model_use, "w") as f:
        f.write(content.replace("logreg", "sigmoid"))
def setdefault(msg_or_dict, key, value):
    """Set `key` on a protobuf Message or dictionary to `value` if the
    current value is falsy.

    Because protobuf Messages do not distinguish between unset and falsy
    values particularly well (by design), any falsy current value
    (e.g. 0, empty list) is treated as a target to be overwritten, on
    both Messages and dictionaries.

    Args:
        msg_or_dict (Union[~google.protobuf.message.Message, Mapping]):
            the object.
        key (str): The key on the object in question.
        value (Any): The value to set.

    Raises:
        TypeError: If ``msg_or_dict`` is not a Message or dictionary.
    """
    if not get(msg_or_dict, key, default=None):
        set(msg_or_dict, key, value)
def serialize(self, m):
    """Serialize the macaroon in the JSON format indicated by its
    version field.

    @param m the macaroon to serialize.
    @return JSON macaroon.
    """
    from pymacaroons import macaroon
    if m.version == macaroon.MACAROON_V1:
        return self._serialize_v1(m)
    return self._serialize_v2(m)
def add_col_features(self, col=None, degree=None):
    """Exponentiate columns of the dataframe.

    For each (col[i], degree[i]) pair, adds a column named
    ``col[i] + str(degree[i])`` holding
    ``pow(df[col], degree) / 10**(degree-1)``.

    Parameters
    ----------
    col : list(str)
        Columns to exponentiate.
    degree : list
        Exponentiation degree per column (parallel to `col`).
    """
    if not col and not degree:
        return
    else:
        if isinstance(col, list) and isinstance(degree, list):
            if len(col) != len(degree):
                print('col len: ', len(col))
                print('degree len: ', len(degree))
                raise ValueError('col and degree should have equal length.')
            else:
                # operate on preprocessed data when it already exists
                if self.preprocessed_data.empty:
                    data = self.original_data
                else:
                    data = self.preprocessed_data
                for i in range(len(col)):
                    # why: divide by 10**(degree-1) to keep magnitudes
                    # comparable with the original column
                    data.loc[:,col[i]+str(degree[i])] = pow(data.loc[:,col[i]],degree[i]) / pow(10,degree[i]-1)
                self.preprocessed_data = data
        else:
            raise TypeError('col and degree should be lists.')
def as_repository(resource):
    """Adapt the given registered resource to its configured repository.

    :return: object implementing
        :class:`everest.repositories.interfaces.IRepository`.
    """
    reg = get_current_registry()
    # why: an interface is first resolved to its collection class
    if IInterface in provided_by(resource):
        resource = reg.getUtility(resource, name='collection-class')
    return reg.getAdapter(resource, IRepository)
def addfield(self, name, type, width=10):
    """Add a field to the vector layer.

    Parameters
    ----------
    name: str
        the field name
    type: int
        the OGR Field Type (OFT), e.g. ogr.OFTString. See `Module ogr
        <https://gdal.org/python/osgeo.ogr-module.html>`_.
    width: int
        the width of the new field (only used for ogr.OFTString fields)
    """
    fieldDefn = ogr.FieldDefn(name, type)
    if type == ogr.OFTString:
        fieldDefn.SetWidth(width)
    self.layer.CreateField(fieldDefn)
def get_lxc_version():
    """Ask the current host what version of LXC it has.

    Tries ``lxc-version`` first and falls back to
    ``lxc-start --version``. Returns the parsed version. If LXC is not
    installed at all, the fallback's subprocess error propagates.
    """
    runner = functools.partial(
        subprocess.check_output,
        stderr=subprocess.STDOUT,
        universal_newlines=True,
    )
    try:
        # older LXC releases ship a dedicated lxc-version binary
        result = runner(['lxc-version']).rstrip()
        return parse_version(result.replace("lxc version: ", ""))
    except (OSError, subprocess.CalledProcessError):
        pass
    return parse_version(runner(['lxc-start', '--version']).rstrip())
def processGif(searchStr):
    """Return the URL of a gif found via the Giphy API for `searchStr`.

    Returns -1 when no suitable match is found. For an empty query,
    returns the module-level fallback image URL.

    Bug fix: str.replace()/rstrip() return new strings; the previous
    code discarded the results, so none of the sanitizing took effect.
    """
    searchStr = searchStr.replace('| ', ' ').replace('|', ' ')
    searchStr = searchStr.replace(', ', ' ').replace(',', ' ')
    searchStr = searchStr.rstrip()
    searchStr = searchStr.strip('./?\'!,')
    # why: Giphy expects '+'-separated search terms
    searchStr = searchStr.replace(' ', '+')
    if searchStr is None or searchStr == '':
        print("No search parameters specified!")
        # NOTE(review): `no_search_params` is presumably a module-level
        # fallback image URL -- confirm it is defined elsewhere.
        return no_search_params
    api_url = 'http://api.giphy.com/v1/gifs/search'
    api_key = 'dc6zaTOxFJmzC'
    payload = {
        'q': searchStr,
        'limit': 1,
        'api_key': api_key,
    }
    r = requests.get(api_url, params=payload)
    parsed_json = json.loads(r.text)
    if len(parsed_json['data']) == 0:
        print("Couldn't find suitable match for gif! :(")
        return -1
    imgURL = parsed_json['data'][0]['images']['fixed_height']['url']
    return imgURL
def _decode_region(decoder, region, corrections, shrink):
    """Decode and return the value in a region.

    Args:
        region (DmtxRegion):

    Returns:
        Decoded or None: the decoded string plus its bounding Rect,
        mapped back to image coordinates (undoing the shrink factor).
    """
    with _decoded_matrix_region(decoder, region, corrections) as msg:
        if msg:
            # opposite corners of the fitted region in normalized coords
            p00 = DmtxVector2()
            p11 = DmtxVector2(1.0, 1.0)
            dmtxMatrix3VMultiplyBy(
                p00,
                region.contents.fit2raw
            )
            dmtxMatrix3VMultiplyBy(p11, region.contents.fit2raw)
            # why: + 0.5 rounds to the nearest pixel after undoing shrink
            x0 = int((shrink * p00.X) + 0.5)
            y0 = int((shrink * p00.Y) + 0.5)
            x1 = int((shrink * p11.X) + 0.5)
            y1 = int((shrink * p11.Y) + 0.5)
            return Decoded(
                string_at(msg.contents.output),
                Rect(x0, y0, x1 - x0, y1 - y0)
            )
        else:
            return None
def send(self, event):
    """Send an event to the remote during the handshake.

    Returns the bytes to transmit for `event`, or raises
    LocalProtocolError if the event is not valid in the current state.
    """
    data = b""
    if isinstance(event, Request):
        data += self._initiate_connection(event)
    elif isinstance(event, AcceptConnection):
        data += self._accept(event)
    elif isinstance(event, RejectConnection):
        data += self._reject(event)
    elif isinstance(event, RejectData):
        data += self._send_reject_data(event)
    else:
        raise LocalProtocolError(
            "Event {} cannot be sent during the handshake".format(event)
        )
    return data
def spawn(func, kwargs):
    """Spawn a thread, and join it after the context is over.

    Generator-based context-manager body: yields once while the thread
    runs, then joins it.
    """
    # NOTE(review): looks like this is meant to be wrapped with
    # contextlib.contextmanager at the definition or usage site --
    # confirm; as a bare generator the join only runs when the
    # generator is resumed/closed.
    t = threading.Thread(target=func, kwargs=kwargs)
    t.start()
    yield
    t.join()
def setCurveModel(self, model):
    """Set the stimulus model for the calibration curve test.

    :param model: stimulus model that has a tone curve configured
    :type model: :class:`StimulusModel <sparkle.stim.stimulus_model.StimulusModel>`
    """
    self.stimModel = model
    curve_widget = self.ui.curveWidget
    curve_widget.setModel(model)
def _nested_unary_mul(nested_a, p):
    """Multiply `Tensors` in arbitrarily nested `Tensor` `nested_a` with `p`."""
    def mul_with_broadcast(tensor):
        ndims = tensor.shape.ndims
        if ndims != 2:
            # why: reshape p to (-1, 1, ..., 1) so it broadcasts over the
            # tensor's trailing dimensions
            p_reshaped = tf.reshape(p, [-1] + [1] * (ndims - 1))
            return p_reshaped * tensor
        else:
            return p * tensor
    return nest.map(mul_with_broadcast, nested_a)
def dominant_flat_five(note):
    """Build a dominant flat five chord on `note`.

    Example:
    >>> dominant_flat_five('C')
    ['C', 'E', 'Gb', 'Bb']
    """
    res = dominant_seventh(note)
    # why: flatten the fifth (index 2) of the dominant seventh chord
    res[2] = notes.diminish(res[2])
    return res
def presigned_get_object(self, bucket_name, object_name,
                         expires=timedelta(days=7),
                         response_headers=None,
                         request_date=None):
    """Presign a GET object request and return the URL.

    Example:
        presignedURL = presigned_get_object('bucket_name', 'object_name',
                                            timedelta(days=7))

    :param bucket_name: Bucket for the presigned url.
    :param object_name: Object for which presigned url is generated.
    :param expires: Optional timedelta; defaults to 7 days.
    :param response_headers: Optional response fields to sign into the
        url (date, size, type of file, data about server, etc.).
    :param request_date: Optional request date; default is current date.
    :return: Presigned url.
    """
    return self.presigned_url('GET',
                              bucket_name,
                              object_name,
                              expires,
                              response_headers=response_headers,
                              request_date=request_date)
def generate_np(self, x_val, **kwargs):
    """Generate adversarial examples for a numpy input (multi-GPU).

    Builds -- and caches, keyed by the kwargs signature -- a TF graph for
    the attack, then feeds `x_val` through the cached RunnerMultiGPU
    until it reports completion. Returns the adversarial examples.
    """
    _, feedable, _feedable_types, hash_key = self.construct_variables(kwargs)
    if hash_key not in self.graphs:
        # build a fresh graph for this attack configuration
        with tf.variable_scope(None, 'attack_%d' % len(self.graphs)):
            with tf.device('/gpu:0'):
                x = tf.placeholder(tf.float32, shape=x_val.shape, name='x')
                inputs, outputs = self.generate(x, **kwargs)
                from runner import RunnerMultiGPU
                runner = RunnerMultiGPU(inputs, outputs, sess=self.sess)
                self.graphs[hash_key] = runner
    runner = self.graphs[hash_key]
    feed_dict = {'x': x_val}
    for name in feedable:
        feed_dict[name] = feedable[name]
    fvals = runner.run(feed_dict)
    # drain the multi-GPU pipeline until all batches are finished
    while not runner.is_finished():
        fvals = runner.run()
    return fvals['adv_x']
def get_arrive_stop(self, **kwargs):
    """Obtain bus arrival info for a target stop.

    Args:
        stop_number (int): Stop number to query.
        lang (str): Language code (*es* or *en*).

    Returns:
        Status boolean and parsed response (list[Arrival]), or message
        string in case of error.
    """
    params = {
        'idStop': kwargs.get('stop_number'),
        'cultureInfo': util.language_code(kwargs.get('lang'))
    }
    result = self.make_request('geo', 'get_arrive_stop', **params)
    if not util.check_result(result, 'arrives'):
        return False, 'UNKNOWN ERROR'
    values = util.response_list(result, 'arrives')
    return True, [emtype.Arrival(**a) for a in values]
def get_swstat_bits(frame_filenames, swstat_channel_name, start_time,
                    end_time):
    """Read the SWSTAT channel and report the filter-bank switch bits.

    Only the first time sample in the requested span is inspected (this
    is a quick first test on a small chunk of data). Returns a tuple
    ``(bits, filterbank_off)`` where `bits` is the string of the 10
    filter on/off bits and `filterbank_off` is True when the input or
    output switch (or everything) was off.

    Bit layout reference:
    https://dcc.ligo.org/DocDB/0107/T1300711/001/LIGO-T1300711-v1.pdf
    Bits 0-9: on/off switches for the 10 filters in an SFM;
    bit 10: input switch; bit 11: offset switch; bit 12: output switch;
    bit 13: limit switch; bit 14: history reset momentary switch.
    """
    swstat = frame.read_frame(frame_filenames, swstat_channel_name,
                              start_time=start_time, end_time=end_time)
    # why: only the first sample is checked
    bits = bin(int(swstat[0]))
    filterbank_off = False
    # bits is a '0b...' string; negative indices count from the LSB:
    # [-13] is bit 12 (output switch), [-11] is bit 10 (input switch)
    if len(bits) < 14 or int(bits[-13]) == 0 or int(bits[-11]) == 0:
        filterbank_off = True
    return bits[-10:], filterbank_off
def collect_filtered_models(discard, *input_values):
    """Collect a duplicate-free list of all Bokeh models reachable from
    `input_values`, skipping (and not descending into) any instance for
    which `discard(obj)` is true.

    Args:
        discard (Callable[[Model], bool]): predicate; a discarded
            instance is neither collected nor has its references explored.
        *input_values (Model): Bokeh models to collect other models from.

    Returns:
        list of collected models, insertion order, no duplicates.
    """
    ids = set([])
    collected = []
    queued = []
    def queue_one(obj):
        # enqueue unseen, non-discarded models only
        if obj.id not in ids and not (callable(discard) and discard(obj)):
            queued.append(obj)
    for value in input_values:
        _visit_value_and_its_immediate_references(value, queue_one)
    # breadth-first walk over the reference graph
    while queued:
        obj = queued.pop(0)
        if obj.id not in ids:
            ids.add(obj.id)
            collected.append(obj)
            _visit_immediate_value_references(obj, queue_one)
    return collected
def shorten(string, maxlen):
    """Shorten `string` to at most `maxlen` characters, appending an
    ellipsis when truncation happens (only for maxlen > 1)."""
    needs_ellipsis = 1 < maxlen < len(string)
    if needs_ellipsis:
        string = string[:maxlen - 1] + u'…'
    return string[:maxlen]
def _check_embedded_object(embedded_object, type, value, element_kind,
                           element_name):
    """Check whether embedded-object-related parameters are consistent.

    Validates that `embedded_object` is 'instance' or 'object', that the
    CIM `type` is 'string', and that `value` (scalar or array) consists
    of CIMInstance/CIMClass objects.

    Raises:
        ValueError: on any inconsistency, naming the offending element.
    """
    if embedded_object not in ('instance', 'object'):
        raise ValueError(
            _format("{0} {1!A} specifies an invalid value for "
                    "embedded_object: {2!A} (must be 'instance' or 'object')",
                    element_kind, element_name, embedded_object))
    if type != 'string':
        raise ValueError(
            _format("{0} {1!A} specifies embedded_object {2!A} but its CIM "
                    "type is invalid: {3!A} (must be 'string')",
                    element_kind, element_name, embedded_object, type))
    if value is not None:
        if isinstance(value, list):
            if value:
                # why: only the first array element is type-checked
                v0 = value[0]
                if v0 is not None and \
                        not isinstance(v0, (CIMInstance, CIMClass)):
                    raise ValueError(
                        _format("Array {0} {1!A} specifies embedded_object "
                                "{2!A} but the Python type of its first array "
                                "value is invalid: {3} (must be CIMInstance "
                                "or CIMClass)",
                                element_kind, element_name, embedded_object,
                                builtin_type(v0)))
        else:
            if not isinstance(value, (CIMInstance, CIMClass)):
                raise ValueError(
                    _format("{0} {1!A} specifies embedded_object {2!A} but "
                            "the Python type of its value is invalid: {3} "
                            "(must be CIMInstance or CIMClass)",
                            element_kind, element_name, embedded_object,
                            builtin_type(value)))
def at(self, hour, minute=0, second=0, microsecond=0):
    """Return a new instance with the current time set to a different time.

    :param hour: The hour
    :type hour: int
    :param minute: The minute
    :type minute: int
    :param second: The second
    :type second: int
    :param microsecond: The microsecond
    :type microsecond: int
    :rtype: DateTime
    """
    replacements = {
        'hour': hour,
        'minute': minute,
        'second': second,
        'microsecond': microsecond,
    }
    return self.set(**replacements)
def refresh(self):
    """Reload workspace metadata from FireCloud.

    Workspace metadata is cached in the `data` attribute and may become
    stale, requiring a refresh(). Returns self for chaining.
    """
    r = fapi.get_workspace(self.namespace, self.name, self.api_url)
    fapi._check_response_code(r, 200)
    self.data = r.json()
    return self
def fit(self, X):
    """Fit the PyNNDescent transformer to build KNN graphs with
    neighbors given by the dataset X.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Sample data

    Returns
    -------
    transformer : PyNNDescentTransformer
        The trained transformer
    """
    self.n_samples_fit = X.shape[0]
    # why: NNDescent expects a real dict, not None
    if self.metric_kwds is None:
        metric_kwds = {}
    else:
        metric_kwds = self.metric_kwds
    self.pynndescent_ = NNDescent(
        X,
        self.metric,
        metric_kwds,
        self.n_neighbors,
        self.n_trees,
        self.leaf_size,
        self.pruning_level,
        self.tree_init,
        self.random_state,
        self.algorithm,
        self.max_candidates,
        self.n_iters,
        self.early_termination_value,
        self.sampling_rate,
    )
    return self
def create_audit_event(self, code='AUDIT'):
    """Create a generic auditing Event logging the changes between saves
    and the initial data in creates.

    Kwargs:
        code (str): The code to set on the new Event.

    Returns:
        Event: A new event with relevant info inserted into it.
    """
    event = self._meta.event_model(
        code=code,
        model=self.__class__.__name__,
    )
    # attribute the event to the logged-in user when one exists
    if current_user:
        event.created_by = current_user.get_id()
    self.copy_foreign_keys(event)
    self.populate_audit_fields(event)
    return event
def dot_eth_label(name):
    """Convert from a name, like 'ethfinex.eth', to a label, like
    'ethfinex'.

    If `name` is already a label, this should be a noop, except for
    converting to a string and validating the name syntax.

    Raises:
        InvalidLabel: if the label is shorter than MIN_ETH_LABEL_LENGTH.
    """
    label = name_to_label(name, registrar='eth')
    if len(label) < MIN_ETH_LABEL_LENGTH:
        raise InvalidLabel('name %r is too short' % label)
    else:
        return label
def _wrapped(self):
    """Wrap this udf with a plain function, attaching the docstring
    from `self.func` and mirroring the udf attributes on the wrapper.
    """
    # why: __name__/__module__ are set explicitly below, so exclude them
    # from functools.wraps' copied attributes
    assignments = tuple(
        a for a in functools.WRAPPER_ASSIGNMENTS
        if a != '__name__' and a != '__module__')

    @functools.wraps(self.func, assigned=assignments)
    def wrapper(*args):
        return self(*args)

    wrapper.__name__ = self._name
    wrapper.__module__ = (self.func.__module__
                          if hasattr(self.func, '__module__')
                          else self.func.__class__.__module__)
    wrapper.func = self.func
    wrapper.returnType = self.returnType
    wrapper.evalType = self.evalType
    wrapper.deterministic = self.deterministic
    # keep asNondeterministic chainable on the wrapper as well
    wrapper.asNondeterministic = functools.wraps(
        self.asNondeterministic)(lambda: self.asNondeterministic()._wrapped())
    return wrapper
def evaluate_stream(self, stream: StreamWrapper) -> None:
    """Evaluate the given stream by running one non-training epoch.

    :param stream: stream to be evaluated
    """
    self._run_epoch(stream=stream, train=False)
Evaluate the given stream by running one non-training epoch.

:param stream: stream to be evaluated
def destroy_decompress(dinfo):
    """Wraps openjpeg library function opj_destroy_decompress.

    :param dinfo: ctypes pointer to a DecompressionInfoType structure
        previously returned by the library; released by this call.
    """
    # Declare the argtypes before the call so ctypes marshals the
    # pointer correctly.
    argtypes = [ctypes.POINTER(DecompressionInfoType)]
    OPENJPEG.opj_destroy_decompress.argtypes = argtypes
    OPENJPEG.opj_destroy_decompress(dinfo)
Wraps openjpeg library function opj_destroy_decompress.
def write(self, char):
    """Display a single character on the display.

    :type char: str or int
    :param char: Character to display
    """
    key = str(char).lower()
    glyph = self.font[key]
    self.segments.write(glyph)
Display a single character on the display :type char: str or int :param char: Character to display
def _get_response_ms(self):
    """Duration of the request/response cycle in milliseconds.

    Clock skew can make the delta negative; clamp to 0 in that case.
    """
    elapsed = now() - self.log['requested_at']
    millis = int(elapsed.total_seconds() * 1000)
    return millis if millis > 0 else 0
Get the duration of the request/response cycle in milliseconds. In case of a negative duration, 0 is returned.
def tag_remove(self, *tags):
    """Return a view with the specified tags removed."""
    remaining = set(self.tags) - set(tags)
    return View({**self.spec, 'tag': list(remaining)})
Return a view with the specified tags removed
def sign(mv):
    """Obtain a signature for a MetricValue.

    Args:
        mv: a MetricValue that's part of an operation

    Returns:
        bytes: a unique MD5 digest for that metric value
    """
    digest = hashlib.md5()
    update_hash(digest, mv)
    return digest.digest()
Obtains a signature for a `MetricValue` Args: mv (:class:`endpoints_management.gen.servicecontrol_v1_messages.MetricValue`): a MetricValue that's part of an operation Returns: string: a unique signature for that operation
def processWhileRunning(self):
    """Run tasks until stopService is called.

    Drives ``self.step()`` (an iterator of ``(result, more)`` pairs),
    yielding each result.  Between items it waits 0.1s while more work
    is pending and 10s when idle, via a Twisted deferred.
    """
    work = self.step()
    for result, more in work:
        yield result
        # Stop promptly once the service has been asked to shut down.
        if not self.running:
            break
        if more:
            delay = 0.1
        else:
            delay = 10.0
        # Yield a deferred that fires after the delay; the surrounding
        # coroutine machinery resumes this generator when it does.
        yield task.deferLater(reactor, delay, lambda: None)
Run tasks until stopService is called.
def clear(self):
    """Delete every session currently held in the pool."""
    while True:
        try:
            pooled = self._sessions.get(block=False)
        except queue.Empty:
            # Pool drained.
            return
        pooled.delete()
Delete all sessions in the pool.
def __decode_ext_desc(self, value_type, value):
    """Decode ASF_EXTENDED_CONTENT_DESCRIPTION_OBJECT values.

    Type codes: 0 -> decoded string, 1 -> raw bytes,
    2..5 -> little-endian integer.  Other codes yield None.
    """
    if value_type == 0:
        return self.__decode_string(value)
    if value_type == 1:
        return value
    if 1 < value_type < 6:
        return _bytes_to_int_le(value)
    return None
decode ASF_EXTENDED_CONTENT_DESCRIPTION_OBJECT values
def relay_events_from(self, originator, event_type, *more_event_types):
    """Configure this handler to re-dispatch events from another handler.

    Whenever *originator* dispatches an event of type *event_type* or any
    of *more_event_types*, this handler dispatches an event of the same
    type with the same arguments.

    :param originator: the handler whose events should be relayed
    :param event_type: first event type to relay
    :param more_event_types: any additional event types to relay
    """
    # Bind the event type via a default argument: a plain closure would
    # capture the comprehension's loop variable itself, so after the
    # comprehension finished EVERY handler would dispatch the *last*
    # event type (classic late-binding bug).
    handlers = {
        etype: (lambda *args, _etype=etype, **kwargs:
                self.dispatch_event(_etype, *args, **kwargs))
        for etype in (event_type,) + more_event_types
    }
    originator.set_handlers(**handlers)
Configure this handler to re-dispatch events from another handler. This method configures this handler to dispatch an event of type *event_type* whenever *originator* dispatches events of the same type or any of the types in *more_event_types*. Any arguments passed to the original event are copied to the new event. This method is meant to be useful for creating composite widgets that want to present a simple API by making it seem like the events being generated by their children are actually coming from them. See the `/composing_widgets` tutorial for an example.
def set_window_user_pointer(window, pointer):
    """Sets the user pointer of the specified window.

    You may pass a normal python object into this function and it will
    be wrapped automatically. The object will be kept in existence until
    the pointer is set to something else or until the window is destroyed.

    Wrapper for:
        void glfwSetWindowUserPointer(GLFWwindow* window, void* pointer);
    """
    data = (False, pointer)
    if not isinstance(pointer, ctypes.c_void_p):
        # Not already a raw pointer: wrap the python object so it can be
        # handed to C; the True flag records that it is wrapped.
        data = (True, pointer)
        # !!! Create a temporary copy of the object in a py_object and
        # take its address as a void pointer.
        pointer = ctypes.cast(ctypes.pointer(ctypes.py_object(pointer)), ctypes.c_void_p)
    # Key the retained reference by the window struct's address so the
    # wrapped object stays alive while the window exists.
    window_addr = ctypes.cast(ctypes.pointer(window),
                              ctypes.POINTER(ctypes.c_long)).contents.value
    _window_user_data_repository[window_addr] = data
    _glfw.glfwSetWindowUserPointer(window, pointer)
Sets the user pointer of the specified window. You may pass a normal python object into this function and it will be wrapped automatically. The object will be kept in existence until the pointer is set to something else or until the window is destroyed. Wrapper for: void glfwSetWindowUserPointer(GLFWwindow* window, void* pointer);
def _validate_ids(self, resource_ids):
    """Validate a list of ids, raising _ResponseFailed if any is invalid.

    Args:
        resource_ids (list of str): The ids to validate

    Raises:
        _ResponseFailed: An id was invalid; a status of INVALID_ID
            will be sent with the response.
    """
    for rid in resource_ids:
        if self._id_regex.fullmatch(rid) is not None:
            continue
        LOGGER.debug('Invalid resource id requested: %s', rid)
        raise _ResponseFailed(self._status.INVALID_ID)
Validates a list of ids, raising a ResponseFailed error if invalid. Args: resource_id (list of str): The ids to validate Raises: ResponseFailed: The id was invalid, and a status of INVALID_ID will be sent with the response.
def log(client, revision, format, no_output, paths):
    """Show logs for a file."""
    graph = Graph(client)
    if not paths:
        # No explicit paths: derive them from the files touched by the
        # commit at the end of the revision.  "A..B" is a range; a bare
        # rev is treated as both endpoints; "A.." defaults the end to HEAD.
        start, is_range, stop = revision.partition('..')
        if not is_range:
            stop = start
        elif not stop:
            stop = 'HEAD'
        commit = client.repo.rev_parse(stop)
        # NULL_TREE handles the root commit, which has no parents.
        paths = (
            str(client.path / item.a_path)
            for item in commit.diff(commit.parents or NULL_TREE)
        )
    graph.build(paths=paths, revision=revision, can_be_cwl=no_output)
    FORMATS[format](graph)
Show logs for a file.
def zGetUpdate(self):
    """Update the lens.

    Sends the ``GetUpdate`` DDE command and returns its status code.

    Returns:
        int: the status reported by the server, or -998 if no reply
        was received.
    """
    reply = self._sendDDEcommand("GetUpdate")
    # ``is None`` instead of ``!= None``; the original also pre-assigned
    # a status that was immediately overwritten.
    if reply is None:
        return -998
    return int(reply)
Update the lens
def send_offset_commit_request(self, group, payloads=None,
                               fail_on_error=True, callback=None,
                               group_generation_id=-1,
                               consumer_id=''):
    """Send a list of OffsetCommitRequests to the Kafka broker for the
    given consumer group.

    Args:
      group (str): The consumer group to which to commit the offsets
      payloads ([OffsetCommitRequest]): List of topic, partition, offsets
        to commit.
      fail_on_error (bool): Whether to raise an exception if a response
        from the Kafka broker indicates an error
      callback (callable): a function to call with each of the responses
        before returning the returned value to the caller.
      group_generation_id (int): Must currently always be -1
      consumer_id (str): Must currently always be empty string

    Returns:
      [OffsetCommitResponse]: List of OffsetCommitResponse objects.
      Will raise KafkaError for failed requests if fail_on_error is True
    """
    group = _coerce_consumer_group(group)
    encoder = partial(KafkaCodec.encode_offset_commit_request,
                      group=group,
                      group_generation_id=group_generation_id,
                      consumer_id=consumer_id)
    decoder = KafkaCodec.decode_offset_commit_response
    # Twisted inlineCallbacks-style generator: the broker-aware send
    # routes each payload to the partition leader and yields a deferred.
    resps = yield self._send_broker_aware_request(
        payloads, encoder, decoder, consumer_group=group)

    returnValue(self._handle_responses(
        resps, fail_on_error, callback, group))
Send a list of OffsetCommitRequests to the Kafka broker for the given consumer group. Args: group (str): The consumer group to which to commit the offsets payloads ([OffsetCommitRequest]): List of topic, partition, offsets to commit. fail_on_error (bool): Whether to raise an exception if a response from the Kafka broker indicates an error callback (callable): a function to call with each of the responses before returning the returned value to the caller. group_generation_id (int): Must currently always be -1 consumer_id (str): Must currently always be empty string Returns: [OffsetCommitResponse]: List of OffsetCommitResponse objects. Will raise KafkaError for failed requests if fail_on_error is True
def resolve_primary_keys_in_schema(sql_tokens: List[str],
                                   schema: Dict[str, List[TableColumn]]) -> List[str]:
    """Replace the generic column reference ``ID`` with the actual
    primary-key column name of the table it refers to.

    Some examples in the text2sql datasets use ID as a column reference
    to the column of a table which has a primary key, which breaks
    grammars constrained to real column names.
    """
    primary_keys = {
        table: max(columns, key=lambda col: col.is_primary_key).name
        for table, columns in schema.items()
    }
    resolved = []
    for position, token in enumerate(sql_tokens):
        # The table name precedes the column reference by two tokens
        # (table, separator, column).
        if token == "ID" and position > 2:
            candidate_table = sql_tokens[position - 2]
            if candidate_table in primary_keys:
                token = primary_keys[candidate_table]
        resolved.append(token)
    return resolved
Some examples in the text2sql datasets use ID as a column reference to the column of a table which has a primary key. This causes problems if you are trying to constrain a grammar to only produce the column names directly, because you don't know what ID refers to. So instead of dealing with that, we just replace it.
def grist (self):
    """Helper to 'actual_name', above. Compute unique prefix used to
    distinguish this target from other targets with the same name which
    create different files.
    """
    # Depending on target, there may be one or more levels of grist.
    path = self.path ()
    if path:
        # The target has a known path: that alone is unique enough.
        return 'p' + path
    else:
        # No path: build the grist from the project location plus, when
        # an action is attached, its property set, so targets with the
        # same name but different build properties remain distinct.
        project_location = self.project_.get ('location')
        path_components = b2.util.path.split(project_location)
        location_grist = '!'.join (path_components)
        if self.action_:
            ps = self.action_.properties ()
            property_grist = ps.as_path ()
            if property_grist:
                location_grist = location_grist + '/' + property_grist
        return 'l' + location_grist
Helper to 'actual_name', above. Compute unique prefix used to distinguish this target from other targets with the same name which create different files.
def findfirst(f, coll):
    """Return the first element of *coll* for which ``f`` is truthy,
    otherwise None.

    Note: the previous implementation used ``dropwhile(f, coll)``, which
    returns the first element *not* matching ``f`` -- the opposite of the
    documented contract -- and also materialized the whole remainder of
    the iterable.  ``filter`` is both correct and lazy.
    """
    return next(filter(f, coll), None)
Return first occurrence matching f, otherwise None
def make_edge_vectors(adjacency_matrix, num_edge_types, depth, name=None):
    """Gets edge vectors for the edge types in the adjacency matrix.

    Args:
      adjacency_matrix: A [batch, num_nodes, num_nodes] tensor of ints.
      num_edge_types: Number of different edge types
      depth: Number of channels
      name: a string

    Returns:
      A [batch, num_nodes, num_nodes, depth] vector of tensors
    """
    with tf.variable_scope(name, default_name="edge_vectors"):
        att_adj_vectors_shape = [num_edge_types, depth]
        adjacency_matrix_shape = common_layers.shape_list(adjacency_matrix)
        # Initialize small (stddev depth**-0.5) and scale up by depth**0.5
        # so the effective embeddings are unit-scale.
        adj_vectors = (
            tf.get_variable(
                "adj_vectors",
                att_adj_vectors_shape,
                initializer=tf.random_normal_initializer(0, depth**-0.5)) *
            (depth**0.5))
        # One-hot over edge types followed by matmul is an embedding
        # lookup over the flattened adjacency entries.
        adjacency_matrix_one_hot = tf.one_hot(adjacency_matrix, num_edge_types)
        att_adj_vectors = tf.matmul(
            tf.reshape(tf.to_float(adjacency_matrix_one_hot),
                       [-1, num_edge_types]),
            adj_vectors)
        # Restore the [batch, num_nodes, num_nodes, depth] shape.
        return tf.reshape(att_adj_vectors,
                          [adjacency_matrix_shape[0], adjacency_matrix_shape[1],
                           adjacency_matrix_shape[2], depth])
Gets edge vectors for the edge types in the adjacency matrix. Args: adjacency_matrix: A [batch, num_nodes, num_nodes] tensor of ints. num_edge_types: Number of different edge types depth: Number of channels name: a string Returns: A [batch, num_nodes, num_nodes, depth] vector of tensors
def open(self, results=False):
    """Open the strawpoll in a browser.

    :param results: if True, open the results page instead of the
        main poll page.
    """
    target = self.results_url if results else self.url
    webbrowser.open(target)
Open the strawpoll in a browser. Can specify to open the main or results page. :param results: True/False
def _checkType(self, obj, identifier):
    """Assert that an object is of type str.

    :param obj: the value to check
    :param identifier: human-readable name used in the error message
    :raises TypeError: if *obj* is not a string
    """
    # NOTE: ``basestring`` exists only on Python 2; this module predates
    # Python 3.
    if not isinstance(obj, basestring):
        raise TypeError("expected %s '%s' to be a string (was '%s')" % (identifier, obj, type(obj).__name__))
Assert that an object is of type str.
def maskIndex(self):
    """Return a boolean index array, True where the value is masked.

    Always has the same shape as the masked array's data, even if the
    mask is a single boolean.
    """
    if isinstance(self.mask, bool):
        # ``np.bool`` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin ``bool`` is the correct dtype argument.
        return np.full(self.data.shape, self.mask, dtype=bool)
    else:
        return self.mask
Returns a boolean index with True if the value is masked. Always has the same shape as the maskedArray.data, even if the mask is a single boolean.
def parse_xmlsec_output(output):
    """Parse the output from xmlsec to find out whether the command
    was successful.

    :param output: The output from Popen
    :return: True if the command was a success
    :raises XmlsecError: on a FAIL line, or when neither OK nor FAIL
        appears in the output
    """
    for line in output.splitlines():
        if line == 'OK':
            return True
        if line == 'FAIL':
            raise XmlsecError(output)
    raise XmlsecError(output)
Parse the output from xmlsec to try to find out if the command was successful or not. :param output: The output from Popen :return: True if the command was a success; an XmlsecError is raised otherwise
def _get_indices_and_signs(hasher, terms):
    """For each term from ``terms`` return its column index and sign,
    as assigned by FeatureHasher ``hasher``.
    """
    transformed = _transform_terms(hasher, terms)
    column_indices = transformed.nonzero()[1]
    column_signs = transformed.sum(axis=1).A.ravel()
    return column_indices, column_signs
For each term from ``terms`` return its column index and sign, as assigned by FeatureHasher ``hasher``.
def get_char_range(self, start, end, increment=None):
    """Get a range of alphabetic characters as a generator.

    ``increment`` may arrive as a string (e.g. "2" or "-2"); negative
    steps are treated as positive, and a zero step falls back to 1.
    """
    step = abs(int(increment)) if increment else 1
    if step == 0:
        # increment was a truthy zero such as the string "0".
        step = 1
    # A descending request walks the reversed alphabet table.
    alpha = _nalpha if start > end else _alpha
    first = alpha.index(start)
    last = alpha.index(end)
    if first > last:
        first, last = last, first
    return (c for c in alpha[first:last + 1:step])
Get a range of alphabetic characters.
def get_len(self, key):
    """Return the length of the sequence stored under *key* in the
    model's data."""
    return len(self.model.get_data()[key])
Return sequence length
def _get_external_network_dict(self, context, port_db):
    """Get external network information.

    Get the information about the external network, so that it can be
    used to create the hidden port, subnet, and network.

    Returns a ``(transit_net_dict, network)`` pair, or ``({}, None)``
    when the owning router has no external gateway.
    """
    if port_db.device_owner == DEVICE_OWNER_ROUTER_GW:
        # The port itself is a router gateway port -- its own network is
        # the external one.
        network = self._core_plugin.get_network(context,
                                                port_db.network_id)
    else:
        # Otherwise find the router that owns the port and use its
        # external gateway info.
        router = self.l3_plugin.get_router(context, port_db.device_id)
        ext_gw_info = router.get(EXTERNAL_GW_INFO)
        if not ext_gw_info:
            return {}, None
        network = self._core_plugin.get_network(context,
                                                ext_gw_info['network_id'])
    external_network = self.get_ext_net_name(network['name'])
    # Fall back to the default transit dict when there is no
    # per-network configuration entry.
    transit_net = self.transit_nets_cfg.get(
        external_network) or self._default_ext_dict
    transit_net['network_name'] = external_network
    return transit_net, network
Get external network information Get the information about the external network, so that it can be used to create the hidden port, subnet, and network.
def exists(self):
    """Return True if the database exists, or for Sqlite, which will
    create the file on the first reference, the file has been
    initialized with the root config.
    """
    if self.driver == 'sqlite' and not os.path.exists(self.path):
        return False
    # Touching ``self.engine`` initializes the engine (presumably a lazy
    # property with side effects -- do not remove this bare reference;
    # TODO confirm against the property definition).
    self.engine
    try:
        from sqlalchemy.engine.reflection import Inspector
        inspector = Inspector.from_engine(self.engine)
        # The database counts as existing only once the 'config' table
        # has been created in the target schema.
        if 'config' in inspector.get_table_names(schema=self._schema):
            return True
        else:
            return False
    finally:
        self.close_connection()
Return True if the database exists, or for Sqlite, which will create the file on the first reference, the file has been initialized with the root config
def add_paths_to_os(self, key=None, update=None):
    """Add the paths in tree environ into the os environ.

    This code goes through the tree environ and checks for existence in
    the os environ, then adds them.

    Parameters:
        key (str): The section name to check against / add
        update (bool): If True, overwrites existing tree environment
            variables in your local environment. Default is False.
    """
    if key is None:
        sections = [k for k in self.environ.keys() if 'default' not in k]
    else:
        sections = key if isinstance(key, list) else [key]
    for section in sections:
        self.check_paths(self.get_paths(section), update=update)
Add the paths in tree environ into the os environ This code goes through the tree environ and checks for existence in the os environ, then adds them Parameters: key (str): The section name to check against / add update (bool): If True, overwrites existing tree environment variables in your local environment. Default is False.
def remove_file_filters(self, file_filters):
    """Removes the `file_filters` from the internal state.

    `file_filters` can be a single object or an iterable.
    """
    self.file_filters = util.remove_from_list(self.file_filters, file_filters)
Removes the `file_filters` from the internal state. `file_filters` can be a single object or an iterable.
def getTypesModuleName(self):
    """Return the types module name.

    Uses the explicitly configured name when set; otherwise derives a
    base name from the WSDL and appends the configured suffix, if any.

    :raises WsdlGeneratorError: when no service name can be determined
    """
    if self.types_module_name is not None:
        return self.types_module_name

    name = GetModuleBaseNameFromWSDL(self._wsdl)
    if not name:
        # Call-style raise works on both Python 2 and 3 (the original
        # ``raise Cls, 'msg'`` statement is Python-2-only syntax).
        raise WsdlGeneratorError('could not determine a service name')

    if self.types_module_suffix is None:
        return name

    return '%s%s' % (name, self.types_module_suffix)
types module name.
def cmp_ast(node1, node2):
    """Compare if two nodes are equal.

    Recursively compares AST nodes, sequences of nodes, and plain values.
    """
    if type(node1) != type(node2):
        return False
    if isinstance(node1, (list, tuple)):
        if len(node1) != len(node2):
            return False
        return all(cmp_ast(a, b) for a, b in zip(node1, node2))
    if isinstance(node1, ast.AST):
        # ``Undedined`` (sic) is the module-level missing-field sentinel.
        return all(
            cmp_ast(getattr(node1, field, Undedined),
                    getattr(node2, field, Undedined))
            for field in node1._fields)
    return node1 == node2
Compare if two nodes are equal.
def _encode(data):
    """Encode the given data using base-64.

    Non-bytes input is stringified and converted to bytes first;
    ``six.b`` keeps that conversion working on both Python 2 and 3.

    :param data: bytes, or any object convertible via ``str``
    :return: base-64 encoded string
    """
    if not isinstance(data, bytes_types):
        data = six.b(str(data))
    return base64.b64encode(data).decode("utf-8")
Encode the given data using base-64 :param data: :return: base-64 encoded string
def ulocalized_time(time, long_format=None, time_only=None, context=None, request=None):
    """This function gets a string or a DateTime object as time and
    returns a string with the time formatted

    :param time: The time to process
    :type time: str/DateTime
    :param long_format: If True, return time in long format
    :type long_format: boolean/null
    :param time_only: If True, only returns time.
    :type time_only: boolean/null
    :param context: The current context
    :type context: ATContentType
    :param request: The current request
    :type request: HTTPRequest object
    :returns: The formatted date as string
    :rtype: string
    """
    # Coerce strings (and other inputs) to a DateTime; bail out with an
    # empty string when conversion fails.
    time = get_date(context, time)
    if not time or not isinstance(time, DateTime):
        return ''

    # A time of exactly midnight means only a date was supplied, so the
    # long (date+time) format would show a bogus 00:00.
    if time.second() + time.minute() + time.hour() == 0:
        long_format = False
    try:
        time_str = _ut(time, long_format, time_only, context,
                       'senaite.core', request)
    except ValueError:
        # Fail gracefully: log the conversion problem and return ''.
        err_msg = traceback.format_exc() + '\n'
        logger.warn(
            err_msg + '\n' +
            "Error converting '{}' time to string in {}."
            .format(time, context))
        time_str = ''
    return time_str
This function gets a string or a DateTime object as time and returns a string with the time formatted

:param time: The time to process
:type time: str/DateTime
:param long_format: If True, return time in long format
:type long_format: boolean/null
:param time_only: If True, only returns time.
:type time_only: boolean/null
:param context: The current context
:type context: ATContentType
:param request: The current request
:type request: HTTPRequest object
:returns: The formatted date as string
:rtype: string
def is_balance_proof_safe_for_onchain_operations(
        balance_proof: BalanceProofSignedState,
) -> bool:
    """Check if the balance proof would overflow onchain.

    The transferred and locked amounts are added together on-chain, so
    their sum must fit into a uint256.
    """
    combined = balance_proof.transferred_amount + balance_proof.locked_amount
    return combined <= UINT256_MAX
Check if the balance proof would overflow onchain.
def _start_beacon(port=None):
    """Start a beacon thread within this process if no beacon is
    currently running on this machine.

    In general this is called automatically when an attempt is made to
    advertise or discover. It might be convenient, though, to call this
    function directly if you want to have a process whose only job is to
    host this beacon so that it doesn't shut down when other processes
    shut down.
    """
    global _beacon
    if _beacon is None:
        _logger.debug("About to start beacon with port %s", port)
        try:
            _beacon = _Beacon(port)
        except (OSError, socket.error) as exc:
            if exc.errno == errno.EADDRINUSE:
                # Another process on this machine already hosts the
                # beacon; remember a marker object so we use the remote
                # one instead of retrying.
                _logger.warn("Beacon already active on this machine")
                _beacon = _remote_beacon
            else:
                raise
        else:
            _beacon.start()
Start a beacon thread within this process if no beacon is currently running on this machine. In general this is called automatically when an attempt is made to advertise or discover. It might be convenient, though, to call this function directly if you want to have a process whose only job is to host this beacon so that it doesn't shut down when other processes shut down.
def build_output(self, fout):
    """Squash self.out into a string.

    Join every line in self.out with a new line and write the result to
    the output file.  (The original wrapped ``self.out`` in an identity
    list comprehension, which did nothing.)
    """
    fout.write('\n'.join(self.out))
Squash self.out into string. Join every line in self.out with a new line and write the result to the output file.
def match(self, expression=None, xpath=None, namespaces=None):
    """decorator that allows us to match by expression or by xpath
    for each transformation method
    """
    class MatchObject(Dict):
        pass

    def _match(function):
        # Register the decorated function together with its matching
        # criteria; ``self.matches`` is scanned later to select which
        # transformation methods apply.
        self.matches.append(
            MatchObject(expression=expression, xpath=xpath, function=function,
                        namespaces=namespaces))

        # Return a pass-through wrapper so the decorated method remains
        # directly callable.
        def wrapper(self, *args, **params):
            return function(self, *args, **params)
        return wrapper

    return _match
decorator that allows us to match by expression or by xpath for each transformation method
def to_html(self):
    """Render the InaSAFE branding element as an HTML snippet."""
    logo = resource_url(
        resources_path('img', 'logos', 'inasafe-logo-white.png'))
    template = (
        '<div class="branding">'
        '<img src="%s" title="%s" alt="%s" %s/></div>')
    return template % (logo, 'InaSAFE', 'InaSAFE', self.html_attributes())
Render as html.
def getTaskTypes(self):
    """Return the current list of task types as a DisplayList of
    (value, translated utf-8 title) pairs."""
    labels = ('Calibration', 'Enhancement', 'Preventive', 'Repair',
              'Validation')
    types = [(label, safe_unicode(_(label)).encode('utf-8'))
             for label in labels]
    return DisplayList(types)
Return the current list of task types
def get_element_child_info(doc, attr):
    """Get information from child elements of this element as a list,
    preserving order.  Signature tags are skipped.

    :param doc: XML element
    :param attr: Attribute to get from the elements, for example "tag"
        or "text".
    """
    skipped = ("author_signature", "parent_author_signature")
    return [getattr(child, attr) for child in doc
            if child.tag not in skipped]
Get information from child elements of this element as a list, since order is important. Don't include signature tags.

:param doc: XML element
:param attr: Attribute to get from the elements, for example "tag" or "text".
def version(self):
    """The memcached "version" command.

    Returns:
        A string of the memcached version.
    """
    results = self._misc_cmd([b"version\r\n"], b'version', False)
    prefix, _, remainder = results[0].partition(b' ')
    if prefix != b'VERSION':
        raise MemcacheUnknownError(
            "Received unexpected response: %s" % results[0])
    return remainder
The memcached "version" command. Returns: A string of the memcached version.