positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def get_scale(self, gg): """ Create a scale """ # This method does some introspection to save users from # scale mismatch error. This could happen when the # aesthetic is mapped to a categorical but the limits # are not provided in categorical form. We only handle # the case where the mapping uses an expression to # conver to categorical e.g `aes(color='factor(cyl)')`. # However if `'cyl'` column is a categorical and the # mapping is `aes(color='cyl')`, that will result in # an error. If later case proves common enough then we # could inspect the data and be clever based on that too!! ae = self.aesthetic series = self.limits_series ae_values = [] # Look through all the mappings for this aesthetic, # if we detect any factor stuff then we convert the # limits data to categorical so that the right scale # can be choosen. This should take care of the most # common use cases. for layer in gg.layers: with suppress(KeyError): value = layer.mapping[ae] if isinstance(value, str): ae_values.append(value) for value in ae_values: if ('factor(' in value or 'Categorical(' in value): series = pd.Categorical(self.limits_series) break return make_scale(self.aesthetic, series, limits=self.limits, trans=self.trans)
Create a scale
def get_num_gpu():
    """
    Returns:
        int: #available GPUs in CUDA_VISIBLE_DEVICES, or in the system.
    """

    def warn_return(ret, message):
        # Returns ``ret`` unchanged; warns when GPUs were found but the
        # local TensorFlow build has no CUDA support and cannot use them.
        try:
            import tensorflow as tf
        except ImportError:
            return ret

        built_with_cuda = tf.test.is_built_with_cuda()
        if not built_with_cuda and ret > 0:
            logger.warn(message + "But TensorFlow was not built with CUDA support and could not use GPUs!")
        return ret

    # 1. Trust a non-empty CUDA_VISIBLE_DEVICES: count its entries.
    env = os.environ.get('CUDA_VISIBLE_DEVICES', None)
    if env:
        return warn_return(len(env.split(',')), "Found non-empty CUDA_VISIBLE_DEVICES. ")

    # 2. Fall back to counting the lines printed by `nvidia-smi -L`.
    output, code = subproc_call("nvidia-smi -L", timeout=5)
    if code == 0:
        output = output.decode('utf-8')
        return warn_return(len(output.strip().split('\n')), "Found nvidia-smi. ")

    try:
        # 3. Use NVML to query device properties
        with NVMLContext() as ctx:
            return warn_return(ctx.num_devices(), "NVML found nvidia devices. ")
    except Exception:
        # 4. Fallback: let TensorFlow enumerate local devices.
        logger.info("Loading local devices by TensorFlow ...")

        try:
            import tensorflow as tf
            # available since TF 1.14
            gpu_devices = tf.config.experimental.list_physical_devices('GPU')
        except AttributeError:
            from tensorflow.python.client import device_lib
            local_device_protos = device_lib.list_local_devices()
            # Note this will initialize all GPUs and therefore has side effect
            # https://github.com/tensorflow/tensorflow/issues/8136
            gpu_devices = [x.name for x in local_device_protos if x.device_type == 'GPU']
        return len(gpu_devices)
Returns: int: #available GPUs in CUDA_VISIBLE_DEVICES, or in the system.
def _query(cls, *args, **kwds): """Create a Query object for this class. Args: distinct: Optional bool, short hand for group_by = projection. *args: Used to apply an initial filter **kwds: are passed to the Query() constructor. Returns: A Query object. """ # Validating distinct. if 'distinct' in kwds: if 'group_by' in kwds: raise TypeError( 'cannot use distinct= and group_by= at the same time') projection = kwds.get('projection') if not projection: raise TypeError( 'cannot use distinct= without projection=') if kwds.pop('distinct'): kwds['group_by'] = projection # TODO: Disallow non-empty args and filter=. from .query import Query # Import late to avoid circular imports. qry = Query(kind=cls._get_kind(), **kwds) qry = qry.filter(*cls._default_filters()) qry = qry.filter(*args) return qry
Create a Query object for this class. Args: distinct: Optional bool, short hand for group_by = projection. *args: Used to apply an initial filter **kwds: are passed to the Query() constructor. Returns: A Query object.
def override_default_templates(self):
    """
    Override the default email templates already defined by other apps.

    When ``OVERRIDE_TEMPLATE_DIR`` is configured, every ``.html`` or
    ``.txt`` file found in that directory is registered in
    ``self.overrides``, mapping the template filename to the directory.
    """
    if plugs_mail_settings['OVERRIDE_TEMPLATE_DIR']:
        dir_ = plugs_mail_settings['OVERRIDE_TEMPLATE_DIR']
        for file_ in os.listdir(dir_):
            # BUG FIX: the suffix tuple previously read ('.html', 'txt');
            # the missing dot made it match any filename merely ending in
            # "txt" (e.g. "notes_txt"). Require a real '.txt' extension.
            if file_.endswith(('.html', '.txt')):
                self.overrides[file_] = dir_
Override the default emails already defined by other apps
def load_config(args, config_path=".inlineplz.yml"):
    """Load inline-plz config from yaml config file with reasonable defaults.

    Merges the YAML file at ``config_path`` into ``args`` (via
    ``update_from_config``), fills in a default ``ignore_paths`` list,
    and — when no local config was found — retries once with the same
    filename inside ``args.config_dir``.
    """
    config = {}
    try:
        with open(config_path) as configfile:
            config = yaml.safe_load(configfile) or {}
            if config:
                print("Loaded config from {}".format(config_path))
                pprint.pprint(config)
    except (IOError, OSError, yaml.parser.ParserError):
        # Missing or unparsable config is non-fatal: fall back to defaults.
        traceback.print_exc()
    args = update_from_config(args, config)
    # Default set of paths that linters should skip.
    args.ignore_paths = args.__dict__.get("ignore_paths") or [
        "node_modules",
        ".git",
        ".tox",
        "godeps",
        "vendor",
        "site-packages",
        "venv",
        ".env",
        "spec",
        "migrate",
        "bin",
        "fixtures",
        "cassettes",
        ".cache",
        ".idea",
        ".pytest_cache",
        "__pycache__",
        "dist",
    ]
    if config_path != ".inlineplz.yml":
        return args
    # fall back to config_dir inlineplz yaml if we didn't find one locally
    if args.config_dir and not config:
        new_config_path = os.path.join(args.config_dir, config_path)
        if os.path.exists(new_config_path):
            return load_config(args, new_config_path)
    return args
Load inline-plz config from yaml config file with reasonable defaults.
def get_appliances(self, location_id):
    """Get the appliances added for a specified location.

    Args:
        location_id (string): identifying string of the location

    Returns:
        list: dictionary objects containing appliances data
    """
    url = "https://api.neur.io/v1/appliances"
    headers = self.__gen_headers()
    headers["Content-Type"] = "application/json"

    params = {
        "locationId": location_id,
    }
    url = self.__append_url_params(url, params)
    r = requests.get(url, headers=headers)
    # NOTE(review): no status-code check — a non-2xx response will raise
    # from .json() or return an error payload; confirm desired behavior.
    return r.json()
Get the appliances added for a specified location. Args: location_id (string): identifying string of the location Returns: list: dictionary objects containing appliances data
def cancelAllPendingResults(self):
    """Cancel all pending results.

    Only completed results (stored as dicts) are kept in the notebook's
    record; pending job entries are dropped and the pending index is
    cleared. Note that this only affects the notebook's record, not any
    job running in a lab.
    """
    results = self._results
    for key in list(results.keys()):
        kept = []
        for entry in results[key]:
            if isinstance(entry, dict):
                kept.append(entry)
        results[key] = kept
    self._pending = {}
Cancel all pending results. Note that this only affects the notebook's record, not any job running in a lab.
def _DownloadScript(self, url, dest_dir): """Download the contents of the URL to the destination. Args: url: string, the URL to download. dest_dir: string, the path to a directory for storing metadata scripts. Returns: string, the path to the file storing the metadata script. """ # Check for the preferred Google Storage URL format: # gs://<bucket>/<object> if url.startswith(r'gs://'): # Convert the string into a standard URL. url = re.sub('^gs://', 'https://storage.googleapis.com/', url) return self._DownloadAuthUrl(url, dest_dir) header = r'http[s]?://' domain = r'storage\.googleapis\.com' # Many of the Google Storage URLs are supported below. # It is prefered that customers specify their object using # its gs://<bucket>/<object> url. bucket = r'(?P<bucket>[a-z0-9][-_.a-z0-9]*[a-z0-9])' # Accept any non-empty string that doesn't contain a wildcard character obj = r'(?P<obj>[^\*\?]+)' # Check for the Google Storage URLs: # http://<bucket>.storage.googleapis.com/<object> # https://<bucket>.storage.googleapis.com/<object> gs_regex = re.compile(r'\A%s%s\.%s/%s\Z' % (header, bucket, domain, obj)) match = gs_regex.match(url) if match: return self._DownloadAuthUrl(url, dest_dir) # Check for the other possible Google Storage URLs: # http://storage.googleapis.com/<bucket>/<object> # https://storage.googleapis.com/<bucket>/<object> # # The following are deprecated but checked: # http://commondatastorage.googleapis.com/<bucket>/<object> # https://commondatastorage.googleapis.com/<bucket>/<object> gs_regex = re.compile( r'\A%s(commondata)?%s/%s/%s\Z' % (header, domain, bucket, obj)) match = gs_regex.match(url) if match: return self._DownloadAuthUrl(url, dest_dir) # Unauthenticated download of the object. return self._DownloadUrl(url, dest_dir)
Download the contents of the URL to the destination. Args: url: string, the URL to download. dest_dir: string, the path to a directory for storing metadata scripts. Returns: string, the path to the file storing the metadata script.
def automaster(config='/etc/auto_salt'):
    '''
    List the contents of the auto master

    CLI Example:

    .. code-block:: bash

        salt '*' mount.automaster
    '''
    ret = {}
    if not os.path.isfile(config):
        return ret
    with salt.utils.files.fopen(config) as ifile:
        for line in ifile:
            line = salt.utils.stringutils.to_unicode(line)
            if line.startswith('#'):
                # Commented
                continue
            if not line.strip():
                # Blank line
                continue
            comps = line.split()
            if len(comps) != 3:
                # Invalid entry
                continue

            # Entry format assumed: "<name> <fstype,opt1,opt2> <host>:<device>"
            prefix = "/.."
            name = comps[0].replace(prefix, "")
            device_fmt = comps[2].split(":")
            opts = comps[1].split(',')

            # NOTE(review): device_fmt[1] assumes a ":" is always present
            # in the device field — an entry without one would raise
            # IndexError; confirm against real automaster files.
            ret[name] = {'device': device_fmt[1],
                         'fstype': opts[0],
                         'opts': opts[1:]}
    return ret
List the contents of the auto master CLI Example: .. code-block:: bash salt '*' mount.automaster
def single_download_photos(photos):
    """Use single process to download photos

    :param photos: The photos to be downloaded
    :type photos: list of dicts
    """
    global counter
    # Seed the module-level counter with the number of photos to fetch;
    # presumably `download_photo` updates it for progress reporting —
    # TODO confirm against its implementation.
    counter = len(photos)
    for photo in photos:
        download_photo(photo)
Use single process to download photos :param photos: The photos to be downloaded :type photos: list of dicts
def cci(self, n, array=False):
    """Commodity Channel Index (CCI).

    Computes talib's CCI over the stored high/low/close series with
    period ``n``. Returns the full series when ``array`` is True,
    otherwise only the most recent value.
    """
    values = talib.CCI(self.high, self.low, self.close, n)
    return values if array else values[-1]
CCI指标
def update(self, other):
    """Merge all pileup elements from ``other`` into this pileup.

    Both pileups must be at the same locus.
    """
    assert self.locus == other.locus
    self.elements.update(other.elements)
Add all pileup elements from other into self.
def print_params(self, allpars=False, loglevel=logging.INFO):
    """Print information about the model parameters
    (values, errors, bounds, scale).

    Args:
        allpars (bool): If True, also show sources that have no free
            parameters; otherwise only sources with at least one free
            parameter are listed.
        loglevel (int): Logging level used to emit the formatted table.
    """
    pars = self.get_params()

    o = '\n'
    o += '%4s %-20s%10s%10s%10s%10s%10s%5s\n' % (
        'idx', 'parname', 'value', 'error',
        'min', 'max', 'scale', 'free')

    o += '-' * 80 + '\n'

    # Group the flat parameter list by source name, preserving order.
    src_pars = collections.OrderedDict()
    for p in pars:
        src_pars.setdefault(p['src_name'], [])
        src_pars[p['src_name']] += [p]

    # Names of sources that have at least one free parameter.
    free_sources = []
    for k, v in src_pars.items():
        for p in v:
            if not p['free']:
                continue
            free_sources += [k]

    for k, v in src_pars.items():
        if not allpars and k not in free_sources:
            continue

        o += '%s\n' % k
        for p in v:
            o += '%4i %-20.19s' % (p['idx'], p['par_name'])
            o += '%10.3g%10.3g' % (p['value'], p['error'])
            o += '%10.3g%10.3g%10.3g' % (p['min'], p['max'], p['scale'])

            # Trailing '*' marks free parameters.
            if p['free']:
                o += ' *'
            else:
                o += ' '

            o += '\n'

    self.logger.log(loglevel, o)
Print information about the model parameters (values, errors, bounds, scale).
def send(self, obj):
    """Pickle ``obj`` with ForkingPickler and transmit the raw bytes."""
    buffer = io.BytesIO()
    pickler = ForkingPickler(buffer, pickle.HIGHEST_PROTOCOL)
    pickler.dump(obj)
    payload = buffer.getvalue()
    self.send_bytes(payload)
Send object
def make_url(contents, domain=DEFAULT_DOMAIN, force_gist=False,
             size_for_gist=MAX_URL_LEN):
    """
    Returns the URL to open given the domain and contents.

    If the file contents are large, an anonymous gist will be created.

    Parameters
    ----------
    contents
        * string - assumed to be GeoJSON
        * an object that implements __geo_interface__
            A FeatureCollection will be constructed with one feature,
            the object.
        * a sequence of objects that each implement __geo_interface__
            A FeatureCollection will be constructed with the objects
            as the features
    domain - string, default http://geojson.io
    force_gist - force gist creation regardless of file size.
    size_for_gist - int, maximum payload length to embed directly in the URL.

    For more information about __geo_interface__ see:
    https://gist.github.com/sgillies/2217756

    If the contents are large, then a gist will be created.
    """
    contents = make_geojson(contents)

    # Small payloads are embedded directly in the URL; larger ones (or
    # when forced) go into an anonymous gist referenced by id.
    if len(contents) <= size_for_gist and not force_gist:
        url = data_url(contents, domain)
    else:
        gist = _make_gist(contents)
        url = gist_url(gist.id, domain)

    return url
Returns the URL to open given the domain and contents. If the file contents are large, an anonymous gist will be created. Parameters ---------- contents * string - assumed to be GeoJSON * an object that implements __geo_interface__ A FeatureCollection will be constructed with one feature, the object. * a sequence of objects that each implement __geo_interface__ A FeatureCollection will be constructed with the objects as the features domain - string, default http://geojson.io force_gist - force gist creation regardless of file size. For more information about __geo_interface__ see: https://gist.github.com/sgillies/2217756 If the contents are large, then a gist will be created.
def _transform(self, data, transform, step_size): ''' Transform the data. If the transform is not supported by this series, returns the data unaltered. ''' if transform=='mean': total = sum( k*v for k,v in data.items() ) count = sum( data.values() ) data = float(total)/float(count) if count>0 else 0 elif transform=='count': data = sum(data.values()) elif transform=='min': data = min(data.keys() or [0]) elif transform=='max': data = max(data.keys() or [0]) elif transform=='sum': data = sum( k*v for k,v in data.items() ) elif transform=='rate': data = { k:v/float(step_size) for k,v in data.items() } elif callable(transform): data = transform(data, step_size) return data
Transform the data. If the transform is not supported by this series, returns the data unaltered.
def reset_next_ids(classes):
    """
    Reset the row-ID generator of every table class in ``classes``.

    For each class whose ``.next_id`` attribute is not None (i.e. the
    table has an ID generator), set the generator back to 0 via
    ``set_next_id``, preserving the ID's type. Useful between documents
    so row IDs do not keep growing from one document to the next; purely
    aesthetic, but avoids confusion such as a process table whose first
    process ID is 300.

    Example:

    >>> import lsctables
    >>> reset_next_ids(lsctables.TableByName.values())
    """
    for table_class in classes:
        current = table_class.next_id
        if current is None:
            continue
        table_class.set_next_id(type(current)(0))
For each class in the list, if the .next_id attribute is not None (meaning the table has an ID generator associated with it), set .next_id to 0. This has the effect of resetting the ID generators, and is useful in applications that process multiple documents and add new rows to tables in those documents. Calling this function between documents prevents new row IDs from growing continuously from document to document. There is no need to do this, its purpose is merely aesthetic, but it can be confusing to open a document and find process ID 300 in the process table and wonder what happened to the other 299 processes. Example: >>> import lsctables >>> reset_next_ids(lsctables.TableByName.values())
def lin_sim_calc(goid1, goid2, sim_r, termcnts):
    '''
    Computes Lin's similarity measure using pre-calculated Resnik's
    similarities.

    Lin similarity = 2 * Resnik(goid1, goid2) / (IC(goid1) + IC(goid2)).
    Implicitly returns None when ``sim_r`` is None or when the combined
    information content of both terms is zero.
    '''
    if sim_r is not None:
        info = get_info_content(goid1, termcnts) + get_info_content(goid2, termcnts)
        if info != 0:
            return (2*sim_r)/info
Computes Lin's similarity measure using pre-calculated Resnik's similarities.
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    """Returns a `Tensor`."""
    del weight_collections  # unused: the hub module manages its own weights.
    # Flatten to a 1-D batch of strings before feeding the hub module.
    text_batch = tf.reshape(inputs.get(self), shape=[-1])
    # The module is trainable only if both the column and the caller allow it.
    m = module.Module(self.module_spec, trainable=self.trainable and trainable)
    return m(text_batch)
Returns a `Tensor`.
def bexpcube_moon(self, **kwargs):
    """ return the name of a binned exposure cube file

    Keyword arguments are merged over ``self.base_dict`` and substituted
    into ``NameFactory.bexpcubemoon_format``; pass ``fullpath=True`` to
    get the absolute path instead of the local one.
    """
    kwargs_copy = self.base_dict.copy()
    kwargs_copy.update(**kwargs)
    # Fill in derived fields unless the caller supplied them explicitly.
    kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
    kwargs_copy['component'] = kwargs.get(
        'component', self.component(**kwargs))
    self._replace_none(kwargs_copy)
    localpath = NameFactory.bexpcubemoon_format.format(**kwargs_copy)
    if kwargs.get('fullpath', False):
        return self.fullpath(localpath=localpath)
    return localpath
return the name of a binned exposure cube file
def is_android_raw(raw):
    """
    Identify common Android-specific file formats from raw bytes.

    Returns one of "APK", "DEX", "DEY", "AXML", "ARSC", or None when the
    content matches none of them.
    """
    # We do not check for META-INF/MANIFEST.MF,
    # as you also want to analyze unsigned APKs...
    # AndroidManifest.xml should be in every APK.
    # classes.dex and resources.arsc are not required!
    # NOTE(review): a ZIP file with a stored APK inside would match the
    # "APK" test as well; more sanity checks would tighten this.
    if raw[0:2] == b"PK" and b'AndroidManifest.xml' in raw:
        return "APK"
    if raw[0:3] == b"dex":
        return "DEX"
    if raw[0:3] == b"dey":
        return "DEY"
    if raw[0:4] in (b"\x03\x00\x08\x00", b"\x00\x00\x08\x00"):
        return "AXML"
    if raw[0:4] == b"\x02\x00\x0C\x00":
        return "ARSC"
    return None
Returns a string that describes the type of file, for common Android specific formats
def logging_raslog_message_msgId_msgId(self, **kwargs):
    """Auto Generated Code

    Builds the NETCONF config subtree
    logging/raslog/message/msgId/msgId for the brocade-ras YANG model
    and dispatches it through the callback.
    """
    config = ET.Element("config")
    logging = ET.SubElement(config, "logging", xmlns="urn:brocade.com:mgmt:brocade-ras")
    raslog = ET.SubElement(logging, "raslog")
    message = ET.SubElement(raslog, "message")
    msgId = ET.SubElement(message, "msgId")
    # The key leaf lives inside a container of the same name, hence the
    # deliberate rebinding of ``msgId`` to the nested element.
    msgId = ET.SubElement(msgId, "msgId")
    msgId.text = kwargs.pop('msgId')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def chain_to_quadratic(chain, target_adjacency, chain_strength):
    """Determine the quadratic biases that induce the given chain.

    Args:
        chain (iterable):
            The variables that make up a chain.

        target_adjacency (dict/:class:`networkx.Graph`):
            Should be a dict of the form {s: Ns, ...} where s is a variable
            in the target graph and Ns is the set of neighbours of s.

        chain_strength (float):
            The magnitude of the quadratic bias that should be used to
            create chains.

    Returns:
        dict[edge, float]: The quadratic biases that induce the given chain.

    Raises:
        ValueError: If the variables in chain do not form a connected
            subgraph of target.

    Examples:
        >>> chain = {1, 2}
        >>> target_adjacency = {0: {1, 2}, 1: {0, 2}, 2: {0, 1}}
        >>> dimod.embedding.chain_to_quadratic(chain, target_adjacency, 1)
        {(1, 2): -1}

    """
    couplers = {}  # the edges that hold the chain together

    # Breadth-first search over the chain's induced subgraph, starting
    # from an arbitrary chain variable.
    visited = set()
    try:
        frontier = {next(iter(chain))}
    except StopIteration:
        raise ValueError("chain must have at least one variable")

    while frontier:
        current_level, frontier = frontier, set()
        for node in current_level:
            if node in visited:
                continue
            visited.add(node)

            for neighbour in target_adjacency[node]:
                if neighbour not in chain:
                    continue
                frontier.add(neighbour)
                if neighbour != node and (neighbour, node) not in couplers:
                    couplers[(node, neighbour)] = -chain_strength

    # Every chain variable must have been reached from the start variable.
    if len(chain) != len(visited):
        raise ValueError('{} is not a connected chain'.format(chain))

    return couplers
Determine the quadratic biases that induce the given chain. Args: chain (iterable): The variables that make up a chain. target_adjacency (dict/:class:`networkx.Graph`): Should be a dict of the form {s: Ns, ...} where s is a variable in the target graph and Ns is the set of neighbours of s. chain_strength (float): The magnitude of the quadratic bias that should be used to create chains. Returns: dict[edge, float]: The quadratic biases that induce the given chain. Raises: ValueError: If the variables in chain do not form a connected subgraph of target. Examples: >>> chain = {1, 2} >>> target_adjacency = {0: {1, 2}, 1: {0, 2}, 2: {0, 1}} >>> dimod.embedding.chain_to_quadratic(chain, target_adjacency, 1) {(1, 2): -1}
def norm_score(self):
    """Return the normalized score in [0, 1].

    Equals 1.0 for a z-score of 0 and falls toward 0.0 as the z-score
    becomes extremely positive or negative. Computed as the two-sided
    tail mass of the standard normal at ``self.score``.
    """
    z = self.score
    cdf = (1.0 + math.erf(z / math.sqrt(2.0))) / 2.0
    deviation = math.fabs(0.5 - cdf)
    return 1 - 2 * deviation
Return the normalized score. Equals 1.0 for a z-score of 0, falling to 0.0 for extremely positive or negative values.
def sample_image(d, data, u, v, w, i=-1, verbose=0, imager='xy', wres=100):
    """ Samples one integration and returns image

    Args:
        d: metadata dict (expects 'freq', 'freq_orig', 'npixx', 'npixy',
           'uvres' keys).
        data: per-integration visibility data, indexed by integration.
        u, v, w: baseline coordinates.
        i: integration to image. Default (-1) selects the middle integration.
        verbose: passed through to the imaging routines.
        imager: 'xy' for direct gridding, 'w' for w-projection kernels.
        wres: w-resolution used when imager == 'w'.

    Returns:
        The image produced by rtlib for the selected integration.
    """
    if i == -1:
        # BUG FIX: use floor division — on Python 3, len(data)/2 is a
        # float and cannot be used as an index; `//` gives the same
        # value Python 2's `/` produced for ints.
        i = len(data) // 2

    if imager == 'xy':
        image = rtlib.imgonefullxy(n.outer(u, d['freq']/d['freq_orig'][0]),
                                   n.outer(v, d['freq']/d['freq_orig'][0]),
                                   data[i], d['npixx'], d['npixy'],
                                   d['uvres'], verbose=verbose)
    elif imager == 'w':
        npix = max(d['npixx'], d['npixy'])
        bls, uvkers = rtlib.genuvkernels(w, wres, npix, d['uvres'],
                                         ksize=21, oversample=1)
        image = rtlib.imgonefullw(n.outer(u, d['freq']/d['freq_orig'][0]),
                                  n.outer(v, d['freq']/d['freq_orig'][0]),
                                  data[i], npix, d['uvres'], bls, uvkers,
                                  verbose=verbose)

        # Alternative lm-kernel path, kept for reference:
        # bls, lmkers = rtlib.genlmkernels(w, wres, npix, d['uvres'])
        # image = rtlib.imgonefullw(n.outer(u, d['freq']/d['freq_orig'][0]), n.outer(v, d['freq']/d['freq_orig'][0]), data[i], npix, d['uvres'], [bls[0]], [lmkers[0]], verbose=verbose)
    return image
Samples one integration and returns image i is integration to image. Default is mid int.
def answering_questions(self, attempt, validation_token, quiz_submission_id, access_code=None, quiz_questions=None):
    """
    Answering questions.

    Provide or update an answer to one or more QuizQuestions.

    Issues a POST to the Canvas quiz-submission questions endpoint and
    returns the paginated response.
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - quiz_submission_id
    """ID"""
    path["quiz_submission_id"] = quiz_submission_id

    # REQUIRED - attempt
    """The attempt number of the quiz submission being taken. Note that this
        must be the latest attempt index, as questions for earlier attempts can
        not be modified."""
    data["attempt"] = attempt

    # REQUIRED - validation_token
    """The unique validation token you received when the Quiz Submission was
        created."""
    data["validation_token"] = validation_token

    # OPTIONAL - access_code
    """Access code for the Quiz, if any."""
    if access_code is not None:
        data["access_code"] = access_code

    # OPTIONAL - quiz_questions
    """Set of question IDs and the answer value.

        See {Appendix: Question Answer Formats} for the accepted answer formats
        for each question type."""
    if quiz_questions is not None:
        data["quiz_questions"] = quiz_questions

    self.logger.debug("POST /api/v1/quiz_submissions/{quiz_submission_id}/questions with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/quiz_submissions/{quiz_submission_id}/questions".format(**path), data=data, params=params, all_pages=True)
Answering questions. Provide or update an answer to one or more QuizQuestions.
def generate_substitution_structures(self, atom, target_species=[], sub_both_sides=False, range_tol=1e-2, dist_from_surf=0): """ Function that performs substitution-type doping on the surface and returns all possible configurations where one dopant is substituted per surface. Can substitute one surface or both. Args: atom (str): atom corresponding to substitutional dopant sub_both_sides (bool): If true, substitute an equivalent site on the other surface target_species (list): List of specific species to substitute range_tol (float): Find viable substitution sites at a specific distance from the surface +- this tolerance dist_from_surf (float): Distance from the surface to find viable substitution sites, defaults to 0 to substitute at the surface """ # Get symmetrized structure in case we want to substitue both sides sym_slab = SpacegroupAnalyzer(self.slab).get_symmetrized_structure() # Define a function for substituting a site def substitute(site, i): slab = self.slab.copy() props = self.slab.site_properties if sub_both_sides: # Find an equivalent site on the other surface eq_indices = [indices for indices in sym_slab.equivalent_indices if i in indices][0] for ii in eq_indices: if "%.6f" % (sym_slab[ii].frac_coords[2]) != \ "%.6f" % (site.frac_coords[2]): props["surface_properties"][ii] = "substitute" slab.replace(ii, atom) break props["surface_properties"][i] = "substitute" slab.replace(i, atom) slab.add_site_property("surface_properties", props["surface_properties"]) return slab # Get all possible substitution sites substituted_slabs = [] # Sort sites so that we can define a range relative to the position of the # surface atoms, i.e. 
search for sites above (below) the bottom (top) surface sorted_sites = sorted(sym_slab, key=lambda site: site.frac_coords[2]) if sorted_sites[0].surface_properties == "surface": d = sorted_sites[0].frac_coords[2] + dist_from_surf else: d = sorted_sites[-1].frac_coords[2] - dist_from_surf for i, site in enumerate(sym_slab): if d - range_tol < site.frac_coords[2] < d + range_tol: if target_species and site.species_string in target_species: substituted_slabs.append(substitute(site, i)) elif not target_species: substituted_slabs.append(substitute(site, i)) matcher = StructureMatcher() return [s[0] for s in matcher.group_structures(substituted_slabs)]
Function that performs substitution-type doping on the surface and returns all possible configurations where one dopant is substituted per surface. Can substitute one surface or both. Args: atom (str): atom corresponding to substitutional dopant sub_both_sides (bool): If true, substitute an equivalent site on the other surface target_species (list): List of specific species to substitute range_tol (float): Find viable substitution sites at a specific distance from the surface +- this tolerance dist_from_surf (float): Distance from the surface to find viable substitution sites, defaults to 0 to substitute at the surface
def wrap_object(func, before, after):
    '''
    Wrap ``func`` so that ``before()`` runs prior to the call and
    ``after()`` always runs afterwards, even when ``func`` raises.

    Note: if ``before()`` itself raises, ``after()`` is not called
    (matching the original behavior).
    '''
    def _wrapper(*args, **kwargs):
        before()
        # BUG FIX: removed the redundant `except Exception as e: raise e`
        # clause — it re-raised in place (resetting the traceback origin
        # on Python 2) while `finally` already guarantees the cleanup.
        try:
            return func(*args, **kwargs)
        finally:
            after()
    return _wrapper
before/after call will encapsulate callable object
def gather_candidates(self):
    """Gather candidates from the slave environments.

    The candidates are stored in :attr:`candidates`, overriding any
    previous candidates.
    """
    async def slave_task(addr):
        # Connect to the slave environment's manager and ask for its
        # current candidates.
        r_manager = await self.env.connect(addr)
        return await r_manager.get_candidates()

    if self._single_env:
        # No slaves: take the local environment's candidates directly.
        self._candidates = self.env.candidates
    else:
        mgrs = self.get_managers()
        tasks = create_tasks(slave_task, mgrs)
        self._candidates = run(tasks)
Gather candidates from the slave environments. The candidates are stored in :attr:`candidates`, overriding any previous candidates.
def do_title(s):
    """Return a titlecased version of the value. I.e. words will start with
    uppercase letters, all remaining characters are lowercase.
    """
    # _word_beginning_split_re splits at word beginnings; empty fragments
    # are filtered out, so item[0] is always safe to index.
    return ''.join(
        [item[0].upper() + item[1:].lower()
         for item in _word_beginning_split_re.split(soft_unicode(s))
         if item])
Return a titlecased version of the value. I.e. words will start with uppercase letters, all remaining characters are lowercase.
def sec_to_public_pair(sec, generator=None, strict=True):
    """Convert a public key in sec binary format to a public pair.

    The coordinate byte length is taken from the curve's field size when
    ``generator`` is given, otherwise inferred from the sec length
    (compressed form assumed). With ``strict=False``, the nonstandard
    leading bytes 0x06/0x07 (uncompressed) are also accepted and the
    compressed leading-byte check is skipped.

    Raises:
        EncodingError: if the sec encoding is not recognized.
    """
    byte_count = (generator.p().bit_length() + 7) >> 3 if generator else (len(sec) - 1)
    x = from_bytes_32(sec[1:1 + byte_count])
    sec0 = sec[:1]
    if len(sec) == 1 + byte_count * 2:
        # Uncompressed form: leading 0x04 followed by x then y.
        isok = sec0 == b'\4'
        if not strict:
            isok = isok or (sec0 in [b'\6', b'\7'])
        if isok:
            y = from_bytes_32(sec[1+byte_count:1+2*byte_count])
            return (x, y)
    elif len(sec) == 1 + byte_count:
        # Compressed form: 0x02 (even y) or 0x03 (odd y) plus x only.
        # NOTE(review): this branch requires ``generator`` to recover y;
        # a compressed key with generator=None would fail — confirm
        # callers always pass the curve for compressed input.
        if not strict or (sec0 in (b'\2', b'\3')):
            is_y_odd = (sec0 != b'\2')
            return generator.points_for_x(x)[is_y_odd]
    raise EncodingError("bad sec encoding for public key")
Convert a public key in sec binary format to a public pair.
def hypot(x, y, context=None):
    """
    Return the Euclidean norm of x and y, i.e., the square root of the
    sum of the squares of x and y.

    The computation is delegated to MPFR's ``mpfr_hypot`` and evaluated
    in ``context`` (or the current context when None).
    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_hypot,
        (
            # Coerce both arguments to BigFloat before calling into MPFR.
            BigFloat._implicit_convert(x),
            BigFloat._implicit_convert(y),
        ),
        context,
    )
Return the Euclidean norm of x and y, i.e., the square root of the sum of the squares of x and y.
def _init_az_api(self):
    """
    Initialise client objects for talking to Azure API.

    This is in a separate function so to be called by ``__init__``
    and ``__setstate__``.
    """
    with self.__lock:
        # `_resource_client` doubles as the "already initialized" flag;
        # skip re-initialization on repeated calls.
        if self._resource_client is None:
            log.debug("Making Azure `ServicePrincipalcredentials` object"
                      " with tenant=%r, client_id=%r, secret=%r ...",
                      self.tenant_id, self.client_id,
                      ('<redacted>' if self.secret else None))
            credentials = ServicePrincipalCredentials(
                tenant=self.tenant_id,
                client_id=self.client_id,
                secret=self.secret,
            )
            log.debug("Initializing Azure `ComputeManagementclient` ...")
            self._compute_client = ComputeManagementClient(credentials, self.subscription_id)
            log.debug("Initializing Azure `NetworkManagementclient` ...")
            self._network_client = NetworkManagementClient(credentials, self.subscription_id)
            log.debug("Initializing Azure `ResourceManagementclient` ...")
            self._resource_client = ResourceManagementClient(credentials, self.subscription_id)
            log.info("Azure API clients initialized.")
Initialise client objects for talking to Azure API. This is in a separate function so to be called by ``__init__`` and ``__setstate__``.
def _exists(fs, path):
    """
    Return True if ``path`` exists on the given filesystem.

    Note that unlike `os.path.exists`, we *do* propagate file system
    errors other than a non-existent path or non-existent directory
    component. E.g., should EPERM or ELOOP be raised, an exception will
    bubble up.
    """
    try:
        fs.stat(path)
        return True
    except (exceptions.FileNotFound, exceptions.NotADirectory):
        return False
Check that the given path exists on the filesystem. Note that unlike `os.path.exists`, we *do* propagate file system errors other than a non-existent path or non-existent directory component. E.g., should EPERM or ELOOP be raised, an exception will bubble up.
def _prepareSubForm(self, liveForm):
    """
    Utility for turning liveforms into subforms, and compacting them as
    necessary.

    @param liveForm: a liveform.
    @type liveForm: L{LiveForm}

    @return: a sub form.
    @rtype: L{LiveForm}
    """
    liveForm = liveForm.asSubForm(self.name)
    # XXX Why did this work???
    # if we are compact, tell the liveform so it can tell its parameters
    # also
    if self._parameterIsCompact:
        liveForm.compact()
    return liveForm
Utility for turning liveforms into subforms, and compacting them as necessary. @param liveForm: a liveform. @type liveForm: L{LiveForm} @return: a sub form. @rtype: L{LiveForm}
def upcoming(self, chamber, congress=CURRENT_CONGRESS):
    "Shortcut for upcoming bills"
    # NOTE(review): `congress` is accepted but never used when building
    # the request path — confirm whether the upcoming-bills endpoint is
    # meant to be congress-specific or the parameter is vestigial.
    path = "bills/upcoming/{chamber}.json".format(chamber=chamber)
    return self.fetch(path)
Shortcut for upcoming bills
def img2img_transformer_base_tpu():
    """Hparams for training img2img_transformer on tpu."""
    hparams = img2img_transformer_base()
    update_hparams_for_tpu(hparams)
    hparams.batch_size = 2
    hparams.num_heads = 4   # heads are expensive on tpu
    hparams.num_decoder_layers = 8
    hparams.num_encoder_layers = 4
    hparams.shared_embedding_and_softmax_weights = False
    return hparams
Hparams for training img2img_transformer on tpu.
def disable(name, **kwargs):
    '''
    Disable the named service to start at boot

    CLI Example:

    .. code-block:: bash

        salt '*' service.disable <service name> <runlevels=single-runlevel>
        salt '*' service.disable <service name> <runlevels=[runlevel1,runlevel2]>
    '''
    levels = []
    if 'runlevels' in kwargs:
        # Accept a single runlevel or a list of runlevels.
        requested_levels = set(kwargs['runlevels'] if isinstance(
            kwargs['runlevels'], list) else [kwargs['runlevels']])
        levels = _disable_delta(name, requested_levels)
        if not levels:
            # Nothing to remove: already disabled in every requested level.
            return True
    cmd = _enable_disable_cmd(name, 'delete', levels)
    return not _ret_code(cmd)
Disable the named service to start at boot CLI Example: .. code-block:: bash salt '*' service.disable <service name> <runlevels=single-runlevel> salt '*' service.disable <service name> <runlevels=[runlevel1,runlevel2]>
def has_control_chars(i):
    """
    Returns true if the passed token is an unknown string or a constant
    string having control chars (inverse, etc.).
    """
    if not hasattr(i, 'type_'):
        return False

    if i.type_ != Type.string:
        return False

    if i.token in ('VAR', 'PARAMDECL'):
        return True  # We don't know what an alphanumeric variable will hold

    if i.token == 'STRING':
        for c in i.value:
            # Control characters 16..21 are attribute chars (inverse, etc.).
            if 15 < ord(c) < 22:  # is it an attr char?
                return True
        return False

    # Composite node: any child containing control chars taints the parent.
    for j in i.children:
        if Translator.has_control_chars(j):
            return True

    return False
Returns true if the passed token is an unknown string or a constant string having control chars (inverse, etc
def get_sort_order(molecules):
    """
    Count up the total number of scores whose values are positive and
    negative. If a greater number are negative, then sort in ascending
    order (e.g. for binding energy estimates); otherwise, sort in
    descending order (e.g. for similarity values).

    :param molecules: objects exposing ``GetProp('scores')`` returning
        an iterable of numeric strings.
    :return: 'dsc' when positives outnumber negatives, else 'asc'.
    """
    positives = 0
    negatives = 0
    for molecule in molecules:
        for score in molecule.GetProp('scores'):
            value = float(score)
            if value > 0:
                positives += 1
            elif value < 0:
                negatives += 1
    # Ties (including all-zero or empty input) fall through to ascending.
    return 'dsc' if positives > negatives else 'asc'
Count up the total number of scores whose values are positive and negative. If a greater number are negative, then sort in ascending order (e.g. for binding energy estimates) Otherwise, sort in descending order (e.g. for similarity values)
def get_marker_size(self):
    """ Gets the size of a message marker.

    The width is half the widget's preferred width; the height is
    clamped to a minimum of 1 pixel so the marker stays visible.

    :return: QSize
    """
    h = self.get_marker_height()
    if h < 1:
        h = 1
    # NOTE(review): width() / 2 yields a float on Python 3 while Qt5's
    # QSize expects ints -- confirm whether this should use integer
    # division (//).
    return QtCore.QSize(self.sizeHint().width() / 2, h)
Gets the size of a message marker. :return: QSize
def find_hal(self, atoms):
    """Look for halogen bond acceptors (Y-{O|P|N|S}, with Y=C,P,S)

    :param atoms: iterable of pybel atoms to screen.
    :return: list of ``hal_acceptor`` namedtuples holding the acceptor
        atom (``o``), its neighbour (``y``) and both original indices.
    """
    data = namedtuple('hal_acceptor', 'o o_orig_idx y y_orig_idx')
    a_set = []
    # All oxygens, nitrogen, sulfurs with neighboring carbon, phosphor, nitrogen or sulfur
    # (acceptor atomic numbers 8/7/16; neighbour atomic numbers 6/7/15/16)
    for a in [at for at in atoms if at.atomicnum in [8, 7, 16]]:
        n_atoms = [na for na in pybel.ob.OBAtomAtomIter(a.OBAtom) if na.GetAtomicNum() in [6, 7, 15, 16]]
        # Exactly one heavy neighbour -> terminal acceptor candidate.
        if len(n_atoms) == 1:  # Proximal atom
            o_orig_idx = self.Mapper.mapid(a.idx, mtype=self.mtype, bsid=self.bsid)
            y_orig_idx = self.Mapper.mapid(n_atoms[0].GetIdx(), mtype=self.mtype, bsid=self.bsid)
            a_set.append(data(o=a, o_orig_idx=o_orig_idx, y=pybel.Atom(n_atoms[0]), y_orig_idx=y_orig_idx))
    return a_set
Look for halogen bond acceptors (Y-{O|P|N|S}, with Y=C,P,S)
def status(self, status_id, raise_exception_on_failure=False):
    """Return the status of the generation job.

    :param status_id: identifier of the generation job to poll.
    :param raise_exception_on_failure: when True, raise
        ``DocumentStatusFailure`` on any non-200 response.
    :return: parsed JSON dict on HTTP 200 (with ``download_key`` added
        once the job is completed); otherwise the raw ``requests``
        response object.
    """
    query = {"output": "json", "user_credentials": self.api_key}
    resp = requests.get(
        "%sstatus/%s" % (self._url, status_id),
        params=query,
        timeout=self._timeout
    )
    if raise_exception_on_failure and resp.status_code != 200:
        raise DocumentStatusFailure(resp.content, resp.status_code)
    if resp.status_code == 200:
        as_json = json.loads(resp.content)
        if as_json["status"] == "completed":
            # Completed jobs expose a download URL; derive the key from it.
            as_json["download_key"] = _get_download_key(as_json["download_url"])
        return as_json
    return resp
Return the status of the generation job.
def _divide_widths(self, cli, width):
    """
    Return the widths for all columns.
    Or None when there is not enough space.

    :param cli: the CLI instance, forwarded to children's sizing methods.
    :param width: total horizontal space available for all columns.
    :return: list of ints (one width per child), or None when even the
        minimum sizes don't fit.
    """
    if not self.children:
        return []

    # Calculate widths.
    given_dimensions = self.get_dimensions(cli) if self.get_dimensions else None

    def get_dimension_for_child(c, index):
        # An explicitly given dimension overrides the child's preference.
        if given_dimensions and given_dimensions[index] is not None:
            return given_dimensions[index]
        else:
            return c.preferred_width(cli, width)

    dimensions = [get_dimension_for_child(c, index)
                  for index, c in enumerate(self.children)]

    # Sum dimensions
    sum_dimensions = sum_layout_dimensions(dimensions)

    # If there is not enough space for both.
    # Don't do anything.
    if sum_dimensions.min > width:
        return

    # Find optimal sizes. (Start with minimal size, increase until we cover
    # the whole height.)
    sizes = [d.min for d in dimensions]

    # Weighted round-robin over the children: higher weight means the
    # child receives extra columns more often.
    child_generator = take_using_weights(
        items=list(range(len(dimensions))),
        weights=[d.weight for d in dimensions])

    i = next(child_generator)

    while sum(sizes) < min(width, sum_dimensions.preferred):
        # Increase until we meet at least the 'preferred' size.
        if sizes[i] < dimensions[i].preferred:
            sizes[i] += 1
        i = next(child_generator)

    while sum(sizes) < min(width, sum_dimensions.max):
        # Increase until we use all the available space.
        if sizes[i] < dimensions[i].max:
            sizes[i] += 1
        i = next(child_generator)

    return sizes
Return the widths for all columns. Or None when there is not enough space.
def check_aggregate(self, variable, components=None, exclude_on_fail=False, multiplier=1, **kwargs): """Check whether a timeseries matches the aggregation of its components Parameters ---------- variable: str variable to be checked for matching aggregation of sub-categories components: list of str, default None list of variables, defaults to all sub-categories of `variable` exclude_on_fail: boolean, default False flag scenarios failing validation as `exclude: True` multiplier: number, default 1 factor when comparing variable and sum of components kwargs: passed to `np.isclose()` """ # compute aggregate from components, return None if no components df_components = self.aggregate(variable, components) if df_components is None: return # filter and groupby data, use `pd.Series.align` for matching index rows = self._apply_filters(variable=variable) df_variable, df_components = ( _aggregate(self.data[rows], 'variable').align(df_components) ) # use `np.isclose` for checking match diff = df_variable[~np.isclose(df_variable, multiplier * df_components, **kwargs)] if len(diff): msg = '`{}` - {} of {} rows are not aggregates of components' logger().info(msg.format(variable, len(diff), len(df_variable))) if exclude_on_fail: self._exclude_on_fail(diff.index.droplevel([2, 3, 4])) return IamDataFrame(diff, variable=variable).timeseries()
Check whether a timeseries matches the aggregation of its components Parameters ---------- variable: str variable to be checked for matching aggregation of sub-categories components: list of str, default None list of variables, defaults to all sub-categories of `variable` exclude_on_fail: boolean, default False flag scenarios failing validation as `exclude: True` multiplier: number, default 1 factor when comparing variable and sum of components kwargs: passed to `np.isclose()`
def send_mail(self, subject, to, template, **template_ctx):
    """
    Utility method to send mail with the `mail` template context.

    :param subject: subject line of the message.
    :param to: recipient(s) of the message.
    :param template: template used to render the message body.
    :param template_ctx: extra context merged with the security
        bundle's 'mail' context-processor values.
    """
    if not self.mail:
        # No mail bundle configured: warn instead of crashing the caller.
        from warnings import warn
        warn('Attempting to send mail without the mail bundle installed! '
             'Please install it, or fix your configuration.')
        return

    merged_ctx = dict(
        **self.security.run_ctx_processor('mail'),
        **template_ctx)
    self.mail.send(subject, to, template, **merged_ctx)
Utility method to send mail with the `mail` template context.
def error_view(template_dir=None):
    """
    Create the Error view

    Must be instantiated

    import error_view
    ErrorView = error_view()

    :param template_dir: The directory containing the view pages
    :return: an ``Error`` view class whose ``register`` installs Flask
        error handlers for 400, 401, 403, 404, 500 and 503
    """
    if not template_dir:
        template_dir = "Pylot/Error"
    template_page = "%s/index.html" % template_dir

    class Error(Pylot):
        """
        Error Views
        """
        @classmethod
        def register(cls, app, **kwargs):
            super(cls, cls).register(app, **kwargs)

            # Each error handler funnels into the shared index view with
            # the matching status code.
            @app.errorhandler(400)
            def error_400(error):
                return cls.index(error, 400)

            @app.errorhandler(401)
            def error_401(error):
                return cls.index(error, 401)

            @app.errorhandler(403)
            def error_403(error):
                return cls.index(error, 403)

            @app.errorhandler(404)
            def error_404(error):
                return cls.index(error, 404)

            @app.errorhandler(500)
            def error_500(error):
                return cls.index(error, 500)

            @app.errorhandler(503)
            def error_503(error):
                return cls.index(error, 503)

        @classmethod
        def index(cls, error, code):
            # Render the shared template and propagate the HTTP status code.
            cls.meta_(title="Error %s" % code)
            return cls.render(error=error, view_template=template_page), code

    return Error
Create the Error view Must be instantiated import error_view ErrorView = error_view() :param template_dir: The directory containing the view pages :return:
def fractal_dimension(image):
    '''Estimates the fractal dimension of an image with box counting.
    Counts pixels with value 0 as empty and everything else as non-empty.
    Input image has to be grayscale.

    See, e.g `Wikipedia <https://en.wikipedia.org/wiki/Fractal_dimension>`_.

    :param image: numpy.ndarray
    :returns: estimation of fractal dimension
    :rtype: float
    '''
    pixels = []
    for i in range(image.shape[0]):
        for j in range(image.shape[1]):
            if image[i, j] > 0:
                pixels.append((i, j))

    lx = image.shape[1]
    ly = image.shape[0]
    pixels = np.array(pixels)
    # Not enough foreground to fit a slope through log-log points.
    if len(pixels) < 2:
        return 0
    # Box sizes sampled on a log scale between 2 and 16 pixels.
    scales = np.logspace(1, 4, num=20, endpoint=False, base=2)
    Ns = []
    for scale in scales:
        # NOTE(review): pixels are (row, col) but the first bin axis uses
        # lx (width) and the second ly (height) -- looks transposed for
        # non-square images; confirm intent.
        H, edges = np.histogramdd(pixels, bins=(np.arange(0, lx, scale),
                                                np.arange(0, ly, scale)))
        H_sum = np.sum(H > 0)
        # Guard against log(0) in the polyfit below.
        if H_sum == 0:
            H_sum = 1
        Ns.append(H_sum)

    # Slope of log(N) vs log(scale) gives the (negative) dimension.
    coeffs = np.polyfit(np.log(scales), np.log(Ns), 1)
    hausdorff_dim = -coeffs[0]

    return hausdorff_dim
Estimates the fractal dimension of an image with box counting. Counts pixels with value 0 as empty and everything else as non-empty. Input image has to be grayscale. See, e.g `Wikipedia <https://en.wikipedia.org/wiki/Fractal_dimension>`_. :param image: numpy.ndarray :returns: estimation of fractal dimension :rtype: float
def _verify_params(self):
    """Verifies the parameters don't use any reserved parameter.

    Raises:
        ValueError: If a reserved parameter is used.
    """
    overlap = self._RESERVED_PARAMS & set(self.extra_params)
    if overlap:
        raise ValueError("Using a reserved parameter", overlap)
Verifies the parameters don't use any reserved parameter. Raises: ValueError: If a reserved parameter is used.
def iters(cls, batch_size=32, device=0, root='.data', vectors=None, **kwargs):
    """Create iterator objects for splits of the SST dataset.

    Arguments:
        batch_size: Batch_size
        device: Device to create batches on. Use - 1 for CPU and None for
            the currently active GPU device.
        root: The root directory that the dataset's zip archive will be
            expanded into; therefore the directory in whose trees
            subdirectory the data files will be stored.
        vectors: one of the available pretrained vectors or a list with each
            element one of the available pretrained vectors (see Vocab.load_vectors)
        Remaining keyword arguments: Passed to the splits method.
    """
    TEXT = data.Field()
    LABEL = data.Field(sequential=False)

    # Build the splits first so the vocabularies are fit on train only.
    train, val, test = cls.splits(TEXT, LABEL, root=root, **kwargs)

    TEXT.build_vocab(train, vectors=vectors)
    LABEL.build_vocab(train)

    return data.BucketIterator.splits(
        (train, val, test), batch_size=batch_size, device=device)
Create iterator objects for splits of the SST dataset. Arguments: batch_size: Batch_size device: Device to create batches on. Use - 1 for CPU and None for the currently active GPU device. root: The root directory that the dataset's zip archive will be expanded into; therefore the directory in whose trees subdirectory the data files will be stored. vectors: one of the available pretrained vectors or a list with each element one of the available pretrained vectors (see Vocab.load_vectors) Remaining keyword arguments: Passed to the splits method.
def read_raw(self, params=None):
    """Get information about the current entity.

    Make an HTTP GET call to ``self.path('self')``. Return the response.

    :param params: optional query parameters forwarded to the GET call.
    :return: A ``requests.response`` object.
    """
    # Entities may override 'read_type' in their metadata to read from a
    # path other than 'self'.
    path_type = self._meta.get('read_type', 'self')
    return client.get(
        self.path(path_type),
        params=params,
        **self._server_config.get_client_kwargs()
    )
Get information about the current entity. Make an HTTP GET call to ``self.path('self')``. Return the response. :return: A ``requests.response`` object.
def save_vlen(self, key, data):
    """
    Save a sequence of variable-length arrays

    :param key: name of the dataset
    :param data: data to store as a list of arrays
    """
    # The ragged (last) axis is dropped from the shape because it is
    # stored via a variable-length dtype; the leading axis is resizable.
    shape = (None,) + data[0].shape[:-1]
    try:
        dset = self[key]
    except KeyError:
        # First write: create a resizable dataset with a vlen dtype.
        vdt = h5py.special_dtype(vlen=data[0].dtype)
        dset = create(self, key, vdt, shape, fillvalue=None)
    nbytes = dset.attrs.get('nbytes', 0)
    totlen = dset.attrs.get('totlen', 0)
    # Accumulate running size totals across all appended arrays.
    for i, val in enumerate(data):
        nbytes += val.nbytes
        totlen += len(val)
    length = len(dset)
    # Grow the dataset and append the new arrays after the existing rows.
    dset.resize((length + len(data),) + shape[1:])
    for i, arr in enumerate(data):
        dset[length + i] = arr
    # Persist totals so readers can size buffers without scanning.
    dset.attrs['nbytes'] = nbytes
    dset.attrs['totlen'] = totlen
Save a sequence of variable-length arrays :param key: name of the dataset :param data: data to store as a list of arrays
def cli(ctx, feature_id, start, end, organism="", sequence=""):
    """Set the boundaries of a genomic feature

    Output:

        A standard apollo feature dictionary ({"features": [{...}]})
    """
    # Thin CLI wrapper: delegate straight to the annotations client.
    return ctx.gi.annotations.set_boundaries(feature_id, start, end, organism=organism, sequence=sequence)
Set the boundaries of a genomic feature Output: A standard apollo feature dictionary ({"features": [{...}]})
def get_txn_outputs(raw_tx_hex, output_addr_list, coin_symbol): ''' Used to verify a transaction hex does what's expected of it. Must supply a list of output addresses so that the library can try to convert from script to address using both pubkey and script. Returns a list of the following form: [{'value': 12345, 'address': '1abc...'}, ...] Uses @vbuterin's decoding methods. ''' # Defensive checks: err_msg = 'Library not able to parse %s transactions' % coin_symbol assert lib_can_deserialize_cs(coin_symbol), err_msg assert isinstance(output_addr_list, (list, tuple)) for output_addr in output_addr_list: assert is_valid_address(output_addr), output_addr output_addr_set = set(output_addr_list) # speed optimization outputs = [] deserialized_tx = deserialize(str(raw_tx_hex)) for out in deserialized_tx.get('outs', []): output = {'value': out['value']} # determine if the address is a pubkey address, script address, or op_return pubkey_addr = script_to_address(out['script'], vbyte=COIN_SYMBOL_MAPPINGS[coin_symbol]['vbyte_pubkey']) script_addr = script_to_address(out['script'], vbyte=COIN_SYMBOL_MAPPINGS[coin_symbol]['vbyte_script']) nulldata = out['script'] if out['script'][0:2] == '6a' else None if pubkey_addr in output_addr_set: address = pubkey_addr output['address'] = address elif script_addr in output_addr_set: address = script_addr output['address'] = address elif nulldata: output['script'] = nulldata output['script_type'] = 'null-data' else: raise Exception('Script %s Does Not Contain a Valid Output Address: %s' % ( out['script'], output_addr_set, )) outputs.append(output) return outputs
Used to verify a transaction hex does what's expected of it. Must supply a list of output addresses so that the library can try to convert from script to address using both pubkey and script. Returns a list of the following form: [{'value': 12345, 'address': '1abc...'}, ...] Uses @vbuterin's decoding methods.
def _GetSignatureMatchParserNames(self, file_object):
    """Determines if a file-like object matches one of the known signatures.

    Args:
      file_object (file): file-like object whose contents will be
          checked for known signatures.

    Returns:
      list[str]: parser names for which the contents of the file-like
          object matches their known signatures.
    """
    parser_names = []
    scan_state = pysigscan.scan_state()
    self._file_scanner.scan_file_object(scan_state, file_object)

    for scan_result in iter(scan_state.scan_results):
        format_specification = (
            self._formats_with_signatures.GetSpecificationBySignature(
                scan_result.identifier))

        # Several signatures may map to the same parser; de-duplicate.
        if format_specification.identifier not in parser_names:
            parser_names.append(format_specification.identifier)

    return parser_names
Determines if a file-like object matches one of the known signatures. Args: file_object (file): file-like object whose contents will be checked for known signatures. Returns: list[str]: parser names for which the contents of the file-like object matches their known signatures.
def deprecated_for(replace_message):
    """
    Decorate a deprecated function, with info about what to use instead, like:

    @deprecated_for("toBytes()")
    def toAscii(arg):
        ...
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            # stacklevel=2 points the warning at the deprecated call site.
            message = "%s is deprecated in favor of %s" % (
                func.__name__, replace_message)
            warnings.warn(message, category=DeprecationWarning, stacklevel=2)
            return func(*args, **kwargs)
        return wrapped
    return decorator
Decorate a deprecated function, with info about what to use instead, like: @deprecated_for("toBytes()") def toAscii(arg): ...
def read_word_data(self, i2c_addr, register, force=None):
    """
    Read a single word (2 bytes) from a given register.

    :param i2c_addr: i2c address
    :type i2c_addr: int
    :param register: Register to read
    :type register: int
    :param force: force using the slave address even when driver is
        already using it
    :type force: Boolean
    :return: 2-byte word
    :rtype: int
    """
    self._set_address(i2c_addr, force=force)
    msg = i2c_smbus_ioctl_data.create(
        read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_WORD_DATA
    )
    # The ioctl fills msg.data in place with the word read from the bus.
    ioctl(self.fd, I2C_SMBUS, msg)
    return msg.data.contents.word
Read a single word (2 bytes) from a given register. :param i2c_addr: i2c address :type i2c_addr: int :param register: Register to read :type register: int :param force: :type force: Boolean :return: 2-byte word :rtype: int
def device_selected(self, index):
    """Handler for selecting a device from the list in the UI

    :param index: QModelIndex of the clicked row in the device list.
    """
    device = self.devicelist_model.itemFromIndex(index)
    # NOTE(review): debug print left in -- consider logging instead.
    print(device.device.addr)
    # A device is now selected, so connecting becomes possible.
    self.btnConnect.setEnabled(True)
Handler for selecting a device from the list in the UI
def process_error_labels(value):
    """
    Process the error labels of a dependent variable 'value' to ensure
    uniqueness.

    Duplicate labels receive a numeric suffix ('label_2', 'label_3', ...);
    when the first duplicate is detected, the original occurrence is
    retroactively renamed to 'label_1' as well. Errors without a label
    default to 'error'. Mutates ``value`` in place.
    """
    counts = {}
    for err in value.get('errors', []):
        label = err.get('label', 'error')
        counts[label] = counts.get(label, 0) + 1
        occurrence = counts[label]
        if occurrence > 1:
            err['label'] = '%s_%d' % (label, occurrence)
        # On the first duplicate, rename the earliest occurrence too.
        if occurrence == 2:
            for earlier in value.get('errors', []):
                if earlier.get('label', 'error') == label:
                    earlier['label'] = label + '_1'
                    break
Process the error labels of a dependent variable 'value' to ensure uniqueness.
def destroy_digidoc_session(self): """ Closes DigiDocService session and clears request.session[I{DIGIDOC_SESSION_KEY}] """ # cleanup data too self.destroy_digidoc_session_data() try: session = self.request.session[self.DIGIDOC_SESSION_KEY] if session: try: service = self.flat_service() service.session_code = session service.close_session() except DigiDocError: pass del self.request.session[self.DIGIDOC_SESSION_KEY] except KeyError: pass
Closes DigiDocService session and clears request.session[I{DIGIDOC_SESSION_KEY}]
def listdir(*paths, glob=None):
    '''
    List the (optionally glob filtered) full paths from a dir.

    Args:
        *paths ([str,...]): A list of path elements
        glob (str): An optional fnmatch glob str

    Returns:
        list: full paths of the directory entries (unsorted).
    '''
    path = genpath(*paths)
    names = os.listdir(path)
    if glob is not None:
        # fnmatch-style filtering, e.g. '*.txt'.
        names = fnmatch.filter(names, glob)
    retn = [os.path.join(path, name) for name in names]
    return retn
List the (optionally glob filtered) full paths from a dir. Args: *paths ([str,...]): A list of path elements glob (str): An optional fnmatch glob str
def expose_endpoints (module, *args):
    """ Expose methods to the given module for each API endpoint

    For every endpoint name in ``args``, a one-argument callable that
    sends a request to that endpoint is attached both to this module
    and to ``module``.
    """
    for op in args:
        # Capture the closure state
        # (the factory function binds `op` per iteration, avoiding the
        # late-binding pitfall of defining the lambda directly in the loop)
        def create_method (o):
            return lambda exp: send_request(o, exp)

        setattr(sys.modules[__name__], op, create_method(op))
        setattr(module, op, getattr(sys.modules[__name__], op))
Expose methods to the given module for each API endpoint
def _fetch_stock_data(self, stock_list):
    """Fetch stock information, one thread per stock-range entry.

    (Translated from the original docstring "获取股票信息".)

    :param stock_list: list of stock-code range strings.
    :return: list of the non-None responses.
    """
    # NOTE(review): ThreadPool(0) raises when stock_list is empty --
    # confirm callers always pass a non-empty list.
    pool = multiprocessing.pool.ThreadPool(len(stock_list))
    try:
        res = pool.map(self.get_stocks_by_range, stock_list)
    finally:
        pool.close()
    return [d for d in res if d is not None]
获取股票信息
def check_stops(pfeed, *, as_df=False, include_warnings=False):
    """
    Analog of :func:`check_frequencies` for ``pfeed.stops``

    Validate ``pfeed.stops`` by wrapping it in a minimal GTFS feed and
    delegating to ``gtfstk.check_stops``.

    :param pfeed: ProtoFeed whose ``stops`` table is checked; a None
        table is silently skipped (returns None).
    :param as_df: return problems as a DataFrame instead of a list.
    :param include_warnings: also report warnings, not only errors.
    :return: the result of ``gtfstk.check_stops`` (empty when valid).
    """
    # Use gtfstk's stop validator
    if pfeed.stops is not None:
        # The validator requires a stop_times table; an empty frame with
        # just the stop_id column satisfies the schema.
        stop_times = pd.DataFrame(columns=['stop_id'])
        feed = gt.Feed(stops=pfeed.stops, stop_times=stop_times,
                       dist_units='km')
        # BUG FIX: include_warnings was previously hard-coded to False,
        # silently ignoring the caller's flag.
        return gt.check_stops(feed, as_df=as_df,
                              include_warnings=include_warnings)
Analog of :func:`check_frequencies` for ``pfeed.stops``
def _new_open_bin(self, width=None, height=None, rid=None):
    """
    Extract the next empty bin and append it to open bins

    Returns:
        PackingAlgorithm: Initialized empty packing bin.
        None: No bin big enough for the rectangle was found
    """
    factories_to_delete = set()
    # BUG FIX: new_bin must be initialized; previously the assignment was
    # commented out, so an empty `_empty_bins` dict (or no fitting
    # factory) raised UnboundLocalError at the return statement.
    new_bin = None

    for key, binfac in self._empty_bins.items():

        # Only return the new bin if the rect fits.
        # (If width or height is None, caller doesn't know the size.)
        if not binfac.fits_inside(width, height):
            continue

        # Create bin and add to open_bins
        new_bin = binfac.new_bin()
        if new_bin is None:
            continue
        self._open_bins.append(new_bin)

        # If the factory was depleted mark for deletion
        if binfac.is_empty():
            factories_to_delete.add(key)

        break

    # Delete marked factories
    for f in factories_to_delete:
        del self._empty_bins[f]

    return new_bin
Extract the next empty bin and append it to open bins Returns: PackingAlgorithm: Initialized empty packing bin. None: No bin big enough for the rectangle was found
def replace_namespaced_config_map(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_config_map  # noqa: E501

    replace the specified ConfigMap  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_namespaced_config_map(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the ConfigMap (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1ConfigMap body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V1ConfigMap
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always request only the deserialized payload from the helper.
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths pass straight through to the helper:
    # with async_req=True it returns the request thread, otherwise the
    # deserialized V1ConfigMap.
    return self.replace_namespaced_config_map_with_http_info(
        name, namespace, body, **kwargs)  # noqa: E501
replace_namespaced_config_map # noqa: E501 replace the specified ConfigMap # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_config_map(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ConfigMap (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1ConfigMap body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1ConfigMap If the method is called asynchronously, returns the request thread.
def sampling_query(sql, fields=None, count=5, sampling=None):
    """Returns a sampling query for the SQL object.

    Args:
      sql: the SQL object to sample.
      fields: an optional list of field names to retrieve.
      count: an optional count of rows to retrieve which is used if a
        specific sampling is not specified.
      sampling: an optional sampling strategy to apply to the table.
    Returns:
      A SQL query string for sampling the input sql.
    """
    # Fall back to the default strategy when none was supplied.
    strategy = sampling if sampling is not None else Sampling.default(count=count, fields=fields)
    return strategy(sql)
Returns a sampling query for the SQL object. Args: sql: the SQL object to sample fields: an optional list of field names to retrieve. count: an optional count of rows to retrieve which is used if a specific sampling is not specified. sampling: an optional sampling strategy to apply to the table. Returns: A SQL query string for sampling the input sql.
def read(filename, backed=False, sheet=None, ext=None, delimiter=None,
         first_column_names=False, backup_url=None, cache=False,
         **kwargs) -> AnnData:
    """Read file and return :class:`~anndata.AnnData` object.

    To speed up reading, consider passing `cache=True`, which creates an hdf5 cache file.

    Parameters
    ----------
    filename : `str`
        If the filename has no file extension, it is interpreted as a key for
        generating a filename via `sc.settings.writedir + filename +
        sc.settings.file_format_data`. This is the same behavior as in
        `sc.read(filename, ...)`.
    backed : {`False`, `True`, 'r', 'r+'}, optional (default: `False`)
        Load :class:`~anndata.AnnData` in `backed` mode instead of fully
        loading it into memory (`memory` mode). Only applies to `.h5ad` files.
        `True` and 'r' are equivalent. If you want to modify backed attributes
        of the AnnData object, you need to choose 'r+'.
    sheet : `str`, optional (default: `None`)
        Name of sheet/table in hdf5 or Excel file.
    cache : `bool`, optional (default: `False`)
        If `False`, read from source, if `True`, read from fast 'h5ad' cache.
    ext : `str`, optional (default: `None`)
        Extension that indicates the file type. If `None`, uses extension of
        filename.
    delimiter : `str`, optional (default: `None`)
        Delimiter that separates data within text file. If `None`, will split at
        arbitrary number of white spaces, which is different from enforcing
        splitting at any single white space ' '.
    first_column_names : `bool`, optional (default: `False`)
        Assume the first column stores row names. This is only necessary if
        these are not strings: strings in the first column are automatically
        assumed to be row names.
    backup_url : `str`, optional (default: `None`)
        Retrieve the file from an URL if not present on disk.

    Returns
    -------
    An :class:`~anndata.AnnData` object
    """
    filename = str(filename)  # allow passing pathlib.Path objects
    if is_valid_filename(filename):
        # Recognized extension: dispatch straight to the format reader.
        return _read(filename, backed=backed, sheet=sheet, ext=ext,
                     delimiter=delimiter, first_column_names=first_column_names,
                     backup_url=backup_url, cache=cache, **kwargs)
    # generate filename and read to dict
    # (a bare name is treated as a key under the configured write dir)
    filekey = filename
    filename = settings.writedir + filekey + '.' + settings.file_format_data
    if not Path(filename).exists():
        raise ValueError('Reading with filekey "{}" failed, the '
                         'inferred filename "{}" does not exist. '
                         'If you intended to provide a filename, either '
                         'use a filename ending on one of the available extensions {} '
                         'or pass the parameter `ext`.'
                         .format(filekey, filename, avail_exts))
    return read_h5ad(filename, backed=backed)
Read file and return :class:`~anndata.AnnData` object. To speed up reading, consider passing `cache=True`, which creates an hdf5 cache file. Parameters ---------- filename : `str` If the filename has no file extension, it is interpreted as a key for generating a filename via `sc.settings.writedir + filename + sc.settings.file_format_data`. This is the same behavior as in `sc.read(filename, ...)`. backed : {`False`, `True`, 'r', 'r+'}, optional (default: `False`) Load :class:`~anndata.AnnData` in `backed` mode instead of fully loading it into memory (`memory` mode). Only applies to `.h5ad` files. `True` and 'r' are equivalent. If you want to modify backed attributes of the AnnData object, you need to choose 'r+'. sheet : `str`, optional (default: `None`) Name of sheet/table in hdf5 or Excel file. cache : `bool`, optional (default: `False`) If `False`, read from source, if `True`, read from fast 'h5ad' cache. ext : `str`, optional (default: `None`) Extension that indicates the file type. If `None`, uses extension of filename. delimiter : `str`, optional (default: `None`) Delimiter that separates data within text file. If `None`, will split at arbitrary number of white spaces, which is different from enforcing splitting at any single white space ' '. first_column_names : `bool`, optional (default: `False`) Assume the first column stores row names. This is only necessary if these are not strings: strings in the first column are automatically assumed to be row names. backup_url : `str`, optional (default: `None`) Retrieve the file from an URL if not present on disk. Returns ------- An :class:`~anndata.AnnData` object
def calculate_best_chunk_size(self, data_length):
    """
    Calculates the best chunk size for a list of length data_length. The
    current implemented formula is more or less an empirical result for
    the multiprocessing case on one machine.

    :param data_length: A length which defines how many calculations there need to be.
    :type data_length: int
    :return: the calculated chunk size
    :rtype: int

    TODO: Investigate which is the best chunk size for different settings.
    """
    # Aim for roughly five chunks per worker; round up on a remainder.
    quotient, remainder = divmod(data_length, self.n_workers * 5)
    return quotient + 1 if remainder else quotient
Calculates the best chunk size for a list of length data_length. The current implemented formula is more or less an empirical result for multiprocessing case on one machine. :param data_length: A length which defines how many calculations there need to be. :type data_length: int :return: the calculated chunk size :rtype: int TODO: Investigate which is the best chunk size for different settings.
def pop(self, sexp):
    '''
    Notes:
        Sequence works a bit different than other nodes.  This method
    (like others) expects a list.  However, sequence matches against
    the list, whereas other nodes try to match against elements of
    the list.
    '''
    remaining = sexp
    # Thread the s-expression through each term in order; every term
    # consumes its match and returns the remainder.
    for term in self.terms:
        remaining = term.pop(remaining)
    return remaining
Notes: Sequence works a bit differently than other nodes. This method (like others) expects a list. However, sequence matches against the list, whereas other nodes try to match against elements of the list.
def manifests_parse(self):
    '''parse manifests present on system

    Populates ``self.manifests`` with every manifest whose
    format-version has major 1; other majors are skipped with a
    message, and stale files trigger a hint to re-download.
    '''
    self.manifests = []
    for manifest_path in self.find_manifests():
        if self.manifest_path_is_old(manifest_path):
            print("fw: Manifest (%s) is old; consider 'manifest download'" % (manifest_path))
        manifest = self.manifest_parse(manifest_path)
        if self.semver_major(manifest["format-version"]) != 1:
            # NOTE(review): %d is applied to the full format-version, not
            # its major component -- this fails if format-version is a
            # string like "2.0.0"; confirm intent.
            print("fw: Manifest (%s) has major version %d; MAVProxy only understands version 1" % (manifest_path,manifest["format-version"]))
            continue
        self.manifests.append(manifest)
parse manifests present on system
def plot(self, axis=None, **kargs):
    """
    Plot the scene as seen by the camera, i.e. draw the particles at
    their apparent (projected) coordinates.

    :param axis: an existing matplotlib axis to draw on; when None the
        current axis (``plt.gca()``) is used.
    :param kargs: extra :class:`~matplotlib.lines.Line2D` keyword
        properties forwarded to :meth:`matplotlib.axes.Axes.plot`
        (e.g. ``color``, ``alpha``, ``markersize``, ``linestyle``).
        The kwargs *scalex* and *scaley*, if defined, are passed on to
        :meth:`~matplotlib.axes.Axes.autoscale_view` to determine
        whether the *x* and *y* axes are autoscaled.
    """
    # "axis == None" compared by equality; identity ("is None") is the
    # correct and conventional test for the None sentinel.
    if axis is None:
        axis = plt.gca()
    # 'k.' draws the projected particle positions as black point markers.
    axis.plot(self.__x, self.__y, 'k.', **kargs)
- plot(axis=None, **kwarg): Finally, sphviewer.Scene class has its own plotting method. It shows the scene as seen by the camera. It is to say, it plots the particles according to their aparent coordinates; axis makes a reference to an existing axis. In case axis is None, the plot is made on the current axis. The kwargs are :class:`~matplotlib.lines.Line2D` properties: agg_filter: unknown alpha: float (0.0 transparent through 1.0 opaque) animated: [True | False] antialiased or aa: [True | False] axes: an :class:`~matplotlib.axes.Axes` instance clip_box: a :class:`matplotlib.transforms.Bbox` instance clip_on: [True | False] clip_path: [ (:class:`~matplotlib.path.Path`, :class:`~matplotlib.transforms.Transform`) | :class:`~matplotlib.patches.Patch` | None ] color or c: any matplotlib color contains: a callable function dash_capstyle: ['butt' | 'round' | 'projecting'] dash_joinstyle: ['miter' | 'round' | 'bevel'] dashes: sequence of on/off ink in points data: 2D array (rows are x, y) or two 1D arrays drawstyle: [ 'default' | 'steps' | 'steps-pre' | 'steps-mid' | 'steps-post' ] figure: a :class:`matplotlib.figure.Figure` instance fillstyle: ['full' | 'left' | 'right' | 'bottom' | 'top'] gid: an id string label: any string linestyle or ls: [ ``'-'`` | ``'--'`` | ``'-.'`` | ``':'`` | ``'None'`` | ``' '`` | ``''`` ] and any drawstyle in combination with a linestyle, e.g. ``'steps--'``. 
linewidth or lw: float value in points lod: [True | False] marker: [ ``7`` | ``4`` | ``5`` | ``6`` | ``'o'`` | ``'D'`` | ``'h'`` | ``'H'`` | ``'_'`` | ``''`` | ``'None'`` | ``' '`` | ``None`` | ``'8'`` | ``'p'`` | ``','`` | ``'+'`` | ``'.'`` | ``'s'`` | ``'*'`` | ``'d'`` | ``3`` | ``0`` | ``1`` | ``2`` | ``'1'`` | ``'3'`` | ``'4'`` | ``'2'`` | ``'v'`` | ``'<'`` | ``'>'`` | ``'^'`` | ``'|'`` | ``'x'`` | ``'$...$'`` | *tuple* | *Nx2 array* ] markeredgecolor or mec: any matplotlib color markeredgewidth or mew: float value in points markerfacecolor or mfc: any matplotlib color markerfacecoloralt or mfcalt: any matplotlib color markersize or ms: float markevery: None | integer | (startind, stride) picker: float distance in points or callable pick function ``fn(artist, event)`` pickradius: float distance in points rasterized: [True | False | None] snap: unknown solid_capstyle: ['butt' | 'round' | 'projecting'] solid_joinstyle: ['miter' | 'round' | 'bevel'] transform: a :class:`matplotlib.transforms.Transform` instance url: a url string visible: [True | False] xdata: 1D array ydata: 1D array zorder: any number kwargs *scalex* and *scaley*, if defined, are passed on to :meth:`~matplotlib.axes.Axes.autoscale_view` to determine whether the *x* and *y* axes are autoscaled; the default is *True*. Additional kwargs: hold = [True|False] overrides default hold state
def from_connection_string(s):
    """
    Build a KerberosCredential from a connection string.

    Credential input format:
    <domain>/<username>/<secret_type>:<secret>@<dc_ip_or_hostname>
    """
    cred = KerberosCredential()
    # Peel the fields off left-to-right; rsplit on '@' so '@' inside the
    # secret survives.
    cred.domain, rest = s.split('/', 1)
    cred.username, rest = rest.split('/', 1)
    secret_type, rest = rest.split(':', 1)
    secret, _target = rest.rsplit('@', 1)

    st = KerberosSecretType(secret_type.upper())
    if st in (KerberosSecretType.PASSWORD, KerberosSecretType.PW, KerberosSecretType.PASS):
        cred.password = secret
    elif st in (KerberosSecretType.NT, KerberosSecretType.RC4):
        # NT hash doubles as the RC4 Kerberos key.
        cred.nt_hash = secret
        cred.kerberos_key_rc4 = secret
    elif st == KerberosSecretType.AES:
        # The same key material is stored for both AES key sizes.
        cred.kerberos_key_aes_256 = secret
        cred.kerberos_key_aes_128 = secret
    elif st == KerberosSecretType.DES:
        cred.kerberos_key_des = secret
    elif st in (KerberosSecretType.DES3, KerberosSecretType.TDES):
        cred.kerberos_key_des3 = secret
    elif st == KerberosSecretType.CCACHE:
        cred.ccache = CCACHE.from_file(secret)
    return cred
Credential input format: <domain>/<username>/<secret_type>:<secret>@<dc_ip_or_hostname>
def _get_keys_defdict(self): '''Get the keys and the default dictionary of the given function's arguments ''' # inspect argspecs argspec = inspect.getargspec(self.func) keys, defvals = argspec.args, argspec.defaults # convert to (list_of_argkeys, dict_of_default_keys) if defvals is None: return keys, None else: defvals = list(defvals) keys.reverse() defvals.reverse() defdict = dict(zip(keys, defvals)) keys.reverse() return keys, defdict
Get the keys and the default dictionary of the given function's arguments
def _delete_upload_id(conn: Connection, table: Table, upload_id: int) -> int:
    """Remove all table records with the supplied upload_id

    :param conn: sql connection
    :param table: table to modify
    :param upload_id: target upload_id
    :return: number of records removed
    """
    # A falsy upload_id (0 / None) deletes nothing; skip the round-trip.
    if not upload_id:
        return 0
    result = conn.execute(delete(table).where(table.c.upload_id == upload_id))
    return result.rowcount
Remove all table records with the supplied upload_id :param conn: sql connection :param table: table to modify :param upload_id: target upload_id :return: number of records removed
def refresh_queues(self, fatal=False):
    """ Updates the list of currently known queues and subqueues """
    try:
        # Trailing-slash entries are prefixes to be expanded into subqueues.
        prefixes = [q for q in self.config["queues"] if q.endswith("/")]
        known_subqueues = Queue.all_known(prefixes=prefixes)

        refreshed = []
        for name in self.config["queues"]:
            refreshed.append(Queue(name))
            if name.endswith("/"):
                refreshed.extend(
                    Queue(sub) for sub in known_subqueues if sub.startswith(name)
                )
        self.queues = refreshed
    except Exception as e:  # pylint: disable=broad-except
        self.log.error("When refreshing subqueues: %s", e)
        if fatal:
            raise
Updates the list of currently known queues and subqueues
def get_thumbnail_paths(self):
    """
    Helper function used to avoid processing thumbnail files during `os.walk`.
    """
    thumbnail_tuples = []

    # Channel-level thumbnail, if declared in the channel info.
    channel_thumb = self.get_channel_info().get('thumbnail_chan_path', None)
    if channel_thumb:
        thumbnail_tuples.append(path_to_tuple(channel_thumb, windows=self.winpaths))

    # Per-content thumbnails from the content cache rows.
    for _content_path_tuple, row in self.contentcache.items():
        content_thumb = row.get('thumbnail_chan_path', None)
        if content_thumb:
            thumbnail_tuples.append(path_to_tuple(content_thumb, windows=self.winpaths))

    return thumbnail_tuples
Helper function used to avoid processing thumbnail files during `os.walk`.
async def set_lock(self, resource, lock_identifier, lock_timeout):
    """
    Lock this instance and set lock expiration time to lock_timeout

    :param resource: redis key to set
    :param lock_identifier: unique id of lock
    :param lock_timeout: timeout for lock in seconds
    :raises: LockError if lock is not acquired
    """
    # Redis expects the expiry in milliseconds.
    lock_timeout_ms = int(lock_timeout * 1000)
    try:
        with await self.connect() as redis:
            # The Lua script performs the set-if-matching-owner atomically
            # on the server side.
            await redis.eval(
                self.set_lock_script,
                keys=[resource],
                args=[lock_identifier, lock_timeout_ms]
            )
    except aioredis.errors.ReplyError as exc:  # script fault
        self.log.debug('Can not set lock "%s" on %s',
                       resource, repr(self))
        raise LockError('Can not set lock') from exc
    except (aioredis.errors.RedisError, OSError) as exc:
        # Connectivity / server errors are logged louder than script faults.
        self.log.error('Can not set lock "%s" on %s: %s',
                       resource, repr(self), repr(exc))
        raise LockError('Can not set lock') from exc
    except asyncio.CancelledError:
        # Propagate cancellation untouched so task teardown works normally.
        self.log.debug('Lock "%s" is cancelled on %s',
                       resource, repr(self))
        raise
    except Exception as exc:
        # Unexpected failure: keep the traceback, re-raise unchanged.
        self.log.exception('Can not set lock "%s" on %s',
                           resource, repr(self))
        raise
    else:
        self.log.debug('Lock "%s" is set on %s', resource, repr(self))
Lock this instance and set lock expiration time to lock_timeout :param resource: redis key to set :param lock_identifier: unique id of lock :param lock_timeout: timeout for lock in seconds :raises: LockError if lock is not acquired
def load_word_file(filename):
    """Load a bundled words file and return its lines as a list.

    :param filename: file name relative to the package's ``words/`` dir.
    :return: list of lines (with trailing newlines preserved).
    """
    words_file = resource_filename(__name__, "words/%s" % filename)
    # Context manager guarantees the handle is closed even if reading
    # raises (the original leaked the handle on error).
    with open(words_file, 'r') as handle:
        return handle.readlines()
Loads a words file as a list of lines
def pwm_scan(self, fa, cutoff=0.9, nreport=50, scan_rc=True):
    """Scan sequences with this motif.

    Scan sequences from a FASTA object with this motif. Less efficient
    than using a Scanner object. By setting the cutoff to 0.0 and
    nreport to 1, the best match for every sequence will be returned.
    Only the position of the matches is returned.

    Parameters
    ----------
    fa : Fasta object
        Fasta object to scan.
    cutoff : float , optional
        Cutoff to use for motif scanning. This cutoff is not specifically
        optimized and the strictness will vary a lot with motif length.
    nreport : int , optional
        Maximum number of matches to report.
    scan_rc : bool , optional
        Scan the reverse complement. True by default.

    Returns
    -------
    matches : dict
        Dictionary with motif matches. Only the position of the matches
        is returned.
    """
    # Translate the relative cutoff into an absolute PWM score threshold.
    threshold = self.pwm_min_score() + (
        self.pwm_max_score() - self.pwm_min_score()) * cutoff

    pwm = self.pwm
    matches = {}
    for name, seq in fa.items():
        hits = pfmscan(seq.upper(), pwm, threshold, nreport, scan_rc)
        # Each hit is (score, position, strand); keep positions only.
        matches[name] = [position for _score, position, _strand in hits]
    return matches
Scan sequences with this motif. Scan sequences from a FASTA object with this motif. Less efficient than using a Scanner object. By setting the cutoff to 0.0 and nreport to 1, the best match for every sequence will be returned. Only the position of the matches is returned. Parameters ---------- fa : Fasta object Fasta object to scan. cutoff : float , optional Cutoff to use for motif scanning. This cutoff is not specifically optimized and the strictness will vary a lot with motif length. nreport : int , optional Maximum number of matches to report. scan_rc : bool , optional Scan the reverse complement. True by default. Returns ------- matches : dict Dictionary with motif matches. Only the position of the matches is returned.
def correlation(a, b):
    """Return the correlation distance between vectors ``a`` and ``b``.

    :param a: 1-D array-like (list, tuple, ndarray).
    :param b: 1-D array-like of the same length.
    :return: a (1, 1) ndarray holding the correlation distance.
    """
    # asarray generalizes the original list-only conversion to any
    # array-like (tuples included) without copying existing ndarrays.
    a = np.asarray(a).reshape(1, -1)
    b = np.asarray(b).reshape(1, -1)
    return cdist(a, b, 'correlation')
Returns correlation distance between a and b
def format_additional_features_server_configurations(result):
    '''
    Formats the AdditionalFeaturesServerConfigurations object removing arguments that are empty
    '''
    from collections import OrderedDict

    # Candidate output keys paired with their source attribute values;
    # only values that were actually set make it into the result.
    pairs = (
        ('isRServicesEnabled', result.is_rservices_enabled),
        ('backupPermissionsForAzureBackupSvc',
         result.backup_permissions_for_azure_backup_svc),
    )
    return OrderedDict((key, value) for key, value in pairs if value is not None)
Formats the AdditionalFeaturesServerConfigurations object removing arguments that are empty
def parse_md_to_rst(file):
    """Read Markdown file and convert to ReStructured Text.

    Falls back to the raw file contents when the optional ``m2r``
    converter is not installed in the user environment.
    """
    try:
        from m2r import parse_from_file
        converted = parse_from_file(file)
        # Rewrite relative artwork links to their hosted location.
        return converted.replace("artwork/", "http://198.27.119.65/")
    except ImportError:
        # m2r may not be installed in user environment
        return read(file)
Read Markdown file and convert to ReStructured Text.
def relationships_strict(instance): """Ensure that only the relationship types defined in the specification are used. """ # Don't check objects that aren't relationships or that are custom objects if (instance['type'] != 'relationship' or instance['type'] not in enums.TYPES): return if ('relationship_type' not in instance or 'source_ref' not in instance or 'target_ref' not in instance): # Since these fields are required, schemas will already catch the error return r_type = instance['relationship_type'] try: r_source = re.search(r"(.+)\-\-", instance['source_ref']).group(1) r_target = re.search(r"(.+)\-\-", instance['target_ref']).group(1) except (AttributeError, TypeError): # Schemas already catch errors of these properties not being strings or # not containing the string '--'. return if (r_type in enums.COMMON_RELATIONSHIPS or r_source in enums.NON_SDOS or r_target in enums.NON_SDOS): # If all objects can have this relationship type, no more checks needed # Schemas already catch if source/target type cannot have relationship return if r_source not in enums.RELATIONSHIPS: return JSONError("'%s' is not a suggested relationship source object " "for the '%s' relationship." % (r_source, r_type), instance['id'], 'relationship-types') if r_type not in enums.RELATIONSHIPS[r_source]: return JSONError("'%s' is not a suggested relationship type for '%s' " "objects." % (r_type, r_source), instance['id'], 'relationship-types') if r_target not in enums.RELATIONSHIPS[r_source][r_type]: return JSONError("'%s' is not a suggested relationship target object " "for '%s' objects with the '%s' relationship." % (r_target, r_source, r_type), instance['id'], 'relationship-types')
Ensure that only the relationship types defined in the specification are used.
def handleEvent(self, eventObj):
    """This method should be called every time through the main loop.
    It handles all of the dragging

    Parameters:
        | eventObj - the event object obtained by calling pygame.event.get()

    Returns:
        | False most of the time
        | True when the user finishes dragging by lifting up on the mouse.

    """
    if not self.isEnabled:
        return False

    if eventObj.type not in (MOUSEMOTION, MOUSEBUTTONUP, MOUSEBUTTONDOWN):
        # The dragger only cares about mouse-related events
        return False

    clicked = False

    if eventObj.type == MOUSEBUTTONDOWN:
        if self.rect.collidepoint(eventObj.pos):
            self.dragging = True
            # Remember the click offset inside the rect so the drag keeps
            # the grab point under the cursor.
            self.deltaX = eventObj.pos[0] - self.rect.left
            self.deltaY = eventObj.pos[1] - self.rect.top
            self.startDraggingX = self.rect.left
            self.startDraggingY = self.rect.top

    elif eventObj.type == MOUSEBUTTONUP:
        if self.dragging:
            self.dragging = False
            clicked = True  # a completed drag counts as a "click"
            self.mouseUpLoc = (eventObj.pos[0], eventObj.pos[1])
            # Snap the rect to the final cursor position and persist it.
            self.rect.left = eventObj.pos[0] - self.deltaX
            self.rect.top = eventObj.pos[1] - self.deltaY
            self.setLoc((self.rect.left, self.rect.top))

    elif eventObj.type == MOUSEMOTION:
        if self.dragging:
            # Follow the cursor while the button is held.
            self.rect.left = eventObj.pos[0] - self.deltaX
            self.rect.top = eventObj.pos[1] - self.deltaY
        else:
            self.mouseOver = self.rect.collidepoint(eventObj.pos)

    if clicked:
        if self.callBack is not None:
            self.callBack(self.nickname)

    return clicked
This method should be called every time through the main loop. It handles all of the dragging Parameters: | eventObj - the event object obtained by calling pygame.event.get() Returns: | False most of the time | True when the user finishes dragging by lifting up on the mouse.
def get_time_slide_id(self, offsetdict, create_new = None, superset_ok = False, nonunique_ok = False): """ Return the time_slide_id corresponding to the offset vector described by offsetdict, a dictionary of instrument/offset pairs. If the optional create_new argument is None (the default), then the table must contain a matching offset vector. The return value is the ID of that vector. If the table does not contain a matching offset vector then KeyError is raised. If the optional create_new argument is set to a Process object (or any other object with a process_id attribute), then if the table does not contain a matching offset vector a new one will be added to the table and marked as having been created by the given process. The return value is the ID of the (possibly newly created) matching offset vector. If the optional superset_ok argument is False (the default) then an offset vector in the table is considered to "match" the requested offset vector only if they contain the exact same set of instruments. If the superset_ok argument is True, then an offset vector in the table is considered to match the requested offset vector as long as it provides the same offsets for the same instruments as the requested vector, even if it provides offsets for other instruments as well. More than one offset vector in the table might match the requested vector. If the optional nonunique_ok argument is False (the default), then KeyError will be raised if more than one offset vector in the table is found to match the requested vector. If the optional nonunique_ok is True then the return value is the ID of one of the matching offset vectors selected at random. 
""" # look for matching offset vectors if superset_ok: ids = [id for id, slide in self.as_dict().items() if offsetdict == dict((instrument, offset) for instrument, offset in slide.items() if instrument in offsetdict)] else: ids = [id for id, slide in self.as_dict().items() if offsetdict == slide] if len(ids) > 1: # found more than one if nonunique_ok: # and that's OK return ids[0] # and that's not OK raise KeyError(offsetdict) if len(ids) == 1: # found one return ids[0] # offset vector not found in table if create_new is None: # and that's not OK raise KeyError(offsetdict) # that's OK, create new vector id = self.get_next_id() for instrument, offset in offsetdict.items(): row = self.RowType() row.process_id = create_new.process_id row.time_slide_id = id row.instrument = instrument row.offset = offset self.append(row) # return new ID return id
Return the time_slide_id corresponding to the offset vector described by offsetdict, a dictionary of instrument/offset pairs. If the optional create_new argument is None (the default), then the table must contain a matching offset vector. The return value is the ID of that vector. If the table does not contain a matching offset vector then KeyError is raised. If the optional create_new argument is set to a Process object (or any other object with a process_id attribute), then if the table does not contain a matching offset vector a new one will be added to the table and marked as having been created by the given process. The return value is the ID of the (possibly newly created) matching offset vector. If the optional superset_ok argument is False (the default) then an offset vector in the table is considered to "match" the requested offset vector only if they contain the exact same set of instruments. If the superset_ok argument is True, then an offset vector in the table is considered to match the requested offset vector as long as it provides the same offsets for the same instruments as the requested vector, even if it provides offsets for other instruments as well. More than one offset vector in the table might match the requested vector. If the optional nonunique_ok argument is False (the default), then KeyError will be raised if more than one offset vector in the table is found to match the requested vector. If the optional nonunique_ok is True then the return value is the ID of one of the matching offset vectors selected at random.
def decrypt(self, esp, key, icv_size=None):
    """
    Decrypt an ESP packet

    @param esp: an encrypted ESP packet
    @param key: the secret key used for encryption
    @param icv_size: the length of the icv used for integrity check

    @return: a valid ESP packet encrypted with this algorithm
    @raise IPSecIntegrityError: if the integrity check fails with an AEAD
                                algorithm
    """
    if icv_size is None:
        icv_size = self.icv_size if self.is_aead else 0

    # Layout: IV | ciphertext | ICV (ICV only present for AEAD modes).
    iv = esp.data[:self.iv_size]
    data = esp.data[self.iv_size:len(esp.data) - icv_size]
    icv = esp.data[len(esp.data) - icv_size:]

    if self.cipher:
        cipher = self.new_cipher(key, iv, icv)
        decryptor = cipher.decryptor()

        if self.is_aead:
            # Tag value check is done during the finalize method
            decryptor.authenticate_additional_data(
                struct.pack('!LL', esp.spi, esp.seq)
            )
        try:
            data = decryptor.update(data) + decryptor.finalize()
        except InvalidTag as err:
            raise IPSecIntegrityError(err)

    # ESP trailer: ... | padding | pad length (1 byte) | next header (1 byte)
    padlen = data[-2]
    nh = data[-1]

    # BUGFIX: slice the padding out *before* truncating `data`; the
    # original truncated first, so the padding bytes were taken from the
    # wrong region of the (already shortened) payload.
    padding = data[len(data) - padlen - 2: len(data) - 2]
    data = data[:len(data) - padlen - 2]

    return _ESPPlain(spi=esp.spi,
                     seq=esp.seq,
                     iv=iv,
                     data=data,
                     padding=padding,
                     padlen=padlen,
                     nh=nh,
                     icv=icv)
Decrypt an ESP packet @param esp: an encrypted ESP packet @param key: the secret key used for encryption @param icv_size: the length of the icv used for integrity check @return: a valid ESP packet encrypted with this algorithm @raise IPSecIntegrityError: if the integrity check fails with an AEAD algorithm
def request(self, action, data=None, headers=None, method='GET'):
    """
    Append the user authentication details to every incoming request

    :param action: API action to invoke.
    :param data: optional dict of request parameters.
    :param headers: optional dict of HTTP headers.
    :param method: HTTP method, 'GET' by default.
    :return: result of ``Transport.request``.
    """
    # Mutable default arguments ({} in the signature) are shared between
    # calls; use None sentinels and create fresh dicts instead.
    if data is None:
        data = {}
    if headers is None:
        headers = {}
    data = self.merge(data, {'user': self.username,
                             'password': self.password,
                             'api_id': self.apiId})
    return Transport.request(self, action, data, headers, method)
Append the user authentication details to every incoming request
def _check_position(self, feature, info): """ Takes the featur and the info dict and checks for the forced position :param feature: :param info: :return: """ pos = info.get('position') if pos is not None: feature_pos = self.get_feature_position(feature) if feature_pos is not None: if feature_pos != pos: message = '{feature} has a forced position on ({pos}) but is on position {feature_pos}.'.format( feature=feature, pos=pos, feature_pos=feature_pos ) self.violations.append((feature, message))
Takes the feature and the info dict and checks for the forced position :param feature: :param info: :return:
def dragRel(xOffset=0, yOffset=0, duration=0.0, tween=linear, button='left', pause=None, _pause=True, mouseDownUp=True):
    """Performs a mouse drag (mouse movement while a button is held down) to a
    point on the screen, relative to its current position.

    Args:
      xOffset (int, float, None, tuple, optional): How far left (for negative
        values) or right (for positive values) to move the cursor. 0 by
        default. If tuple, this is used for xOffset and yOffset.
      yOffset (int, float, None, optional): How far up (for negative values) or
        down (for positive values) to move the cursor. 0 by default.
      duration (float, optional): The amount of time it takes to move the mouse
        cursor to the new xy coordinates. If 0, then the mouse cursor is moved
        instantaneously. 0.0 by default.
      tween (func, optional): The tweening function used if the duration is
        not 0. A linear tween is used by default. See the tweens.py file for
        details.
      button (str, int, optional): The mouse button clicked. Must be one of
        'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
        default.
      mouseDownUp (True, False): When True (the default), the mouseDown and
        mouseUp actions are performed around the drag. Set to False to skip
        them, which allows dragging over multiple (small) actions.

    Returns:
      None
    """
    # Treat None the same as "no movement" on either axis.
    if xOffset is None:
        xOffset = 0
    if yOffset is None:
        yOffset = 0
    # A tuple/list first argument carries both offsets.
    if type(xOffset) in (tuple, list):
        xOffset, yOffset = xOffset[0], xOffset[1]
    if xOffset == 0 and yOffset == 0:
        return # no-op case
    _failSafeCheck()
    mousex, mousey = platformModule._position()
    if mouseDownUp:
        mouseDown(button=button, _pause=False)
    _mouseMoveDrag('drag', mousex, mousey, xOffset, yOffset, duration, tween, button)
    if mouseDownUp:
        mouseUp(button=button, _pause=False)
    _autoPause(pause, _pause)
Performs a mouse drag (mouse movement while a button is held down) to a point on the screen, relative to its current position. The x and y parameters detail where the mouse event happens. If None, the current mouse position is used. If a float value, it is rounded down. If outside the boundaries of the screen, the event happens at edge of the screen. Args: x (int, float, None, tuple, optional): How far left (for negative values) or right (for positive values) to move the cursor. 0 by default. If tuple, this is used for xOffset and yOffset. y (int, float, None, optional): How far up (for negative values) or down (for positive values) to move the cursor. 0 by default. duration (float, optional): The amount of time it takes to move the mouse cursor to the new xy coordinates. If 0, then the mouse cursor is moved instantaneously. 0.0 by default. tween (func, optional): The tweening function used if the duration is not 0. A linear tween is used by default. See the tweens.py file for details. button (str, int, optional): The mouse button clicked. Must be one of 'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by default. mouseDownUp (True, False): When true, the mouseUp/Down actions are not perfomed. Which allows dragging over multiple (small) actions. 'True' by default. Returns: None
def hook(self, name):
    """ Return a decorator that attaches a callback to a hook. """
    def decorator(func):
        # Register the callback under the hook name and hand the function
        # back unchanged so the decorator is transparent to callers.
        self.hooks.add(name, func)
        return func
    return decorator
Return a decorator that attaches a callback to a hook.
def put(self, namespacePrefix):
    """Update a specific configuration namespace

    Updates the display name and sort order of the namespace identified
    by ``namespacePrefix``, reloads the cached config and audit-logs the
    change. Returns 404 when the namespace does not exist.
    """
    # Both fields are mandatory for an update.
    self.reqparse.add_argument('name', type=str, required=True)
    self.reqparse.add_argument('sortOrder', type=int, required=True)
    args = self.reqparse.parse_args()

    ns = db.ConfigNamespace.find_one(ConfigNamespace.namespace_prefix == namespacePrefix)
    if not ns:
        return self.make_response('No such namespace: {}'.format(namespacePrefix), HTTP.NOT_FOUND)

    ns.name = args['name']
    ns.sort_order = args['sortOrder']
    db.session.add(ns)
    db.session.commit()
    # Refresh the in-memory config so the change is visible immediately.
    self.dbconfig.reload_data()
    auditlog(event='configNamespace.update', actor=session['user'].username, data=args)

    return self.make_response('Namespace updated')
Update a specific configuration namespace
def normalize(self, timestamp, steps=0):
    '''
    Normalize a timestamp according to the interval configuration.
    Optionally can be used to calculate the timestamp N steps away.
    '''
    # Round-trip through the bucket index: to_bucket() also applies the
    # step offset; from_bucket() maps back to a normalized timestamp.
    bucket = self.to_bucket(timestamp, steps)
    return self.from_bucket(bucket)
Normalize a timestamp according to the interval configuration. Optionally can be used to calculate the timestamp N steps away.
def returns_true_or_raises(f):
    """A safety net.

    Decorator for functions that are only allowed to return True or raise
    an exception.

    Args:
      f: A function whose only expected return value is True.

    Returns:
      A wrapped function whose guaranteed only return value is True.
    """
    @functools.wraps(f)
    def guarded(*args, **kwargs):
        result = f(*args, **kwargs)
        if result is not True:
            raise RuntimeError("Unexpected return value %r" % result)
        return True
    return guarded
A safety net. Decorator for functions that are only allowed to return True or raise an exception. Args: f: A function whose only expected return value is True. Returns: A wrapped functions whose guaranteed only return value is True.
def set_log_level(logger, level):
    # type: (logging.Logger, int) -> None
    """Dynamic reconfiguration of the log level"""
    # Clamp the requested verbosity into the supported [-1, 2] range,
    # then translate it to the corresponding logging constant.
    clamped = max(-1, min(2, level))
    level_map = {
        -1: logging.ERROR,
        0: logging.WARN,
        1: logging.INFO,
        2: logging.DEBUG,
    }
    logger.setLevel(level_map[clamped])
Dynamic reconfiguration of the log level
def _CreateBudget(client): """Creates the budget. Args: client: an AdWordsClient instance. Returns: a suds.sudsobject.Object representation of the created budget. """ budget_service = client.GetService('BudgetService', version='v201809') # Create the campaign budget operation = { 'operand': { 'name': 'Interplanetary Cruise Budget #%d' % uuid.uuid4(), 'deliveryMethod': 'STANDARD', 'amount': { 'microAmount': 500000 } }, 'operator': 'ADD' } budget = budget_service.mutate([operation])['value'][0] print 'Budget with ID "%d" and name "%s" was created.' % ( budget['budgetId'], budget['name']) return budget
Creates the budget. Args: client: an AdWordsClient instance. Returns: a suds.sudsobject.Object representation of the created budget.
def show_current_number(parser, token):
    """Show the current page number, or insert it in the context.

    This tag can for example be useful to change the page title
    according to the current page number.

    To just show current page number:

    .. code-block:: html+django

        {% show_current_number %}

    If you use multiple paginations in the same page, you can get the
    page number for a specific pagination using the querystring key, e.g.:

    .. code-block:: html+django

        {% show_current_number using mykey %}

    The default page when no querystring is specified is 1. If you
    changed it in the `paginate`_ template tag, you have to call
    ``show_current_number`` according to your choice, e.g.:

    .. code-block:: html+django

        {% show_current_number starting from page 3 %}

    This can be also achieved using a template variable you passed to
    the context, e.g.:

    .. code-block:: html+django

        {% show_current_number starting from page page_number %}

    You can of course mix it all (the order of arguments is important):

    .. code-block:: html+django

        {% show_current_number starting from page 3 using mykey %}

    If you want to insert the current page number in the context,
    without actually displaying it in the template, use the *as*
    argument, i.e.:

    .. code-block:: html+django

        {% show_current_number as page_number %}
        {% show_current_number starting from page 3 using mykey as page_number %}
    """
    # Validate args.
    try:
        tag_name, args = token.contents.split(None, 1)
    except ValueError:
        # No arguments: the whole contents is the tag name. The original
        # ``token.contents[0]`` only grabbed the first *character* of the
        # string, yielding e.g. 's' instead of 'show_current_number'.
        key = None
        number = None
        tag_name = token.contents
        var_name = None
    else:
        # Use a regexp to catch args.
        match = SHOW_CURRENT_NUMBER_EXPRESSION.match(args)
        if match is None:
            msg = 'Invalid arguments for %r tag' % tag_name
            raise template.TemplateSyntaxError(msg)
        # Retrieve objects.
        groupdict = match.groupdict()
        key = groupdict['key']
        number = groupdict['number']
        var_name = groupdict['var_name']
    # Call the node.
    return ShowCurrentNumberNode(number, key, var_name)
Show the current page number, or insert it in the context. This tag can for example be useful to change the page title according to the current page number. To just show current page number: .. code-block:: html+django {% show_current_number %} If you use multiple paginations in the same page, you can get the page number for a specific pagination using the querystring key, e.g.: .. code-block:: html+django {% show_current_number using mykey %} The default page when no querystring is specified is 1. If you changed it in the `paginate`_ template tag, you have to call ``show_current_number`` according to your choice, e.g.: .. code-block:: html+django {% show_current_number starting from page 3 %} This can be also achieved using a template variable you passed to the context, e.g.: .. code-block:: html+django {% show_current_number starting from page page_number %} You can of course mix it all (the order of arguments is important): .. code-block:: html+django {% show_current_number starting from page 3 using mykey %} If you want to insert the current page number in the context, without actually displaying it in the template, use the *as* argument, i.e.: .. code-block:: html+django {% show_current_number as page_number %} {% show_current_number starting from page 3 using mykey as page_number %}
def call_pre_hook(awsclient, cloudformation):
    """Invoke the pre_hook BEFORE the config is read.

    :param awsclient:
    :param cloudformation:
    """
    # TODO: this is deprecated!! move this to glomex_config_reader
    # no config available
    if not hasattr(cloudformation, 'pre_hook'):
        # hook is not present
        return
    hook_func = getattr(cloudformation, 'pre_hook')
    # ``func_code`` is the Python-2-only spelling; ``__code__`` works on
    # Python 2.6+ and Python 3.
    if not hook_func.__code__.co_argcount:
        hook_func()  # for compatibility with existing templates
    else:
        # Fixed typo ("pre_hock") and grammar in the original message.
        log.error('pre_hook can not have any arguments. The pre_hook is ' +
                  'executed BEFORE config is read')
Invoke the pre_hook BEFORE the config is read. :param awsclient: :param cloudformation:
def get_by_username(cls, username):
    """Get profile by username.

    :param username: A username to query for (case insensitive).
    """
    # Usernames are stored lowercase, so fold the query term before
    # comparing against the private ``_username`` column.
    normalized = username.lower()
    matches = cls.query.filter(UserProfile._username == normalized)
    return matches.one()
Get profile by username. :param username: A username to query for (case insensitive).
def read(self, entity=None, attrs=None, ignore=None, params=None):
    """Ignore the template inputs when initially reading the job template.
    Look up each TemplateInput entity separately and afterwards add them
    to the JobTemplate entity."""
    if attrs is None:
        attrs = self.read_json(params=params)
    if ignore is None:
        ignore = set()
    # Never let the base class deserialize template_inputs directly;
    # they are rebuilt below as full TemplateInput entities.
    ignore.add('template_inputs')
    entity = super(JobTemplate, self).read(
        entity=entity, attrs=attrs, ignore=ignore, params=params)
    inputs = []
    for input_id in _get_entity_ids('template_inputs', attrs):
        # One fresh JobTemplate reference per input, matching the
        # original per-item construction.
        inputs.append(TemplateInput(
            entity._server_config,
            id=input_id,
            template=JobTemplate(entity._server_config, id=entity.id),
        ))
    setattr(entity, 'template_inputs', inputs)
    return entity
Ignore the template inputs when initially reading the job template. Look up each TemplateInput entity separately and afterwards add them to the JobTemplate entity.