code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def insert(self, i, tag1, tag2, cmd="prevtag", x=None, y=None):
    """Insert a new rule at index i that rewrites words tagged tag1 to tag2,
    given constraints x and y, e.g., Context.append("TO < NN", "VB").

    A "A < B" tag1 shorthand expands to a prevtag constraint, "A > B" to a
    nexttag constraint.
    """
    if " < " in tag1 and not (x or y):
        tag1, x = tag1.split(" < ")
        cmd = "prevtag"
    if " > " in tag1 and not (x or y):
        x, tag1 = tag1.split(" > ")
        cmd = "nexttag"
    lazylist.insert(self, i, [tag1, tag2, cmd, x or "", y or ""])
Inserts a new rule that updates words with tag1 to tag2, given constraints x and y, e.g., Context.append("TO < NN", "VB")
def loadPage(self, number=0):
    """loadPage(self, number=0) -> Page"""
    if self.isClosed or self.isEncrypted:
        raise ValueError("operation illegal for closed / encrypted doc")
    page = _fitz.Document_loadPage(self, number)
    if page:
        page.thisown = True
        page.parent = weakref.proxy(self)
        # Resolve negative page numbers by repeatedly adding the page count
        # (Python-style indexing from the end).
        count = self.pageCount
        resolved = number
        while resolved < 0:
            resolved += count
        page.number = resolved
        self._page_refs[id(page)] = page
        page._annot_refs = weakref.WeakValueDictionary()
    return page
loadPage(self, number=0) -> Page
def start(self, hash, name=None, service='facebook'):
    """Start a recording for the provided hash.

    :param hash: The hash to start recording with
    :type hash: str
    :param name: The name of the recording
    :type name: str
    :param service: The service for this API call (facebook, etc)
    :type service: str
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    payload = {'hash': hash}
    if name:
        payload['name'] = name
    return self.request.post(service + '/start', payload)
Start a recording for the provided hash :param hash: The hash to start recording with :type hash: str :param name: The name of the recording :type name: str :param service: The service for this API call (facebook, etc) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
def create_double(self, value: float) -> Double:
    """Create a new :class:`ConstantDouble`, add it to the pool, and return it.

    :param value: The value of the new Double.
    """
    TAG_DOUBLE = 6
    self.append((TAG_DOUBLE, value))
    # Doubles occupy two constant-pool slots; the second is a placeholder.
    self.append(None)
    index = self.raw_count - 2
    return self.get(index)
Creates a new :class:`ConstantDouble`, adding it to the pool and returning it. :param value: The value of the new Double.
def loads(self, param):
    '''
    Check the return parameters, generating new proxy instances to avoid
    query concurrences from shared proxies, and creating proxies for actors
    from another host.
    '''
    if isinstance(param, ProxyRef):
        try:
            return self.lookup_url(param.url, param.klass, param.module)
        except HostError:
            # Fixed: original used a Python 2 print statement, which is a
            # syntax error on Python 3.
            print("Can't lookup for the actor received with the call. "
                  "It does not exist or the url is unreachable.", param)
            raise HostError(param)
    elif isinstance(param, list):
        return [self.loads(elem) for elem in param]
    elif isinstance(param, tuple):
        return tuple(self.loads(elem) for elem in param)
    elif isinstance(param, dict):
        # NOTE(review): mutates the incoming dict in place (keys unchanged,
        # values replaced) — matches original behavior.
        new_dict = param
        for key in new_dict.keys():
            new_dict[key] = self.loads(new_dict[key])
        return new_dict
    else:
        return param
Checks the return parameters generating new proxy instances to avoid query concurrences from shared proxies and creating proxies for actors from another host.
def concat_variant_files(orig_files, out_file, regions, ref_file, config):
    """Concatenate multiple variant files from regions into a single output file.

    Uses GATK4's GatherVcfs, falling back to bcftools concat --naive if it
    fails. These both only combine samples and avoid parsing, allowing
    scaling to large file sizes.
    """
    if not utils.file_exists(out_file):
        file_list = _get_file_list(orig_files, out_file, regions, ref_file, config)
        # GATK4 failure modes that are safely handled by the naive bcftools path.
        recoverable = ("We require all VCFs to have complete VCF headers",
                       "Features added out of order",
                       "The reference allele cannot be missing")
        try:
            out_file = _run_concat_variant_files_gatk4(file_list, out_file, config)
        except subprocess.CalledProcessError as msg:
            if any(err in str(msg) for err in recoverable):
                out_file = _run_concat_variant_files_bcftools(file_list, out_file, config,
                                                              naive=True)
            else:
                raise
    if out_file.endswith(".gz"):
        bgzip_and_index(out_file, config)
    return out_file
Concatenate multiple variant files from regions into a single output file. Uses GATK4's GatherVcfs, falling back to bcftools concat --naive if it fails. These both only combine samples and avoid parsing, allowing scaling to large file sizes.
def math_func(f):
    """Decorator that coerces a wrapped math function's result back to the
    type of its first argument (or an explicit ``return_type`` keyword).

    Arguments are converted via ``setify`` before the fallback call.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        if len(args) > 0:
            return_type = type(args[0])
            # Fixed: dict.has_key() was removed in Python 3; use `in`.
            if 'return_type' in kwargs:
                return_type = kwargs['return_type']
                kwargs.pop('return_type')
                return return_type(f(*args, **kwargs))
            # NOTE(review): reconstructed control flow — with no explicit
            # return_type, arguments are set-ified first; confirm against
            # upstream history.
            args = list(setify(x) for x in args)
            return return_type(f(*args, **kwargs))
    return wrapper
Statics the methods. wut.
def setText(self, label, default='', description='Set Text', format='text'):
    """Set text in a notebook pipeline (via interaction or with nbconvert).

    Loads the stored value for *label*, initializing it with *default* when
    absent, then wires an interactive Text widget that saves edits back.
    """
    obj = self.load(label)
    # Fixed: identity check instead of '== None', which can misfire on
    # objects overloading __eq__.
    if obj is None:
        obj = default
        self.save(obj, label)  # initialize with default
    textw = Text(value=obj, description=description)
    # Interactive saving; the handle is not needed, so it is not kept.
    interact(self.save, obj=textw, label=fixed(label), format=fixed(format))
Set text in a notebook pipeline (via interaction or with nbconvert)
def dump(self, fields=None, exclude=None):
    """
    Dump current object to dict, but the value is string.

    ManyToMany fields will not automatically be dumped; they are included
    only when explicitly listed in the *fields* parameter.
    """
    exclude = exclude or []
    d = {}
    # When an explicit field list is given, the primary key is always
    # forced into it so the dump stays identifiable.
    if fields and self._primary_field not in fields:
        fields = list(fields)
        fields.append(self._primary_field)
    for k, v in self.properties.items():
        # Respect both the include-list (fields) and the exclude-list.
        if ((not fields) or (k in fields)) and (not exclude or (k not in exclude)):
            if not isinstance(v, ManyToMany):
                t = v.get_value_for_datastore(self)
                # Lazy sentinel means the value is not loaded yet: refresh
                # the instance and read again.
                if t is Lazy:
                    self.refresh()
                    t = v.get_value_for_datastore(self)
                # Related model instances are reduced to their key.
                if isinstance(t, Model):
                    t = t._key
                d[k] = v.to_str(t)
            else:
                # ManyToMany: only dumped for explicit field lists, as a
                # comma-separated string of related values.
                if fields:
                    d[k] = ','.join([str(x) for x in getattr(self, v._lazy_value(), [])])
    # Ensure the primary key appears in a non-empty dump.
    if self._primary_field and d and self._primary_field not in d:
        d[self._primary_field] = str(self._key)
    return d
Dump current object to dict, but the value is string for manytomany fields will not automatically be dumpped, only when they are given in fields parameter
def NameImport(package, as_name=None, prefix=None):
    """Build an ``import <package> [as <as_name>]`` node.

    Accepts a package (Name node), an optional alias name (string), and an
    optional prefix.
    """
    if prefix is None:
        prefix = u""
    children = [Name(u"import", prefix=prefix), package]
    if as_name is not None:
        children.append(Name(u"as", prefix=u" "))
        children.append(Name(as_name, prefix=u" "))
    return Node(syms.import_name, children)
Accepts a package (Name node), name to import it as (string), and optional prefix and returns a node: import <package> [as <as_name>]
def convert2hdf5(ClassIn, platform_name, bandnames, scale=1e-06):
    """Retrieve original RSR data and convert to internal hdf5 format.

    *scale* is the number which has to be multiplied to the wavelength data
    in order to get it in the SI unit meter.
    """
    import h5py
    # Instantiate once to discover instrument name / output directory.
    instr = ClassIn(bandnames[0], platform_name)
    instr_name = instr.instrument.replace('/', '')
    filename = os.path.join(instr.output_dir,
                            "rsr_{0}_{1}.h5".format(instr_name, platform_name))
    with h5py.File(filename, "w") as h5f:
        h5f.attrs['description'] = ('Relative Spectral Responses for ' +
                                    instr.instrument.upper())
        h5f.attrs['platform_name'] = platform_name
        h5f.attrs['band_names'] = bandnames
        for chname in bandnames:
            sensor = ClassIn(chname, platform_name)
            grp = h5f.create_group(chname)
            # Drop NaN wavelengths (and the matching responses) before
            # computing the central wavelength.
            wvl = sensor.rsr['wavelength'][~np.isnan(sensor.rsr['wavelength'])]
            rsp = sensor.rsr['response'][~np.isnan(sensor.rsr['wavelength'])]
            grp.attrs['central_wavelength'] = get_central_wave(wvl, rsp)
            # The full (unfiltered) arrays are what gets stored.
            arr = sensor.rsr['wavelength']
            dset = grp.create_dataset('wavelength', arr.shape, dtype='f')
            dset.attrs['unit'] = 'm'
            dset.attrs['scale'] = scale
            dset[...] = arr
            arr = sensor.rsr['response']
            dset = grp.create_dataset('response', arr.shape, dtype='f')
            dset[...] = arr
Retrieve original RSR data and convert to internal hdf5 format. *scale* is the number which has to be multiplied to the wavelength data in order to get it in the SI unit meter
def eth_getCode(self, address, block=BLOCK_TAG_LATEST):
    """https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getcode

    :param address: Address of contract
    :type address: str
    :param block: Block tag or number (optional)
    :type block: int or BLOCK_TAGS
    :return: code
    :rtype: str
    """
    validated_block = validate_block(block)
    result = yield from self.rpc_call('eth_getCode', [address, validated_block])
    return result
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getcode :param address: Address of contract :type address: str :param block: Block tag or number (optional) :type block: int or BLOCK_TAGS :return: code :rtype: str
def save_fig_with_metadata(fig, filename, fig_kwds=None, **kwds):
    """Save a plot to file with metadata included.

    Keywords translate to metadata that is stored directly in the plot file.
    Limited format types available.

    Parameters
    ----------
    fig: matplotlib figure
        The matplotlib figure to save to the file
    filename: str
        Name of file to store the plot.
    fig_kwds: dict, optional
        Keyword arguments forwarded to the format-specific saver.
    """
    if fig_kwds is None:
        fig_kwds = {}
    extension = os.path.splitext(filename)[1]
    kwds['version'] = pycbc.version.git_verbose_msg
    # Fixed: only the saver lookup sits in the try, so a KeyError raised
    # *inside* a saver is no longer misreported as an unsupported extension;
    # the original cause is chained for debugging.
    try:
        saver = _metadata_saver[extension]
    except KeyError as err:
        raise TypeError('Cannot save file %s with metadata, extension %s not '
                        'supported at this time' % (filename, extension)) from err
    saver(fig, filename, fig_kwds, kwds)
Save plot to file with metadata included. Keywords translate to metadata that is stored directly in the plot file. Limited format types available. Parameters ---------- fig: matplotlib figure The matplotlib figure to save to the file filename: str Name of file to store the plot.
def roc(self):
    """ROC plot.

    Plots the receiver operating characteristic curve for the stored
    labels (``self.y_true``) against scores (``self.y_score``) on a
    freshly generated axes object.
    """
    return plot.roc(self.y_true, self.y_score, ax=_gen_ax())
ROC plot
def parse_package_for_version(name):
    """Search for a ``__version__`` variable in *name*'s ``__init__.py`` file
    and return its value.

    This function parses the source text. It does not load the module.
    """
    from utool import util_regex
    init_fpath = join(name, '__init__.py')
    version_errmsg = textwrap.dedent(
        '''
        You must include a __version__ variable in %s\'s __init__.py file.
        Try something like:
        __version__ = '1.0.0.dev1' ''' % (name,))
    if not exists(init_fpath):
        raise AssertionError(version_errmsg)
    val_regex = util_regex.named_field('version', '[0-9a-zA-Z.]+')
    regexstr = '__version__ *= *[\'"]' + val_regex

    def _parse_version_line(line):
        # Helper: strip whitespace, then extract the version via regex.
        stripped = line.replace(' ', '').replace('\t', '')
        match_dict = util_regex.regex_parse(regexstr, stripped)
        if match_dict is not None:
            return match_dict['version']

    # Find the version assignment in the source text.
    with open(init_fpath, 'r') as file_:
        for line in file_.readlines():
            if line.startswith('__version__'):
                version = _parse_version_line(line)
                if version is not None:
                    return version
    raise AssertionError(version_errmsg)
Searches for a variable named __version__ in name's __init__.py file and returns the value. This function parses the source text. It does not load the module.
def dump_json(token_dict, dump_path):
    """Write *token_dict* to *dump_path* as pretty-printed JSON.

    Parameters
    ----------
    token_dict : dict
        JSON-serializable data to write.
    dump_path : str
        Destination file path.
    """
    # Fixed: sys.version is a free-form string, and comparing it with '3'
    # is fragile (breaks for hypothetical major versions >= 10); compare the
    # structured sys.version_info instead.
    if sys.version_info >= (3,):
        with open(dump_path, 'w', encoding='utf-8') as output_file:
            json.dump(token_dict, output_file, indent=4)
    else:
        # Python 2 open() has no encoding parameter.
        with open(dump_path, 'w') as output_file:
            json.dump(token_dict, output_file, indent=4)
write json data to file
def execute_download_request(request):
    """
    Executes download request.

    :param request: DownloadRequest to be executed
    :type request: DownloadRequest
    :return: downloaded data or None
    :rtype: numpy array, other possible data type or None
    :raises: DownloadFailedException
    """
    # Saving a response requires a destination folder to be configured.
    if request.save_response and request.data_folder is None:
        raise ValueError('Data folder is not specified. '
                         'Please give a data folder name in the initialization of your request.')
    if not request.will_download:
        return None
    # Retry loop: up to max_download_attempts tries, with sleeps between
    # recoverable failures.
    try_num = SHConfig().max_download_attempts
    response = None
    while try_num > 0:
        try:
            if request.is_aws_s3():
                # boto3-style response: body is a stream.
                response = _do_aws_request(request)
                response_content = response['Body'].read()
            else:
                response = _do_request(request)
                response.raise_for_status()
                response_content = response.content
            LOGGER.debug('Successful download from %s', request.url)
            break
        except requests.RequestException as exception:
            try_num -= 1
            # Retry only on transient problems, 5xx errors, or rate limiting.
            if try_num > 0 and (_is_temporal_problem(exception) or
                                (isinstance(exception, requests.HTTPError) and
                                 exception.response.status_code >=
                                 requests.status_codes.codes.INTERNAL_SERVER_ERROR) or
                                _request_limit_reached(exception)):
                LOGGER.debug('Download attempt failed: %s\n%d attempts left, will retry in %ds',
                             exception, try_num, SHConfig().download_sleep_time)
                sleep_time = SHConfig().download_sleep_time
                if _request_limit_reached(exception):
                    # Back off harder when rate-limited.
                    sleep_time = max(sleep_time, 60)
                time.sleep(sleep_time)
            else:
                # Missing AWS metadata files get a dedicated exception type.
                if request.url.startswith(SHConfig().aws_metadata_url) and \
                        isinstance(exception, requests.HTTPError) and \
                        exception.response.status_code == requests.status_codes.codes.NOT_FOUND:
                    raise AwsDownloadFailedException('File in location %s is missing' % request.url)
                raise DownloadFailedException(_create_download_failed_message(exception, request.url))
    _save_if_needed(request, response_content)
    if request.return_data:
        return decode_data(response_content, request.data_type, entire_response=response)
    return None
Executes download request. :param request: DownloadRequest to be executed :type request: DownloadRequest :return: downloaded data or None :rtype: numpy array, other possible data type or None :raises: DownloadFailedException
def submit_job(self, bundle, job_config=None):
    """Submit a Streams Application Bundle (sab file) to this Streaming
    Analytics service.

    Args:
        bundle(str): path to a Streams application bundle (sab file)
            containing the application to be submitted
        job_config(JobConfig): a job configuration overlay

    Returns:
        dict: JSON response from service containing 'name' field with unique
        job name assigned to submitted job, or, 'error_status' and
        'description' fields if submission was unsuccessful.
    """
    # Pure delegation to the service-specific implementation.
    return self._delegator._submit_job(bundle=bundle, job_config=job_config)
Submit a Streams Application Bundle (sab file) to this Streaming Analytics service. Args: bundle(str): path to a Streams application bundle (sab file) containing the application to be submitted job_config(JobConfig): a job configuration overlay Returns: dict: JSON response from service containing 'name' field with unique job name assigned to submitted job, or, 'error_status' and 'description' fields if submission was unsuccessful.
def raw_clean(self, datas):
    """Apply a cleaning pass on raw datas and return the surviving tokens."""
    cleaned = strip_tags(datas)               # Remove HTML
    cleaned = STOP_WORDS.rebase(cleaned, '')  # Remove STOP WORDS
    cleaned = PUNCTUATION.sub('', cleaned)    # Remove punctuation
    cleaned = cleaned.lower()
    return [token for token in cleaned.split() if len(token) > 1]
Apply a cleaning on raw datas.
def get_view(self):
    """Return the page widget to display.

    A cached widget is reused when available; otherwise the view is
    initialized. With deferred loading enabled, the view is wrapped in a
    FrameLayout and the real view is added via a deferred call.
    """
    d = self.declaration
    if d.cached and self.widget:
        return self.widget
    if not d.defer_loading:
        self.widget = self.load_view()
        return self.widget
    self.widget = FrameLayout(self.get_context())
    app = self.get_context()
    app.deferred_call(lambda: self.widget.addView(self.load_view(), 0))
    return self.widget
Get the page to display. If a view has already been created and is cached, use that otherwise initialize the view and proxy. If defer loading is used, wrap the view in a FrameLayout and defer add view until later.
def purge_archives(self):
    """Delete older archived items.

    The class attribute NUM_KEEP_ARCHIVED controls how many items are kept.
    """
    klass = self.get_version_class()
    stale = (klass.normal
             .filter(object_id=self.object_id, state=self.ARCHIVED)
             .order_by('-last_save'))[self.NUM_KEEP_ARCHIVED:]
    for item in stale:
        item._delete_reverses()
        klass.normal.filter(vid=item.vid).delete()
Delete older archived items. Use the class attribute NUM_KEEP_ARCHIVED to control how many items are kept.
def migratable_vc_domains(self):
    """Lazily build and return the VC Migration Manager API client.

    Returns:
        MigratableVcDomains:
    """
    client = self.__migratable_vc_domains
    if not client:
        client = MigratableVcDomains(self.__connection)
        self.__migratable_vc_domains = client
    return client
Gets the VC Migration Manager API client. Returns: MigratableVcDomains:
def deserialize(self, obj):
    """Deserialize an object received from the front-end.

    Immutable payloads are returned directly; mutable ones resolve to a
    (possibly newly registered) JSObject proxy keyed by guid.
    """
    if obj['immutable']:
        return obj['value']
    guid = obj['value']
    if guid not in object_registry:
        object_registry[guid] = JSObject(self, guid)
    return object_registry[guid]
Deserialize an object from the front-end.
def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile): """ NWSRFS Read from File Method """ # Set file extension property self.fileExtension = extension # Open file and parse with open(path, 'r') as nwsrfsFile: for line in nwsrfsFile: sline = line.strip().split() # Cases if sline[0].lower() == 'number_bands:': self.numBands = sline[1] elif sline[0].lower() == 'lower_elevation': """DO NOTHING""" else: # Create GSSHAPY NwsrfsRecord object record = NwsrfsRecord(lowerElev=sline[0], upperElev=sline[1], mfMin=sline[2], mfMax=sline[3], scf=sline[4], frUse=sline[5], tipm=sline[6], nmf=sline[7], fua=sline[8], plwhc=sline[9]) # Associate NwsrfsRecord with NwsrfsFile record.nwsrfsFile = self
NWSRFS Read from File Method
def changeLogType(self):
    '''Populate the log program list to correspond with the log type selection.'''
    logType = self.selectedType()
    # Fixed: the original fetched self.logList.get(logType)[0] BEFORE checking
    # membership, so an unknown log type raised TypeError on None[0].
    # Guard the lookup first; unknown types now leave the list untouched.
    if logType in self.logList:
        programs, default = self.logList[logType][0], self.logList[logType][1]
        self.programName.clear()
        self.programName.addItems(programs)
        self.programName.setCurrentIndex(programs.index(default))
Populate log program list to correspond with log type selection.
def get_monolayer(self):
    """Return the primitive unit surface area density of the adsorbate."""
    area = self.get_unit_primitive_area
    surface_count = self.Nsurfs_ads_in_slab
    adsorbate_count = self.Nads_in_slab
    return adsorbate_count / (area * surface_count)
Returns the primitive unit surface area density of the adsorbate.
def use(self, *middleware: MiddlewareType) -> None:
    """Register middleware.

    :param middleware: The middleware function(s); non-middleware arguments
        are silently ignored.
    """
    self.middleware.extend(m for m in middleware if is_middleware(m))
Register Middleware :param middleware: The Middleware Function
def to_simplex(y):
    r"""
    Interpret the last index of ``y`` as stick-breaking fractions in logit
    space and return a non-negative array of the same shape whose last
    dimension always sums to unity.

    The entries :math:`y_k=\operatorname{logit}(z_k)+\log(K-k)` (with
    :math:`y_K=0`) encode breaking fractions :math:`z_k` of the remaining
    unit stick, so that :math:`x_k = z_k(1-z_{k-1})\cdots(1-z_1)` recovers a
    point on the unit simplex. The :math:`\log(K-k)` shift maps the uniform
    simplex to the origin. Inverse to :func:`from_simplex`.

    :param np.ndarray: Array of logit space stick breaking fractions along
        the last index.
    :rtype: ``np.ndarray``
    """
    n = y.shape[-1]
    # Undo the logit transform (with the log(K - k) shift) to recover the
    # breaking fractions z in [0, 1]; z[..., -1] is always 1.
    shift = np.log(n - np.arange(1, n + 1))
    z = expit(y - shift)
    # Product of the stick remaining before each break.
    remaining = (1 - z[..., :-1]).cumprod(axis=-1)
    x = np.empty(y.shape)
    x[..., 0] = z[..., 0]
    x[..., 1:] = z[..., 1:] * remaining
    return x
r""" Interprets the last index of ``y`` as stick breaking fractions in logit space and returns a non-negative array of the same shape where the last dimension always sums to unity. A unit simplex is a list of non-negative numbers :math:`(x_1,...,x_K)` that sum to one, :math:`\sum_{k=1}^K x_k=1`, for example, the probabilities of an K-sided die. It is sometimes desireable to parameterize this object with variables that are unconstrained and "decorrelated". To this end, we imagine :math:`\vec{x}` as a partition of the unit stick :math:`[0,1]` with :math:`K-1` break points between :math:`K` successive intervals of respective lengths :math:`(x_1,...,x_K)`. Instead of storing the interval lengths, we start from the left-most break point and iteratively store the breaking fractions, :math:`z_k`, of the remaining stick. This gives the formula :math:`z_k=x_k / (1-\sum_{k'=1}^{k-1}x_k)` with the convention :math:`x_0:=0`, which has an inverse formula :math:`x_k = z_k(1-z_{k-1})\cdots(1-z_1)`. Note that :math:`z_K=1` since the last stick is not broken; this is the result of the redundant information imposed by :math:`\sum_{k=1}^K x_k=1`. To unbound the parameters :math:`z_k` into the real line, we pass through the logit function, :math:`\operatorname{logit}(p)=\log\frac{p}{1-p}`, to end up with the parameterization :math:`y_k=\operatorname{logit}(z_k)+\log(K-k)`, with the convention :math:`y_K=0`. The shift by :math:`\log(K-k)` is largely asthetic and causes the uniform simplex :math:`\vec{x}=(1/K,1/K,...,1/K)` to be mapped to :math:`\vec{x}=(0,0,...,0)`. Inverse to :func:`from_simplex`. :param np.ndarray: Array of logit space stick breaking fractions along the last index. :rtype: ``np.ndarray``
def survival_rate(work_db):
    """Calculate the survival rate (percentage of unkilled mutants) for the
    results in a WorkDB.
    """
    total = work_db.num_results
    if not total:
        return 0
    killed = sum(result.is_killed for _, result in work_db.results)
    return (1 - killed / total) * 100
Calculate the survival rate for the results in a WorkDB.
def _temporal_distance_cdf(self):
    """
    Temporal distance cumulative density function.

    Returns
    -------
    x_values: numpy.array
        values for the x-axis
    cdf: numpy.array
        cdf values
    """
    # Collect the distinct finite temporal-distance breakpoints.
    distance_split_points = set()
    for block in self._profile_blocks:
        if block.distance_start != float('inf'):
            distance_split_points.add(block.distance_end)
            distance_split_points.add(block.distance_start)

    distance_split_points_ordered = numpy.array(sorted(list(distance_split_points)))
    temporal_distance_split_widths = distance_split_points_ordered[1:] - distance_split_points_ordered[:-1]
    trip_counts = numpy.zeros(len(temporal_distance_split_widths))
    # Blocks with zero extent contribute Dirac-delta mass at a single point.
    delta_peaks = defaultdict(lambda: 0)

    for block in self._profile_blocks:
        if block.distance_start == block.distance_end:
            delta_peaks[block.distance_end] += block.width()
        else:
            # Count how many blocks cover each inter-breakpoint interval.
            start_index = numpy.searchsorted(distance_split_points_ordered, block.distance_end)
            end_index = numpy.searchsorted(distance_split_points_ordered, block.distance_start)
            trip_counts[start_index:end_index] += 1

    unnormalized_cdf = numpy.array([0] + list(numpy.cumsum(temporal_distance_split_widths * trip_counts)))
    # Sanity check: total mass must equal the analysis window minus the
    # delta-peak mass (up to a small tolerance).
    if not (numpy.isclose(
            [unnormalized_cdf[-1]],
            [self._end_time - self._start_time - sum(delta_peaks.values())], atol=1E-4).all()):
        print(unnormalized_cdf[-1], self._end_time - self._start_time - sum(delta_peaks.values()))
        raise RuntimeError("Something went wrong with cdf computation!")

    if len(delta_peaks) > 0:
        for peak in delta_peaks.keys():
            if peak == float('inf'):
                continue
            # Insert a duplicated x-value so the delta peak renders as a
            # vertical jump in the cdf.
            index = numpy.nonzero(distance_split_points_ordered == peak)[0][0]
            unnormalized_cdf = numpy.insert(unnormalized_cdf, index, unnormalized_cdf[index])
            distance_split_points_ordered = numpy.insert(distance_split_points_ordered, index,
                                                         distance_split_points_ordered[index])
            # walk_waiting_time_fraction = walk_total_time / (self.end_time_dep - self.start_time_dep)
            unnormalized_cdf[(index + 1):] = unnormalized_cdf[(index + 1):] + delta_peaks[peak]

    # Normalize; any infinite-distance mass stays in the denominator only.
    norm_cdf = unnormalized_cdf / (unnormalized_cdf[-1] + delta_peaks[float('inf')])
    return distance_split_points_ordered, norm_cdf
Temporal distance cumulative density function. Returns ------- x_values: numpy.array values for the x-axis cdf: numpy.array cdf values
def d3logpdf_dlink3(self, link_f, y, Y_metadata=None):
    """
    Third order derivative of the log-likelihood at y given link(f),
    w.r.t. link(f), with optional censoring indicators.

    :param link_f: latent variables link(f)
    :type link_f: Nx1 array
    :param y: data
    :type y: Nx1 array
    :param Y_metadata: optional dict; key 'censored' holds a 0/1 indicator
        array marking censored observations
    :returns: third derivative of likelihood evaluated at points f
    :rtype: Nx1 array
    """
    censoring = np.zeros_like(y)
    if Y_metadata is not None and 'censored' in Y_metadata.keys():
        censoring = Y_metadata['censored']
    y_pow_r = y ** self.r
    link_f4 = link_f ** 4
    uncensored_term = (1 - censoring) * (-2 / link_f ** 3 + 6 * y_pow_r / link_f4)
    censored_term = censoring * 6 * y_pow_r / link_f4
    return uncensored_term + censored_term
Third order derivative log-likelihood function at y given link(f) w.r.t link(f) .. math:: \\frac{d^{3} \\ln p(y_{i}|\lambda(f_{i}))}{d^{3}\\lambda(f)} = -\\beta^{3}\\frac{d^{2}\\Psi(\\alpha_{i})}{d\\alpha_{i}}\\\\ \\alpha_{i} = \\beta y_{i} :param link_f: latent variables link(f) :type link_f: Nx1 array :param y: data :type y: Nx1 array :param Y_metadata: Y_metadata which is not used in gamma distribution :returns: third derivative of likelihood evaluated at points f :rtype: Nx1 array
async def start(self):
    """Start the supervisor server and mirror its bound port onto this object."""
    server = self.server
    await server.start()
    self.port = server.port
Start the supervisor server.
def reject_source(ident, comment):
    '''Reject a source for automatic harvesting.'''
    source = get_source(ident)
    validation = source.validation
    validation.on = datetime.now()
    validation.comment = comment
    validation.state = VALIDATION_REFUSED
    if current_user.is_authenticated:
        validation.by = current_user._get_current_object()
    source.save()
    return source
Reject a source for automatic harvesting
def Histograms(self, run, tag):
    """Retrieve the histogram events associated with a run and tag.

    Args:
      run: A string name of the run for which values are retrieved.
      tag: A string name of the tag for which values are retrieved.

    Raises:
      KeyError: If the run is not found, or the tag is not available for the
        given run.

    Returns:
      An array of `event_accumulator.HistogramEvents`.
    """
    return self.GetAccumulator(run).Histograms(tag)
Retrieve the histogram events associated with a run and tag. Args: run: A string name of the run for which values are retrieved. tag: A string name of the tag for which values are retrieved. Raises: KeyError: If the run is not found, or the tag is not available for the given run. Returns: An array of `event_accumulator.HistogramEvents`.
def resize_image_with_crop_or_pad(image, target_height, target_width, dynamic_shape=False):
    """Crops and/or pads an image to a target width and height.

    Resizes an image to a target width and height by either centrally
    cropping the image or padding it evenly with zeros.

    If `width` or `height` is greater than the specified `target_width` or
    `target_height` respectively, this op centrally crops along that
    dimension. If `width` or `height` is smaller than the specified
    `target_width` or `target_height` respectively, this op centrally pads
    with 0 along that dimension.

    Args:
      image: 3-D tensor of shape [height, width, channels]
      target_height: Target height.
      target_width: Target width.
      dynamic_shape: Whether the input image has undetermined shape. If set to
        `True`, shape information will be retrieved at run time. Default to
        `False`.

    Raises:
      ValueError: if `target_height` or `target_width` are zero or negative.

    Returns:
      Cropped and/or padded image of shape
        `[target_height, target_width, channels]`
    """
    image = ops.convert_to_tensor(image, name='image')
    _Check3DImage(image, require_static=(not dynamic_shape))
    original_height, original_width, _ = _ImageDimensions(image, dynamic_shape=dynamic_shape)

    if target_width <= 0:
        raise ValueError('target_width must be > 0.')
    if target_height <= 0:
        raise ValueError('target_height must be > 0.')

    # With dynamic shapes the comparisons must happen inside the graph, so
    # graph ops replace the Python builtins.
    if dynamic_shape:
        max_ = math_ops.maximum
        min_ = math_ops.minimum
    else:
        max_ = max
        min_ = min

    # Positive diff -> pad evenly; negative diff -> crop evenly.
    width_diff = target_width - original_width
    offset_crop_width = max_(-width_diff // 2, 0)
    offset_pad_width = max_(width_diff // 2, 0)

    height_diff = target_height - original_height
    offset_crop_height = max_(-height_diff // 2, 0)
    offset_pad_height = max_(height_diff // 2, 0)

    # Maybe crop if needed.
    cropped = crop_to_bounding_box(image, offset_crop_height, offset_crop_width,
                                   min_(target_height, original_height),
                                   min_(target_width, original_width),
                                   dynamic_shape=dynamic_shape)

    # Maybe pad if needed.
    resized = pad_to_bounding_box(cropped, offset_pad_height, offset_pad_width,
                                  target_height, target_width,
                                  dynamic_shape=dynamic_shape)

    # Static-shape sanity checks on the result.
    if resized.get_shape().ndims is None:
        raise ValueError('resized contains no shape.')
    if not resized.get_shape()[0].is_compatible_with(target_height):
        raise ValueError('resized height is not correct.')
    if not resized.get_shape()[1].is_compatible_with(target_width):
        raise ValueError('resized width is not correct.')
    return resized
Crops and/or pads an image to a target width and height. Resizes an image to a target width and height by either centrally cropping the image or padding it evenly with zeros. If `width` or `height` is greater than the specified `target_width` or `target_height` respectively, this op centrally crops along that dimension. If `width` or `height` is smaller than the specified `target_width` or `target_height` respectively, this op centrally pads with 0 along that dimension. Args: image: 3-D tensor of shape [height, width, channels] target_height: Target height. target_width: Target width. dynamic_shape: Whether the input image has undertermined shape. If set to `True`, shape information will be retrieved at run time. Default to `False`. Raises: ValueError: if `target_height` or `target_width` are zero or negative. Returns: Cropped and/or padded image of shape `[target_height, target_width, channels]`
def _submit(primitive, port_index, tuple_):
    """Internal helper: submit a tuple on the given output port."""
    payload = (_get_opc(primitive), port_index, tuple_)
    _ec._submit(payload)
Internal method to submit a tuple
def notify_about_new_variables(callback):
    """Calls `callback(var)` for all newly created variables.

    Callback should not modify the variable passed in. Use cases that require
    variables to be modified should use `variable_creator_scope` directly and
    sit within the variable creator stack.

    >>> variables = []
    >>> with notify_about_variables(variables.append):
    ...   v = tf.Variable(1.0, name='v')
    ...   w = tf.get_variable('w', [])
    >>> assert variables == [v, w]

    Args:
      callback: a callable taking a single argument which is a tf.Variable.

    Yields:
      `None` - used for contextmanager API.
    """
    def _tracking_creator(getter, **kwargs):
        # Delegate the actual creation, then report the new variable.
        v = getter(**kwargs)
        callback(v)
        return v

    # NOTE(review): this is a generator intended to be wrapped by
    # contextlib.contextmanager — confirm the decorator is applied at the
    # definition site (not visible here).
    with tf.variable_creator_scope(_tracking_creator):
        yield
Calls `callback(var)` for all newly created variables. Callback should not modify the variable passed in. Use cases that require variables to be modified should use `variable_creator_scope` directly and sit within the variable creator stack. >>> variables = [] >>> with notify_about_variables(variables.append): ... v = tf.Variable(1.0, name='v') ... w = tf.get_variable('w', []) >>> assert variables == [v, w] Args: callback: a callable taking a single argument which is a tf.Variable. Yields: `None` - used for contextmanager API.
def scalars_impl(self, run, tag_regex_string):
    """Given a tag regex and single run, return ScalarEvents.

    Args:
        run: A run string.
        tag_regex_string: A regular expression that captures portions of tags.

    Raises:
        ValueError: if the scalars plugin is not registered.

    Returns:
        A dictionary that is the JSON-able response.
    """
    if not tag_regex_string:
        # The user provided no regex.
        return {
            _REGEX_VALID_PROPERTY: False,
            _TAG_TO_EVENTS_PROPERTY: {},
        }

    # Construct the regex.  An invalid regex is reported back to the client
    # (regex_valid=False) instead of raising.
    try:
        regex = re.compile(tag_regex_string)
    except re.error:
        return {
            _REGEX_VALID_PROPERTY: False,
            _TAG_TO_EVENTS_PROPERTY: {},
        }

    # Fetch the tags for the run. Filter for tags that match the regex.
    run_to_data = self._multiplexer.PluginRunToTagToContent(
        scalars_metadata.PLUGIN_NAME)

    tag_to_data = None
    try:
        tag_to_data = run_to_data[run]
    except KeyError:
        # The run could not be found. Perhaps a configuration specified a run that
        # TensorBoard has not read from disk yet.
        payload = {}

    # NOTE(review): if the run exists but maps to an empty dict, `payload`
    # is never bound before the return below -- presumably
    # PluginRunToTagToContent never yields empty per-run dicts; confirm.
    if tag_to_data:
        scalars_plugin_instance = self._get_scalars_plugin()
        if not scalars_plugin_instance:
            raise ValueError(('Failed to respond to request for /scalars. '
                              'The scalars plugin is oddly not registered.'))

        form = scalars_plugin.OutputFormat.JSON
        payload = {
            tag: scalars_plugin_instance.scalars_impl(tag, run, None, form)[0]
            for tag in tag_to_data.keys()
            if regex.match(tag)
        }

    return {
        _REGEX_VALID_PROPERTY: True,
        _TAG_TO_EVENTS_PROPERTY: payload,
    }
Given a tag regex and single run, return ScalarEvents. Args: run: A run string. tag_regex_string: A regular expression that captures portions of tags. Raises: ValueError: if the scalars plugin is not registered. Returns: A dictionary that is the JSON-able response.
def _build_full_partition(
        optional_parts, sequence_var_partition: Sequence[int], subjects: Sequence[Expression],
        operation: Operation
) -> List[Sequence[Expression]]:
    """Distribute subject operands among pattern operands.

    Given a partitioning for the variable part of the operands (i.e. a list
    of how many extra operands each sequence variable gets assigned), slice
    the subject sequence so that every pattern operand receives its share.

    Args:
        optional_parts: Subject counts assigned to each optional operand.
        sequence_var_partition: Extra subject counts for each sequence variable.
        subjects: The subject operands to distribute.
        operation: The pattern operation whose operands are being matched.

    Returns:
        A list with one sequence of subject expressions per pattern operand.
    """
    # Materialize the subjects once.  The previous version re-built
    # ``list(op_iter(subjects))`` inside the loop for every operand, making
    # the function quadratic in the number of subjects.
    subject_list = list(op_iter(subjects))
    i = 0
    var_index = 0
    opt_index = 0
    result = []
    for operand in op_iter(operation):
        wrap_associative = False
        if isinstance(operand, Wildcard):
            # A plain wildcard consumes its minimum count; an optional one
            # consumes nothing by default.
            count = operand.min_count if operand.optional is None else 0
            if not operand.fixed_size or isinstance(operation, AssociativeOperation):
                count += sequence_var_partition[var_index]
                var_index += 1
                # Fixed-size wildcards under an associative operation may need
                # their surplus operands re-wrapped into a nested operation.
                wrap_associative = operand.fixed_size and operand.min_count
        elif operand.optional is not None:
            count = optional_parts[opt_index]
            opt_index += 1
        else:
            count = 1
        operand_expressions = subject_list[i:i + count]
        i += count
        if wrap_associative and len(operand_expressions) > wrap_associative:
            fixed = wrap_associative - 1
            operand_expressions = tuple(operand_expressions[:fixed]) + (
                create_operation_expression(operation, operand_expressions[fixed:]),
            )
        result.append(operand_expressions)
    return result
Distribute subject operands among pattern operands. Given a partitioning for the variable part of the operands (i.e. a list of how many extra operands each sequence variable gets assigned).
def sig_cmp(sig1, sig2):
    """
    Compares two normalized type signatures for validation purposes.

    Returns a falsy value (False) when the signatures are incompatible.
    When every positional type matches, returns a tuple of the per-position
    match results; when the match extends over keyword parameters as well,
    returns a ``(positional_matches, keyword_matches)`` pair.
    """
    types1 = sig1.required
    types2 = sig2.required
    # Signatures of different arity can never match.
    if len(types1) != len(types2):
        return False
    dup_pos = []
    dup_kw = {}
    # Compare positional types pairwise; the for/else fires only when the
    # loop completes without hitting a mismatch (no break).
    for t1, t2 in zip(types1, types2):
        match = type_cmp(t1, t2)
        if match:
            dup_pos.append(match)
        else:
            break
    else:
        return tuple(dup_pos)
    # Positional prefix matched partially; the remainder is compared as
    # keyword parameters (order-insensitive).
    kw_range = slice(len(dup_pos), len(types1))
    kwds1 = sig1.parameters[kw_range]
    kwds2 = sig2.parameters[kw_range]
    if set(kwds1) != set(kwds2):
        return False
    kwtypes1 = dict(zip(sig1.parameters, types1))
    kwtypes2 = dict(zip(sig2.parameters, types2))
    # Again for/else: success only if every keyword type comparison matches.
    for kw in kwds1:
        match = type_cmp(kwtypes1[kw], kwtypes2[kw])
        if match:
            dup_kw[kw] = match
        else:
            break
    else:
        return tuple(dup_pos), dup_kw
    return False
Compares two normalized type signatures for validation purposes.
def get_node_by_id(self, node_id):
    """
    Gets a node with requested ID. Returns a tuple, where first value is node ID,
    second - a dictionary of all node attributes. Returns ``None`` implicitly
    when no node with that ID exists.

    :param node_id: string with ID of node.
    """
    matches = (node for node in self.diagram_graph.nodes(data=True)
               if node[0] == node_id)
    return next(matches, None)
Gets a node with requested ID. Returns a tuple, where first value is node ID, second - a dictionary of all node attributes. :param node_id: string with ID of node.
def _process_if(self, node): """Process an if node.""" creg_name = node.children[0].name creg = self.dag.cregs[creg_name] cval = node.children[1].value self.condition = (creg, cval) self._process_node(node.children[2]) self.condition = None
Process an if node.
def postSolve(self): ''' This method adds consumption at m=0 to the list of stable arm points, then constructs the consumption function as a cubic interpolation over those points. Should be run after the backshooting routine is complete. Parameters ---------- none Returns ------- none ''' # Add bottom point to the stable arm points self.solution[0].mNrm_list.insert(0,0.0) self.solution[0].cNrm_list.insert(0,0.0) self.solution[0].MPC_list.insert(0,self.MPCmax) # Construct an interpolation of the consumption function from the stable arm points self.solution[0].cFunc = CubicInterp(self.solution[0].mNrm_list,self.solution[0].cNrm_list,self.solution[0].MPC_list,self.PFMPC*(self.h-1.0),self.PFMPC) self.solution[0].cFunc_U = lambda m : self.PFMPC*m
This method adds consumption at m=0 to the list of stable arm points, then constructs the consumption function as a cubic interpolation over those points. Should be run after the backshooting routine is complete. Parameters ---------- none Returns ------- none
def fit(self, X):
    """Fit the PyNNDescent transformer to build KNN graphs with
    neighbors given by the dataset X.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Sample data

    Returns
    -------
    transformer : PyNNDescentTransformer
        The trained transformer
    """
    self.n_samples_fit = X.shape[0]
    # NNDescent requires a dict for metric keyword arguments, not None.
    if self.metric_kwds is None:
        metric_kwds = {}
    else:
        metric_kwds = self.metric_kwds
    # NOTE(review): every argument below is passed positionally, so this
    # ordering must exactly match the NNDescent constructor signature --
    # verify against the installed pynndescent version.
    self.pynndescent_ = NNDescent(
        X,
        self.metric,
        metric_kwds,
        self.n_neighbors,
        self.n_trees,
        self.leaf_size,
        self.pruning_level,
        self.tree_init,
        self.random_state,
        self.algorithm,
        self.max_candidates,
        self.n_iters,
        self.early_termination_value,
        self.sampling_rate,
    )
    return self
Fit the PyNNDescent transformer to build KNN graphs with neighbors given by the dataset X. Parameters ---------- X : array-like, shape (n_samples, n_features) Sample data Returns ------- transformer : PyNNDescentTransformer The trained transformer
def prune(self, cutoff: int = 2): """ Prunes the CAG by removing redundant paths. If there are multiple (directed) paths between two nodes, this function removes all but the longest paths. Subsequently, it restricts the graph to the largest connected component. Args: cutoff: The maximum path length to consider for finding redundant paths. Higher values of this parameter correspond to more aggressive pruning. """ # Remove redundant paths. for node_pair in tqdm(list(permutations(self.nodes(), 2))): paths = [ list(pairwise(path)) for path in nx.all_simple_paths(self, *node_pair, cutoff) ] if len(paths) > 1: for path in paths: if len(path) == 1: self.delete_edge(*path[0]) if any(self.degree(n) == 0 for n in path[0]): self.add_edge(*path[0]) break
Prunes the CAG by removing redundant paths. If there are multiple (directed) paths between two nodes, this function removes all but the longest paths. Subsequently, it restricts the graph to the largest connected component. Args: cutoff: The maximum path length to consider for finding redundant paths. Higher values of this parameter correspond to more aggressive pruning.
def convert_notebook(self, name):
    """Converts a notebook into a python file."""
    relative_path = self.convert_path(name)
    notebook_file = self.get_path("%s.ipynb" % relative_path)
    exporter = nbconvert.exporters.python.PythonExporter()
    source = exporter.from_filename(notebook_file)[0]
    self.write_code(name, source)
    self.clean_code(name, [])
Converts a notebook into a python file.
def is_repository(self, path):
    """
    Check if there is a Repository in path.

    :Parameters:
        #. path (string): The real path of the directory where to check if
           there is a repository.

    :Returns:
        #. result (boolean): Whether it's a repository or not.
    """
    # Empty or '.' means "the current working directory".
    if path.strip() in ('','.'):
        path = os.getcwd()
    repoPath = os.path.realpath( os.path.expanduser(path) )
    if os.path.isfile( os.path.join(repoPath,self.__repoFile) ):
        return True
    else:
        # Fall back to the legacy (2.x.y) repository layout; any failure
        # while probing it is treated as "not a repository".
        try:
            from .OldRepository import Repository
            REP = Repository()
            result = REP.is_repository(repoPath)
        except:
            return False
        else:
            if result:
                warnings.warn("This is an old repository version 2.x.y! Make sure to start using repositories 3.x.y ")
            return result
Check if there is a Repository in path. :Parameters: #. path (string): The real path of the directory where to check if there is a repository. :Returns: #. result (boolean): Whether it's a repository or not.
def clear_weights(self):
    '''
    clear weights of the graph: mark it unweighted and drop every layer's weights
    '''
    self.weighted = False
    for graph_layer in self.layer_list:
        graph_layer.weights = None
clear weights of the graph
def get_processed_data(self, *args, **kwargs):
    """
    Get and process forecast data.

    Parameters
    ----------
    *args: positional arguments
        Passed to get_data
    **kwargs: keyword arguments
        Passed to get_data and process_data

    Returns
    -------
    data: DataFrame
        Processed forecast data
    """
    raw = self.get_data(*args, **kwargs)
    return self.process_data(raw, **kwargs)
Get and process forecast data. Parameters ---------- *args: positional arguments Passed to get_data **kwargs: keyword arguments Passed to get_data and process_data Returns ------- data: DataFrame Processed forecast data
def process(self, batch, device=None):
    """ Process a list of examples to create a torch.Tensor.

    Pad, numericalize, and postprocess a batch and create a tensor.

    Args:
        batch (list(object)): A list of object from a batch of examples.
    Returns:
        torch.autograd.Variable: Processed object given the input
        and custom postprocessing Pipeline.
    """
    return self.numericalize(self.pad(batch), device=device)
Process a list of examples to create a torch.Tensor. Pad, numericalize, and postprocess a batch and create a tensor. Args: batch (list(object)): A list of object from a batch of examples. Returns: torch.autograd.Variable: Processed object given the input and custom postprocessing Pipeline.
def validate(self, auth_rest):
    """Validate user credentials whether format is right for Sha1

    :param auth_rest: User credentials' part without auth_type
    :return: Dict with a hash and a salt part of user credentials
    :raises ValueError: If credentials' part doesn't contain delimiter
                        between a salt and a hash.
    """
    try:
        salt_part, hash_part = auth_rest.split('$')
    except ValueError:
        raise ValueError("Missing '$' in %s" % auth_rest)

    if not salt_part:
        raise ValueError("Salt must have non-zero length!")
    if len(hash_part) != 40:
        raise ValueError("Hash must have 40 chars!")
    if any(char not in string.hexdigits for char in hash_part):
        raise ValueError("Hash must be hexadecimal!")

    return dict(salt=salt_part, hash=hash_part)
Validate user credentials whether format is right for Sha1 :param auth_rest: User credentials' part without auth_type :return: Dict with a hash and a salt part of user credentials :raises ValueError: If credentials' part doesn't contain delimiter between a salt and a hash.
def add_color_stop_rgb(self, offset, red, green, blue):
    """Same as :meth:`add_color_stop_rgba` with ``alpha=1``.
    Kept for compatibility with pycairo.

    :param offset: Location of the stop along the gradient's control vector.
    :param red: Red component of the stop color.
    :param green: Green component of the stop color.
    :param blue: Blue component of the stop color.
    """
    # Delegate to the C cairo API, then surface any cairo error as a
    # Python exception.
    cairo.cairo_pattern_add_color_stop_rgb(
        self._pointer, offset, red, green, blue)
    self._check_status()
Same as :meth:`add_color_stop_rgba` with ``alpha=1``. Kept for compatibility with pycairo.
def is_initialised( self ):
    """
    Check whether the simulation has been initialised; raises if any
    required piece of setup is missing.

    Args:
        None

    Returns:
        None
    """
    if not self.lattice:
        raise AttributeError('Running a simulation needs the lattice to be initialised')
    if not self.atoms:
        raise AttributeError('Running a simulation needs the atoms to be initialised')
    if not (self.number_of_jumps or self.for_time):
        raise AttributeError('Running a simulation needs number_of_jumps or for_time to be set')
Check whether the simulation has been initialised. Args: None Returns: None
def save_figures(image_path, fig_count, gallery_conf):
    """Save all open matplotlib figures of the example code-block

    Parameters
    ----------
    image_path : str
        Path where plots are saved (format string which accepts figure number)
    fig_count : int
        Previous figure number count. Figure number add from this number
    gallery_conf : dict
        Gallery configuration; ``find_mayavi_figures`` enables saving mayavi
        scenes as well.

    Returns
    -------
    list of strings containing the full path to each figure
    """
    figure_list = []

    fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
    for fig_mngr in fig_managers:
        # Set the fig_num figure as the current figure as we can't
        # save a figure that's not the current figure.
        fig = plt.figure(fig_mngr.num)
        kwargs = {}
        to_rgba = matplotlib.colors.colorConverter.to_rgba
        # Only forward facecolor/edgecolor when they differ from the rc
        # defaults, so default-styled figures keep savefig's own defaults.
        for attr in ['facecolor', 'edgecolor']:
            fig_attr = getattr(fig, 'get_' + attr)()
            default_attr = matplotlib.rcParams['figure.' + attr]
            if to_rgba(fig_attr) != to_rgba(default_attr):
                kwargs[attr] = fig_attr

        current_fig = image_path.format(fig_count + fig_mngr.num)
        fig.savefig(current_fig, **kwargs)
        figure_list.append(current_fig)

    if gallery_conf.get('find_mayavi_figures', False):
        from mayavi import mlab
        e = mlab.get_engine()
        # Mayavi figures are numbered after the matplotlib ones.
        last_matplotlib_fig_num = len(figure_list)
        total_fig_num = last_matplotlib_fig_num + len(e.scenes)
        mayavi_fig_nums = range(last_matplotlib_fig_num, total_fig_num)

        for scene, mayavi_fig_num in zip(e.scenes, mayavi_fig_nums):
            current_fig = image_path.format(mayavi_fig_num)
            mlab.savefig(current_fig, figure=scene)
            # make sure the image is not too large
            scale_image(current_fig, current_fig, 850, 999)
            figure_list.append(current_fig)
        mlab.close(all=True)

    return figure_list
Save all open matplotlib figures of the example code-block Parameters ---------- image_path : str Path where plots are saved (format string which accepts figure number) fig_count : int Previous figure number count. Figure number add from this number Returns ------- list of strings containing the full path to each figure
def coerce(from_, to, **to_kwargs):
    """
    A preprocessing decorator that coerces inputs of a given type by passing
    them to a callable.

    Parameters
    ----------
    from : type or tuple or types
        Inputs types on which to call ``to``.
    to : function
        Coercion function to call on inputs.
    **to_kwargs
        Additional keywords to forward to every call to ``to``.

    Examples
    --------
    >>> @preprocess(x=coerce(float, int), y=coerce(float, int))
    ... def floordiff(x, y):
    ...     return x - y
    ...
    >>> floordiff(3.2, 2.5)
    1

    >>> @preprocess(x=coerce(str, int, base=2), y=coerce(str, int, base=2))
    ... def add_binary_strings(x, y):
    ...     return bin(x + y)[2:]
    ...
    >>> add_binary_strings('101', '001')
    '110'
    """
    def preprocessor(func, argname, arg):
        # Leave values of other types untouched.
        if not isinstance(arg, from_):
            return arg
        return to(arg, **to_kwargs)
    return preprocessor
A preprocessing decorator that coerces inputs of a given type by passing them to a callable. Parameters ---------- from : type or tuple or types Inputs types on which to call ``to``. to : function Coercion function to call on inputs. **to_kwargs Additional keywords to forward to every call to ``to``. Examples -------- >>> @preprocess(x=coerce(float, int), y=coerce(float, int)) ... def floordiff(x, y): ... return x - y ... >>> floordiff(3.2, 2.5) 1 >>> @preprocess(x=coerce(str, int, base=2), y=coerce(str, int, base=2)) ... def add_binary_strings(x, y): ... return bin(x + y)[2:] ... >>> add_binary_strings('101', '001') '110'
def verify_quote(self, quote_id, extra):
    """Verifies that a quote order is valid.

    ::

        extras = {
            'hardware': {'hostname': 'test', 'domain': 'testing.com'},
            'quantity': 2
        }
        manager = ordering.OrderingManager(env.client)
        result = manager.verify_quote(12345, extras)

    :param int quote_id: ID for the target quote
    :param dictionary extra: Overrides for the defaults of
                             SoftLayer_Container_Product_Order
    """
    container = self.generate_order_template(quote_id, extra)

    # The XML endpoint throws exceptions on empty-string fields
    # (reservedCapacityId and hostId specifically), so strip every ''
    # value before verifying.  This is only a problem on verify_quote.
    cleaned = {key: value for key, value in container.items() if value != ''}

    return self.client.call('SoftLayer_Billing_Order_Quote', 'verifyOrder',
                            cleaned, id=quote_id)
Verifies that a quote order is valid. :: extras = { 'hardware': {'hostname': 'test', 'domain': 'testing.com'}, 'quantity': 2 } manager = ordering.OrderingManager(env.client) result = manager.verify_quote(12345, extras) :param int quote_id: ID for the target quote :param dictionary extra: Overrides for the defaults of SoftLayer_Container_Product_Order :param int quantity: Quantity to override default
def oauthgetm(method, param_dict, socket_timeout=None):
    # NOTE(review): this is Python 2-only code (iteritems, unicode,
    # urllib.urlencode); it will not run under Python 3 without porting.
    try:
        import oauth2 # lazy import this so oauth2 is not a hard dep
    except ImportError:
        raise Exception("You must install the python-oauth2 library to use this method.")

    """ Call the api! With Oauth! Param_dict is a *regular* *python* *dictionary* so if you want to have multi-valued params put them in a list.
    ** note, if we require 2.6, we can get rid of this timeout munging.
    """
    def build_request(url):
        # Build and HMAC-SHA1-sign an OAuth 1.0 GET request for the url.
        params = {
            'oauth_version': "1.0",
            'oauth_nonce': oauth2.generate_nonce(),
            'oauth_timestamp': int(time.time())
        }
        consumer = oauth2.Consumer(key=config.ECHO_NEST_CONSUMER_KEY, secret=config.ECHO_NEST_SHARED_SECRET)
        params['oauth_consumer_key'] = config.ECHO_NEST_CONSUMER_KEY
        req = oauth2.Request(method='GET', url=url, parameters=params)
        signature_method = oauth2.SignatureMethod_HMAC_SHA1()
        req.sign_request(signature_method, consumer, None)
        return req

    # NOTE(review): this mutates the caller's dictionary in place.
    param_dict['api_key'] = config.ECHO_NEST_API_KEY
    param_list = []
    if not socket_timeout:
        socket_timeout = config.CALL_TIMEOUT

    # Flatten the dict into (key, value) pairs; list values become repeated
    # params, None values are dropped, unicode is UTF-8 encoded.
    for key,val in param_dict.iteritems():
        if isinstance(val, list):
            param_list.extend( [(key,subval) for subval in val] )
        elif val is not None:
            if isinstance(val, unicode):
                val = val.encode('utf-8')
            param_list.append( (key,val) )

    params = urllib.urlencode(param_list)

    # Temporarily override the process-wide default socket timeout.
    orig_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(socket_timeout)

    """ just a normal GET call """
    url = 'http://%s/%s/%s/%s?%s' % (config.API_HOST, config.API_SELECTOR, config.API_VERSION,
                                    method, params)
    req = build_request(url)
    f = opener.open(req.to_url())
    socket.setdefaulttimeout(orig_timeout)
    # try/except
    response_dict = get_successful_response(f)
    return response_dict
Call the api! With Oauth! Param_dict is a *regular* *python* *dictionary* so if you want to have multi-valued params put them in a list. ** note, if we require 2.6, we can get rid of this timeout munging.
def get_cur_batch(items):
    """Retrieve name of the batch shared between all items in a group.

    Returns the single batch name common to every item, ``None`` when the
    items share no batch, and raises when the intersection is ambiguous.
    """
    batches = []
    for data in items:
        batch = tz.get_in(["metadata", "batch"], data, [])
        # Normalize scalar batch names to single-element sets.
        batches.append(set(batch) if isinstance(batch, (list, tuple)) else set([batch]))
    # Intersect all per-item batch sets to find the shared batch.
    combo_batches = reduce(lambda b1, b2: b1.intersection(b2), batches)
    if len(combo_batches) == 1:
        return combo_batches.pop()
    elif len(combo_batches) == 0:
        return None
    else:
        raise ValueError("Found multiple overlapping batches: %s -- %s" % (combo_batches, batches))
Retrieve name of the batch shared between all items in a group.
def on_rabbitmq_close(self, reply_code, reply_text):
    """Called when the connection to RabbitMQ has been closed.

    Clears the cached connection/channel and initiates a reconnect.

    :param int reply_code: The code for the disconnect
    :param str reply_text: The disconnect reason

    """
    global rabbitmq_connection
    LOGGER.warning('RabbitMQ has disconnected (%s): %s',
                   reply_code, reply_text)
    rabbitmq_connection = None
    self._set_rabbitmq_channel(None)
    self._connect_to_rabbitmq()
Called when the connection to RabbitMQ has been closed. :param int reply_code: The code for the disconnect :param str reply_text: The disconnect reason
def run_parallel(pipeline, input_gen, options={}, ncpu=4, chunksize=200):
    """ Run a pipeline in parallel over a input generator cutting it into small
    chunks.

    >>> # if we have a simple component
    >>> from reliure.pipeline import Composable
    >>> # that we want to run over a given input:
    >>> input = "abcde"
    >>> import string
    >>> pipeline = Composable(lambda letters: (l.upper() for l in letters))
    >>> res = run_parallel(pipeline, input, ncpu=2, chunksize=2)
    >>> #Note: res should be equals to [['C', 'D'], ['A', 'B'], ['E']]
    >>> #but it seems that there is a bug with py.test and mp...

    :param pipeline: the pipeline to run on each chunk
    :param input_gen: iterable of input elements
    :param options: options passed to each worker (not mutated here; the
        mutable default is kept only for interface compatibility)
    :param ncpu: number of worker processes
    :param chunksize: number of elements per chunk
    :return: list of per-chunk results (order is not guaranteed)
    """
    t0 = time()
    #FIXME: there is a know issue when pipeline results are "big" object, the merge is bloking... to be investigate
    #TODO: add get_pipeline args to prodvide a fct to build the pipeline (in each worker)
    logger = logging.getLogger("reliure.run_parallel")
    # BUGFIX: Queue.get_nowait() raises queue.Empty.  The previous code
    # caught `mp.Queue.Empty`, which does not exist (mp.Queue is a factory
    # function), so the except clause itself raised AttributeError.
    try:
        from queue import Empty  # Python 3
    except ImportError:
        from Queue import Empty  # Python 2
    jobs = []
    results = []
    Qdata = mp.JoinableQueue(ncpu*2)   # input queue
    Qresult = mp.Queue()               # result queue
    # ensure input_gen is realy an itertor not a list
    if hasattr(input_gen, "__len__"):
        input_gen = iter(input_gen)
    for wnum in range(ncpu):
        logger.debug("create worker #%s" % wnum)
        worker = mp.Process(target=_reliure_worker, args=(wnum, Qdata, Qresult, pipeline, options))
        worker.start()
        jobs.append(worker)
    while True:
        # consume chunksize elements from input_gen
        chunk = tuple(islice(input_gen, chunksize))
        if not len(chunk):
            break
        logger.info("send a chunk of %s elemets to a worker" % len(chunk))
        Qdata.put(chunk)
    logger.info("all data has beed send to workers")
    # wait until all task are done
    Qdata.join()
    logger.debug("wait for workers...")
    for worker in jobs:
        worker.terminate()
    logger.debug("merge results")
    try:
        while not Qresult.empty():
            logger.debug("result queue still have %d elements" % Qresult.qsize())
            res = Qresult.get_nowait()
            results.append(res)
    except Empty:
        logger.debug("result queue is empty")
    logger.info("Pipeline executed in %1.3f sec" % (time() - t0))
    return results
Run a pipeline in parallel over a input generator cutting it into small chunks. >>> # if we have a simple component >>> from reliure.pipeline import Composable >>> # that we want to run over a given input: >>> input = "abcde" >>> import string >>> pipeline = Composable(lambda letters: (l.upper() for l in letters)) >>> res = run_parallel(pipeline, input, ncpu=2, chunksize=2) >>> #Note: res should be equals to [['C', 'D'], ['A', 'B'], ['E']] >>> #but it seems that there is a bug with py.test and mp...
def append_md5_if_too_long(component, size):
    """
    Trims the component if it is longer than size and appends the component's md5.
    Total must be of length size.

    :param str component: component to work on
    :param int size: component's size limit
    :return str: component and appended md5 trimmed to be of length size
    """
    if len(component) <= size:
        return component
    digest = hashlib.md5(component.encode('utf-8')).hexdigest()
    if size > 32:
        # Keep a prefix, leaving room for '_' plus the 32-char md5.
        prefix = component[:size - 33]
        return prefix + '_' + digest
    # Not enough room for any prefix: return a truncated digest.
    return digest[:size]
Trims the component if it is longer than size and appends the component's md5. Total must be of length size. :param str component: component to work on :param int size: component's size limit :return str: component and appended md5 trimmed to be of length size
def before_insert(mapper, conn, target): """event.listen method for Sqlalchemy to set the seqience_id for this object and create an ObjectNumber value for the id_""" # from identity import ObjectNumber # assert not target.fk_vid or not ObjectNumber.parse(target.fk_vid).revision if target.sequence_id is None: from ambry.orm.exc import DatabaseError raise DatabaseError('Must have sequence_id before insertion') # Check that the id column is always sequence id 1 assert (target.name == 'id') == (target.sequence_id == 1), (target.name, target.sequence_id) Column.before_update(mapper, conn, target)
event.listen method for SQLAlchemy to set the sequence_id for this object and create an ObjectNumber value for the id_
def _check_dedup(data):
    """Check configuration for de-duplication.

    Defaults to no de-duplication for RNA-seq and small RNA, the back
    compatible default. Allow overwriting with explicit `mark_duplicates:
    true` setting. Also defaults to false for no alignment inputs.
    """
    # RNA-seq/smallRNA (or inputs without an aligner) default to False;
    # everything else defaults to True.
    if dd.get_analysis(data).lower() in ["rna-seq", "smallrna-seq"] or not dd.get_aligner(data):
        dup_param = utils.get_in(data, ("config", "algorithm", "mark_duplicates"), False)
    else:
        dup_param = utils.get_in(data, ("config", "algorithm", "mark_duplicates"), True)
    # A string value means the user named a specific algorithm, which is no
    # longer supported; treat it as a plain "enable" flag.
    if dup_param and isinstance(dup_param, six.string_types):
        logger.info("Warning: bcbio no longer support explicit setting of mark_duplicate algorithm. "
                    "Using best-practice choice based on input data.")
        dup_param = True
    return dup_param
Check configuration for de-duplication. Defaults to no de-duplication for RNA-seq and small RNA, the back compatible default. Allow overwriting with explicit `mark_duplicates: true` setting. Also defaults to false for no alignment inputs.
def instance_absent(name, instance_name=None, instance_id=None,
                    release_eip=False, region=None, key=None, keyid=None,
                    profile=None, filters=None):
    '''
    Ensure an EC2 instance does not exist (is stopped and removed).

    .. versionchanged:: 2016.11.0

    name
        (string) - The name of the state definition.
    instance_name
        (string) - The name of the instance.
    instance_id
        (string) - The ID of the instance.
    release_eip
        (bool) - Release any associated EIPs during termination.
    region
        (string) - Region to connect to.
    key
        (string) - Secret key to be used.
    keyid
        (string) - Access key to be used.
    profile
        (variable) - A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    filters
        (dict) - A dict of additional filters to use in matching the
        instance to delete.

    YAML example fragment:

    .. code-block:: yaml

        - filters:
            vpc-id: vpc-abcdef12
    '''
    ### TODO - Implement 'force' option??  Would automagically turn off
    ###        'disableApiTermination', as needed, before trying to delete.
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}
           }
    running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped')

    # Resolve the instance ID from its name when not given explicitly.
    if not instance_id:
        try:
            instance_id = __salt__['boto_ec2.get_id'](name=instance_name if instance_name else name,
                                                      region=region, key=key,
                                                      keyid=keyid, profile=profile,
                                                      in_states=running_states,
                                                      filters=filters)
        except CommandExecutionError as e:
            ret['result'] = None
            ret['comment'] = ("Couldn't determine current status of instance "
                              "{0}.".format(instance_name or name))
            return ret

    instances = __salt__['boto_ec2.find_instances'](instance_id=instance_id, region=region,
                                                    key=key, keyid=keyid, profile=profile,
                                                    return_objs=True, filters=filters)
    if not instances:
        ret['result'] = True
        ret['comment'] = 'Instance {0} is already gone.'.format(instance_id)
        return ret
    instance = instances[0]

    ### Honor 'disableApiTermination' - if you want to override it, first use set_attribute() to turn it off
    no_can_do = __salt__['boto_ec2.get_attribute']('disableApiTermination', instance_id=instance_id,
                                                   region=region, key=key, keyid=keyid,
                                                   profile=profile)
    if no_can_do.get('disableApiTermination') is True:
        ret['result'] = False
        ret['comment'] = 'Termination of instance {0} via the API is disabled.'.format(instance_id)
        return ret

    if __opts__['test']:
        ret['comment'] = 'The instance {0} is set to be deleted.'.format(name)
        ret['result'] = None
        return ret

    r = __salt__['boto_ec2.terminate'](instance_id=instance_id, name=instance_name,
                                       region=region, key=key, keyid=keyid, profile=profile)
    if not r:
        ret['result'] = False
        ret['comment'] = 'Failed to terminate instance {0}.'.format(instance_id)
        return ret

    ret['changes']['old'] = {'instance_id': instance_id}
    ret['changes']['new'] = None

    if release_eip:
        ip = getattr(instance, 'ip_address', None)
        if ip:
            base_args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile}
            public_ip = None
            alloc_id = None
            assoc_id = None
            # VPC instances use allocation/association IDs; EC2-Classic
            # instances are released by public IP.
            if getattr(instance, 'vpc_id', None):
                r = __salt__['boto_ec2.get_eip_address_info'](addresses=ip, **base_args)
                if r and 'allocation_id' in r[0]:
                    alloc_id = r[0]['allocation_id']
                    assoc_id = r[0].get('association_id')
                else:
                    # I /believe/ this situation is impossible but let's hedge our bets...
                    ret['result'] = False
                    ret['comment'] = "Can't determine AllocationId for address {0}.".format(ip)
                    return ret
            else:
                public_ip = instance.ip_address

            if assoc_id:
                # Race here - sometimes the terminate above will already have dropped this
                if not __salt__['boto_ec2.disassociate_eip_address'](association_id=assoc_id,
                                                                    **base_args):
                    log.warning("Failed to disassociate EIP %s.", ip)

            # NOTE(review): on the non-VPC path, `r` here still holds the
            # terminate() result, so `r[0]['public_ip']` in the fallback
            # below looks suspicious -- confirm (public_ip is set on that
            # path, so the fallback may simply be unreachable).
            if __salt__['boto_ec2.release_eip_address'](allocation_id=alloc_id,
                                                        public_ip=public_ip,
                                                        **base_args):
                log.info("Released EIP address %s", public_ip or r[0]['public_ip'])
                ret['changes']['old']['public_ip'] = public_ip or r[0]['public_ip']
            else:
                ret['result'] = False
                ret['comment'] = "Failed to release EIP {0}.".format(ip)
                return ret

    return ret
Ensure an EC2 instance does not exist (is stopped and removed). .. versionchanged:: 2016.11.0 name (string) - The name of the state definition. instance_name (string) - The name of the instance. instance_id (string) - The ID of the instance. release_eip (bool) - Release any associated EIPs during termination. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. filters (dict) - A dict of additional filters to use in matching the instance to delete. YAML example fragment: .. code-block:: yaml - filters: vpc-id: vpc-abcdef12
def interpolation_points(self, N):
    """
    N Chebyshev points in [-1, 1], boundaries included.
    """
    if N == 1:
        return np.array([0.])
    angles = np.arange(N) * np.pi / (N - 1)
    return np.cos(angles)
N Chebyshev points in [-1, 1], boundaries included
def _GetAttributes(self):
    """Retrieves the attributes, building and caching them on first use.

    Returns:
      list[NTFSAttribute]: attributes.
    """
    if self._attributes is None:
        wrapped = []
        for fsntfs_attribute in self._fsntfs_file_entry.attributes:
            # Map the low-level attribute type to its wrapper class,
            # falling back to the generic NTFSAttribute.
            wrapper_class = self._ATTRIBUTE_TYPE_CLASS_MAPPINGS.get(
                fsntfs_attribute.attribute_type, NTFSAttribute)
            wrapped.append(wrapper_class(fsntfs_attribute))
        self._attributes = wrapped

    return self._attributes
Retrieves the attributes. Returns: list[NTFSAttribute]: attributes.
def sorted_stats(self):
    """Get the stats sorted by an alias (if present) or key."""
    key = self.get_key()
    # Natural sort: split the (possibly aliased) value into digit and
    # non-digit runs so that e.g. "item2" orders before "item10";
    # non-digit runs are compared case-insensitively.
    return sorted(self.stats, key=lambda stat: tuple(map(
        lambda part: int(part) if part.isdigit() else part.lower(),
        re.split(r"(\d+|\D+)", self.has_alias(stat[key]) or stat[key])
    )))
Get the stats sorted by an alias (if present) or key.
def _aggr_mean(inList):
    """ Returns mean of the elements of the list that are not the missing-data
    sentinel; returns None when every element is missing. """
    total = 0
    count = 0
    for value in inList:
        if value != SENTINEL_VALUE_FOR_MISSING_DATA:
            total += value
            count += 1
    return total / count if count != 0 else None
Returns mean of non-None elements of the list
def get_go2color_inst(self, hdrgo):
    """Get a copy of go2color with GO group header colored."""
    colors = dict(self.go2color)
    colors[hdrgo] = self.hdrgo_dflt_color
    return colors
Get a copy of go2color with GO group header colored.
def parse_header(header):
    """
    Convert a list of the form `['fieldname:fieldtype:fieldsize',...]`
    into a numpy composite dtype. The parser understands headers generated
    by :func:`openquake.commonlib.writers.build_header`.
    Here is an example:

    >>> parse_header(['PGA:float32', 'PGV', 'avg:float32:2'])
    (['PGA', 'PGV', 'avg'], dtype([('PGA', '<f4'), ('PGV', '<f4'), ('avg', '<f4', (2,))]))

    :params header: a list of type descriptions
    :returns: column names and the corresponding composite dtype
    """
    names = []
    descriptors = []
    for col_str in header:
        pieces = col_str.strip().split(':')
        if len(pieces) == 1:
            # only the field name: default dtype, scalar shape
            pieces = [pieces[0], 'float32', '']
        elif len(pieces) == 2:
            if castable_to_int(pieces[1]):
                # field name and size: default dtype
                pieces = [pieces[0], 'float32', pieces[1]]
            else:
                # field name and dtype: scalar shape
                pieces = [pieces[0], pieces[1], '']
        elif len(pieces) > 3:
            raise ValueError('Invalid column description: %s' % col_str)
        name, numpytype, size = pieces
        shape = (int(size),) if size.strip() else ()
        names.append(name)
        descriptors.append((name, numpytype, shape))
    return names, numpy.dtype(descriptors)
Convert a list of the form `['fieldname:fieldtype:fieldsize',...]` into a numpy composite dtype. The parser understands headers generated by :func:`openquake.commonlib.writers.build_header`. Here is an example: >>> parse_header(['PGA:float32', 'PGV', 'avg:float32:2']) (['PGA', 'PGV', 'avg'], dtype([('PGA', '<f4'), ('PGV', '<f4'), ('avg', '<f4', (2,))])) :params header: a list of type descriptions :returns: column names and the corresponding composite dtype
def run_from_argv(self, prog, subcommand, global_options, argv):
    """
    Set up any environment changes requested, then run this command.
    """
    self.prog_name = prog
    # Build the option parser for this subcommand and parse the CLI.
    parser = self.create_parser(prog, subcommand)
    opts, args = parser.parse_args(argv)
    # Remember everything on self so subclasses can inspect it later.
    self.global_options = global_options
    self.options = opts
    self.args = args
    self.execute(args, opts, global_options)
Set up any environment changes requested, then run this command.
def from_chars(chars):
    """Make a Pauli Term from a string of the characters "X", "Y", "Z" or "I".

    e.g. "XZIY" => X(0) * Z(1) * Y(3)

    Args:
        chars (str): Written in "X", "Y", "Z" or "I".

    Returns:
        Term: A `Term` object.

    Raises:
        ValueError: When chars contains a character other than
            "X", "Y", "Z" or "I".
    """
    # The character position becomes the qubit index; identities drop out.
    factors = [pauli_from_char(ch, idx)
               for idx, ch in enumerate(chars) if ch != "I"]
    if not factors:
        return 1.0 * I
    if len(factors) == 1:
        return 1.0 * factors[0]
    product = factors[0]
    for factor in factors[1:]:
        product = product * factor
    return product
Make a Pauli Term from a string of the characters "X", "Y", "Z" or "I". e.g. "XZIY" => X(0) * Z(1) * Y(3) Args: chars (str): Written in "X", "Y", "Z" or "I". Returns: Term: A `Term` object. Raises: ValueError: When chars contains a character other than "X", "Y", "Z" or "I".
def log_cert_info(logger, msg_str, cert_obj):
    """Dump basic certificate values to the log.

    Args:
      logger: Logger
        Logger to which to write the certificate values.

      msg_str: str
        A message to write to the log before the certificate values.

      cert_obj: cryptography.Certificate
        Certificate containing values to log.

    Returns:
      None
    """
    value_lines = [
        "Subject: {}".format(
            _get_val_str(cert_obj, ["subject", "value"], reverse=True)
        ),
        "Issuer: {}".format(
            _get_val_str(cert_obj, ["issuer", "value"], reverse=True)
        ),
        "Not Valid Before: {}".format(cert_obj.not_valid_before.isoformat()),
        "Not Valid After: {}".format(cert_obj.not_valid_after.isoformat()),
        "Subject Alt Names: {}".format(
            _get_ext_val_str(cert_obj, "SUBJECT_ALTERNATIVE_NAME", ["value", "value"])
        ),
        "CRL Distribution Points: {}".format(
            _get_ext_val_str(
                cert_obj,
                "CRL_DISTRIBUTION_POINTS",
                ["value", "full_name", "value", "value"],
            )
        ),
        "Authority Access Location: {}".format(
            extract_issuer_ca_cert_url(cert_obj) or "<not found>"
        ),
    ]
    # Emit the heading first, then one indented line per value.
    logger("{}:".format(msg_str))
    for line in value_lines:
        logger(" {}".format(line))
Dump basic certificate values to the log. Args: logger: Logger Logger to which to write the certificate values. msg_str: str A message to write to the log before the certificate values. cert_obj: cryptography.Certificate Certificate containing values to log. Returns: None
def wait_for_element_to_disappear(self, locator, params=None, timeout=None):
    """
    Waits until the element is not visible (hidden) or no longer attached to the DOM.

    Raises TimeoutException if element does not become invisible.

    :param locator: locator tuple or WebElement instance
    :param params: (optional) locator params
    :param timeout: (optional) time to wait for element (default: self._explicit_wait)
    :return: None
    """
    # A WebElement instance needs the extended condition; a locator tuple
    # uses the standard expected-conditions helper.
    if isinstance(locator, WebElement):
        exp_cond = eec.invisibility_of
    else:
        exp_cond = ec.invisibility_of_element_located
    try:
        self._get(locator, exp_cond, params, timeout,
                  error_msg="Element never disappeared")
    except (StaleElementReferenceException, NoSuchElementException):
        # Element already gone from the DOM -- that is what we waited for.
        return True
Waits until the element is not visible (hidden) or no longer attached to the DOM. Raises TimeoutException if element does not become invisible. :param locator: locator tuple or WebElement instance :param params: (optional) locator params :param timeout: (optional) time to wait for element (default: self._explicit_wait) :return: None
def request(self, method, path, query=None, content=None):
    """
    Sends an HTTP request.

    This constructs a full URL, encodes and decodes HTTP bodies, and
    handles invalid responses in a pythonic way.

    @type method: string
    @param method: HTTP method to use
    @type path: string
    @param path: HTTP URL path
    @type query: list of two-tuples
    @param query: query arguments to pass to urllib.urlencode
    @type content: str or None
    @param content: HTTP body content

    @rtype: object
    @return: JSON-Decoded response
    @raises GanetiApiError: If an invalid response is returned
    """
    if not path.startswith("/"):
        raise ClientError("Implementation error: Called with bad path %s"
                          % path)
    # No body unless content was given; JSON-encode it when present.
    body = None
    if content is not None:
        data = self._json_encoder.encode(content)
        body = StringProducer(data)
    url = self._base_url + path
    if query:
        # NOTE(review): prepare_query appears to normalize `query` in
        # place before encoding -- confirm against its definition.
        prepare_query(query)
        params = urlencode(query, doseq=True)
        url += "?%s" % params
    log.msg("Sending request to %s %s %s" % (url, self.headers, body),
            system="Gentleman")
    # Fire the Twisted agent request; the protocol below collects and
    # JSON-decodes the response body.
    d = self._agent.request(method, url, headers=self.headers,
                            bodyProducer=body)
    protocol = JsonResponseProtocol(d)

    @d.addErrback
    def connectionFailed(failure):
        # Translate a refused TCP connection into the API-level error.
        failure.trap(ConnectionRefusedError)
        raise GanetiApiError("Connection refused!")

    @d.addCallback
    def cb(response):
        # Anything but 200 is treated as a failed call.
        if response.code != 200:
            raise NotOkayError(code=response.code)
        response.deliverBody(protocol)

    # NOTE(review): presumably returns a Deferred firing with the decoded
    # JSON -- confirm against JsonResponseProtocol.getData.
    return protocol.getData()
Sends an HTTP request. This constructs a full URL, encodes and decodes HTTP bodies, and handles invalid responses in a pythonic way. @type method: string @param method: HTTP method to use @type path: string @param path: HTTP URL path @type query: list of two-tuples @param query: query arguments to pass to urllib.urlencode @type content: str or None @param content: HTTP body content @rtype: object @return: JSON-Decoded response @raises GanetiApiError: If an invalid response is returned
async def provStacks(self, offs, size):
    '''
    Return stream of (iden, provenance stack) tuples at the given offset.
    '''
    seen = 0
    for iden, stack in self.cell.provstor.provStacks(offs, size):
        seen += 1
        # Yield to the event loop every 1000 items so a large scan
        # does not starve other tasks.
        if seen % 1000 == 0:
            await asyncio.sleep(0)
        yield s_common.ehex(iden), stack
Return stream of (iden, provenance stack) tuples at the given offset.
def process_pkcs7(self, data, name):
    """
    Process PKCS7 signature with certificate in it.

    Extracts the first signer certificate from a PKCS7 blob (PEM or DER)
    and forwards it to process_x509. Returns a one-element list with the
    result, or None if parsing fails (the error is logged, not raised).

    :param data: PKCS7 payload, PEM armored or raw DER
    :param name: source name used for logging / result metadata
    :return: list with one process_x509 result, or None on failure
    """
    # Private cryptography/OpenSSL internals -- fragile across versions.
    from cryptography.hazmat.backends.openssl.backend import backend
    from cryptography.hazmat.backends.openssl.x509 import _Certificate

    # DER conversion: treat as PEM if armored, or if it looks like pure
    # base64 text.
    is_pem = startswith(data, '-----')
    if self.re_match(r'^[a-zA-Z0-9-\s+=/]+$', data):
        is_pem = True
    try:
        der = data
        if is_pem:
            # Strip the PKCS7 armor lines, then base64-decode to DER.
            data = data.decode('utf8')
            data = re.sub(r'\s*-----\s*BEGIN\s+PKCS7\s*-----', '', data)
            data = re.sub(r'\s*-----\s*END\s+PKCS7\s*-----', '', data)
            der = base64.b64decode(data)

        # Parse the DER via the OpenSSL FFI and pull out the signer stack.
        bio = backend._bytes_to_bio(der)
        pkcs7 = backend._lib.d2i_PKCS7_bio(bio.bio, backend._ffi.NULL)
        backend.openssl_assert(pkcs7 != backend._ffi.NULL)
        signers = backend._lib.PKCS7_get0_signers(pkcs7, backend._ffi.NULL, 0)
        backend.openssl_assert(signers != backend._ffi.NULL)
        backend.openssl_assert(backend._lib.sk_X509_num(signers) > 0)
        # Only the first signer certificate is processed.
        x509_ptr = backend._lib.sk_X509_value(signers, 0)
        backend.openssl_assert(x509_ptr != backend._ffi.NULL)
        # Tie the X509 lifetime to the ffi garbage collector.
        x509_ptr = backend._ffi.gc(x509_ptr, backend._lib.X509_free)
        x509 = _Certificate(backend, x509_ptr)
        self.num_pkcs7_cert += 1
        return [self.process_x509(x509, name=name, pem=False, source='pkcs7-cert', aux='')]
    except Exception as e:
        # Best-effort: log and fall through (implicitly returns None).
        logger.debug('Error in PKCS7 processing %s: %s' % (name, e))
        self.trace_logger.log(e)
Process PKCS7 signature with certificate in it. :param data: :param name: :return:
def pexpire(self, key, milliseconds):
    """Emulate pexpire: expire *key* after the given number of milliseconds."""
    encoded_key = self._encode(key)
    ttl = timedelta(milliseconds=milliseconds)
    return self._expire(encoded_key, ttl)
Emulate pexpire
def count_duplicate_starts(bam_file, sample_size=10000000):
    """
    Return a set of x, y points where x is the number of reads sequenced and
    y is the number of unique start sites identified
    If sample size < total reads in a file the file will be downsampled.
    """
    count = Counter()
    with bam.open_samfile(bam_file) as samfile:
        # unmapped reads should not be counted
        # NOTE: itertools.ifilter -- this code is Python 2 only.
        filtered = ifilter(lambda x: not x.is_unmapped, samfile)

        def read_parser(read):
            # Identify a start site by "<reference id>:<position>".
            return ":".join([str(read.tid), str(read.pos)])

        # Reservoir-sample at most sample_size reads from the stream.
        samples = utils.reservoir_sample(filtered, sample_size, read_parser)
    count.update(samples)
    return count
Return a set of x, y points where x is the number of reads sequenced and y is the number of unique start sites identified If sample size < total reads in a file the file will be downsampled.
def setParent(self, other):
    """
    Sets the parent for this layer to the given layer.

    :param      other | <XNodeLayer> || None

    :return     <bool> changed
    """
    if self._parent == other:
        return False

    # detach this layer from its current parent, if any
    if self._parent and self in self._parent._children:
        self._parent._children.remove(self)

    self._parent = other

    # attach this layer to the new parent's child list, guarding against
    # duplicates (bug fix: the old code tested ``self._children`` -- this
    # layer's own children -- so the duplicate guard never worked and the
    # layer could be appended to the parent repeatedly)
    if self._parent and self not in self._parent._children:
        self._parent._children.append(self)

    self.sync()
    return True
Sets the parent for this layer to the given layer. :param other | <XNodeLayer> || None :return <bool> changed
def register():
    """Uses the new style of registration based on GitHub Pelican issue #314."""
    # Always hook plugin initialization.
    signals.initialized.connect(initialized)
    try:
        # Wire content scanning, gallery detection and photo resizing into
        # the relevant stages of the Pelican build pipeline.
        signals.content_object_init.connect(detect_content)
        signals.all_generators_finalized.connect(detect_images_and_galleries)
        signals.article_writer_finalized.connect(resize_photos)
    except Exception as e:
        # Never break the Pelican build because of this plugin; just log.
        logger.exception('Plugin failed to execute: {}'.format(pprint.pformat(e)))
Uses the new style of registration based on GitHub Pelican issue #314.
def get_current_temperature(self, refresh=False):
    """Return the current temperature as a float, or None if unavailable.

    :param refresh: when True, refresh the cached device state first.
    """
    if refresh:
        self.refresh()
    try:
        temperature = float(self.get_value('temperature'))
    except (TypeError, ValueError):
        # Missing or non-numeric reading.
        temperature = None
    return temperature
Get current temperature
def neg_loglik(self,beta):
    """ Creates the negative log-likelihood of the model

    Parameters
    ----------
    beta : np.array
        Contains untransformed starting values for latent variables

    Returns
    ----------
    The negative log-likelihood of the model
    """
    # _model runs the filter recursion; F are the one-step-ahead
    # covariances and v the prediction errors per observation.
    _, _, _, F, v = self._model(self.data,beta)
    loglik = 0.0
    for i in range(0,self.data.shape[0]):
        # log|F_i| + v_i' F_i^{-1} v_i (pinv for numerical robustness)
        loglik += np.linalg.slogdet(F[:,:,i])[1] + np.dot(v[i],np.dot(np.linalg.pinv(F[:,:,i]),v[i]))
    # Gaussian log-likelihood, negated for use with a minimizer.
    # NOTE(review): loglik ends up array-valued (hence .T[0].sum()) --
    # presumably v[i] is a column vector; confirm against _model.
    return -(-((self.data.shape[0]/2)*np.log(2*np.pi))-0.5*loglik.T[0].sum())
Creates the negative log-likelihood of the model Parameters ---------- beta : np.array Contains untransformed starting values for latent variables Returns ---------- The negative log-likelihood of the model
def display(self):
    """Displays the network to the screen.

    Walks the layers from output to input, printing each active layer's
    target/activation vectors, right/wrong bookkeeping (when counting),
    and the closest matching word patterns when patterned mode is on.
    """
    # Iterate layers in reverse order (output first).
    size = list(range(len(self.layers)))
    size.reverse()
    for i in size:
        layer = self.layers[i]
        if layer.active:
            print('%s layer (size %d)' % (layer.name, layer.size))
            tlabel, olabel = '', ''
            if (layer.type == 'Output'):
                if self.countWrong:
                    # Classify both vectors and track right/wrong tallies.
                    tlabel = ' (%s)' % self.classify(layer.target.tolist())
                    olabel = ' (%s)' % self.classify(layer.activation.tolist())
                    if olabel == tlabel:
                        self.numRight += 1
                    else:
                        olabel += ' *** WRONG ***'
                        self.numWrong += 1
                        if self.actDisplay is not None:
                            self.actDisplay.showWrong()
                print('Target    : %s%s' % (pretty(layer.target, max=15), tlabel))
                print('Activation: %s%s' % (pretty(layer.activation, max=15), olabel))
            if self.patterned and layer.type != 'Hidden':
                # Look up the closest named pattern for the target vector.
                targetWord, diff = self.getWord( layer.target, returnDiff = 1)
                if layer.kind == 'Output':
                    if targetWord == None:
                        print("Target Pattern = %s" % "No match")
                    else:
                        if diff == 0.0:
                            print("Target Pattern = '%s'" % targetWord)
                        else:
                            print("Target Pattern = '%s'; difference = %f)" % (targetWord, diff))
                # And for the activation vector on input/output layers.
                actWord, diff = self.getWord( layer.activation, returnDiff = 1 )
                if (layer.kind == 'Input' or layer.kind == 'Output'):
                    if actWord == None:
                        print("Matching Pattern = %s" % "No match")
                    else:
                        if diff == 0.0:
                            print("Matching Pattern = '%s'" % actWord)
                        else:
                            print("Matching Pattern = '%s'; difference = %f" % (actWord, diff))
    print("------------------------------------")
Displays the network to the screen.
def reload(self):
    """
    Reload file again from storage.
    """
    content = self._read(self.location)
    # Keep the cursor in range in case the file shrank on disk.
    clamped_cursor = min(self.buffer.cursor_position, len(content))
    self.buffer.document = Document(content, clamped_cursor)
    self._file_content = content
Reload file again from storage.
def handleOneNodeMsg(self, wrappedMsg):
    """
    Validate and process one message from a node.

    :param wrappedMsg: Tuple of message and the name of the node that sent
    the message
    """
    try:
        vmsg = self.validateNodeMsg(wrappedMsg)
        if vmsg:
            logger.trace("{} msg validated {}".format(self, wrappedMsg),
                         extra={"tags": ["node-msg-validation"]})
            self.unpackNodeMsg(*vmsg)
        else:
            # Validation returned a falsy result: drop the message.
            logger.debug("{} invalidated msg {}".format(self, wrappedMsg),
                         extra={"tags": ["node-msg-validation"]})
    except SuspiciousNode as ex:
        # A node behaving suspiciously is reported, not just discarded.
        self.reportSuspiciousNodeEx(ex)
    except Exception as ex:
        # Any other failure: discard the message, logging the reason.
        msg, frm = wrappedMsg
        self.discard(msg, ex, logger.info)
Validate and process one message from a node. :param wrappedMsg: Tuple of message and the name of the node that sent the message
def uint(nstr, schema):
    """ !~~uint

    Validator for the ``!~~uint`` schema tag: accepts a positive integer
    or a digit string. NOTE: Python 2 only (uses ``basestring``/``long``).
    """
    if isinstance(nstr, basestring):
        # Digit strings are coerced to a number; anything else fails.
        if not nstr.isdigit():
            return False
        nstr = long(nstr)
    elif not isinstance(nstr, (int, long)):
        return False
    # NOTE(review): zero is rejected (strictly positive) -- confirm that
    # is the intended semantics for "uint".
    return nstr > 0
!~~uint
def request_frame(self):
    """Construct initiating frame.

    Generates a fresh session id (stored on self so response frames can
    be matched to this request) and builds the command-send request.
    """
    self.session_id = get_new_session_id()
    return FrameCommandSendRequest(node_ids=[self.node_id],
                                   parameter=self.parameter,
                                   session_id=self.session_id)
Construct initiating frame.
def on_subscript(self, node):    # ('value', 'slice', 'ctx')
    """Subscript handling -- one of the tricky parts."""
    container = self.run(node.value)
    subscript = self.run(node.slice)
    ctx = node.ctx.__class__
    # Only Load/Store contexts are handled; anything else falls through.
    if ctx not in (ast.Load, ast.Store):
        return
    if isinstance(node.slice, (ast.Index, ast.Slice, ast.Ellipsis)):
        return container.__getitem__(subscript)
    if isinstance(node.slice, ast.ExtSlice):
        return container[subscript]
    self.raise_exception(node, msg="subscript with unknown context")
Subscript handling -- one of the tricky parts.
def ask_pascal_16(self, next_rva_ptr):
    """The next RVA is taken to be the one immediately following this one.

    Such RVA could indicate the natural end of the string and will be
    checked with the possible length contained in the first word.
    """
    length = self.__get_pascal_16_length()
    # Number of UTF-16 code units between the 2-byte length prefix and
    # the next RVA.
    available_units = (next_rva_ptr - (self.rva_ptr+2)) / 2
    if length == available_units:
        self.length = length
        return True
    return False
The next RVA is taken to be the one immediately following this one. Such RVA could indicate the natural end of the string and will be checked with the possible length contained in the first word.
def update_warning(self):
    """
    Updates the icon and tip based on the validity of the array content.
    """
    warning_button = self._button_warning
    if self.is_valid():
        # Valid content: clear any previous warning tooltip.
        warning_button.setToolTip('')
    else:
        tip = _('Array dimensions not valid')
        warning_button.setIcon(ima.icon('MessageBoxWarning'))
        warning_button.setToolTip(tip)
        # Surface the warning immediately next to the widget.
        QToolTip.showText(self._widget.mapToGlobal(QPoint(0, 5)), tip)
Updates the icon and tip based on the validity of the array content.
def remove_namespace(doc, namespace):
    '''Remove namespace in the passed document in place.

    Strips the Clark-notation prefix ``{namespace}`` from every matching
    element tag and records the removed namespace in the element's
    ``oxmlns`` attribute.
    '''
    ns = u'{%s}' % namespace
    nsl = len(ns)
    # NOTE(review): getiterator() was removed in Python 3.9; doc.iter()
    # is the modern equivalent -- confirm the supported Python versions.
    for elem in doc.getiterator():
        if elem.tag.startswith(ns):
            elem.tag = elem.tag[nsl:]
            elem.attrib['oxmlns'] = namespace
Remove namespace in the passed document in place.
def _process_output(output, parse_json=True):
    """Process coap-client output: route errors, return parsed payload."""
    output = output.strip()
    _LOGGER.debug('Received: %s', output)

    if not output:
        return None
    if 'decrypt_verify' in output:
        # Debug builds of coap-client interleave diagnostics with output.
        raise RequestError(
            'Please compile coap-client without debug output. See '
            'instructions at '
            'https://github.com/ggravlingen/pytradfri#installation')
    if output.startswith(CLIENT_ERROR_PREFIX):
        raise ClientError(output)
    if output.startswith(SERVER_ERROR_PREFIX):
        raise ServerError(output)
    if not parse_json:
        return output
    return json.loads(output)
Process output.
def batch_delete_jobs(
    self,
    parent,
    filter_,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """
    Deletes a list of ``Job``\ s by filter.

    Example:
        >>> from google.cloud import talent_v4beta1
        >>>
        >>> client = talent_v4beta1.JobServiceClient()
        >>>
        >>> parent = client.project_path('[PROJECT]')
        >>>
        >>> # TODO: Initialize `filter_`:
        >>> filter_ = ''
        >>>
        >>> client.batch_delete_jobs(parent, filter_)

    Args:
        parent (str): Required. The resource name of the project under which the job is created.

            The format is "projects/{project\_id}", for example,
            "projects/api-test-project".
        filter_ (str): Required. The filter string specifies the jobs to be deleted.

            Supported operator: =, AND

            The fields eligible for filtering are:

            -  ``companyName`` (Required)
            -  ``requisitionId`` (Required)

            Sample Query: companyName = "projects/api-test-project/companies/123"
            AND requisitionId = "req-1"
        retry (Optional[google.api_core.retry.Retry]): A retry object used
            to retry requests. If ``None`` is specified, requests will not
            be retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait
            for the request to complete. Note that if ``retry`` is
            specified, the timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
                failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
                to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Wrap the transport method to add retry and timeout logic.
    if "batch_delete_jobs" not in self._inner_api_calls:
        # Lazily build and cache the wrapped transport call on first use.
        self._inner_api_calls[
            "batch_delete_jobs"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.batch_delete_jobs,
            default_retry=self._method_configs["BatchDeleteJobs"].retry,
            default_timeout=self._method_configs["BatchDeleteJobs"].timeout,
            client_info=self._client_info,
        )

    request = job_service_pb2.BatchDeleteJobsRequest(parent=parent, filter=filter_)
    # Fire the RPC; the API returns no payload on success.
    self._inner_api_calls["batch_delete_jobs"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
Deletes a list of ``Job``\ s by filter. Example: >>> from google.cloud import talent_v4beta1 >>> >>> client = talent_v4beta1.JobServiceClient() >>> >>> parent = client.project_path('[PROJECT]') >>> >>> # TODO: Initialize `filter_`: >>> filter_ = '' >>> >>> client.batch_delete_jobs(parent, filter_) Args: parent (str): Required. The resource name of the project under which the job is created. The format is "projects/{project\_id}", for example, "projects/api-test-project". filter_ (str): Required. The filter string specifies the jobs to be deleted. Supported operator: =, AND The fields eligible for filtering are: - ``companyName`` (Required) - ``requisitionId`` (Required) Sample Query: companyName = "projects/api-test-project/companies/123" AND requisitionId = "req-1" retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
def get_environment_requirements_list():
    """
    Take the requirements list from the current running environment

    :return: string
    """
    # Ask the running interpreter's pip for its frozen requirement set.
    frozen = check_output([sys.executable, '-m', 'pip', 'freeze'])
    return [entry.decode("utf-8") for entry in frozen.split()]
Take the requirements list from the current running environment :return: string
def create(host, port):
    """
    Prepare server to execute

    :return: Modules to execute, cmd line function
    :rtype: list[WrapperServer], callable | None
    """
    wrapper = WrapperServer({'server': None})
    config = {
        'listen_port': port,
        'changer': wrapper,
    }
    # Only bind to a specific address when one was given.
    if host:
        config['listen_bind_ip'] = host
    wrapper.server = MeasureServer(config)
    return [wrapper], cmd_line
Prepare server to execute :return: Modules to execute, cmd line function :rtype: list[WrapperServer], callable | None
def typed_subtopic_data(fc, subid): '''Returns typed subtopic data from an FC.''' # I don't think this code will change after we fix the data race bug. ---AG ty = subtopic_type(subid) data = get_unicode_feature(fc, subid) assert isinstance(data, unicode), \ 'data should be `unicode` but is %r' % type(data) if ty == 'image': img_data = get_unicode_feature(fc, subid + '|data') img = re.sub('^data:image/[a-zA-Z]+;base64,', '', img_data) img = base64.b64decode(img.encode('utf-8')) return data, img elif ty in ('text', 'manual'): return data raise ValueError('unrecognized subtopic type "%s"' % ty)
Returns typed subtopic data from an FC.
def filter(self, *args):
    """Convenience decorator for registering a handler for text messages.

    Use as ``@filter("xxx")``, ``@filter(re.compile("xxx"))`` or
    ``@filter("xxx", "xxx2")`` to attach a handler for specific content.
    """
    def decorator(func):
        self.add_filter(func=func, rules=list(args))
        return func
    return decorator
为文本 ``(text)`` 消息添加 handler 的简便方法。 使用 ``@filter("xxx")``, ``@filter(re.compile("xxx"))`` 或 ``@filter("xxx", "xxx2")`` 的形式为特定内容添加 handler。
def __insert_frond_LF(d_w, d_u, dfs_data):
    """Encapsulates the process of inserting a frond uw into the left side frond group."""
    # Record the frond on the left side, grow the left frond-group
    # counter, and remember which side received the last insertion.
    dfs_data['LF'].append((d_w, d_u))
    dfs_data['FG']['l'] += 1
    dfs_data['last_inserted_side'] = 'LF'
Encapsulates the process of inserting a frond uw into the left side frond group.
def upload_file_to(self, addressinfo, timeout):
    """Uploads the raw firmware file to iLO

    Uploads the raw firmware file (already set as attribute in
    FirmwareImageControllerBase constructor) to iLO, whose address
    information is passed to this method.
    :param addressinfo: tuple of hostname and port of the iLO
    :param timeout: timeout in secs, used for connecting to iLO
    :raises: IloInvalidInputError, if raw firmware file not found
    :raises: IloError, for other internal problems
    :returns: the cookie sent back from iLO on successful upload
    """
    self.hostname, self.port = addressinfo
    self.timeout = timeout
    filename = self.fw_file
    # NOTE(review): file handle is never closed -- consider
    # ``with open(filename, 'rb') as fp: firmware = fp.read()``.
    firmware = open(filename, 'rb').read()

    # generate boundary; keep drawing until it does not occur in the
    # firmware payload itself
    boundary = b('------hpiLO3t' + str(random.randint(100000, 1000000)) + 'z')
    while boundary in firmware:
        boundary = b('------hpiLO3t' +
                     str(random.randint(100000, 1000000)) + 'z')
    # generate body parts of the multipart/form-data request
    parts = [
        # body1
        b("--") + boundary + b("""\r\nContent-Disposition: form-data; """
                               """name="fileType"\r\n\r\n"""),
        # body2
        b("\r\n--") + boundary +
        b('''\r\nContent-Disposition: form-data; name="fwimgfile"; '''
          '''filename="''') +
        b(filename) +
        b('''"\r\nContent-Type: application/octet-stream\r\n\r\n'''),
        # firmware image
        firmware,
        # body3
        b("\r\n--") + boundary + b("--\r\n"),
    ]
    total_bytes = sum([len(x) for x in parts])
    sock = self._get_socket()

    # send the firmware image: header first, then each body part
    sock.write(b(self.HTTP_UPLOAD_HEADER %
                 (total_bytes, boundary.decode('ascii'))))
    for part in parts:
        sock.write(part)

    # drain the response until the connection closes
    data = ''
    try:
        while True:
            d = sock.read()
            data += d.decode('latin-1')
            if not d:
                break
    except socket.sslerror:  # Connection closed
        e = sys.exc_info()[1]
        if not data:
            raise exception.IloConnectionError(
                "Communication with %(hostname)s:%(port)d failed: "
                "%(error)s" % {'hostname': self.hostname,
                               'port': self.port, 'error': str(e)})

    # Received len(data) bytes; a Set-Cookie header marks success
    cookie_match = re.search('Set-Cookie: *(.*)', data)
    if not cookie_match:
        raise exception.IloError("Uploading of file: %s failed due "
                                 "to unknown reason."
                                 % filename)
    # return the cookie
    return cookie_match.group(1)
Uploads the raw firmware file to iLO Uploads the raw firmware file (already set as attribute in FirmwareImageControllerBase constructor) to iLO, whose address information is passed to this method. :param addressinfo: tuple of hostname and port of the iLO :param timeout: timeout in secs, used for connecting to iLO :raises: IloInvalidInputError, if raw firmware file not found :raises: IloError, for other internal problems :returns: the cookie sent back from iLO on successful upload