code: string, lengths 75–104k
docstring: string, lengths 1–46.9k
def is_same_address(left: AnyAddress, right: AnyAddress) -> bool: """ Check whether two addresses refer to the same normalized address. """ if not is_address(left) or not is_address(right): raise ValueError("Both values must be valid addresses") else: return to_normalized_address(left) == to_normalized_address(right)
Check whether two addresses refer to the same normalized address.
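A short usage sketch (the address literals are illustrative; it assumes eth-utils-style is_address/to_normalized_address helpers are in scope):

# Usage sketch; address literals are illustrative.
assert is_same_address(
    "0xd3CdA913deB6f67967B99D67aCDFa1712C293601",
    "0xd3cda913deb6f67967b99d67acdfa1712c293601",
)  # checksummed and lowercase forms normalize to the same address
# is_same_address("0x...", "not-an-address") raises ValueError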
def customize_ruleset(self, custom_ruleset_file=None): """ Updates the ruleset to include a set of custom rules. These rules will be _added_ to the existing ruleset or replace the existing rule with the same ID. Args: custom_ruleset_file (optional): The filepath to the custom rules. Defaults to `None`. If `custom_ruleset_file` isn't passed, the environment variable `BOKCHOY_A11Y_CUSTOM_RULES_FILE` will be checked. If a filepath isn't specified by either of these methods, the ruleset will not be updated. Raises: `IOError` if the specified file does not exist. Examples: To include the rules defined in `axe-core-custom-rules.js`:: page.a11y_audit.config.customize_ruleset( "axe-core-custom-rules.js" ) Alternatively, use the environment variable `BOKCHOY_A11Y_CUSTOM_RULES_FILE` to specify the path to the file containing the custom rules. Documentation for how to write rules: https://github.com/dequelabs/axe-core/blob/master/doc/developer-guide.md An example of a custom rules file can be found at https://github.com/edx/bok-choy/tree/master/tests/a11y_custom_rules.js """ custom_file = custom_ruleset_file or os.environ.get( "BOKCHOY_A11Y_CUSTOM_RULES_FILE" ) if not custom_file: return with open(custom_file, "r") as additional_rules: custom_rules = additional_rules.read() if "var customRules" not in custom_rules: raise A11yAuditConfigError( "Custom rules file must include \"var customRules\"" ) self.custom_rules = custom_rules
Updates the ruleset to include a set of custom rules. These rules will be _added_ to the existing ruleset or replace the existing rule with the same ID. Args: custom_ruleset_file (optional): The filepath to the custom rules. Defaults to `None`. If `custom_ruleset_file` isn't passed, the environment variable `BOKCHOY_A11Y_CUSTOM_RULES_FILE` will be checked. If a filepath isn't specified by either of these methods, the ruleset will not be updated. Raises: `IOError` if the specified file does not exist. Examples: To include the rules defined in `axe-core-custom-rules.js`:: page.a11y_audit.config.customize_ruleset( "axe-core-custom-rules.js" ) Alternatively, use the environment variable `BOKCHOY_A11Y_CUSTOM_RULES_FILE` to specify the path to the file containing the custom rules. Documentation for how to write rules: https://github.com/dequelabs/axe-core/blob/master/doc/developer-guide.md An example of a custom rules file can be found at https://github.com/edx/bok-choy/tree/master/tests/a11y_custom_rules.js
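A minimal usage sketch of the two configuration paths described above, assuming `page` is a bok-choy page object as in the docstring example (the file name is hypothetical):

import os
# Option 1: pass the custom rules file path explicitly.
page.a11y_audit.config.customize_ruleset("axe-core-custom-rules.js")
# Option 2: let the environment variable supply the path.
os.environ["BOKCHOY_A11Y_CUSTOM_RULES_FILE"] = "axe-core-custom-rules.js"
page.a11y_audit.config.customize_ruleset()  # falls back to the env var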
def drag(duration: int, amp: complex, sigma: float, beta: float, name: str = None) -> SamplePulse: r"""Generates Y-only correction DRAG `SamplePulse` for standard nonlinear oscillator (SNO) [1]. Centered at `duration/2` and zeroed at `t=-1` to prevent large initial discontinuity. Applies `left` sampling strategy to generate discrete pulse from continuous function. [1] Gambetta, J. M., Motzoi, F., Merkel, S. T. & Wilhelm, F. K. Analytic control methods for high-fidelity unitary operations in a weakly nonlinear oscillator. Phys. Rev. A 83, 012308 (2011). Args: duration: Duration of pulse. Must be greater than zero. amp: Pulse amplitude at `center`. sigma: Width (standard deviation) of pulse. beta: Y correction amplitude. For the SNO this is $\beta=-\frac{\lambda_1^2}{4\Delta_2}$, where $\lambda_1$ is the relative coupling strength between the first excited and second excited states and $\Delta_2$ is the detuning between the respective excited states. name: Name of pulse. """ center = duration/2 zeroed_width = duration + 2 return _sampled_drag_pulse(duration, amp, center, sigma, beta, zeroed_width=zeroed_width, rescale_amp=True, name=name)
r"""Generates Y-only correction DRAG `SamplePulse` for standard nonlinear oscillator (SNO) [1]. Centered at `duration/2` and zeroed at `t=-1` to prevent large initial discontinuity. Applies `left` sampling strategy to generate discrete pulse from continuous function. [1] Gambetta, J. M., Motzoi, F., Merkel, S. T. & Wilhelm, F. K. Analytic control methods for high-fidelity unitary operations in a weakly nonlinear oscillator. Phys. Rev. A 83, 012308 (2011). Args: duration: Duration of pulse. Must be greater than zero. amp: Pulse amplitude at `center`. sigma: Width (standard deviation) of pulse. beta: Y correction amplitude. For the SNO this is $\beta=-\frac{\lambda_1^2}{4\Delta_2}$. Where $\lambds_1$ is the relative coupling strength between the first excited and second excited states and $\Delta_2$ is the detuning between the resepective excited states. name: Name of pulse.
def hash(self): """ Returns a hash of this render configuration from the variable, renderer, and time_index parameters. Used for caching the full-extent, native projection render so that subsequent requests can be served by a warp operation only. """ renderer_str = "{}|{}|{}|{}".format( self.renderer.__class__.__name__, self.renderer.colormap, self.renderer.fill_value, self.renderer.background_color ) if isinstance(self.renderer, StretchedRenderer): renderer_str = "{}|{}|{}".format(renderer_str, self.renderer.method, self.renderer.colorspace) elif isinstance(self.renderer, UniqueValuesRenderer): renderer_str = "{}|{}".format(renderer_str, self.renderer.labels) return hash("{}/{}/{}".format(self.variable.pk, renderer_str, self.time_index))
Returns a hash of this render configuration from the variable, renderer, and time_index parameters. Used for caching the full-extent, native projection render so that subsequent requests can be served by a warp operation only.
def _sphinx_build(self, kind): """ Call sphinx to build documentation. Attribute `num_jobs` from the class is used. Parameters ---------- kind : {'html', 'latex'} Examples -------- >>> DocBuilder(num_jobs=4)._sphinx_build('html') """ if kind not in ('html', 'latex'): raise ValueError('kind must be html or latex, ' 'not {}'.format(kind)) cmd = ['sphinx-build', '-b', kind] if self.num_jobs: cmd += ['-j', str(self.num_jobs)] if self.warnings_are_errors: cmd += ['-W', '--keep-going'] if self.verbosity: cmd.append('-{}'.format('v' * self.verbosity)) cmd += ['-d', os.path.join(BUILD_PATH, 'doctrees'), SOURCE_PATH, os.path.join(BUILD_PATH, kind)] return subprocess.call(cmd)
Call sphinx to build documentation. Attribute `num_jobs` from the class is used. Parameters ---------- kind : {'html', 'latex'} Examples -------- >>> DocBuilder(num_jobs=4)._sphinx_build('html')
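For concreteness, with num_jobs=4, warnings_are_errors=True and verbosity=2, the assembled html invocation comes out as below (BUILD_PATH and SOURCE_PATH are the module's own constants):

# sphinx-build -b html -j 4 -W --keep-going -vv \
#     -d BUILD_PATH/doctrees SOURCE_PATH BUILD_PATH/html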
def register_metric_descriptor(self, oc_md): """Register a metric descriptor with Stackdriver.""" metric_type = self.get_metric_type(oc_md) with self._md_lock: if metric_type in self._md_cache: return self._md_cache[metric_type] descriptor = self.get_metric_descriptor(oc_md) project_name = self.client.project_path(self.options.project_id) sd_md = self.client.create_metric_descriptor(project_name, descriptor) with self._md_lock: self._md_cache[metric_type] = sd_md return sd_md
Register a metric descriptor with Stackdriver.
def on_clipboard_mode_change(self, clipboard_mode): """Notification raised when the shared clipboard mode changes. :param clipboard_mode: the new shared clipboard mode, of type :class:`ClipboardMode`. """ if not isinstance(clipboard_mode, ClipboardMode): raise TypeError("clipboard_mode can only be an instance of type ClipboardMode") self._call("onClipboardModeChange", in_p=[clipboard_mode])
Notification raised when the shared clipboard mode changes. :param clipboard_mode: the new shared clipboard mode, of type :class:`ClipboardMode`.
def migrate(src_path, src_passphrase, src_backend, dst_path, dst_passphrase, dst_backend): """Migrate all keys in a source stash to a destination stash. The migration process will decrypt all keys using the source stash's passphrase and then encrypt them based on the destination stash's passphrase. Re-encryption will take place only if the passphrases differ. """ src_storage = STORAGE_MAPPING[src_backend](**_parse_path_string(src_path)) dst_storage = STORAGE_MAPPING[dst_backend](**_parse_path_string(dst_path)) src_stash = Stash(src_storage, src_passphrase) dst_stash = Stash(dst_storage, dst_passphrase) # TODO: Test that re-encryption does not occur on similar # passphrases keys = src_stash.export() dst_stash.load(src_passphrase, keys=keys)
Migrate all keys in a source stash to a destination stash. The migration process will decrypt all keys using the source stash's passphrase and then encrypt them based on the destination stash's passphrase. Re-encryption will take place only if the passphrases differ.
def gcd(*numbers): """ Returns the greatest common divisor for a sequence of numbers. Args: \*numbers: Sequence of numbers. Returns: (int) Greatest common divisor of numbers. """ n = numbers[0] for i in numbers: n = pygcd(n, i) return n
Returns the greatest common divisor for a sequence of numbers. Args: \*numbers: Sequence of numbers. Returns: (int) Greatest common divisor of numbers.
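A worked example, assuming pygcd is a two-argument gcd such as math.gcd:

>>> gcd(12, 18, 30)   # pygcd(12, 18) = 6, then pygcd(6, 30) = 6
6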
def line_is_interesting(self, line): """Return True, False, or None. True means always output, False means never output, None means output only if there are interesting lines. """ if line.startswith('Name'): return None if line.startswith('--------'): return None if line.startswith('TOTAL'): return None if '100%' in line: return False if line == '\n': return None if self._last_line_was_printable else False return True
Return True, False, or None. True means always output, False means never output, None means output only if there are interesting lines.
def _BuildOobLink(self, param, mode): """Builds out-of-band URL. Gitkit API GetOobCode() is called and the returned code is combined with the Gitkit widget URL to build the out-of-band URL. Args: param: dict of request. mode: string, Gitkit widget mode to handle the oob action after user clicks the oob url in the email. Raises: GitkitClientError: if oob code is not returned. Returns: A (oob_code, oob_url) tuple. """ code = self.rpc_helper.GetOobCode(param) if code: parsed = list(parse.urlparse(self.widget_url)) query = dict(parse.parse_qsl(parsed[4])) query.update({'mode': mode, 'oobCode': code}) try: parsed[4] = parse.urlencode(query) except AttributeError: parsed[4] = urllib.urlencode(query) return code, parse.urlunparse(parsed) raise errors.GitkitClientError('invalid request')
Builds out-of-band URL. Gitkit API GetOobCode() is called and the returned code is combined with the Gitkit widget URL to build the out-of-band URL. Args: param: dict of request. mode: string, Gitkit widget mode to handle the oob action after user clicks the oob url in the email. Raises: GitkitClientError: if oob code is not returned. Returns: A (oob_code, oob_url) tuple.
def SplitPatch(data): """Splits a patch into separate pieces for each file. Args: data: A string containing the output of svn diff. Returns: A list of 2-tuple (filename, text) where text is the svn diff output pertaining to filename. """ patches = [] filename = None diff = [] for line in data.splitlines(True): new_filename = None if line.startswith('Index:'): unused, new_filename = line.split(':', 1) new_filename = new_filename.strip() elif line.startswith('Property changes on:'): unused, temp_filename = line.split(':', 1) # When a file is modified, paths use '/' between directories, however # when a property is modified '\' is used on Windows. Make them the same # otherwise the file shows up twice. temp_filename = to_slash(temp_filename.strip()) if temp_filename != filename: # File has property changes but no modifications, create a new diff. new_filename = temp_filename if new_filename: if filename and diff: patches.append((filename, ''.join(diff))) filename = new_filename diff = [line] continue if diff is not None: diff.append(line) if filename and diff: patches.append((filename, ''.join(diff))) return patches
Splits a patch into separate pieces for each file. Args: data: A string containing the output of svn diff. Returns: A list of 2-tuple (filename, text) where text is the svn diff output pertaining to filename.
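A toy input/output sketch (the diff text is illustrative):

data = (
    "Index: foo.py\n"
    "--- foo.py\t(revision 1)\n"
    "+++ foo.py\t(working copy)\n"
    "@@ -1 +1 @@\n"
    "-a\n"
    "+b\n"
)
# SplitPatch(data) == [('foo.py', data)] -- the 'Index:' line is kept in the diff text.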
def cfht_megacam_tap_query(ra_deg=180.0, dec_deg=0.0, width=1, height=1, date=None): """Do a query of the CADC Megacam table. Get all observations inside the box (right now it turns width/height into a radius, should not do this). @rtype : Table @param ra_deg: center of search region, in degrees @param dec_deg: center of search region in degrees @param width: width of search region in degrees @param height: height of search region in degrees @param date: ISO format date string. Query will be +/- 0.5 days from date given. """ radius = min(90, max(width, height) / 2.0) query = ("SELECT " "COORD1(CENTROID(Plane.position_bounds)) AS RAJ2000," "COORD2(CENTROID(Plane.position_bounds)) AS DEJ2000," "target_name " "FROM " "caom2.Observation as o " "JOIN caom2.Plane as Plane on o.obsID=Plane.obsID " "WHERE o.collection = 'CFHT' " "AND o.instrument_name = 'MegaPrime' " "AND INTERSECTS( CIRCLE('ICRS', %f, %f, %f), Plane.position_bounds ) = 1") query = query % (ra_deg, dec_deg, radius) if date is not None: mjd = Time(date, scale='utc').mjd query += " AND Plane.time_bounds_lower <= {} AND {} <= Plane.time_bounds_upper ".format(mjd+0.5, mjd-0.5) data = {"QUERY": query, "REQUEST": "doQuery", "LANG": "ADQL", "FORMAT": "votable"} url = "http://www.cadc.hia.nrc.gc.ca/tap/sync" warnings.simplefilter('ignore') ff = StringIO(requests.get(url, params=data).content) ff.seek(0) table = votable.parse(ff).get_first_table().to_table() assert isinstance(table, Table) return table
Do a query of the CADC Megacam table. Get all observations inside the box (right now it turns width/height into a radius, should not do this). @rtype : Table @param ra_deg: center of search region, in degrees @param dec_deg: center of search region in degrees @param width: width of search region in degrees @param height: height of search region in degrees @param date: ISO format date string. Query will be +/- 0.5 days from date given.
def getmoduleinfo(path): """Get the module name, suffix, mode, and module type for a given file.""" filename = os.path.basename(path) suffixes = map(lambda (suffix, mode, mtype): (-len(suffix), suffix, mode, mtype), imp.get_suffixes()) suffixes.sort() # try longest suffixes first, in case they overlap for neglen, suffix, mode, mtype in suffixes: if filename[neglen:] == suffix: return filename[:neglen], suffix, mode, mtype
Get the module name, suffix, mode, and module type for a given file.
def _compute(self): """Computes the state variable tendencies in time for implicit processes. To calculate the new state, the :func:`_implicit_solver()` method, implemented by daughter classes, is called. This, however, returns the new state of the variables, not just the tendencies. Therefore, the adjustment is calculated, which is the difference between the new and the old state, and stored in the object's attribute adjustment. Calculating the new model states through solving the matrix problem already includes the multiplication with the timestep. The derived adjustment is divided by the timestep to calculate the implicit subprocess tendencies, which can be handled by the :func:`~climlab.process.time_dependent_process.TimeDependentProcess.compute` method of the parent :class:`~climlab.process.time_dependent_process.TimeDependentProcess` class. :ivar dict adjustment: holding all state variables' adjustments of the implicit process which are the differences between the new states (which have been solved through matrix inversion) and the old states. """ newstate = self._implicit_solver() adjustment = {} tendencies = {} for name, var in self.state.items(): adjustment[name] = newstate[name] - var tendencies[name] = adjustment[name] / self.timestep # express the adjustment (already accounting for the finite time step) # as a tendency per unit time, so that it can be applied along with explicit self.adjustment = adjustment self._update_diagnostics(newstate) return tendencies
Computes the state variable tendencies in time for implicit processes. To calculate the new state, the :func:`_implicit_solver()` method, implemented by daughter classes, is called. This, however, returns the new state of the variables, not just the tendencies. Therefore, the adjustment is calculated, which is the difference between the new and the old state, and stored in the object's attribute adjustment. Calculating the new model states through solving the matrix problem already includes the multiplication with the timestep. The derived adjustment is divided by the timestep to calculate the implicit subprocess tendencies, which can be handled by the :func:`~climlab.process.time_dependent_process.TimeDependentProcess.compute` method of the parent :class:`~climlab.process.time_dependent_process.TimeDependentProcess` class. :ivar dict adjustment: holding all state variables' adjustments of the implicit process which are the differences between the new states (which have been solved through matrix inversion) and the old states.
def from_argparse(cls, args): """Generate the Settings from parsed arguments.""" settings = vars(args) settings['repository_name'] = settings.pop('repository') settings['cacert'] = settings.pop('cert') return cls(**settings)
Generate the Settings from parsed arguments.
def initialize_simulation(components: List, input_config: Mapping=None, plugin_config: Mapping=None) -> InteractiveContext: """Construct a simulation from a list of components, component configuration, and a plugin configuration. The simulation context returned by this method still needs to be set up by calling its setup method. It is mostly useful for testing and debugging. Parameters ---------- components A list of initialized simulation components. Corresponds to the components block of a model specification. input_config A nested dictionary with any additional simulation configuration information needed. Corresponds to the configuration block of a model specification. plugin_config A dictionary containing a description of any simulation plugins to include in the simulation. If you're using this argument, you're either deep in the process of simulation development or the maintainers have done something wrong. Corresponds to the plugins block of a model specification. Returns ------- An initialized (but not set up) simulation context. """ config = build_simulation_configuration() config.update(input_config) plugin_manager = PluginManager(plugin_config) return InteractiveContext(config, components, plugin_manager)
Construct a simulation from a list of components, component configuration, and a plugin configuration. The simulation context returned by this method still needs to be set up by calling its setup method. It is mostly useful for testing and debugging. Parameters ---------- components A list of initialized simulation components. Corresponds to the components block of a model specification. input_config A nested dictionary with any additional simulation configuration information needed. Corresponds to the configuration block of a model specification. plugin_config A dictionary containing a description of any simulation plugins to include in the simulation. If you're using this argument, you're either deep in the process of simulation development or the maintainers have done something wrong. Corresponds to the plugins block of a model specification. Returns ------- An initialized (but not set up) simulation context.
def ref_frequency(self, context): """ Reference frequency data source """ num_chans = self._manager.spectral_window_table.getcol(MS.NUM_CHAN) ref_freqs = self._manager.spectral_window_table.getcol(MS.REF_FREQUENCY) data = np.hstack((np.repeat(rf, bs) for bs, rf in zip(num_chans, ref_freqs))) return data.reshape(context.shape).astype(context.dtype)
Reference frequency data source
def get(self, block_alias, context): """Main method returning block contents (static or dynamic).""" contents = [] dynamic_block_contents = self.get_contents_dynamic(block_alias, context) if dynamic_block_contents: contents.append(dynamic_block_contents) static_block_contents = self.get_contents_static(block_alias, context) if static_block_contents: contents.append(static_block_contents) if not contents: return '' return choice(contents)
Main method returning block contents (static or dynamic).
def get_solution(self, parameters=None): """stub""" if not self.has_solution(): raise IllegalState() return DisplayText(self.my_osid_object._my_map['solution'])
stub
def select(self): """Store the selected taskfileinfo in self.selected and accept the dialog :returns: None :rtype: None :raises: None """ s = self.browser.selected_indexes(self.browser.get_depth()-1) if not s: return i = s[0].internalPointer() if i: tfi = i.internal_data() self.selected = tfi self.accept()
Store the selected taskfileinfo in self.selected and accept the dialog :returns: None :rtype: None :raises: None
def circle(rad=0.5): """Draw a circle""" _ctx = _state["ctx"] _ctx.arc(0, 0, rad, 0, 2 * math.pi) _ctx.set_line_width(0) _ctx.stroke_preserve() # _ctx.set_source_rgb(0.3, 0.4, 0.6) _ctx.fill()
Draw a circle
def pivot(self, index, column, value): """ Pivot the frame designated by the three columns: index, column, and value. Index and column should be of type enum, int, or time. For cases of multiple indexes for a column label, the aggregation method is to pick the first occurrence in the data frame. :param index: Index is a column that will be the row label :param column: The labels for the columns in the pivoted Frame :param value: The column of values for the given index and column label :returns: An H2OFrame containing the pivoted columns. """ assert_is_type(index, str) assert_is_type(column, str) assert_is_type(value, str) col_names = self.names if index not in col_names: raise H2OValueError("Index not in H2OFrame") if column not in col_names: raise H2OValueError("Column not in H2OFrame") if value not in col_names: raise H2OValueError("Value column not in H2OFrame") if self.type(column) not in ["enum","time","int"]: raise H2OValueError("'column' argument is not type enum, time or int") if self.type(index) not in ["enum","time","int"]: raise H2OValueError("'index' argument is not type enum, time or int") return H2OFrame._expr(expr=ExprNode("pivot",self,index,column,value))
Pivot the frame designated by the three columns: index, column, and value. Index and column should be of type enum, int, or time. For cases of multiple indexes for a column label, the aggregation method is to pick the first occurrence in the data frame. :param index: Index is a column that will be the row label :param column: The labels for the columns in the pivoted Frame :param value: The column of values for the given index and column label :returns: An H2OFrame containing the pivoted columns.
def plot_fit(self, **kwargs): """ Plots the fit of the model Returns ---------- None (plots data and the fit) """ import matplotlib.pyplot as plt import seaborn as sns figsize = kwargs.get('figsize',(10,7)) if self.latent_variables.estimated is False: raise Exception("No latent variables estimated!") else: plt.figure(figsize=figsize) date_index = self.index[max(self.p, self.q):] t_params = self.transform_z() sigma2, Y, ___ = self._model(self.latent_variables.get_z_values()) plt.plot(date_index, np.abs(Y-t_params[-1]), label=self.data_name + ' Absolute Demeaned Values') plt.plot(date_index, np.power(sigma2,0.5), label='GARCH(' + str(self.p) + ',' + str(self.q) + ') Conditional Volatility',c='black') plt.title(self.data_name + " Volatility Plot") plt.legend(loc=2) plt.show()
Plots the fit of the model Returns ---------- None (plots data and the fit)
def _update_estimate_and_sampler(self, ell, ell_hat, weight, extra_info, **kwargs): """Update the BB models and the estimates""" stratum_idx = extra_info['stratum'] self._BB_TP.update(ell*ell_hat, stratum_idx) self._BB_PP.update(ell_hat, stratum_idx) self._BB_P.update(ell, stratum_idx) # Update model covariance matrix for stratum_idx self._update_cov_model(strata_to_update = [stratum_idx]) # Update F-measure estimate, estimator variance, exp. variance decrease self._update_estimates()
Update the BB models and the estimates
def _overlapping(files): """Quick method to see if a file list contains overlapping files """ segments = set() for path in files: seg = file_segment(path) for s in segments: if seg.intersects(s): return True segments.add(seg) return False
Quick method to see if a file list contains overlapping files
def env_set(context): """Set $ENVs to specified string. from the pypyr context. Args: context: is dictionary-like. context is mandatory. context['env']['set'] must exist. It's a dictionary. Values are strings to write to $ENV. Keys are the names of the $ENV values to which to write. For example, say input context is: key1: value1 key2: value2 key3: value3 env: set: MYVAR1: {key1} MYVAR2: before_{key3}_after MYVAR3: arbtexthere This will result in the following $ENVs: $MYVAR1 = value1 $MYVAR2 = before_value3_after $MYVAR3 = arbtexthere Note that the $ENVs are not persisted system-wide, they only exist for pypyr sub-processes, and as such for the following steps during this pypyr pipeline execution. If you set an $ENV here, don't expect to see it in your system environment variables after the pipeline finishes running. """ env_set = context['env'].get('set', None) exists = False if env_set: logger.debug("started") for k, v in env_set.items(): logger.debug(f"setting ${k} to context[{v}]") os.environ[k] = context.get_formatted_string(v) logger.info(f"set {len(env_set)} $ENVs from context.") exists = True logger.debug("done") return exists
Set $ENVs to specified string. from the pypyr context. Args: context: is dictionary-like. context is mandatory. context['env']['set'] must exist. It's a dictionary. Values are strings to write to $ENV. Keys are the names of the $ENV values to which to write. For example, say input context is: key1: value1 key2: value2 key3: value3 env: set: MYVAR1: {key1} MYVAR2: before_{key3}_after MYVAR3: arbtexthere This will result in the following $ENVs: $MYVAR1 = value1 $MYVAR2 = before_value3_after $MYVAR3 = arbtexthere Note that the $ENVs are not persisted system-wide, they only exist for pypyr sub-processes, and as such for the following steps during this pypyr pipeline execution. If you set an $ENV here, don't expect to see it in your system environment variables after the pipeline finishes running.
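A minimal sketch of driving the step directly; pypyr's real Context supplies get_formatted_string, so a stand-in is faked here (it also assumes the module's logger and os imports are in scope):

import os

class FakeContext(dict):  # stand-in for pypyr's Context, for illustration only
    def get_formatted_string(self, s):
        return s.format(**self)

context = FakeContext(key1="value1", env={"set": {"MYVAR1": "{key1}"}})
env_set(context)           # returns True because env.set exists
assert os.environ["MYVAR1"] == "value1"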
def render_honeypot_field(field_name=None): """ Renders honeypot field named field_name (defaults to HONEYPOT_FIELD_NAME). """ if not field_name: field_name = settings.HONEYPOT_FIELD_NAME value = getattr(settings, 'HONEYPOT_VALUE', '') if callable(value): value = value() return {'fieldname': field_name, 'value': value}
Renders honeypot field named field_name (defaults to HONEYPOT_FIELD_NAME).
def validate(self, value): """Validate string by regex :param value: str :return: """ if not self._compiled_regex.match(value): raise ValidationError( 'value {:s} not match r"{:s}"'.format(value, self._regex))
Validate string by regex :param value: str :return:
def meta_features_path(self, path): """Returns path for meta-features Args: path (str): Absolute/local path of xcessiv folder """ return os.path.join( path, app.config['XCESSIV_META_FEATURES_FOLDER'], str(self.id) ) + '.npy'
Returns path for meta-features Args: path (str): Absolute/local path of xcessiv folder
def _parse_CHANLIMIT(value): """ >>> res = FeatureSet._parse_CHANLIMIT('ibe:250,xyz:100') >>> len(res) 6 >>> res['x'] 100 >>> res['i'] == res['b'] == res['e'] == 250 True """ pairs = map(string_int_pair, value.split(',')) return dict( (target, number) for target_keys, number in pairs for target in target_keys )
>>> res = FeatureSet._parse_CHANLIMIT('ibe:250,xyz:100') >>> len(res) 6 >>> res['x'] 100 >>> res['i'] == res['b'] == res['e'] == 250 True
def WriteSignedBinary(binary_urn, binary_content, private_key, public_key, chunk_size = 1024, token = None): """Signs a binary and saves it to the datastore. If a signed binary with the given URN already exists, its contents will get overwritten. Args: binary_urn: URN that should serve as a unique identifier for the binary. binary_content: Contents of the binary, as raw bytes. private_key: Key that should be used for signing the binary contents. public_key: Key that should be used to verify the signature generated using the private key. chunk_size: Size, in bytes, of the individual blobs that the binary contents will be split to before saving to the datastore. token: ACL token to use with the legacy (non-relational) datastore. """ if _ShouldUseLegacyDatastore(): collects.GRRSignedBlob.NewFromContent( binary_content, binary_urn, chunk_size=chunk_size, token=token, private_key=private_key, public_key=public_key) if data_store.RelationalDBEnabled(): blob_references = rdf_objects.BlobReferences() for chunk_offset in range(0, len(binary_content), chunk_size): chunk = binary_content[chunk_offset:chunk_offset + chunk_size] blob_rdf = rdf_crypto.SignedBlob() blob_rdf.Sign(chunk, private_key, verify_key=public_key) blob_id = data_store.BLOBS.WriteBlobWithUnknownHash( blob_rdf.SerializeToString()) blob_references.items.Append( rdf_objects.BlobReference( offset=chunk_offset, size=len(chunk), blob_id=blob_id)) data_store.REL_DB.WriteSignedBinaryReferences( _SignedBinaryIDFromURN(binary_urn), blob_references)
Signs a binary and saves it to the datastore. If a signed binary with the given URN already exists, its contents will get overwritten. Args: binary_urn: URN that should serve as a unique identifier for the binary. binary_content: Contents of the binary, as raw bytes. private_key: Key that should be used for signing the binary contents. public_key: Key that should be used to verify the signature generated using the private key. chunk_size: Size, in bytes, of the individual blobs that the binary contents will be split to before saving to the datastore. token: ACL token to use with the legacy (non-relational) datastore.
def from_str(cls, emotestr): """Create an emote from the emote tag key :param emotestr: the tag key, e.g. ``'123:0-4'`` :type emotestr: :class:`str` :returns: an emote :rtype: :class:`Emote` :raises: None """ emoteid, occstr = emotestr.split(':') occurences = [] for occ in occstr.split(','): start, end = occ.split('-') occurences.append((int(start), int(end))) return cls(int(emoteid), occurences)
Create an emote from the emote tag key :param emotestr: the tag key, e.g. ``'123:0-4'`` :type emotestr: :class:`str` :returns: an emote :rtype: :class:`Emote` :raises: None
def maybe_center_plot(result): """Embeds a possible tikz image inside a center environment. Searches for the matplotlib2tikz comment line to detect tikz images. Args: result: The code execution result Returns: The input result if no tikzpicture was found, otherwise a centered version. """ begin = re.search('(% .* matplotlib2tikz v.*)', result) if begin: result = ('\\begin{center}\n' + result[begin.end():] + '\n\\end{center}') return result
Embeds a possible tikz image inside a center environment. Searches for the matplotlib2tikz comment line to detect tikz images. Args: result: The code execution result Returns: The input result if no tikzpicture was found, otherwise a centered version.
def reverse( self, query, radius=None, exactly_one=True, maxresults=None, pageinformation=None, language=None, mode='retrieveAddresses', timeout=DEFAULT_SENTINEL ): """ Return an address by location point. This implementation supports only a subset of all available parameters. A list of all parameters of the pure REST API is available here: https://developer.here.com/documentation/geocoder/topics/resource-reverse-geocode.html :param query: The coordinates for which you wish to obtain the closest human-readable addresses. :type query: :class:`geopy.point.Point`, list or tuple of ``(latitude, longitude)``, or string as ``"%(latitude)s, %(longitude)s"``. :param float radius: Proximity radius in meters. :param bool exactly_one: Return one result or a list of results, if available. :param int maxresults: Defines the maximum number of items in the response structure. If not provided and there are multiple results the HERE API will return 10 results by default. This will be reset to one if ``exactly_one`` is True. :param int pageinformation: A key which identifies the page to be returned when the response is separated into multiple pages. Only useful when ``maxresults`` is also provided. :param str language: Affects the language of the response, must be a RFC 4647 language code, e.g. 'en-US'. :param str mode: Affects the type of returned response items, must be one of: 'retrieveAddresses' (default), 'retrieveAreas', 'retrieveLandmarks', 'retrieveAll', or 'trackPosition'. See online documentation for more information. :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :rtype: ``None``, :class:`geopy.location.Location` or a list of them, if ``exactly_one=False``. """ point = self._coerce_point_to_string(query) params = { 'app_id': self.app_id, 'app_code': self.app_code, 'mode': mode, 'prox': point, } if radius is not None: params['prox'] = '%s,%s' % (params['prox'], float(radius)) if pageinformation: params['pageinformation'] = pageinformation if maxresults: params['maxresults'] = maxresults if exactly_one: params['maxresults'] = 1 if language: params['language'] = language url = "%s?%s" % (self.reverse_api, urlencode(params)) logger.debug("%s.reverse: %s", self.__class__.__name__, url) return self._parse_json( self._call_geocoder(url, timeout=timeout), exactly_one )
Return an address by location point. This implementation supports only a subset of all available parameters. A list of all parameters of the pure REST API is available here: https://developer.here.com/documentation/geocoder/topics/resource-reverse-geocode.html :param query: The coordinates for which you wish to obtain the closest human-readable addresses. :type query: :class:`geopy.point.Point`, list or tuple of ``(latitude, longitude)``, or string as ``"%(latitude)s, %(longitude)s"``. :param float radius: Proximity radius in meters. :param bool exactly_one: Return one result or a list of results, if available. :param int maxresults: Defines the maximum number of items in the response structure. If not provided and there are multiple results the HERE API will return 10 results by default. This will be reset to one if ``exactly_one`` is True. :param int pageinformation: A key which identifies the page to be returned when the response is separated into multiple pages. Only useful when ``maxresults`` is also provided. :param str language: Affects the language of the response, must be a RFC 4647 language code, e.g. 'en-US'. :param str mode: Affects the type of returned response items, must be one of: 'retrieveAddresses' (default), 'retrieveAreas', 'retrieveLandmarks', 'retrieveAll', or 'trackPosition'. See online documentation for more information. :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :rtype: ``None``, :class:`geopy.location.Location` or a list of them, if ``exactly_one=False``.
def load(self, text, fieldnames=None): """Item from TSV representation.""" lines = text.split('\n') fieldnames = load_line(lines[0]) values = load_line(lines[1]) self.__dict__ = dict(zip(fieldnames, values))
Item from TSV representation.
def _redirect_with_params(url_name, *args, **kwargs): """Helper method to create a redirect response with URL params. This builds a redirect string that converts kwargs into a query string. Args: url_name: The name of the url to redirect to. kwargs: the query string param and their values to build. Returns: A properly formatted redirect string. """ url = urlresolvers.reverse(url_name, args=args) params = parse.urlencode(kwargs, True) return "{0}?{1}".format(url, params)
Helper method to create a redirect response with URL params. This builds a redirect string that converts kwargs into a query string. Args: url_name: The name of the url to redirect to. kwargs: the query string param and their values to build. Returns: A properly formatted redirect string.
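An illustrative call, assuming a URL pattern named 'login' that reverses to '/login/':

# _redirect_with_params('login', next='/home/')
# -> '/login/?next=%2Fhome%2F'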
def json_to_pages(json, user, preferred_lang=None): """ Attempt to create/update pages from JSON string json. user is the user that will be used when creating a page if a page's original author can't be found. preferred_lang is the language code of the slugs to include in error messages (defaults to settings.PAGE_DEFAULT_LANGUAGE). Returns (errors, pages_created) where errors is a list of strings and pages_created is a list of (page object, created bool, messages list of strings) tuples. If any errors are detected, the error list will contain information for the user and no pages will be created/updated. """ from .models import Page if not preferred_lang: preferred_lang = settings.PAGE_DEFAULT_LANGUAGE d = simplejson.loads(json) try: errors = validate_pages_json_data(d, preferred_lang) except KeyError as e: errors = [_('JSON file is invalid: %s') % (e.args[0],)] pages_created = [] if not errors: # pass one for p in d['pages']: pages_created.append( Page.objects.create_and_update_from_json_data(p, user)) # pass two for p, results in zip(d['pages'], pages_created): page, created, messages = results rtcs = p['redirect_to_complete_slug'] if rtcs: messages.extend(page.update_redirect_to_from_json(rtcs)) # clean up MPTT links #Page.objects.rebuild() return errors, pages_created
Attempt to create/update pages from JSON string json. user is the user that will be used when creating a page if a page's original author can't be found. preferred_lang is the language code of the slugs to include in error messages (defaults to settings.PAGE_DEFAULT_LANGUAGE). Returns (errors, pages_created) where errors is a list of strings and pages_created is a list of (page object, created bool, messages list of strings) tuples. If any errors are detected, the error list will contain information for the user and no pages will be created/updated.
def parse_arg(f, kwd, offset=0): """ Convert a dictionary of keyword arguments into the equivalent tuple of positional arguments. Equivalent to:: vnames = describe(f) return tuple([kwd[k] for k in vnames[offset:]]) """ vnames = describe(f) return tuple([kwd[k] for k in vnames[offset:]])
Convert a dictionary of keyword arguments into the equivalent tuple of positional arguments. Equivalent to:: vnames = describe(f) return tuple([kwd[k] for k in vnames[offset:]])
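A worked example, assuming describe(f) returns the argument names in order:

# def f(x, y, z): ...
# parse_arg(f, {'x': 1, 'y': 2, 'z': 3}, offset=1) -> (2, 3)
# offset=1 skips 'x', so only 'y' and 'z' are looked up in kwd.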
def loadtxt2(fname, dtype=None, delimiter=' ', newline='\n', comment_character='#', skiplines=0): """ Known issues: the delimiter and newline arguments are not respected; string quotation containing spaces is broken. """ dtypert = [None, None, None] def preparedtype(dtype): dtypert[0] = dtype flatten = flatten_dtype(dtype) dtypert[1] = flatten dtypert[2] = numpy.dtype([('a', (numpy.int8, flatten.itemsize))]) buf = numpy.empty((), dtype=dtypert[1]) converters = [_default_conv[flatten[name].char] for name in flatten.names] return buf, converters, flatten.names def fileiter(fh): converters = [] buf = None if dtype is not None: buf, converters, names = preparedtype(dtype) yield None for lineno, line in enumerate(fh): if lineno < skiplines: continue if line[0] in comment_character: if buf is None and line[1] == '?': ddtype = pickle.loads(base64.b64decode(line[2:])) buf, converters, names = preparedtype(ddtype) yield None continue for word, c, name in zip(line.split(), converters, names): buf[name] = c(word) buf2 = buf.copy().view(dtype=dtypert[2]) yield buf2 if isinstance(fname, basestring): fh = file(fname, 'r') cleanup = lambda : fh.close() else: fh = iter(fname) cleanup = lambda : None try: i = fileiter(fh) i.next() return numpy.fromiter(i, dtype=dtypert[2]).view(dtype=dtypert[0]) finally: cleanup()
Known issues: the delimiter and newline arguments are not respected; string quotation containing spaces is broken.
def setup_icons(self, ): """Set all icons on buttons :returns: None :rtype: None :raises: None """ plus_icon = get_icon('glyphicons_433_plus_bright.png', asicon=True) self.addnew_tb.setIcon(plus_icon)
Set all icons on buttons :returns: None :rtype: None :raises: None
def get_status(task, prefix, expnum, version, ccd, return_message=False): """ Report back the status of the given program by looking up the associated VOSpace annotation. @param task: name of the process or task that will be checked. @param prefix: prefix of the file that was processed (often fk or None) @param expnum: which exposure number (or base filename) @param version: which version of that exposure (p, s, o) @param ccd: which CCD within the exposure. @param return_message: return what the tag said, or just True/False for Success/Failure? @return: the status of the processing based on the annotation value. """ key = get_process_tag(prefix+task, ccd, version) status = get_tag(expnum, key) logger.debug('%s: %s' % (key, status)) if return_message: return status else: return status == SUCCESS
Report back the status of the given program by looking up the associated VOSpace annotation. @param task: name of the process or task that will be checked. @param prefix: prefix of the file that was processed (often fk or None) @param expnum: which exposure number (or base filename) @param version: which version of that exposure (p, s, o) @param ccd: which CCD within the exposure. @param return_message: return what the tag said, or just True/False for Success/Failure? @return: the status of the processing based on the annotation value.
def is_callable_type(tp): """Test if the type is a generic callable type, including subclasses excluding non-generic types and callables. Examples:: is_callable_type(int) == False is_callable_type(type) == False is_callable_type(Callable) == True is_callable_type(Callable[..., int]) == True is_callable_type(Callable[[int, int], Iterable[str]]) == True class MyClass(Callable[[int], int]): ... is_callable_type(MyClass) == True For more general tests use callable(), for more precise test (excluding subclasses) use:: get_origin(tp) is collections.abc.Callable # Callable prior to Python 3.7 """ if NEW_TYPING: return (tp is Callable or isinstance(tp, _GenericAlias) and tp.__origin__ is collections.abc.Callable or isinstance(tp, type) and issubclass(tp, Generic) and issubclass(tp, collections.abc.Callable)) return type(tp) is CallableMeta
Test if the type is a generic callable type, including subclasses excluding non-generic types and callables. Examples:: is_callable_type(int) == False is_callable_type(type) == False is_callable_type(Callable) == True is_callable_type(Callable[..., int]) == True is_callable_type(Callable[[int, int], Iterable[str]]) == True class MyClass(Callable[[int], int]): ... is_callable_type(MyClass) == True For more general tests use callable(), for more precise test (excluding subclasses) use:: get_origin(tp) is collections.abc.Callable # Callable prior to Python 3.7
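The more precise origin check mentioned at the end of the docstring, spelled out for Python 3.8+ (typing.get_origin first appeared in 3.8):

import collections.abc
from typing import Callable, get_origin

assert get_origin(Callable[..., int]) is collections.abc.Callable
assert get_origin(int) is None  # plain types have no origin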
def transform_audio(self, y): '''Compute the tempogram Parameters ---------- y : np.ndarray Audio buffer Returns ------- data : dict data['tempogram'] : np.ndarray, shape=(n_frames, win_length) The tempogram ''' n_frames = self.n_frames(get_duration(y=y, sr=self.sr)) tgram = tempogram(y=y, sr=self.sr, hop_length=self.hop_length, win_length=self.win_length).astype(np.float32) tgram = fix_length(tgram, n_frames) return {'tempogram': tgram.T[self.idx]}
Compute the tempogram Parameters ---------- y : np.ndarray Audio buffer Returns ------- data : dict data['tempogram'] : np.ndarray, shape=(n_frames, win_length) The tempogram
def body(self): """ Return body request parameter :return: Body parameter :rtype: Parameter or None """ body = self.get_parameters_by_location(['body']) return self.root.schemas.get(body[0].type) if body else None
Return body request parameter :return: Body parameter :rtype: Parameter or None
def _hook_unmapped(self, uc, access, address, size, value, data): """ We hit an unmapped region; map it into unicorn. """ try: self.sync_unicorn_to_manticore() logger.warning(f"Encountered an operation on unmapped memory at {hex(address)}") m = self._cpu.memory.map_containing(address) self.copy_memory(m.start, m.end - m.start) except MemoryException as e: logger.error("Failed to map memory {}-{}, ({}): {}".format(hex(address), hex(address + size), access, e)) self._to_raise = e self._should_try_again = False return False self._should_try_again = True return False
We hit an unmapped region; map it into unicorn.
def at_time(self, time, asof=False, axis=None): """ Select values at particular time of day (e.g. 9:30AM). Parameters ---------- time : datetime.time or str axis : {0 or 'index', 1 or 'columns'}, default 0 .. versionadded:: 0.24.0 Returns ------- Series or DataFrame Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- between_time : Select values between particular times of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_at_time : Get just the index locations for values at particular time of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='12H') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 00:00:00 1 2018-04-09 12:00:00 2 2018-04-10 00:00:00 3 2018-04-10 12:00:00 4 >>> ts.at_time('12:00') A 2018-04-09 12:00:00 2 2018-04-10 12:00:00 4 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) index = self._get_axis(axis) try: indexer = index.indexer_at_time(time, asof=asof) except AttributeError: raise TypeError('Index must be DatetimeIndex') return self._take(indexer, axis=axis)
Select values at particular time of day (e.g. 9:30AM). Parameters ---------- time : datetime.time or str axis : {0 or 'index', 1 or 'columns'}, default 0 .. versionadded:: 0.24.0 Returns ------- Series or DataFrame Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- between_time : Select values between particular times of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_at_time : Get just the index locations for values at particular time of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='12H') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 00:00:00 1 2018-04-09 12:00:00 2 2018-04-10 00:00:00 3 2018-04-10 12:00:00 4 >>> ts.at_time('12:00') A 2018-04-09 12:00:00 2 2018-04-10 12:00:00 4
def _clean_flags(args, caller): ''' Sanitize flags passed into df ''' flags = '' if args is None: return flags allowed = ('a', 'B', 'h', 'H', 'i', 'k', 'l', 'P', 't', 'T', 'x', 'v') for flag in args: if flag in allowed: flags += flag else: raise CommandExecutionError( 'Invalid flag passed to {0}'.format(caller) ) return flags
Sanitize flags passed into df
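Behavior sketch:

# _clean_flags('hT', 'disk.usage')  -> 'hT'
# _clean_flags('hZ', 'disk.usage')  -> raises CommandExecutionError('Invalid flag passed to disk.usage')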
def _gl_initialize(self): """ Deal with compatibility; desktop does not have sprites enabled by default. ES does. """ if '.es' in gl.current_backend.__name__: pass # ES2: no action required else: # Desktop, enable sprites GL_VERTEX_PROGRAM_POINT_SIZE = 34370 GL_POINT_SPRITE = 34913 gl.glEnable(GL_VERTEX_PROGRAM_POINT_SIZE) gl.glEnable(GL_POINT_SPRITE) if self.capabilities['max_texture_size'] is None: # only do once self.capabilities['gl_version'] = gl.glGetParameter(gl.GL_VERSION) self.capabilities['max_texture_size'] = \ gl.glGetParameter(gl.GL_MAX_TEXTURE_SIZE) this_version = self.capabilities['gl_version'].split(' ')[0] this_version = LooseVersion(this_version)
Deal with compatibility; desktop does not have sprites enabled by default. ES does.
def xd(self): """get xarray dataset file handle to LSM files""" if self._xd is None: path_to_lsm_files = path.join(self.lsm_input_folder_path, self.lsm_search_card) self._xd = pa.open_mfdataset(path_to_lsm_files, lat_var=self.lsm_lat_var, lon_var=self.lsm_lon_var, time_var=self.lsm_time_var, lat_dim=self.lsm_lat_dim, lon_dim=self.lsm_lon_dim, time_dim=self.lsm_time_dim, loader=self.pangaea_loader) self.lsm_time_dim = 'time' self.lsm_time_var = 'time' return self._xd
get xarray dataset file handle to LSM files
def flip(self, reactions): """Flip the specified reactions.""" for reaction in reactions: if reaction in self._flipped: self._flipped.remove(reaction) else: self._flipped.add(reaction)
Flip the specified reactions.
def exec(self, container: Container, command: str, context: Optional[str] = None, stdout: bool = True, stderr: bool = False, time_limit: Optional[int] = None ) -> ExecResponse: """ Executes a given command inside a provided container. Parameters: container: the container to which the command should be issued. command: the command that should be executed. context: the working directory that should be used to perform the execution. If no context is provided, then the command will be executed at the root of the container. stdout: specifies whether or not output to the stdout should be included in the execution summary. stderr: specifies whether or not output to the stderr should be included in the execution summary. time_limit: an optional time limit that is applied to the execution. If the command fails to execute within the time limit, the command will be aborted and treated as a failure. Returns: a summary of the outcome of the execution. Raises: KeyError: if the container no longer exists on the server. """ # FIXME perhaps these should be encoded as path variables? payload = { 'command': command, 'context': context, 'stdout': stdout, 'stderr': stderr, 'time-limit': time_limit } path = "containers/{}/exec".format(container.uid) r = self.__api.post(path, json=payload) if r.status_code == 200: return ExecResponse.from_dict(r.json()) if r.status_code == 404: raise KeyError("no container found with given UID: {}".format(container.uid)) self.__api.handle_erroneous_response(r)
Executes a given command inside a provided container. Parameters: container: the container to which the command should be issued. command: the command that should be executed. context: the working directory that should be used to perform the execution. If no context is provided, then the command will be executed at the root of the container. stdout: specifies whether or not output to the stdout should be included in the execution summary. stderr: specifies whether or not output to the stderr should be included in the execution summary. time_limit: an optional time limit that is applied to the execution. If the command fails to execute within the time limit, the command will be aborted and treated as a failure. Returns: a summary of the outcome of the execution. Raises: KeyError: if the container no longer exists on the server.
def start(): """Start the CherryPy application server.""" setupdir = dirname(dirname(__file__)) curdir = os.getcwd() # First look on the command line for a desired config file, # if it's not on the command line, then look for 'setup.py' # in the current directory. If there, load configuration # from a file called 'dev.cfg'. If it's not there, the project # is probably installed and we'll look first for a file called # 'prod.cfg' in the current directory and then for a default # config file called 'default.cfg' packaged in the egg. if len(sys.argv) > 1: configfile = sys.argv[1] elif exists(join(setupdir, "setup.py")): configfile = join(setupdir, "dev.cfg") elif exists(join(curdir, "prod.cfg")): configfile = join(curdir, "prod.cfg") else: try: configfile = pkg_resources.resource_filename( pkg_resources.Requirement.parse("tgpisa"), "config/default.cfg") except pkg_resources.DistributionNotFound: raise ConfigurationError("Could not find default configuration.") turbogears.update_config(configfile=configfile, modulename="tgpisa.config") from tgpisa.controllers import Root turbogears.start_server(Root())
Start the CherryPy application server.
def open(self): """ Open a connection to the AMQP compliant broker. """ self._connection = \ amqp.Connection(host='%s:%s' % (self.hostname, self.port), userid=self.username, password=self.password, virtual_host=self.virtual_host, insist=False) self.channel = self._connection.channel()
Open a connection to the AMQP compliant broker.
def bin_b64_type(arg): """An argparse type representing binary data encoded in base64.""" try: arg = base64.standard_b64decode(arg) except (binascii.Error, TypeError): raise argparse.ArgumentTypeError("{0} is invalid base64 data".format(repr(arg))) return arg
An argparse type representing binary data encoded in base64.
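Wiring the type into an argparse parser (a sketch; the option name is illustrative):

import argparse
import base64

parser = argparse.ArgumentParser()
parser.add_argument("--key", type=bin_b64_type)
args = parser.parse_args(["--key", base64.standard_b64encode(b"secret").decode()])
assert args.key == b"secret"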
def export(name: str, value: Any): """ Exports a named stack output. :param str name: The name to assign to this output. :param Any value: The value of this output. """ stack = get_root_resource() if stack is not None: stack.output(name, value)
Exports a named stack output. :param str name: The name to assign to this output. :param Any value: The value of this output.
def _raise_for_status(response): """Raises stored :class:`HTTPError`, if one occurred. This is the :meth:`requests.models.Response.raise_for_status` method, modified to add the response from Space-Track, if given. """ http_error_msg = '' if 400 <= response.status_code < 500: http_error_msg = '%s Client Error: %s for url: %s' % ( response.status_code, response.reason, response.url) elif 500 <= response.status_code < 600: http_error_msg = '%s Server Error: %s for url: %s' % ( response.status_code, response.reason, response.url) if http_error_msg: spacetrack_error_msg = None try: json = response.json() if isinstance(json, Mapping): spacetrack_error_msg = json['error'] except (ValueError, KeyError): pass if not spacetrack_error_msg: spacetrack_error_msg = response.text if spacetrack_error_msg: http_error_msg += '\nSpace-Track response:\n' + spacetrack_error_msg raise requests.HTTPError(http_error_msg, response=response)
Raises stored :class:`HTTPError`, if one occurred. This is the :meth:`requests.models.Response.raise_for_status` method, modified to add the response from Space-Track, if given.
def load_terms(self, terms): """Create a builder from a sequence of terms, usually a TermInterpreter""" #if self.root and len(self.root.children) > 0: # raise MetatabError("Can't run after adding terms to document.") for t in terms: t.doc = self if t.term_is('root.root'): if not self.root: self.root = t self.add_section(t) continue if t.term_is('root.section'): self.add_section(t) elif t.parent_term_lc == 'root': self.add_term(t) else: # These terms aren't added to the doc because they are attached to a # parent term that is added to the doc. assert t.parent is not None try: dd = terms.declare_dict self.decl_terms.update(dd['terms']) self.decl_sections.update(dd['sections']) self.super_terms.update(terms.super_terms()) kf = lambda e: e[1] # Sort on the value self.derived_terms ={ k:set( e[0] for e in g) for k, g in groupby(sorted(self.super_terms.items(), key=kf), kf)} except AttributeError as e: pass try: self.errors = terms.errors_as_dict() except AttributeError: self.errors = {} return self
Create a builder from a sequence of terms, usually a TermInterpreter
def buildURL(self, action, **query): """Build a URL relative to the server base_url, with the given query parameters added.""" base = urlparse.urljoin(self.server.base_url, action) return appendArgs(base, query)
Build a URL relative to the server base_url, with the given query parameters added.
def print_tb(tb, limit=None, file=None): """Print up to 'limit' stack trace entries from the traceback 'tb'. If 'limit' is omitted or None, all entries are printed. If 'file' is omitted or None, the output goes to sys.stderr; otherwise 'file' should be an open file or file-like object with a write() method. """ if file is None: file = sys.stderr if limit is None: if hasattr(sys, 'tracebacklimit'): limit = sys.tracebacklimit n = 0 while tb is not None and (limit is None or n < limit): f = tb.tb_frame lineno = tb.tb_lineno co = f.f_code filename = co.co_filename name = co.co_name _print(file, ' File "%s", line %d, in %s' % (filename, lineno, name)) linecache.checkcache(filename) line = linecache.getline(filename, lineno, f.f_globals) if line: _print(file, ' ' + line.strip()) tb = tb.tb_next n = n+1
Print up to 'limit' stack trace entries from the traceback 'tb'. If 'limit' is omitted or None, all entries are printed. If 'file' is omitted or None, the output goes to sys.stderr; otherwise 'file' should be an open file or file-like object with a write() method.
def to_bluesky( traffic: Traffic, filename: Union[str, Path], minimum_time: Optional[timelike] = None, ) -> None: """Generates a Bluesky scenario file.""" if minimum_time is not None: minimum_time = to_datetime(minimum_time) traffic = traffic.query(f"timestamp >= '{minimum_time}'") if isinstance(filename, str): filename = Path(filename) if not filename.parent.exists(): filename.parent.mkdir(parents=True) altitude = ( "baro_altitude" if "baro_altitude" in traffic.data.columns else "altitude" ) if "mdl" not in traffic.data.columns: traffic = aircraft.merge(traffic) if "cas" not in traffic.data.columns: traffic = Traffic( traffic.data.assign( cas=vtas2cas(traffic.data.ground_speed, traffic.data[altitude]) ) ) with filename.open("w") as fh: t_delta = traffic.data.timestamp - traffic.start_time data = ( traffic.assign_id() .data.groupby("flight_id") .filter(lambda x: x.shape[0] > 3) .assign(timedelta=t_delta.apply(fmt_timedelta)) .sort_values(by="timestamp") ) for column in data.columns: data[column] = data[column].astype(np.str) is_created: List[str] = [] is_deleted: List[str] = [] start_time = cast(pd.Timestamp, traffic.start_time).time() fh.write(f"00:00:00> TIME {start_time}\n") # Add some bluesky command for the visualisation # fh.write("00:00:00>trail on\n") # fh.write("00:00:00>ssd conflicts\n") # We remove an object when it's its last data point buff = data.groupby("flight_id").timestamp.max() dd = pd.DataFrame( columns=["timestamp"], data=buff.values, index=buff.index.values ) map_icao24_last_point = {} for i, v in dd.iterrows(): map_icao24_last_point[i] = v[0] # Main loop to write lines in the scenario file for _, v in data.iterrows(): if v.flight_id not in is_created: # If the object is not created then create it is_created.append(v.flight_id) fh.write( f"{v.timedelta}> CRE {v.callsign} {v.mdl} " f"{v.latitude} {v.longitude} {v.track} " f"{v[altitude]} {v.cas}\n" ) elif v.timestamp == map_icao24_last_point[v.flight_id]: # Remove an aircraft when no data are available if v.flight_id not in is_deleted: is_deleted.append(v.flight_id) fh.write(f"{v.timedelta}> DEL {v.callsign}\n") elif v.flight_id not in is_deleted: # Otherwise update the object position fh.write( f"{v.timedelta}> MOVE {v.callsign} " f"{v.latitude} {v.longitude} {v[altitude]} " f"{v.track} {v.cas} {v.vertical_rate}\n" ) logging.info(f"Scenario file {filename} written")
Generates a Bluesky scenario file.
def uniquelines(q): """ Given all the facets, convert it into a set of unique lines. Specifically used for converting convex hull facets into line pairs of coordinates. Args: q: A 2-dim sequence, where each row represents a facet. E.g., [[1,2,3],[3,6,7],...] Returns: setoflines: A set of tuple of lines. E.g., ((1,2), (1,3), (2,3), ....) """ setoflines = set() for facets in q: for line in itertools.combinations(facets, 2): setoflines.add(tuple(sorted(line))) return setoflines
Given all the facets, convert it into a set of unique lines. Specifically used for converting convex hull facets into line pairs of coordinates. Args: q: A 2-dim sequence, where each row represents a facet. E.g., [[1,2,3],[3,6,7],...] Returns: setoflines: A set of tuple of lines. E.g., ((1,2), (1,3), (2,3), ....)
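A minimal usage sketch (assuming uniquelines is importable): two triangular facets sharing vertex 3 yield six unique edges.

facets = [[1, 2, 3], [3, 6, 7]]
print(sorted(uniquelines(facets)))
# [(1, 2), (1, 3), (2, 3), (3, 6), (3, 7), (6, 7)]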
def gdal_rasterize(src, dst, options):
    """
    a simple wrapper for gdal.Rasterize

    Parameters
    ----------
    src: str or :osgeo:class:`ogr.DataSource`
        the input data set
    dst: str
        the output data set
    options: dict
        additional parameters passed to gdal.Rasterize; see :osgeo:func:`gdal.RasterizeOptions`

    Returns
    -------
    None
        the rasterized data set is written to `dst`
    """
    out = gdal.Rasterize(dst, src, options=gdal.RasterizeOptions(**options))
    # dereference the dataset handle so GDAL flushes and closes the output file
    out = None
a simple wrapper for gdal.Rasterize Parameters ---------- src: str or :osgeo:class:`ogr.DataSource` the input data set dst: str the output data set options: dict additional parameters passed to gdal.Rasterize; see :osgeo:func:`gdal.RasterizeOptions` Returns ------- None the rasterized data set is written to `dst`
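A hedged usage sketch for the wrapper above; the file names are hypothetical, and gdal must come from the osgeo bindings. The keyword arguments map directly onto gdal.RasterizeOptions.

# burn the value 1 into a 30 m GeoTIFF grid wherever the input polygons overlap
opts = {"format": "GTiff", "xRes": 30, "yRes": 30, "burnValues": [1]}
gdal_rasterize("polygons.shp", "mask.tif", opts)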
def strerror(errno): """Translate an error code to a message string.""" from pypy.module._codecs.locale import str_decode_locale_surrogateescape return str_decode_locale_surrogateescape(os.strerror(errno))
Translate an error code to a message string.
def open_file(filename):
    """
    Multi-platform way to make the OS open a file with its default application
    """
    if sys.platform.startswith("darwin"):
        subprocess.call(("open", filename))
    elif sys.platform == "cygwin":
        subprocess.call(("cygstart", filename))
    elif os.name == "nt":
        # os.startfile handles paths with spaces, unlike 'start %s' via os.system
        os.startfile(filename)
    elif os.name == "posix":
        subprocess.call(("xdg-open", filename))
Multi-platform way to make the OS open a file with its default application
def add_user(self, attrs):
    """ Add a user to the backend

    :param attrs: attributes of the user
    :type attrs: dict ({<attr>: <value>})

    .. warning:: raises UserAlreadyExists if the user already exists
    """
    username = attrs[self.key]
    if username in self.users:
        raise UserAlreadyExists(username, self.backend_name)
    self.users[username] = attrs
    self.users[username]['groups'] = set([])
Add a user to the backend :param attrs: attributes of the user :type attrs: dict ({<attr>: <value>}) .. warning:: raises UserAlreadyExists if the user already exists
def __get_metrics(self):
    """ Each metric needs its own copy of the filters so it can modify them freely"""
    esfilters_closed = None
    esfilters_opened = None
    if self.esfilters:
        esfilters_closed = self.esfilters.copy()
        esfilters_opened = self.esfilters.copy()

    closed = self.closed_class(self.es_url, self.es_index,
                               start=self.start, end=self.end,
                               esfilters=esfilters_closed,
                               interval=self.interval)
    opened = self.opened_class(self.es_url, self.es_index,
                               start=self.start, end=self.end,
                               esfilters=esfilters_opened,
                               interval=self.interval)
    return (closed, opened)
Each metric needs its own copy of the filters so it can modify them freely
def _expect_empty(self):
    """
    Checks if the token stream is empty.

    * Raises a ``ParseError`` exception if a token is found.
    """
    item = self._lexer.get_token()
    if item:
        line_no, token = item
        raise ParseError(u"Unexpected token '{0}' on line {1}"
                         .format(common.from_utf8(token.strip()), line_no))
Checks if the token stream is empty. * Raises a ``ParseError`` exception if a token is found.
def create_from_request_pdu(pdu): """ Create instance from request PDU. :param pdu: A request PDU. :return: Instance of this class. """ _, starting_address, quantity = struct.unpack('>BHH', pdu) instance = ReadHoldingRegisters() instance.starting_address = starting_address instance.quantity = quantity return instance
Create instance from request PDU. :param pdu: A request PDU. :return: Instance of this class.
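A quick round-trip sketch (assuming the function above is in scope): Modbus function code 3 is Read Holding Registers, so a hand-packed PDU parses back into the same fields.

import struct

# function code 3, starting address 100, quantity 3, big-endian as in the parser
pdu = struct.pack('>BHH', 3, 100, 3)
request = create_from_request_pdu(pdu)
print(request.starting_address, request.quantity)  # 100 3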
def _parse(self, line): """Parse the output line""" try: result = line.split(':', maxsplit=4) filename, line_num_txt, column_txt, message_type, text = result except ValueError: return try: self.line_num = int(line_num_txt.strip()) self.column = int(column_txt.strip()) except ValueError: return self.filename = filename self.message_type = message_type.strip() self.text = text.strip() self.valid = True
Parse the output line
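A hedged sketch of the filename:line:col:type:text shape that _parse expects; the _Message stub below is hypothetical and exists only to host the method.

class _Message:
    valid = False
    _parse = _parse  # reuse the function above as a method

msg = _Message()
msg._parse("pkg/mod.py: 12: 5: error: undefined name 'foo'")
print(msg.valid, msg.filename, msg.line_num, msg.column, msg.message_type)
# True pkg/mod.py 12 5 error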
def configure(self, options, conf): """Configure which kinds of exceptions trigger plugin. """ self.conf = conf self.enabled = options.debugErrors or options.debugFailures self.enabled_for_errors = options.debugErrors self.enabled_for_failures = options.debugFailures
Configure which kinds of exceptions trigger plugin.
def get_host_from_service_info(service_info): """ Get hostname or IP from service_info. """ host = None port = None if (service_info and service_info.port and (service_info.server or service_info.address)): if service_info.address: host = socket.inet_ntoa(service_info.address) else: host = service_info.server.lower() port = service_info.port return (host, port)
Get hostname or IP from service_info.
def is_symlink(self): """ Whether this path is a symbolic link. """ try: return S_ISLNK(self.lstat().st_mode) except OSError as e: if e.errno != ENOENT: raise # Path doesn't exist return False
Whether this path is a symbolic link.
def combine(objs): """Combine the specified `~gala.dynamics.PhaseSpacePosition` or `~gala.dynamics.Orbit` objects. Parameters ---------- objs : iterable An iterable of either `~gala.dynamics.PhaseSpacePosition` or `~gala.dynamics.Orbit` objects. """ from .orbit import Orbit # have to special-case this because they are iterable if isinstance(objs, PhaseSpacePosition) or isinstance(objs, Orbit): raise ValueError("You must pass a non-empty iterable to combine.") elif not isiterable(objs) or len(objs) < 1: raise ValueError("You must pass a non-empty iterable to combine.") elif len(objs) == 1: # short circuit return objs[0] # We only support these two types to combine: if objs[0].__class__ not in [PhaseSpacePosition, Orbit]: raise TypeError("Objects must be either PhaseSpacePosition or Orbit " "instances.") # Validate objects: # - check type # - check dimensionality # - check frame, potential # - Right now, we only support Cartesian for obj in objs: # Check to see if they are all the same type of object: if obj.__class__ != objs[0].__class__: raise TypeError("All objects must have the same type.") # Make sure they have same dimensionality if obj.ndim != objs[0].ndim: raise ValueError("All objects must have the same ndim.") # Check that all objects have the same reference frame if obj.frame != objs[0].frame: raise ValueError("All objects must have the same frame.") # Check that (for orbits) they all have the same potential if hasattr(obj, 'potential') and obj.potential != objs[0].potential: raise ValueError("All objects must have the same potential.") # For orbits, time arrays must be the same if (hasattr(obj, 't') and obj.t is not None and objs[0].t is not None and not quantity_allclose(obj.t, objs[0].t, atol=1E-13*objs[0].t.unit)): raise ValueError("All orbits must have the same time array.") if 'cartesian' not in obj.pos.get_name(): raise NotImplementedError("Currently, combine only works for " "Cartesian-represented objects.") # Now we prepare the positions, velocities: if objs[0].__class__ == PhaseSpacePosition: pos = [] vel = [] for i, obj in enumerate(objs): if i == 0: pos_unit = obj.pos.xyz.unit vel_unit = obj.vel.d_xyz.unit pos.append(atleast_2d(obj.pos.xyz.to(pos_unit).value, insert_axis=1)) vel.append(atleast_2d(obj.vel.d_xyz.to(vel_unit).value, insert_axis=1)) pos = np.concatenate(pos, axis=1) * pos_unit vel = np.concatenate(vel, axis=1) * vel_unit return PhaseSpacePosition(pos=pos, vel=vel, frame=objs[0].frame) elif objs[0].__class__ == Orbit: pos = [] vel = [] for i, obj in enumerate(objs): if i == 0: pos_unit = obj.pos.xyz.unit vel_unit = obj.vel.d_xyz.unit p = obj.pos.xyz.to(pos_unit).value v = obj.vel.d_xyz.to(vel_unit).value if p.ndim < 3: p = p.reshape(p.shape + (1,)) v = v.reshape(v.shape + (1,)) pos.append(p) vel.append(v) pos = np.concatenate(pos, axis=2) * pos_unit vel = np.concatenate(vel, axis=2) * vel_unit return Orbit(pos=pos, vel=vel, t=objs[0].t, frame=objs[0].frame, potential=objs[0].potential) else: raise RuntimeError("should never get here...")
Combine the specified `~gala.dynamics.PhaseSpacePosition` or `~gala.dynamics.Orbit` objects. Parameters ---------- objs : iterable An iterable of either `~gala.dynamics.PhaseSpacePosition` or `~gala.dynamics.Orbit` objects.
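A hedged usage sketch, assuming gala and astropy are installed and that gala.dynamics exports combine: stacking two single-particle phase-space positions into one two-particle object.

import astropy.units as u
from gala.dynamics import PhaseSpacePosition, combine

w1 = PhaseSpacePosition(pos=[1, 0, 0] * u.kpc, vel=[0, 200, 0] * u.km / u.s)
w2 = PhaseSpacePosition(pos=[0, 1, 0] * u.kpc, vel=[-200, 0, 0] * u.km / u.s)
w = combine([w1, w2])
print(w.shape)  # (2,)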
def pyhttp(self, value):
    """ Converts a non-namespaced URI to a Python-accessible name """
    if value.startswith("pyuri_"):
        return value
    parts = self.parse_uri(value)
    return "pyuri_%s_%s" % (base64.b64encode(bytes(parts[0], "utf-8")).decode(), parts[1])
Converts a non-namespaced URI to a Python-accessible name
def role_definitions(self): """Instance depends on the API version: * 2015-07-01: :class:`RoleDefinitionsOperations<azure.mgmt.authorization.v2015_07_01.operations.RoleDefinitionsOperations>` * 2018-01-01-preview: :class:`RoleDefinitionsOperations<azure.mgmt.authorization.v2018_01_01_preview.operations.RoleDefinitionsOperations>` """ api_version = self._get_api_version('role_definitions') if api_version == '2015-07-01': from .v2015_07_01.operations import RoleDefinitionsOperations as OperationClass elif api_version == '2018-01-01-preview': from .v2018_01_01_preview.operations import RoleDefinitionsOperations as OperationClass else: raise NotImplementedError("APIVersion {} is not available".format(api_version)) return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
Instance depends on the API version: * 2015-07-01: :class:`RoleDefinitionsOperations<azure.mgmt.authorization.v2015_07_01.operations.RoleDefinitionsOperations>` * 2018-01-01-preview: :class:`RoleDefinitionsOperations<azure.mgmt.authorization.v2018_01_01_preview.operations.RoleDefinitionsOperations>`
def stream_json_lines(file): """ Load a JSON stream and return a generator, yielding one object at a time. """ if isinstance(file, string_type): file = open(file, 'rb') for line in file: line = line.strip() if line: if isinstance(line, bytes): line = line.decode('utf-8') yield json.loads(line)
Load a JSON stream and return a generator, yielding one object at a time.
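A minimal sketch (assuming stream_json_lines is importable) streaming two objects from an in-memory JSON-lines buffer; the blank line is skipped, as the code above shows.

import io

buf = io.BytesIO(b'{"a": 1}\n\n{"b": 2}\n')
for obj in stream_json_lines(buf):
    print(obj)
# {'a': 1}
# {'b': 2}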
def read(cls, proto): """ Intercepts TemporalMemory deserialization request in order to initialize `self.infActiveState` @param proto (DynamicStructBuilder) Proto object @return (TemporalMemory) TemporalMemory shim instance """ tm = super(TMShimMixin, cls).read(proto) tm.infActiveState = {"t": None} return tm
Intercepts TemporalMemory deserialization request in order to initialize `self.infActiveState` @param proto (DynamicStructBuilder) Proto object @return (TemporalMemory) TemporalMemory shim instance
def format_private_ip_address(result): ''' Formats the PrivateIPAddress object removing arguments that are empty ''' from collections import OrderedDict # Only display parameters that have content order_dict = OrderedDict() if result.ip_address is not None: order_dict['ipAddress'] = result.ip_address if result.subnet_resource_id is not None: order_dict['subnetResourceId'] = result.subnet_resource_id return order_dict
Formats the PrivateIPAddress object removing arguments that are empty
def embed(self, url, **kwargs):
    """
    The heart of the matter
    """
    try:
        # first figure out the provider
        provider = self.provider_for_url(url)
    except OEmbedMissingEndpoint:
        raise
    else:
        try:
            # check the database for a cached response, because of certain
            # race conditions that exist with get_or_create(), do a filter
            # lookup and just grab the first item
            stored_match = StoredOEmbed.objects.filter(
                match=url,
                maxwidth=kwargs.get('maxwidth', None),
                maxheight=kwargs.get('maxheight', None),
                date_expires__gte=datetime.datetime.now())[0]
            return OEmbedResource.create_json(stored_match.response_json)
        except IndexError:
            # query the endpoint and cache response in db
            # prevent None from being passed in as a GET param
            params = dict([(k, v) for k, v in kwargs.items() if v])

            # request an oembed resource for the url
            resource = provider.request_resource(url, **params)

            try:
                cache_age = int(resource.cache_age)
                if cache_age < MIN_OEMBED_TTL:
                    cache_age = MIN_OEMBED_TTL
            except (TypeError, ValueError):
                # only missing or malformed cache_age values are expected here;
                # a bare except would also swallow KeyboardInterrupt
                cache_age = DEFAULT_OEMBED_TTL

            date_expires = datetime.datetime.now() + datetime.timedelta(seconds=cache_age)

            stored_oembed, created = StoredOEmbed.objects.get_or_create(
                match=url,
                maxwidth=kwargs.get('maxwidth', None),
                maxheight=kwargs.get('maxheight', None))

            stored_oembed.response_json = resource.json
            stored_oembed.resource_type = resource.type
            stored_oembed.date_expires = date_expires

            if resource.content_object:
                stored_oembed.content_object = resource.content_object

            stored_oembed.save()
            return resource
The heart of the matter
def deploy(
    src, requirements=None, local_package=None,
    config_file='config.yaml', profile_name=None,
    preserve_vpc=False
):
    """Deploys a new function to AWS Lambda.

    :param str src:
        The path to your Lambda ready project (folder must contain a valid
        config.yaml and handler module (e.g.: service.py).
    :param str local_package:
        The path to a local package which should be included in the deploy as
        well (and/or is not available on PyPi)
    """
    # Load and parse the config file.
    path_to_config_file = os.path.join(src, config_file)
    cfg = read_cfg(path_to_config_file, profile_name)

    # Copy all the pip dependencies required to run your code into a temporary
    # folder then add the handler file in the root of this directory.
    # Zip the contents of this folder into a single file and output to the dist
    # directory.
    path_to_zip_file = build(
        src, config_file=config_file, requirements=requirements,
        local_package=local_package,
    )

    existing_config = get_function_config(cfg)
    if existing_config:
        update_function(cfg, path_to_zip_file, existing_config,
                        preserve_vpc=preserve_vpc)
    else:
        create_function(cfg, path_to_zip_file)
Deploys a new function to AWS Lambda. :param str src: The path to your Lambda ready project (folder must contain a valid config.yaml and handler module (e.g.: service.py). :param str local_package: The path to a local package which should be included in the deploy as well (and/or is not available on PyPi)
def generate(env): """Add Builders and construction variables for dvips to an Environment.""" global PSAction if PSAction is None: PSAction = SCons.Action.Action('$PSCOM', '$PSCOMSTR') global DVIPSAction if DVIPSAction is None: DVIPSAction = SCons.Action.Action(DviPsFunction, strfunction = DviPsStrFunction) global PSBuilder if PSBuilder is None: PSBuilder = SCons.Builder.Builder(action = PSAction, prefix = '$PSPREFIX', suffix = '$PSSUFFIX', src_suffix = '.dvi', src_builder = 'DVI', single_source=True) env['BUILDERS']['PostScript'] = PSBuilder env['DVIPS'] = 'dvips' env['DVIPSFLAGS'] = SCons.Util.CLVar('') # I'm not quite sure I got the directories and filenames right for variant_dir # We need to be in the correct directory for the sake of latex \includegraphics eps included files. env['PSCOM'] = 'cd ${TARGET.dir} && $DVIPS $DVIPSFLAGS -o ${TARGET.file} ${SOURCE.file}' env['PSPREFIX'] = '' env['PSSUFFIX'] = '.ps'
Add Builders and construction variables for dvips to an Environment.
def _submit_result(self):
    """ Adds the current values as a Raw Result and resets everything.

    Notice that we are not calculating the final result of the assay. We
    just set NP and GP values; in Bika, the AS will have a Calculation to
    generate the final result based on the NP and GP values.
    """
    if self._cur_res_id and self._cur_values:
        # Setting DefaultResult just because it is obligatory. However,
        # it won't be used because AS must have a Calculation based on
        # GP and NP results.
        self._cur_values[self._keyword]['DefaultResult'] = 'DefResult'
        self._cur_values[self._keyword]['DefResult'] = ''

        # If we add results as a raw result, AnalysisResultsImporter will
        # automatically import them to the system. The only important thing
        # here is to respect the dictionary format.
        self._addRawResult(self._cur_res_id, self._cur_values)
        self._reset()
Adds the current values as a Raw Result and resets everything. Notice that we are not calculating the final result of the assay. We just set NP and GP values; in Bika, the AS will have a Calculation to generate the final result based on the NP and GP values.
def get_tags(self, rev=None): """ Get the tags for the given revision specifier (or the current revision if not specified). """ rev_num = self._get_rev_num(rev) # rev_num might end with '+', indicating local modifications. return ( set(self._read_tags_for_rev(rev_num)) if not rev_num.endswith('+') else set([]) )
Get the tags for the given revision specifier (or the current revision if not specified).
def simplify(self): """ Return a simplified expr in canonical form. This means double negations are canceled out and all contained boolean objects are in their canonical form. """ if self.iscanonical: return self expr = self.cancel() if not isinstance(expr, self.__class__): return expr.simplify() if expr.args[0] in (self.TRUE, self.FALSE,): return expr.args[0].dual expr = self.__class__(expr.args[0].simplify()) expr.iscanonical = True return expr
Return a simplified expr in canonical form. This means double negations are canceled out and all contained boolean objects are in their canonical form.
def scoreatpercentile(inlist, percent):
    """
    Returns the score at a given percentile relative to the distribution
    given by inlist.

    Usage:   scoreatpercentile(inlist, percent)
    """
    if percent > 1:
        print("\nDividing percent>1 by 100 in scoreatpercentile().\n")
        percent = percent / 100.0
    targetcf = percent * len(inlist)
    h, lrl, binsize, extras = histogram(inlist)
    cumhist = cumsum(copy.deepcopy(h))
    for i in range(len(cumhist)):
        if cumhist[i] >= targetcf:
            break
    score = binsize * ((targetcf - cumhist[i - 1]) / float(h[i])) + (lrl + binsize * i)
    return score
Returns the score at a given percentile relative to the distribution given by inlist. Usage: scoreatpercentile(inlist, percent)
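A usage sketch; note the function leans on the module's own histogram() and cumsum() helpers, and the histogram binning makes the returned score approximate.

data = list(range(1, 101))  # scores 1..100
print(scoreatpercentile(data, 50))   # percent > 1 is divided by 100; roughly 50
print(scoreatpercentile(data, 0.9))  # fractions below 1 are used as-is; roughly 90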
def rollback_savepoint(self, savepoint): """Rolls back to the given savepoint. :param savepoint: the name of the savepoint to rollback to :raise: pydbal.exception.DBALConnectionError """ if not self._platform.is_savepoints_supported(): raise DBALConnectionError.savepoints_not_supported() self.ensure_connected() self._platform.rollback_savepoint(savepoint)
Rolls back to the given savepoint. :param savepoint: the name of the savepoint to rollback to :raise: pydbal.exception.DBALConnectionError
def patch(self, endpoint, data):
    """
    Method to update an item

    The headers must include an If-Match containing the object _etag.
        headers = {'If-Match': contact_etag}

    The data dictionary contains the fields that must be modified.

    If the patching fails because the _etag object does not match the
    provided one, a BackendException is raised with code = 412.

    If inception is True, this method makes a new GET request on the
    endpoint to refresh the _etag and then a new patch is called.

    If an HTTP 412 error occurs, a BackendException is raised. This
    exception is:
    - code: 412
    - message: response content
    - response: backend response

    All other HTTP errors raise a BackendException.
    If some _issues are provided by the backend, this exception is:
    - code: HTTP error code
    - message: response content
    - response: JSON encoded backend response (including '_issues' dictionary ...)

    If no _issues are provided and an _error is signaled by the backend,
    this exception is:
    - code: backend error code
    - message: backend error message
    - response: JSON encoded backend response

    :param endpoint: endpoint (API URL)
    :type endpoint: str
    :param data: properties of item to update
    :type data: dict
    :param headers: headers (example: Content-Type). 'If-Match' required
    :type headers: dict
    :param inception: if True tries to get the last _etag
    :type inception: bool
    :return: dictionary containing patch response from the backend
    :rtype: dict
    """
    response = self.get_response(method='PATCH', endpoint=endpoint, json=data,
                                 headers={'Content-Type': 'application/json'})
    if response.status_code == 200:
        return self.decode(response=response)
    return response
Method to update an item The headers must include an If-Match containing the object _etag. headers = {'If-Match': contact_etag} The data dictionary contains the fields that must be modified. If the patching fails because the _etag object does not match the provided one, a BackendException is raised with code = 412. If inception is True, this method makes a new GET request on the endpoint to refresh the _etag and then a new patch is called. If an HTTP 412 error occurs, a BackendException is raised. This exception is: - code: 412 - message: response content - response: backend response All other HTTP errors raise a BackendException. If some _issues are provided by the backend, this exception is: - code: HTTP error code - message: response content - response: JSON encoded backend response (including '_issues' dictionary ...) If no _issues are provided and an _error is signaled by the backend, this exception is: - code: backend error code - message: backend error message - response: JSON encoded backend response :param endpoint: endpoint (API URL) :type endpoint: str :param data: properties of item to update :type data: dict :param headers: headers (example: Content-Type). 'If-Match' required :type headers: dict :param inception: if True tries to get the last _etag :type inception: bool :return: dictionary containing patch response from the backend :rtype: dict
def read_all(filename):
    """
    Reads the serialized objects from disk. Caller must wrap objects in
    appropriate Python wrapper classes.

    :param filename: the file with the serialized objects
    :type filename: str
    :return: the list of JB_Object instances
    :rtype: list
    """
    array = javabridge.static_call(
        "Lweka/core/SerializationHelper;", "readAll",
        "(Ljava/lang/String;)[Ljava/lang/Object;",
        filename)
    if array is None:
        return None
    else:
        return javabridge.get_env().get_object_array_elements(array)
Reads the serialized objects from disk. Caller must wrap objects in appropriate Python wrapper classes. :param filename: the file with the serialized objects :type filename: str :return: the list of JB_Object instances :rtype: list
def calc_synch_eta(b, ne, delta, sinth, nu, E0=1.):
    """Calculate the relativistic synchrotron emission coefficient η_ν.

    This is Dulk (1985) equation 40, which is an approximation assuming a
    power-law electron population. Arguments are:

    b
      Magnetic field strength in Gauss
    ne
      The density of electrons per cubic centimeter with energies greater than E0.
    delta
      The power-law index defining the energy distribution of the electron
      population, with ``n(E) ~ E^(-delta)``. The equation is valid for
      ``2 <~ delta <~ 5``.
    sinth
      The sine of the angle between the line of sight and the magnetic field
      direction. It's not specified for what range of values the expressions
      work well.
    nu
      The frequency at which to calculate η, in Hz. It's not specified for
      what range of values the expression works well.
    E0
      The minimum energy of electrons to consider, in MeV. Defaults to 1 so
      that these functions can be called identically to the gyrosynchrotron
      functions.

    The return value is the emission coefficient (AKA "emissivity"), in units
    of ``erg s^-1 Hz^-1 cm^-3 sr^-1``.

    No complaints are raised if you attempt to use the equation outside of its
    range of validity.

    """
    s = nu / calc_nu_b(b)
    return (b * ne * 8.6e-24 * (delta - 1) * sinth *
            (0.175 * s / (E0**2 * sinth))**(0.5 * (1 - delta)))
Calculate the relativistic synchrotron emission coefficient η_ν. This is Dulk (1985) equation 40, which is an approximation assuming a power-law electron population. Arguments are: b Magnetic field strength in Gauss ne The density of electrons per cubic centimeter with energies greater than E0. delta The power-law index defining the energy distribution of the electron population, with ``n(E) ~ E^(-delta)``. The equation is valid for ``2 <~ delta <~ 5``. sinth The sine of the angle between the line of sight and the magnetic field direction. It's not specified for what range of values the expressions work well. nu The frequency at which to calculate η, in Hz. It's not specified for what range of values the expression works well. E0 The minimum energy of electrons to consider, in MeV. Defaults to 1 so that these functions can be called identically to the gyrosynchrotron functions. The return value is the emission coefficient (AKA "emissivity"), in units of ``erg s^-1 Hz^-1 cm^-3 sr^-1``. No complaints are raised if you attempt to use the equation outside of its range of validity.
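A worked call under stated assumptions: calc_nu_b (not shown here) should return the electron gyrofrequency, roughly 2.8e6 * b Hz, so the harmonic number s = nu / nu_B is about 36 for these inputs.

import numpy as np

# 100 G field, 1e7 electrons above 1 MeV per cm^3, delta = 3,
# 45 degree viewing angle, observing at 10 GHz
eta = calc_synch_eta(b=100., ne=1e7, delta=3., sinth=np.sin(np.pi / 4), nu=1e10)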
def get_opt_repairs_add_remove_edges_greedy(instance, nm, edges):
    '''
    Only apply with the elementary path consistency notion.
    '''
    sem = [sign_cons_prg, elem_path_prg, fwd_prop_prg, bwd_prop_prg]
    inst = instance.to_file()
    f_edges = TermSet(edges).to_file()
    prg = [inst, f_edges, remove_edges_prg,
           min_repairs_prg, show_rep_prg] + sem + scenfit
    coptions = str(nm) + ' --project --opt-strategy=5 --opt-mode=optN --quiet=1'
    solver = GringoClasp(clasp_options=coptions)
    models = solver.run(prg, collapseTerms=True, collapseAtoms=False)
    os.unlink(f_edges)
    os.unlink(inst)
    return models
Only apply with the elementary path consistency notion.
def list(self, resource, url_prefix, auth, session, send_opts):
    """List all resources of the same type as the given resource.

    Args:
        resource (intern.resource.boss.BossResource): List resources of the same type as this one.
        url_prefix (string): Protocol + host such as https://api.theboss.io
        auth (string): Token to send in the request header.
        session (requests.Session): HTTP session to use for request.
        send_opts (dictionary): Additional arguments to pass to session.send().

    Returns:
        (list): List of resources.  Each resource is a dictionary.

    Raises:
        requests.HTTPError on failure.
    """
    req = self.get_request(
        resource, 'GET', 'application/json', url_prefix, auth,
        proj_list_req=True)

    prep = session.prepare_request(req)
    resp = session.send(prep, **send_opts)
    if resp.status_code == 200:
        return self._get_resource_list(resp.json())

    err = ('List failed on {}, got HTTP response: ({}) - {}'.format(
        resource.name, resp.status_code, resp.text))
    raise HTTPError(err, request=req, response=resp)
List all resources of the same type as the given resource. Args: resource (intern.resource.boss.BossResource): List resources of the same type as this one. url_prefix (string): Protocol + host such as https://api.theboss.io auth (string): Token to send in the request header. session (requests.Session): HTTP session to use for request. send_opts (dictionary): Additional arguments to pass to session.send(). Returns: (list): List of resources. Each resource is a dictionary. Raises: requests.HTTPError on failure.
def _extract_alphabet(self, grammar):
    """
    Extract an alphabet from the given grammar.
    """
    alphabet = set()
    for terminal in grammar.Terminals:
        alphabet |= set(terminal)
    self.alphabet = list(alphabet)
Extract an alphabet from the given grammar.
def findAllCfgTasksUnderDir(aDir): """ Finds all installed tasks by examining any .cfg files found on disk at and under the given directory, as an installation might be. This returns a dict of { file name : task name } """ retval = {} for f in irafutils.rglob(aDir, '*.cfg'): retval[f] = getEmbeddedKeyVal(f, TASK_NAME_KEY, '') return retval
Finds all installed tasks by examining any .cfg files found on disk at and under the given directory, as an installation might be. This returns a dict of { file name : task name }
def add_node_collection(self, node, collection): """Add the collected test items from a node Collection is complete once all nodes have submitted their collection. In this case its pending list is set to an empty list. When the collection is already completed this submission is from a node which was restarted to replace a dead node. In this case we already assign the pending items here. In either case ``.schedule()`` will instruct the node to start running the required tests. """ assert node in self.node2pending if not self.collection_is_completed: self.node2collection[node] = list(collection) self.node2pending[node] = [] if len(self.node2collection) >= self.numnodes: self.collection_is_completed = True elif self._removed2pending: for deadnode in self._removed2pending: if deadnode.gateway.spec == node.gateway.spec: dead_collection = self.node2collection[deadnode] if collection != dead_collection: msg = report_collection_diff( dead_collection, collection, deadnode.gateway.id, node.gateway.id, ) self.log(msg) return pending = self._removed2pending.pop(deadnode) self.node2pending[node] = pending break
Add the collected test items from a node Collection is complete once all nodes have submitted their collection. In this case its pending list is set to an empty list. When the collection is already completed this submission is from a node which was restarted to replace a dead node. In this case we already assign the pending items here. In either case ``.schedule()`` will instruct the node to start running the required tests.
def new(cls, gen: Generator, sign_key: SignKey) -> 'VerKey': """ Creates and returns BLS ver key that corresponds to the given generator and sign key. :param: gen - Generator :param: sign_key - Sign Key :return: BLS verification key """ logger = logging.getLogger(__name__) logger.debug("VerKey::new: >>>") c_instance = c_void_p() do_call(cls.new_handler, gen.c_instance, sign_key.c_instance, byref(c_instance)) res = cls(c_instance) logger.debug("VerKey::new: <<< res: %r", res) return res
Creates and returns BLS ver key that corresponds to the given generator and sign key. :param: gen - Generator :param: sign_key - Sign Key :return: BLS verification key
def transform(self, data): """ Transforms the data. """ if not self._get("fitted"): raise RuntimeError("`transform` called before `fit` or `fit_transform`.") data = data.copy() output_column_prefix = self._get("output_column_prefix") if output_column_prefix is None: prefix = "" else: prefix = output_column_prefix + '.' transform_function = self._get("transform_function") feature_columns = self._get("features") feature_columns = _internal_utils.select_feature_subset(data, feature_columns) for f in feature_columns: data[prefix + f] = transform_function(data[f]) return data
Transforms the data.
def encrypt(self, key): """This method encrypts and signs the state to make it unreadable by the server, since it contains information that would allow faking proof of storage. :param key: the key to encrypt and sign with """ if (self.encrypted): return # encrypt self.iv = Random.new().read(AES.block_size) aes = AES.new(key, AES.MODE_CFB, self.iv) self.f_key = aes.encrypt(self.f_key) self.alpha_key = aes.encrypt(self.alpha_key) self.encrypted = True # sign self.hmac = self.get_hmac(key)
This method encrypts and signs the state to make it unreadable by the server, since it contains information that would allow faking proof of storage. :param key: the key to encrypt and sign with
def add_tokens_for_single(self, ignore=False): """Add the tokens for the single signature""" args = self.single.args name = self.single.python_name # Reset indentation to proper amount and add signature self.reset_indentation(self.indent_type * self.single.indent) self.result.extend(self.tokens.make_single(name, args)) # Add skip if necessary if ignore: self.single.skipped = True self.result.extend(self.tokens.test_skip) self.groups.finish_signature()
Add the tokens for the single signature
def bot_config(player_config_path: Path, team: Team) -> 'PlayerConfig': """ A function to cover the common case of creating a config for a bot. """ bot_config = PlayerConfig() bot_config.bot = True bot_config.rlbot_controlled = True bot_config.team = team.value bot_config.config_path = str(player_config_path.absolute()) # TODO: Refactor to use Path's config_bundle = get_bot_config_bundle(bot_config.config_path) bot_config.name = config_bundle.name bot_config.loadout_config = load_bot_appearance(config_bundle.get_looks_config(), bot_config.team) return bot_config
A function to cover the common case of creating a config for a bot.