code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def _get_referenced_services(specs):
"""
Returns all services that are referenced in specs.apps.depends.services,
or in specs.bundles.services
"""
active_services = set()
for app_spec in specs['apps'].values():
for service in app_spec['depends']['services']:
active_services.add(service)
for bundle_spec in specs['bundles'].values():
for service in bundle_spec['services']:
active_services.add(service)
return active_services
|
Returns all services that are referenced in specs.apps.depends.services,
or in specs.bundles.services
|
def wrap(self, cause):
    """Wrap another exception into an application exception object.

    If the original exception already is an ApplicationException it is
    returned without changes; otherwise *cause* is recorded as this
    exception's cause and this exception is returned.

    :param cause: an original error object
    :return: an original or newly created ApplicationException
    """
    if not isinstance(cause, ApplicationException):
        self.with_cause(cause)
        return self
    return cause
|
Wraps another exception into an application exception object.
If original exception is of ApplicationException type it is returned without changes.
Otherwise a new ApplicationException is created and original error is set as its cause.
:param cause: an original error object
:return: an original or newly created ApplicationException
|
def _sample_oat(problem, N, num_levels=4):
    """Generate trajectories without groups.

    Each of the ``num_vars`` parameters is placed in its own group
    (identity membership matrix), so every trajectory has
    ``num_vars + 1`` points.

    Arguments
    ---------
    problem : dict
        The problem definition (must contain ``num_vars``)
    N : int
        The number of trajectories (samples) to generate
    num_levels : int, default=4
        The number of grid levels

    Returns
    -------
    numpy.ndarray
        Array of shape ``(N * (num_params + 1), num_params)``.
    """
    # One group per variable: the identity matrix as group membership.
    group_membership = np.asmatrix(np.identity(problem['num_vars'],
                                               dtype=int))
    num_params = group_membership.shape[0]
    # NOTE: the original pre-allocated a zeros array here and immediately
    # overwrote it; that dead allocation has been removed.
    sample = np.array([generate_trajectory(group_membership, num_levels)
                       for n in range(N)])
    return sample.reshape((N * (num_params + 1), num_params))
|
Generate trajectories without groups
Arguments
---------
problem : dict
The problem definition
N : int
The number of samples to generate
num_levels : int, default=4
The number of grid levels
|
def get_dict(self, only_attributes=None, exclude_attributes=None, df_format=False):
    """Summarize the I-TASSER run in a dictionary containing modeling results
    and top predictions from COACH.

    Args:
        only_attributes (str, list): Attributes that should be returned. If not
            provided, all are returned.
        exclude_attributes (str, list): Attributes that should be excluded.
        df_format (bool): If dictionary values should be formatted for a dataframe
            (everything possible is transformed into strings, int, or float -
            if something can't be transformed it is excluded)

    Returns:
        dict: Dictionary of attributes
    """
    # Raw COACH result lists are always excluded from the generic dump;
    # their top-ranked entries are flattened in with prefixed keys below.
    flatten = (('coach_bsites', 'top_bsite_'),
               ('coach_ec', 'top_ec_'),
               ('coach_go_mf', 'top_go_mf_'),
               ('coach_go_bp', 'top_go_bp_'),
               ('coach_go_cc', 'top_go_cc_'))
    if exclude_attributes:
        excluder = ssbio.utils.force_list(exclude_attributes)
        excluder.extend(attr for attr, _ in flatten)
    else:
        excluder = [attr for attr, _ in flatten]

    summary_dict = StructProp.get_dict(self, only_attributes=only_attributes,
                                       exclude_attributes=excluder,
                                       df_format=df_format)

    for attr, prefix in flatten:
        predictions = getattr(self, attr)
        if predictions:
            summary_dict.update({prefix + k: v for k, v in predictions[0].items()})
    return summary_dict
|
Summarize the I-TASSER run in a dictionary containing modeling results and top predictions from COACH
Args:
only_attributes (str, list): Attributes that should be returned. If not provided, all are returned.
exclude_attributes (str, list): Attributes that should be excluded.
df_format (bool): If dictionary values should be formatted for a dataframe
(everything possible is transformed into strings, int, or float -
if something can't be transformed it is excluded)
Returns:
dict: Dictionary of attributes
|
def provide_session(func):
    """Function decorator that provides a session if it isn't provided.

    If you want to reuse a session or run the function as part of a
    database transaction, pass it to the function explicitly; otherwise
    this wrapper creates one, passes it as the ``session`` keyword, and
    closes it for you.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        varnames = func.__code__.co_varnames
        # Session may arrive positionally or as a keyword argument.
        passed_positionally = ('session' in varnames
                               and varnames.index('session') < len(args))
        if passed_positionally or 'session' in kwargs:
            return func(*args, **kwargs)
        with create_session() as session:
            return func(*args, session=session, **kwargs)
    return wrapper
|
Function decorator that provides a session if it isn't provided.
If you want to reuse a session or run the function as part of a
database transaction, you pass it to the function, if not this wrapper
will create one and close it for you.
|
def write_monitor_keyring(keyring, monitor_keyring, uid=-1, gid=-1):
    """Create the monitor keyring file.

    :param keyring: path of the keyring file to write
    :param monitor_keyring: keyring contents to write
    :param uid: owner uid for the file (-1 leaves the uid unchanged)
    :param gid: owner gid for the file (-1 leaves the gid unchanged)
    """
    # 0o600: keyrings hold secrets, so restrict access to the owner.
    write_file(keyring, monitor_keyring, 0o600, None, uid, gid)
|
create the monitor keyring file
|
def open(self):
    '''Open and return a stream for the dataset contents.'''
    rest_client = self.workspace._rest
    return rest_client.open_intermediate_dataset_contents(
        self.workspace.workspace_id,
        self.experiment.experiment_id,
        self.node_id,
        self.port_name)
|
Open and return a stream for the dataset contents.
|
def state_fidelity(state0: State, state1: State) -> bk.BKTensor:
    """Return the quantum fidelity between pure states."""
    # Fidelity of pure states is |<psi0|psi1>|^2.
    assert state0.qubits == state1.qubits  # FIXME
    overlap = bk.inner(state0.tensor, state1.tensor)
    return bk.absolute(overlap) ** bk.fcast(2)
|
Return the quantum fidelity between pure states.
|
def render(self):
    "Re-render Jupyter cell for batch of images."
    # Wipe the previous widget output and persist current state to CSV.
    clear_output()
    self.write_csv()
    if self.empty() and self._skipped > 0:
        # Nothing left, and some pairs were skipped because one of their
        # images had already been deleted — tell the user how many.
        return display(f'No images to show :). {self._skipped} pairs were '
                       f'skipped since at least one of the images was deleted by the user.')
    elif self.empty():
        return display('No images to show :)')
    if self.batch_contains_deleted():
        # Current batch references deleted files: advance and re-render.
        self.next_batch(None)
        self._skipped += 1
    else:
        display(self.make_horizontal_box(self.get_widgets(self._duplicates)))
        display(self.make_button_widget('Next Batch', handler=self.next_batch, style="primary"))
|
Re-render Jupyter cell for batch of images.
|
def var_explained(y_true, y_pred):
    """Fraction of variance explained: ``1 - Var[y_true - y_pred] / Var[y_true]``."""
    residual_variance = K.var(y_true - y_pred)
    total_variance = K.var(y_true)
    return 1 - residual_variance / total_variance
|
Fraction of variance explained.
|
def _allocate_address_nova(self, instance, network_ids):
    """Allocate a floating/public IP address to the given instance,
    using the OpenStack Compute ('Nova') API.

    :param instance: instance to assign address to
    :param list network_ids: List of IDs (as strings) of networks
        where to request allocation of the floating IP. **Ignored**
        (only used by the corresponding Neutron API function).
    :return: public ip address
    :raises RuntimeError: if no floating IP could be obtained
    """
    self._init_os_api()
    with OpenStackCloudProvider.__node_start_lock:
        # Use the `novaclient` API (works with python-novaclient <8.0.0)
        free_ips = [ip for ip in self.nova_client.floating_ips.list()
                    if not ip.fixed_ip]
        if not free_ips:
            log.debug("Trying to allocate a new floating IP ...")
            free_ips.append(self.nova_client.floating_ips.create())
        if free_ips:
            ip = free_ips.pop()
        else:
            # BUGFIX: the original referenced an undefined `instance_id`
            # here, raising NameError instead of the intended RuntimeError.
            raise RuntimeError(
                "Could not allocate floating IP for VM {0}"
                .format(instance))
        instance.add_floating_ip(ip)
    return ip.ip
|
Allocates a floating/public ip address to the given instance,
using the OpenStack Compute ('Nova') API.
:param instance: instance to assign address to
:param list network_id: List of IDs (as strings) of networks
where to request allocation the floating IP. **Ignored**
(only used by the corresponding Neutron API function).
:return: public ip address
|
def RR_calc(classes, TOP):
    """
    Calculate RR (Global performance index).

    :param classes: classes
    :type classes : list
    :param TOP: test outcome positive
    :type TOP : dict
    :return: RR as float, or the string "None" on failure
    """
    try:
        # Average of all positive outcomes over the number of classes.
        return sum(TOP.values()) / len(classes)
    except Exception:
        # Preserve library convention: errors yield the string "None".
        return "None"
|
Calculate RR (Global performance index).
:param classes: classes
:type classes : list
:param TOP: test outcome positive
:type TOP : dict
:return: RR as float
|
def latex(self, force=False):
    """
    Build PDF documentation.

    Runs the Sphinx ``latex`` builder, then compiles the generated TeX
    sources under ``build/latex``.

    Parameters
    ----------
    force : bool, default False
        If True, invoke ``pdflatex`` directly three times (so the table of
        contents and cross-references converge) and then exit via
        SystemExit, asking the user to inspect the resulting PDF.
        Otherwise the generated Makefile is used.

    Returns
    -------
    Sphinx build exit code, or None on Windows where the build is skipped.
    """
    if sys.platform == 'win32':
        sys.stderr.write('latex build has not been tested on windows\n')
    else:
        ret_code = self._sphinx_build('latex')
        os.chdir(os.path.join(BUILD_PATH, 'latex'))
        if force:
            # Three pdflatex passes are needed for references to settle.
            for i in range(3):
                self._run_os('pdflatex',
                             '-interaction=nonstopmode',
                             'pandas.tex')
            raise SystemExit('You should check the file '
                             '"build/latex/pandas.pdf" for problems.')
        else:
            self._run_os('make')
        return ret_code
|
Build PDF documentation.
|
def avl_join(t1, t2, node):
    """
    Joins two trees `t1` and `t1` with an intermediate key-value pair

    CommandLine:
        python -m utool.experimental.euler_tour_tree_avl avl_join

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.experimental.euler_tour_tree_avl import *  # NOQA
        >>> self = EulerTourTree(['a', 'b', 'c', 'b', 'd', 'b', 'a'])
        >>> other = EulerTourTree(['E', 'F', 'G', 'F', 'E'])
        >>> node = Node(value='Q')
        >>> root = avl_join(self.root, other.root, node)
        >>> new = EulerTourTree(root=root)
        >>> print('new = %r' % (new,))
        >>> ut.quit_if_noshow()
        >>> self.print_tree()
        >>> other.print_tree()
        >>> new.print_tree()

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.experimental.euler_tour_tree_avl import *  # NOQA
        >>> self = EulerTourTree(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K'])
        >>> other = EulerTourTree(['X'])
        >>> node = Node(value='Q')
        >>> root = avl_join(self.root, other.root, node)
        >>> new = EulerTourTree(root=root)
        >>> print('new = %r' % (new,))
        >>> ut.quit_if_noshow()
        >>> ut.qtensure()
        >>> #self.show_nx(fnum=1)
        >>> #other.show_nx(fnum=2)
        >>> new.show_nx()

    Running Time:
        O(abs(r(t1) - r(t2)))
        O(abs(height(t1) - height(t2)))
    """
    if DEBUG_JOIN:
        print('-- JOIN node=%r' % (node,))
    if t1 is None and t2 is None:
        # Case 1: both trees empty — the joining node becomes the whole tree.
        if DEBUG_JOIN:
            print('Join Case 1')
        top = node
    elif t1 is None:
        # Case 2: only the right tree exists — insert node at its left edge.
        # FIXME keep track of count if possible
        if DEBUG_JOIN:
            print('Join Case 2')
        top = avl_insert_dir(t2, node, 0)
    elif t2 is None:
        # Case 3: only the left tree exists — insert node at its right edge.
        if DEBUG_JOIN:
            print('Join Case 3')
        top = avl_insert_dir(t1, node, 1)
    else:
        h1 = height(t1)
        h2 = height(t2)
        if h1 > h2 + 1:
            # Case 4: left tree is taller — descend into t1's right spine.
            if DEBUG_JOIN:
                print('Join Case 4')
            top = avl_join_dir_recursive(t1, t2, node, 1)
            if DEBUG_JOIN:
                ascii_tree(t1, 'top')
        elif h2 > h1 + 1:
            # Case 5: right tree is taller — descend into t2's left spine.
            if DEBUG_JOIN:
                print('Join Case 5')
                ascii_tree(t1)
                ascii_tree(t2)
            top = avl_join_dir_recursive(t1, t2, node, 0)
            if DEBUG_JOIN:
                ascii_tree(top)
        else:
            # Case 6: heights are within 1 of each other — node can simply
            # become the new root with t1 and t2 as its children.
            if DEBUG_JOIN:
                print('Join Case 6')
            # Insert at the top of the tree
            top = avl_new_top(t1, t2, node, 0)
    return top
|
Joins two trees `t1` and `t1` with an intermediate key-value pair
CommandLine:
python -m utool.experimental.euler_tour_tree_avl avl_join
Example:
>>> # DISABLE_DOCTEST
>>> from utool.experimental.euler_tour_tree_avl import * # NOQA
>>> self = EulerTourTree(['a', 'b', 'c', 'b', 'd', 'b', 'a'])
>>> other = EulerTourTree(['E', 'F', 'G', 'F', 'E'])
>>> node = Node(value='Q')
>>> root = avl_join(self.root, other.root, node)
>>> new = EulerTourTree(root=root)
>>> print('new = %r' % (new,))
>>> ut.quit_if_noshow()
>>> self.print_tree()
>>> other.print_tree()
>>> new.print_tree()
Example:
>>> # DISABLE_DOCTEST
>>> from utool.experimental.euler_tour_tree_avl import * # NOQA
>>> self = EulerTourTree(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K'])
>>> other = EulerTourTree(['X'])
>>> node = Node(value='Q')
>>> root = avl_join(self.root, other.root, node)
>>> new = EulerTourTree(root=root)
>>> print('new = %r' % (new,))
>>> ut.quit_if_noshow()
>>> ut.qtensure()
>>> #self.show_nx(fnum=1)
>>> #other.show_nx(fnum=2)
>>> new.show_nx()
Running Time:
O(abs(r(t1) - r(t2)))
O(abs(height(t1) - height(t2)))
|
def filter_query(self, query, field, value):
    """Filter a query with a case-insensitive LIKE match on *field*."""
    pattern = "%{}%".format(value.lower())
    return query.where(field ** pattern)
|
Filter a query.
|
def get_column_keys_and_names(table):
    """
    Return a generator of tuples ``(k, c)`` such that ``k`` is the name of
    the python attribute for the column and ``c`` is the name of the column
    in the sql table.
    """
    mapper_columns = inspect(table).mapper.c
    return ((attr_key, column.name)
            for attr_key, column in mapper_columns.items())
|
Return a generator of tuples k, c such that k is the name of the python attribute for
the column and c is the name of the column in the sql table.
|
def md5(filename: str) -> str:
    """
    Given a filename produce an md5 hash of the contents.

    >>> import tempfile, os
    >>> f = tempfile.NamedTemporaryFile(delete=False)
    >>> f.write(b'Hello Wirld!')
    12
    >>> f.close()
    >>> md5(f.name)
    '997c62b6afe9712cad3baffb49cb8c8a'
    >>> os.unlink(f.name)
    """
    digest = hashlib.md5()
    # Read in fixed-size chunks so arbitrarily large files fit in memory.
    with open(filename, "rb") as handle:
        while True:
            chunk = handle.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
|
Given a filename produce an md5 hash of the contents.
>>> import tempfile, os
>>> f = tempfile.NamedTemporaryFile(delete=False)
>>> f.write(b'Hello Wirld!')
12
>>> f.close()
>>> md5(f.name)
'997c62b6afe9712cad3baffb49cb8c8a'
>>> os.unlink(f.name)
|
def load_lang_conf():
    """
    Load language setting from language config file if it exists, otherwise
    try to use the local settings if Spyder provides a translation, or
    return the default if no translation is provided.
    """
    if not osp.isfile(LANG_FILE):
        # No config yet: derive from the interface language and persist it.
        lang = get_interface_language()
        save_lang_conf(lang)
    else:
        with open(LANG_FILE, 'r') as f:
            lang = f.read()
    # Save language again if it's been disabled
    if lang.strip('\n') in DISABLED_LANGUAGES:
        lang = DEFAULT_LANGUAGE
        save_lang_conf(lang)
    return lang
|
Load language setting from language config file if it exists, otherwise
try to use the local settings if Spyder provides a translation, or
return the default if no translation provided.
|
def hmget(self, key, *fields):
    """
    Returns the values associated with the specified `fields` in a hash.

    For every ``field`` that does not exist in the hash, :data:`None`
    is returned. Because non-existing keys are treated as empty
    hashes, calling :meth:`hmget` against a non-existing key will
    return a list of :data:`None` values.

    .. note::
        *Time complexity*: ``O(N)`` where ``N`` is the number of fields
        being requested.

    :param key: The key of the hash
    :type key: :class:`str`, :class:`bytes`
    :param fields: iterable of field names to retrieve
    :returns: a :class:`dict` of field name to value mappings for
        each of the requested fields
    :rtype: dict
    """
    command = [b'HMGET', key] + list(fields)

    def zip_fields(val_array):
        # Pair each requested field with the value Redis returned for it.
        return dict(zip(fields, val_array))

    return self._execute(command, format_callback=zip_fields)
|
Returns the values associated with the specified `fields` in a hash.
For every ``field`` that does not exist in the hash, :data:`None`
is returned. Because non-existing keys are treated as empty
hashes, calling :meth:`hmget` against a non-existing key will
return a list of :data:`None` values.
.. note::
*Time complexity*: ``O(N)`` where ``N`` is the number of fields
being requested.
:param key: The key of the hash
:type key: :class:`str`, :class:`bytes`
:param fields: iterable of field names to retrieve
:returns: a :class:`dict` of field name to value mappings for
each of the requested fields
:rtype: dict
|
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
    """Extract device information from the iPod plist.

    Args:
        parser_mediator (ParserMediator): mediates interactions between parsers
            and other components, such as storage and dfvfs.
        match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
    """
    for device_identifier, device_information in match.get('Devices', {}).items():
        connected_time = device_information.get('Connected', None)
        if not connected_time:
            # A device without a connection timestamp yields no event.
            continue

        event_data = IPodPlistEventData()
        event_data.device_id = device_identifier

        # TODO: refactor.
        for key, value in device_information.items():
            if key == 'Connected':
                continue
            # Map plist keys like "Firmware Version" to attribute names
            # like "firmware_version".
            setattr(event_data, key.lower().replace(' ', '_'), value)

        event = time_events.PythonDatetimeEvent(
            connected_time, definitions.TIME_DESCRIPTION_LAST_CONNECTED)
        parser_mediator.ProduceEventWithEventData(event, event_data)
|
Extract device information from the iPod plist.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
|
def add_fields(self, log_record, record, message_dict):
    """
    Override this method to implement custom logic for adding fields.

    Populates *log_record* with the required fields from *record*, merges
    in *message_dict* and any extra record attributes, and optionally adds
    a timestamp field.
    """
    for field in self._required_fields:
        log_record[field] = record.__dict__.get(field)
    log_record.update(message_dict)
    merge_record_extra(record, log_record, reserved=self._skip_fields)
    if self.timestamp:
        # self.timestamp may be a custom field name (str) or a truthy flag.
        # Use isinstance rather than `type(...) == str` so str subclasses
        # are honored as field names too.
        key = self.timestamp if isinstance(self.timestamp, str) else 'timestamp'
        log_record[key] = datetime.utcnow()
|
Override this method to implement custom logic for adding fields.
|
def rgb(self, **kwargs):
    ''' Convert the image to a 3 band RGB for plotting

    This method shares the same arguments as plot(). It will perform visual
    adjustment on the image and prepare the data for plotting in MatplotLib.
    Values are converted to an appropriate precision and the axis order is
    changed to put the band axis last.

    Recognized kwargs (all optional): ``bands`` (exactly 3 band indices),
    ``blm`` (base-layer match), ``histogram`` (one of "equalize", "match",
    "minmax", "ignore"), ``stretch`` (percentile pair).
    '''
    if "bands" in kwargs:
        use_bands = kwargs["bands"]
        assert len(use_bands) == 3, 'Plot method only supports single or 3-band outputs'
        # Consume the key so downstream histogram_* calls don't see it.
        del kwargs["bands"]
    else:
        use_bands = self._rgb_bands
    if kwargs.get('blm') == True:
        return self.histogram_match(use_bands, **kwargs)
    # if not specified or DRA'ed, default to a 2-98 stretch
    if "histogram" not in kwargs:
        if "stretch" not in kwargs:
            if not self.options.get('dra'):
                kwargs['stretch'] = [2,98]
        return self.histogram_stretch(use_bands, **kwargs)
    elif kwargs["histogram"] == "equalize":
        return self.histogram_equalize(use_bands, **kwargs)
    elif kwargs["histogram"] == "match":
        return self.histogram_match(use_bands, **kwargs)
    elif kwargs["histogram"] == "minmax":
        return self.histogram_stretch(use_bands, stretch=[0, 100], **kwargs)
    # DRA'ed images should be left alone if not explicitly adjusted
    elif kwargs["histogram"] == "ignore" or self.options.get('dra'):
        # No adjustment: just read the bands and move the band axis last.
        data = self._read(self[use_bands,...], **kwargs)
        return np.rollaxis(data, 0, 3)
    else:
        raise KeyError('Unknown histogram parameter, use "equalize", "match", "minmax", or "ignore"')
|
Convert the image to a 3 band RGB for plotting
This method shares the same arguments as plot(). It will perform visual adjustment on the
image and prepare the data for plotting in MatplotLib. Values are converted to an
appropriate precision and the axis order is changed to put the band axis last.
|
def ttl(self, key):
    """
    Emulate ttl.

    Even though the official redis commands documentation at
    http://redis.io/commands/ttl states "Return value: Integer reply: TTL in
    seconds, -2 when key does not exist or -1 when key does not have a
    timeout." the redis-py lib returns None for both these cases.
    The lib behavior has been emulated here.

    :param key: key for which ttl is requested.
    :returns: the number of seconds till timeout, None if the key does not
        exist or if the key has no timeout (as per the redis-py lib behavior).
    """
    milliseconds = self.pttl(key)
    # Pass through None and the negative sentinels unchanged.
    if milliseconds is None or milliseconds < 0:
        return milliseconds
    return milliseconds // 1000
|
Emulate ttl
Even though the official redis commands documentation at http://redis.io/commands/ttl
states "Return value: Integer reply: TTL in seconds, -2 when key does not exist or -1
when key does not have a timeout." the redis-py lib returns None for both these cases.
The lib behavior has been emulated here.
:param key: key for which ttl is requested.
:returns: the number of seconds till timeout, None if the key does not exist or if the
key has no timeout(as per the redis-py lib behavior).
|
def api_key(value=None):
    """Set or get the API key.
    Also set via environment variable GRAPHISTRY_API_KEY."""
    current = PyGraphistry._config['api_key']
    if value is None:
        return current
    # setter: only store and force re-authentication when the key changes
    if value is not current:
        PyGraphistry._config['api_key'] = value.strip()
        PyGraphistry._is_authenticated = False
|
Set or get the API key.
Also set via environment variable GRAPHISTRY_API_KEY.
|
def add_uppercase(table):
    """
    Extend the table with uppercase options

    >>> print("а" in add_uppercase({"а": "a"}))
    True
    >>> print(add_uppercase({"а": "a"})["а"] == "a")
    True
    >>> print("А" in add_uppercase({"а": "a"}))
    True
    >>> print(add_uppercase({"а": "a"})["А"] == "A")
    True
    >>> print(len(add_uppercase({"а": "a"}).keys()))
    2
    >>> print("Аа" in add_uppercase({"аа": "aa"}))
    True
    >>> print(add_uppercase({"аа": "aa"})["Аа"] == "Aa")
    True
    """
    extended = dict(table)
    extended.update({key.capitalize(): value.capitalize()
                     for key, value in table.items()})
    return extended
|
Extend the table with uppercase options
>>> print("а" in add_uppercase({"а": "a"}))
True
>>> print(add_uppercase({"а": "a"})["а"] == "a")
True
>>> print("А" in add_uppercase({"а": "a"}))
True
>>> print(add_uppercase({"а": "a"})["А"] == "A")
True
>>> print(len(add_uppercase({"а": "a"}).keys()))
2
>>> print("Аа" in add_uppercase({"аа": "aa"}))
True
>>> print(add_uppercase({"аа": "aa"})["Аа"] == "Aa")
True
|
def _get_controllers(self):
    """Iterate through the installed controller entry points and import
    the module and assign the handle to the CLI._controllers dict.

    :return: dict
    """
    handles = {}
    for entry_point in pkg_resources.iter_entry_points(group=self.CONTROLLERS):
        LOGGER.debug('Loading %s controller', entry_point.name)
        handles[entry_point.name] = importlib.import_module(entry_point.module_name)
    return handles
|
Iterate through the installed controller entry points and import
the module and assign the handle to the CLI._controllers dict.
:return: dict
|
def object2code(key, code):
    """Returns code for widget from dict object"""
    if key in ("xscale", "yscale"):
        # Scale widgets store a boolean: True iff the scale is logarithmic.
        return code == "log"
    return unicode(code)
|
Returns code for widget from dict object
|
def _init_go_sets(self, go_fins):
    """Get lists of GO IDs, one set per input file."""
    assert go_fins, "EXPECTED FILES CONTAINING GO IDs"
    assert len(go_fins) >= 2, "EXPECTED 2+ GO LISTS. FOUND: {L}".format(
        L=' '.join(go_fins))
    reader = GetGOs(self.godag)
    go_sets = []
    for go_file in go_fins:
        assert os.path.exists(go_file), "GO FILE({F}) DOES NOT EXIST".format(F=go_file)
        go_sets.append(reader.get_usrgos(go_file, sys.stdout))
    return go_sets
|
Get lists of GO IDs.
|
def atom_fractions(atoms):
    r'''Calculates the atomic fractions of each element in a compound,
    given a dictionary of its atoms and their counts, in the format
    {symbol: count}.

    .. math::
        a_i = \frac{n_i}{\sum_i n_i}

    Parameters
    ----------
    atoms : dict
        dictionary of counts of individual atoms, indexed by symbol with
        proper capitalization, [-]

    Returns
    -------
    afracs : dict
        dictionary of atomic fractions of individual atoms, indexed by symbol
        with proper capitalization, [-]

    Notes
    -----
    No actual data on the elements is used, so incorrect or custom compounds
    would not raise an error.

    Examples
    --------
    >>> atom_fractions({'H': 12, 'C': 20, 'O': 5})
    {'H': 0.32432432432432434, 'C': 0.5405405405405406, 'O': 0.13513513513513514}

    References
    ----------
    .. [1] RDKit: Open-source cheminformatics; http://www.rdkit.org
    '''
    total = sum(atoms.values())
    return {symbol: count/total for symbol, count in atoms.items()}
|
r'''Calculates the atomic fractions of each element in a compound,
given a dictionary of its atoms and their counts, in the format
{symbol: count}.
.. math::
a_i = \frac{n_i}{\sum_i n_i}
Parameters
----------
atoms : dict
dictionary of counts of individual atoms, indexed by symbol with
proper capitalization, [-]
Returns
-------
afracs : dict
dictionary of atomic fractions of individual atoms, indexed by symbol
with proper capitalization, [-]
Notes
-----
No actual data on the elements is used, so incorrect or custom compounds
would not raise an error.
Examples
--------
>>> atom_fractions({'H': 12, 'C': 20, 'O': 5})
{'H': 0.32432432432432434, 'C': 0.5405405405405406, 'O': 0.13513513513513514}
References
----------
.. [1] RDKit: Open-source cheminformatics; http://www.rdkit.org
|
def V_vertical_conical_concave(D, a, h):
    r'''Calculates volume of a vertical tank with a concave conical bottom,
    according to [1]_. No provision for the top of the tank is made here.

    .. math::
        V = \frac{\pi D^2}{12} \left(3h + a - \frac{(a+h)^3}{a^2}\right)
        ,\;\; 0 \le h < |a|

    .. math::
        V = \frac{\pi D^2}{12} (3h + a ),\;\; h \ge |a|

    Parameters
    ----------
    D : float
        Diameter of the main cylindrical section, [m]
    a : float
        Negative distance the cone head extends inside the main cylinder, [m]
    h : float
        Height, as measured up to where the fluid ends, [m]

    Returns
    -------
    V : float
        Volume [m^3]

    Examples
    --------
    Matching example from [1]_, with inputs in inches and volume in gallons.

    >>> V_vertical_conical_concave(D=113., a=-33, h=15)/231
    251.15825565795188

    References
    ----------
    .. [1] Jones, D. "Compute Fluid Volumes in Vertical Tanks." Chemical
       Processing. December 18, 2003.
       http://www.chemicalprocessing.com/articles/2003/193/
    '''
    # Common prefactor pi*D^2/12; the cone correction term only applies
    # while the liquid level is still inside the concave head.
    if h < abs(a):
        return pi*D**2/12.*(3*h + a - (a + h)**3/a**2)
    return pi*D**2/12.*(3*h + a)
|
r'''Calculates volume of a vertical tank with a concave conical bottom,
according to [1]_. No provision for the top of the tank is made here.
.. math::
V = \frac{\pi D^2}{12} \left(3h + a - \frac{(a+h)^3}{a^2}\right)
,\;\; 0 \le h < |a|
.. math::
V = \frac{\pi D^2}{12} (3h + a ),\;\; h \ge |a|
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
a : float
Negative distance the cone head extends inside the main cylinder, [m]
h : float
Height, as measured up to where the fluid ends, [m]
Returns
-------
V : float
Volume [m^3]
Examples
--------
Matching example from [1]_, with inputs in inches and volume in gallons.
>>> V_vertical_conical_concave(D=113., a=-33, h=15)/231
251.15825565795188
References
----------
.. [1] Jones, D. "Compute Fluid Volumes in Vertical Tanks." Chemical
Processing. December 18, 2003.
http://www.chemicalprocessing.com/articles/2003/193/
|
def get_lang_array(self):
    """Return the supported language directions as an array."""
    response = self.yandex_translate_request("getLangs", "")
    # Raise a descriptive error if the API call failed.
    self.handle_errors(response)
    return response.json()["dirs"]
|
gets supported langs as an array
|
def format_timedelta(td_object):
    """Format a timedelta object for display to users.

    Parameters
    ----------
    td_object : datetime.timedelta
        Duration to format.

    Returns
    -------
    str
        Human-readable duration such as "1 minute, 30 seconds".
        Durations shorter than one second yield an empty string.
    """
    # timedelta.total_seconds() exists since Python 2.7; the old manual
    # computation (for 2.6 compatibility) is no longer needed.
    seconds = int(td_object.total_seconds())
    periods = [('year', 60*60*24*365),
               ('month', 60*60*24*30),
               ('day', 60*60*24),
               ('hour', 60*60),
               ('minute', 60),
               ('second', 1)]
    strings = []
    for period_name, period_seconds in periods:
        # BUGFIX: use >= so exact multiples are rendered in the larger
        # unit — previously exactly 60s produced "60 seconds" and exactly
        # 1s produced an empty string.
        if seconds >= period_seconds:
            period_value, seconds = divmod(seconds, period_seconds)
            if period_value == 1:
                strings.append("%s %s" % (period_value, period_name))
            else:
                strings.append("%s %ss" % (period_value, period_name))
    return ", ".join(strings)
|
Format a timedelta object for display to users
Returns
-------
str
|
def get_context_json(self, context):
    '''
    Return a base answer for a json answer.

    The answer carries metadata, the active filter, and a table whose
    head is prebuilt; body, header and summary are left for the caller.
    '''
    meta = self.__jcontext_metadata(context)
    active_filter = self.__jcontext_filter(context)
    table_head = self.__jcontext_tablehead(context)
    return {
        'meta': meta,
        'filter': active_filter,
        'table': {
            'head': table_head,
            'body': None,
            'header': None,
            'summary': None,
        },
    }
|
Return a base answer for a json answer
|
def action_logging(f):
    """
    Decorator that records a Log row for each invocation of the wrapped
    view function, capturing the acting user and request parameters.
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        with create_session() as session:
            user = 'anonymous' if g.user.is_anonymous else g.user.username
            log = Log(
                event=f.__name__,
                task_instance=None,
                owner=user,
                extra=str(list(request.args.items())),
                task_id=request.args.get('task_id'),
                dag_id=request.args.get('dag_id'))
            if 'execution_date' in request.args:
                log.execution_date = pendulum.parse(
                    request.args.get('execution_date'))
            session.add(log)
        return f(*args, **kwargs)
    return wrapper
|
Decorator to log user actions
|
def _edge_mapping(G):
"""Assigns a variable for each edge in G.
(u, v) and (v, u) map to the same variable.
"""
edge_mapping = {edge: idx for idx, edge in enumerate(G.edges)}
edge_mapping.update({(e1, e0): idx for (e0, e1), idx in edge_mapping.items()})
return edge_mapping
|
Assigns a variable for each edge in G.
(u, v) and (v, u) map to the same variable.
|
def ParseRow(header, row):
    """Parses a single row of osquery output.

    Args:
        header: A parsed header describing the row format.
        row: A row in a "parsed JSON" representation.

    Returns:
        A parsed `rdf_osquery.OsqueryRow` instance.
    """
    precondition.AssertDictType(row, Text, Text)

    parsed = rdf_osquery.OsqueryRow()
    # Emit values in header-column order, not in row-dict order.
    for column in header.columns:
        parsed.values.append(row[column.name])
    return parsed
|
Parses a single row of osquery output.
Args:
header: A parsed header describing the row format.
row: A row in a "parsed JSON" representation.
Returns:
A parsed `rdf_osquery.OsqueryRow` instance.
|
def repo_data(PACKAGES_TXT, repo, flag):
    """Grab package data (name, location, compressed and uncompressed size)
    from a repository PACKAGES.TXT listing, then apply the repo-specific
    filter.
    """
    (name, location, size, unsize,
     rname, rlocation, rsize, runsize) = ([] for i in range(8))
    # Field prefix -> (slice of the payload, destination list).
    fields = (
        ("PACKAGE NAME:", slice(15, None), name),
        ("PACKAGE LOCATION:", slice(21, None), location),
        ("PACKAGE SIZE (compressed):", slice(28, -2), size),
        ("PACKAGE SIZE (uncompressed):", slice(30, -2), unsize),
    )
    for line in PACKAGES_TXT.splitlines():
        if _meta_.rsl_deps in ["on", "ON"] and "--resolve-off" not in flag:
            status(0.000005)
        for prefix, payload, bucket in fields:
            if line.startswith(prefix):
                bucket.append(line[payload].strip())
    if repo == "rlw":
        rname, rlocation, rsize, runsize = rlw_filter(name, location, size, unsize)
    elif repo == "alien":
        rname, rlocation, rsize, runsize = alien_filter(name, location, size, unsize)
    elif repo == "rested":
        rname, rlocation, rsize, runsize = rested_filter(name, location, size, unsize)
    elif repo == "ktown":
        rname, rlocation, rsize, runsize = ktown_filter(name, location, size, unsize)
    else:
        rname, rlocation, rsize, runsize = name, location, size, unsize
    return [rname, rlocation, rsize, runsize]
|
Grab data packages
|
def lal(self):
    """Return a LAL vector object containing a copy of this data.

    The LAL vector type is selected from the underlying numpy dtype
    (REAL4/REAL8/COMPLEX8/COMPLEX16).

    Raises:
        TypeError: if the dtype has no LAL vector equivalent.
    """
    if self._data.dtype == float32:
        lal_data = _lal.CreateREAL4Vector(len(self))
    elif self._data.dtype == float64:
        lal_data = _lal.CreateREAL8Vector(len(self))
    elif self._data.dtype == complex64:
        lal_data = _lal.CreateCOMPLEX8Vector(len(self))
    elif self._data.dtype == complex128:
        lal_data = _lal.CreateCOMPLEX16Vector(len(self))
    else:
        # Previously an unsupported dtype fell through and crashed with an
        # opaque AttributeError on None; fail with a clear error instead.
        raise TypeError(
            'Unsupported dtype for LAL conversion: {0}'.format(self._data.dtype))
    lal_data.data[:] = self.numpy()
    return lal_data
|
Returns a LAL Object that contains this data
|
def correct_rates(rates, opt_qes, combs):
    """Applies optimal qes to rates.

    Should be closer to fitted_rates afterwards.

    Parameters
    ----------
    rates: numpy array of rates of all PMT combinations
    opt_qes: numpy array of optimal qe values for all PMTs
    combs: pmt combinations used to correct

    Returns
    -------
    corrected_rates: numpy array of corrected rates for all PMT combinations
    """
    # Divide each rate by the optimal QE of both PMTs in its combination.
    corrected = [rate / opt_qes[first] / opt_qes[second]
                 for rate, (first, second) in zip(rates, combs)]
    return np.array(corrected)
|
Applies optimal qes to rates.
Should be closer to fitted_rates afterwards.
Parameters
----------
rates: numpy array of rates of all PMT combinations
opt_qes: numpy array of optimal qe values for all PMTs
combs: pmt combinations used to correct
Returns
-------
corrected_rates: numpy array of corrected rates for all PMT combinations
|
def create(args):
    """
    cdstarcat create PATH

    Create objects in CDSTAR specified by PATH.
    When PATH is a file, a single object (possibly with multiple bitstreams) is created;
    When PATH is a directory, an object will be created for each file in the directory
    (recursing into subdirectories).
    """
    path = args.args[0]
    with _catalog(args) as catalog:
        for fname, created, obj in catalog.create(path, {}):
            status = 'new' if created else 'existing'
            args.log.info('{0} -> {1} object {2.id}'.format(fname, status, obj))
|
cdstarcat create PATH
Create objects in CDSTAR specified by PATH.
When PATH is a file, a single object (possibly with multiple bitstreams) is created;
When PATH is a directory, an object will be created for each file in the directory
(recursing into subdirectories).
|
def extract_package_name(line):
    """Return the top-level package name in an import statement, or None."""
    # These characters would indicate a multi-line or compound statement,
    # which this helper does not support.
    for forbidden in ('\\', '(', ')', ';'):
        assert forbidden not in line
    stripped = line.lstrip()
    if not stripped.startswith(('import', 'from')):
        # Not an import statement (e.g. doctest output) -- nothing to extract.
        return None
    word = line.split()[1]
    # 'a.b.c' imports package 'a'.
    package = word.split('.')[0]
    assert ' ' not in package
    return package
|
Return package name in import statement.
|
def getSimilarTermsForTerm(self, term, contextId=None, posType=None, getFingerprint=None, startIndex=0, maxResults=10):
    """Get the similar terms of a given term.

    Args:
        term, str: A term in the retina (required)
        contextId, int: The identifier of a context (optional)
        posType, str: Part of speech (optional)
        getFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
        startIndex, int: The start-index for pagination (optional)
        maxResults, int: Max results per page (optional)

    Returns:
        list of Term

    Raises:
        CorticalioException: if the request was not successful
    """
    # Delegate to the terms API client, bound to this client's retina.
    return self._terms.getSimilarTerms(
        self._retina, term, contextId, posType,
        getFingerprint, startIndex, maxResults)
|
Get the similar terms of a given term
Args:
term, str: A term in the retina (required)
contextId, int: The identifier of a context (optional)
posType, str: Part of speech (optional)
getFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
startIndex, int: The start-index for pagination (optional)
maxResults, int: Max results per page (optional)
Returns:
list of Term
Raises:
CorticalioException: if the request was not successful
|
def decrypt_from(self, f, mac_bytes=10):
    """Decrypt a message read from the file-like object *f*.

    Generator: yields a :class:`DecryptionContext` bound to ``f`` and,
    after the consumer resumes it, drains the remaining ciphertext via
    ``ctx.read()``.  Presumably used as a context manager (e.g. wrapped
    with ``contextlib.contextmanager`` at the call/decoration site) --
    TODO confirm against the rest of the module.

    :param f: file-like object containing the encrypted message
    :param mac_bytes: number of MAC bytes handed to the context (default 10)
    """
    ctx = DecryptionContext(self.curve, f, self, mac_bytes)
    yield ctx
    ctx.read()
|
Decrypts a message from f.
|
def create_aside(self, block_type, keys):
    """
    The aside counterpart of construct_xblock: resolve *block_type* to an
    XBlockAside class and instantiate it with this runtime and *keys*.
    """
    aside_class = XBlockAside.load_class(block_type)
    return aside_class(runtime=self, scope_ids=keys)
|
The aside version of construct_xblock: take a type and key. Return an instance
|
def _make_string_formatter(f, offset=None):
    """Build a closure that strftime-formats a value with format *f*,
    optionally shifting the date by *offset* first."""
    fmt = f
    delta = offset

    def _formatter(v):
        # _date applies the optional offset before formatting.
        return time.strftime(fmt, _date(v, delta).timetuple())

    return _formatter
|
A closure-izer for string arguments that include a format and possibly an offset.
|
def com_google_fonts_check_ttx_roundtrip(font):
    """Checking with fontTools.ttx"""
    from fontTools import ttx
    import sys
    ttFont = ttx.TTFont(font)
    failed = False
    # ttx reports problems by printing rather than raising, so capture
    # everything written to stdout/stderr while the roundtrip runs.
    class TTXLogger:
        # NOTE(review): class-level mutable list -- shared across instances;
        # appears intentional here since only one logger is created per call,
        # but confirm it is reset between checks.
        msgs = []
        def __init__(self):
            # Redirect both streams to this object; restore() undoes it.
            self.original_stderr = sys.stderr
            self.original_stdout = sys.stdout
            sys.stderr = self
            sys.stdout = self
        def write(self, data):
            # De-duplicate repeated messages.
            if data not in self.msgs:
                self.msgs.append(data)
        def restore(self):
            sys.stderr = self.original_stderr
            sys.stdout = self.original_stdout
    from xml.parsers.expat import ExpatError
    try:
        logger = TTXLogger()
        xml_file = font + ".xml"
        # Export pass: TTF -> XML; any messages printed are export errors.
        ttFont.saveXML(xml_file)
        export_error_msgs = logger.msgs
        if len(export_error_msgs):
            failed = True
            yield INFO, ("While converting TTF into an XML file,"
                         " ttx emited the messages listed below.")
            for msg in export_error_msgs:
                yield FAIL, msg.strip()
        # Import pass: XML -> TTF; only messages not already seen during
        # export count as import errors.
        f = ttx.TTFont()
        f.importXML(font + ".xml")
        import_error_msgs = [msg for msg in logger.msgs if msg not in export_error_msgs]
        if len(import_error_msgs):
            failed = True
            yield INFO, ("While importing an XML file and converting"
                         " it back to TTF, ttx emited the messages"
                         " listed below.")
            for msg in import_error_msgs:
                yield FAIL, msg.strip()
        # NOTE(review): restore() is not reached when ExpatError is raised,
        # leaving sys.stdout/stderr redirected -- confirm whether a
        # finally-block restore is wanted.
        logger.restore()
    except ExpatError as e:
        failed = True
        yield FAIL, ("TTX had some problem parsing the generated XML file."
                     " This most likely mean there's some problem in the font."
                     " Please inspect the output of ttx in order to find more"
                     " on what went wrong. A common problem is the presence of"
                     " control characteres outside the accepted character range"
                     " as defined in the XML spec. FontTools has got a bug which"
                     " causes TTX to generate corrupt XML files in those cases."
                     " So, check the entries of the name table and remove any"
                     " control chars that you find there."
                     " The full ttx error message was:\n"
                     "======\n{}\n======".format(e))
    if not failed:
        yield PASS, "Hey! It all looks good!"
    # and then we need to cleanup our mess...
    if os.path.exists(xml_file):
        os.remove(xml_file)
|
Checking with fontTools.ttx
|
def flattenTrees(root, nodeSelector: Callable[[LNode], bool]):
    """
    Walk all nodes and discover trees of nodes (usually operators)
    and reduce them to single node with multiple outputs

    :param root: node whose children (recursively) are scanned for trees
    :param nodeSelector: predicate selecting the nodes eligible for reduction

    :attention: selected nodes has to have single output
        and has to be connected to nets with single driver
    """
    # Recurse bottom-up so child containers are flattened first.
    for ch in root.children:
        if ch.children:
            flattenTrees(ch, nodeSelector)
    # collect all nodes which can potentially be reduced
    reducibleChildren = set()
    for ch in root.children:
        if nodeSelector(ch):
            reducibleChildren.add(ch)
    while reducibleChildren:
        # try to pick a node from a random tree and search its root
        _treeRoot = reducibleChildren.pop()
        reducibleChildren.add(_treeRoot)
        # we need to keep order of inputs, use preorder
        treeRoot = searchRootOfTree(reducibleChildren, _treeRoot)
        reducedNodes, inputEdges = collectNodesInTree(treeRoot, reducibleChildren)
        # if tree is big enough for reduction, reduce it to single node
        if len(reducedNodes) > 1:
            newName = reducedNodes[0].name
            newNode = root.addNode(newName)
            # Single output port replaces the tree root's output.
            o = newNode.addPort("", PortType.OUTPUT, PortSide.EAST)
            oEdges = treeRoot.east[0].outgoingEdges
            # Re-route every edge that left the tree root so it now leaves
            # the replacement node instead.
            for outputedge in list(oEdges):
                dsts = list(outputedge.dsts)
                assert len(dsts) > 0
                outputedge.remove()
                root.addHyperEdge([o, ], dsts, originObj=outputedge.originObj)
            # Create one input port per external input edge; label it with
            # the bit-slice it represents when the edge's origin has a dtype.
            for i, (iN, iP, iE) in enumerate(inputEdges):
                name = None
                index = len(inputEdges) - i - 1
                if hasattr(iE.originObj, "_dtype"):
                    w = iE.originObj._dtype.bit_length()
                    if w > 1:
                        name = "[%d:%d]" % ((index + 1) * w, index * w)
                    else:
                        name = None
                if name is None:
                    name = "[%d]" % (index)
                inp = newNode.addPort(name,
                                      PortType.INPUT, PortSide.WEST)
                iE.removeTarget(iP)
                iE.addTarget(inp)
            # Drop the now-merged nodes from the graph and the worklist.
            for n in reducedNodes:
                root.children.remove(n)
                reducibleChildren.remove(n)
        else:
            # Singleton "tree": nothing to merge, just stop revisiting it.
            reducibleChildren.remove(reducedNodes[0])
|
Walk all nodes and discover trees of nodes (usually operators)
and reduce them to single node with multiple outputs
:attention: selected nodes has to have single output
and has to be connected to nets with single driver
|
def woodbury_inv(self):
    """
    The inverse of the woodbury matrix, in the gaussian likelihood case it is defined as

    $$
    (K_{xx} + \Sigma_{xx})^{-1}
    \Sigma_{xx} := \texttt{Likelihood.variance / Approximate likelihood covariance}
    $$

    Computed lazily on first access and cached in ``self._woodbury_inv``.
    """
    if self._woodbury_inv is None:
        if self._woodbury_chol is not None:
            # dpotri computes the inverse from the Cholesky factor but only
            # fills one triangle; symmetrify copies it to the other half.
            self._woodbury_inv, _ = dpotri(self._woodbury_chol, lower=1)
            # self._woodbury_inv, _ = dpotrs(self.woodbury_chol, np.eye(self.woodbury_chol.shape[0]), lower=1)
            symmetrify(self._woodbury_inv)
        elif self._covariance is not None:
            # No Cholesky of the woodbury matrix available: form
            # K^{-1} (K - covariance) K^{-1} slice-by-slice, using two
            # dpotrs solves against chol(K) per output slice.
            B = np.atleast_3d(self._K) - np.atleast_3d(self._covariance)
            self._woodbury_inv = np.empty_like(B)
            for i in range(B.shape[-1]):
                tmp, _ = dpotrs(self.K_chol, B[:, :, i])
                self._woodbury_inv[:, :, i], _ = dpotrs(self.K_chol, tmp.T)
    return self._woodbury_inv
|
The inverse of the woodbury matrix, in the gaussian likelihood case it is defined as
$$
(K_{xx} + \Sigma_{xx})^{-1}
\Sigma_{xx} := \texttt{Likelihood.variance / Approximate likelihood covariance}
$$
|
def shape(self):
    """
    Returns (rowCount, valueCount)
    """
    snapshot = self.copy()
    # Query the dataset endpoint; row/value counts live under 'status'.
    status = requests.get(snapshot.dataset_url).json()['status']
    return (status['rowCount'], status['valueCount'])
|
Returns (rowCount, valueCount)
|
def start_stack(awsclient, stack_name, use_suspend=False):
    """Start an existing stack on AWS cloud.

    Brings a previously stopped stack back up: starts stopped RDS
    instances (waiting until they are available), starts stopped EC2
    instances, restores ECS service desired counts from the stack
    template, and either resumes suspended autoscaling processes
    (``use_suspend=True``) or resizes autoscaling groups back to the
    min/max values recorded in the template.

    :param awsclient: client factory exposing ``get_client(service_name)``
    :param stack_name: name of the CloudFormation stack to start
    :param use_suspend: use suspend and resume on the autoscaling group
        instead of resizing it back to the template values
    :return: exit_code (0 in every path visible here)
    """
    exit_code = 0
    # check for DisableStop
    #disable_stop = conf.get('deployment', {}).get('DisableStop', False)
    #if disable_stop:
    #    log.warn('\'DisableStop\' is set - nothing to do!')
    #else:
    if not stack_exists(awsclient, stack_name):
        log.warn('Stack \'%s\' not deployed - nothing to do!', stack_name)
    else:
        client_cfn = awsclient.get_client('cloudformation')
        client_autoscaling = awsclient.get_client('autoscaling')
        client_rds = awsclient.get_client('rds')
        # Enumerate every resource of the stack once; filtered below by type.
        resources = all_pages(
            client_cfn.list_stack_resources,
            { 'StackName': stack_name },
            lambda r: r['StackResourceSummaries']
        )
        autoscaling_groups = [
            r for r in resources
            if r['ResourceType'] == 'AWS::AutoScaling::AutoScalingGroup'
        ]
        # lookup all types of scaling processes
        # [Launch, Terminate, HealthCheck, ReplaceUnhealthy, AZRebalance
        # AlarmNotification, ScheduledActions, AddToLoadBalancer]
        response = client_autoscaling.describe_scaling_process_types()
        scaling_process_types = [t['ProcessName'] for t in response.get('Processes', [])]
        # starting db instances
        db_instances = [
            r['PhysicalResourceId'] for r in resources
            if r['ResourceType'] == 'AWS::RDS::DBInstance'
        ]
        stopped_db_instances = _filter_db_instances_by_status(
            awsclient, db_instances, ['stopped']
        )
        for db in stopped_db_instances:
            log.info('Starting RDS instance \'%s\'', db)
            client_rds.start_db_instance(DBInstanceIdentifier=db)
        # wait for db instances to become available
        for db in stopped_db_instances:
            waiter_db_available = client_rds.get_waiter('db_instance_available')
            waiter_db_available.wait(DBInstanceIdentifier=db)
        # starting ec2 instances
        instances = [
            r['PhysicalResourceId'] for r in resources
            if r['ResourceType'] == 'AWS::EC2::Instance'
        ]
        _start_ec2_instances(awsclient, instances)
        services = [
            r for r in resources
            if r['ResourceType'] == 'AWS::ECS::Service'
        ]
        # Template/parameters are only needed when restoring ECS desired
        # counts or ASG sizes from their original (template) values.
        if (autoscaling_groups and not use_suspend) or services:
            template, parameters = _get_template_parameters(awsclient, stack_name)
            # setting ECS desiredCount back
            if services:
                _start_ecs_services(awsclient, services, template, parameters)
        for asg in autoscaling_groups:
            if use_suspend:
                # alternative implementation to speed up start
                # only problem is that instances must survive stop & start
                # find instances in autoscaling group
                instances = all_pages(
                    client_autoscaling.describe_auto_scaling_instances,
                    {},
                    lambda r: [i['InstanceId'] for i in r.get('AutoScalingInstances', [])
                               if i['AutoScalingGroupName'] == asg['PhysicalResourceId']],
                )
                _start_ec2_instances(awsclient, instances)
                # resume all autoscaling processes
                log.info('Resuming all autoscaling processes for \'%s\'',
                         asg['LogicalResourceId'])
                response = client_autoscaling.resume_processes(
                    AutoScalingGroupName=asg['PhysicalResourceId'],
                    ScalingProcesses=scaling_process_types
                )
            else:
                # resize autoscaling group back to its original values
                log.info('Resize autoscaling group \'%s\' back to original values',
                         asg['LogicalResourceId'])
                min, max = _get_autoscaling_min_max(
                    template, parameters, asg['LogicalResourceId'])
                response = client_autoscaling.update_auto_scaling_group(
                    AutoScalingGroupName=asg['PhysicalResourceId'],
                    MinSize=min,
                    MaxSize=max
                )
    return exit_code
|
Start an existing stack on AWS cloud.
:param awsclient:
:param stack_name:
:param use_suspend: use suspend and resume on the autoscaling group
:return: exit_code
|
def _derive_charge(self, config):
    """Use a temperature window to identify the roast charge.

    The charge will manifest as a sudden downward trend on the temperature.
    Once found, we save it and avoid overwriting. The charge is needed in
    order to derive the turning point.

    :param config: Current snapshot of the configuration
    :type config: dict
    :returns: ``config`` when the charge is detected on this call,
        otherwise None (including when the charge was already found).
    """
    # Charge already recorded -- never overwrite it.
    if self._roast.get('charge'):
        return None
    self._window.append(config)
    time, temp = list(), list()
    for x in list(self._window):
        time.append(x['time'])
        temp.append(x['bean_temp'])
    # Fit a line over the windowed (time, bean_temp) samples; a negative
    # slope marks the sudden temperature drop that follows the charge.
    slope, intercept, r_value, p_value, std_err = linregress(time, temp)
    if slope < 0:
        self._roast['charge'] = self._roast['last']
        self.add_roast_event({'event': 'Charge'})
        return config
    return None
|
Use a temperature window to identify the roast charge.
The charge will manifest as a sudden downward trend on the temperature.
Once found, we save it and avoid overwriting. The charge is needed in
order to derive the turning point.
:param config: Current snapshot of the configuration
:type config: dict
:returns: None
|
def get_configured_provider():
    '''
    Return the first configured instance.

    Delegates to ``config.is_provider_configured`` with the salt-cloud
    module dunders (``__opts__``, ``__active_provider_name__``,
    ``__virtualname__``, ``__virtual_aliases__``).  A provider only
    counts as configured when it supplies a ``personal_access_token``.
    '''
    return config.is_provider_configured(
        opts=__opts__,
        provider=__active_provider_name__ or __virtualname__,
        aliases=__virtual_aliases__,
        required_keys=('personal_access_token',)
    )
|
Return the first configured instance.
|
def addresses(self):
    """
    Return a new raw REST interface to address resources

    :rtype: :py:class:`ns1.rest.ipam.Addresses`
    """
    # Lazy, function-scope import of the ipam REST bindings.
    from ns1.rest.ipam import Addresses
    return Addresses(self.config)
|
Return a new raw REST interface to address resources
:rtype: :py:class:`ns1.rest.ipam.Addresses`
|
def read_status(self, num_bytes=2):
    """Read up to 24 bits (num_bytes) of SPI flash status register contents
    via RDSR, RDSR2, RDSR3 commands

    Not all SPI flash supports all three commands. The upper 1 or 2
    bytes may be 0xFF.
    """
    # Status-register read opcodes, lowest byte first.
    RDSR_COMMANDS = (0x05, 0x35, 0x15)  # RDSR, RDSR2, RDSR3
    status = 0
    for byte_index, cmd in enumerate(RDSR_COMMANDS[:num_bytes]):
        status += self.run_spiflash_command(cmd, read_bits=8) << (8 * byte_index)
    return status
|
Read up to 24 bits (num_bytes) of SPI flash status register contents
via RDSR, RDSR2, RDSR3 commands
Not all SPI flash supports all three commands. The upper 1 or 2
bytes may be 0xFF.
|
def match_url(self, request):
    """
    Match the request against a file in the adapter directory

    :param request: The request
    :type request: :class:`requests.Request`
    :return: Path to the file
    :rtype: ``str``
    """
    parsed = urlparse(request.path_url)
    path_url = parsed.path
    query_params = parsed.query
    found = None
    for search_path in self.paths:
        candidate = os.path.join(BASE_PATH, search_path, path_url[1:]).lower()
        candidate_with_query = candidate + quote('?' + query_params).lower()
        # Only the inner loop breaks on a hit, so a match from a later
        # search path overwrites one from an earlier path.
        for item in self.index:
            if item[0] == candidate or item[0] == candidate_with_query:
                found = item[1]
                break
    return found
|
Match the request against a file in the adapter directory
:param request: The request
:type request: :class:`requests.Request`
:return: Path to the file
:rtype: ``str``
|
def precondition_u_kn(u_kn, N_k, f_k):
    """Subtract a sample-dependent constant from u_kn to improve precision

    Parameters
    ----------
    u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
        The reduced potential energies, i.e. -log unnormalized probabilities
    N_k : np.ndarray, shape=(n_states), dtype='int'
        The number of samples in each state
    f_k : np.ndarray, shape=(n_states), dtype='float'
        The reduced free energies of each state

    Returns
    -------
    u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
        The reduced potential energies, i.e. -log unnormalized probabilities

    Notes
    -----
    Returns u_kn - x_n, where x_n is based on the current estimate of f_k.
    Upon subtraction of x_n, the MBAR objective function changes by an
    additive constant, but its derivatives remain unchanged. We choose
    x_n such that the current objective function value is zero, which
    should give maximum precision in the objective function.
    """
    u_kn, N_k, f_k = validate_inputs(u_kn, N_k, f_k)
    # Shift each sample's column so its minimum over states is zero.
    u_kn = u_kn - u_kn.min(0)
    # Add the per-sample constant x_n that makes the current MBAR objective
    # evaluate to exactly zero (its derivatives are unchanged).
    u_kn += (logsumexp(f_k - u_kn.T, b=N_k, axis=1)) - N_k.dot(f_k) / float(N_k.sum())
    return u_kn
|
Subtract a sample-dependent constant from u_kn to improve precision
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
Notes
-----
Returns u_kn - x_n, where x_n is based on the current estimate of f_k.
Upon subtraction of x_n, the MBAR objective function changes by an
additive constant, but its derivatives remain unchanged. We choose
x_n such that the current objective function value is zero, which
should give maximum precision in the objective function.
|
def mouseDoubleClickEvent(self, event):
    """Reimplement Qt method: double-click jumps to the breakpoint line."""
    clicked = self.indexAt(event.pos())
    if self.model.breakpoints:
        entry = self.model.breakpoints[clicked.row()]
        # entry layout: (filename, line_number_str, ...)
        self.edit_goto.emit(entry[0], int(entry[1]), '')
    if clicked.column() == 2:
        # Double-clicking column 2 opens the conditional-breakpoint editor.
        self.set_or_edit_conditional_breakpoint.emit()
|
Reimplement Qt method
|
def _transform_legacy_stats(self, stats):
"""Convert legacy stats to new stats with pools key."""
# Fill pools for legacy driver reports
if stats and 'pools' not in stats:
pool = stats.copy()
pool['pool_name'] = self.id
for key in ('driver_version', 'shared_targets',
'sparse_copy_volume', 'storage_protocol',
'vendor_name', 'volume_backend_name'):
pool.pop(key, None)
stats['pools'] = [pool]
return stats
|
Convert legacy stats to new stats with pools key.
|
def visible_fields(self):
    """
    Return the reduced set of visible fields to output from the form.

    This method respects the provided ``fields`` configuration _and_
    excludes all fields from the ``exclude`` configuration.  If no
    ``fields`` were provided when configuring this fieldset, all visible
    fields minus the excluded fields will be returned.

    :return: List of bound field instances or empty tuple.
    """
    bound_fields = self.form.visible_fields()
    # Fall back to every visible field when no explicit render list is set.
    wanted = self.render_fields or [bf.name for bf in bound_fields]
    allowed = set(wanted) - set(self.exclude_fields)
    # Preserve the form's own field ordering.
    return [bf for bf in bound_fields if bf.name in allowed]
|
Returns the reduced set of visible fields to output from the form.
This method respects the provided ``fields`` configuration _and_ excludes
all fields from the ``exclude`` configuration.
If no ``fields`` were provided when configuring this fieldset, all visible
fields minus the excluded fields will be returned.
:return: List of bound field instances or empty tuple.
|
def predict_compound_pairs_iterated(
        reactions, formulas, prior=(1, 43), max_iterations=None,
        element_weight=element_weight):
    """Predict reaction pairs using iterated method.

    Returns a tuple containing a dictionary of predictions keyed by the
    reaction IDs, and the final number of iterations. Each reaction prediction
    entry contains a tuple with a dictionary of transfers and a dictionary of
    unbalanced compounds. The dictionary of unbalanced compounds is empty only
    if the reaction is balanced.

    Args:
        reactions: Dictionary or pair-iterable of (id, equation) pairs.
            IDs must be any hashable reaction identifier (e.g. string) and
            equation must be :class:`psamm.reaction.Reaction` objects.
        formulas: Dictionary mapping compound IDs to
            :class:`psamm.formula.Formula`. Formulas must be flattened.
        prior: Tuple of (alpha, beta) parameters for the MAP inference.
            If not provided, the default parameters will be used: (1, 43).
        max_iterations: Maximum iterations to run before stopping. If the
            stopping condition is reached before this number of iterations,
            the procedure also stops. If None, the procedure only stops when
            the stopping condition is reached.
        element_weight: A function providing returning weight value for the
            given :class:`psamm.formula.Atom` or
            :class:`psamm.formula.Radical`. If not provided, the default weight
            will be used (H=0, C=1, *=0.82)
    """
    prior_alpha, prior_beta = prior
    reactions = dict(reactions)
    # For every unordered compound pair: how many reactions could transfer
    # between them, and which reactions those are (for re-prediction later).
    pair_reactions = {}
    possible_pairs = Counter()
    for reaction_id, equation in iteritems(reactions):
        for (c1, _), (c2, _) in product(equation.left, equation.right):
            spair = tuple(sorted([c1.name, c2.name]))
            possible_pairs[spair] += 1
            pair_reactions.setdefault(spair, set()).add(reaction_id)
    next_reactions = set(reactions)
    pairs_predicted = None
    prediction = {}
    weights = {}
    iteration = 0
    # Fixed-point loop: re-predict only the reactions whose pair weights
    # changed, until no weight moves more than the tolerance (or the
    # iteration cap is hit).
    while len(next_reactions) > 0:
        iteration += 1
        if max_iterations is not None and iteration > max_iterations:
            break
        logger.info('Iteration {}: {} reactions...'.format(
            iteration, len(next_reactions)))
        for reaction_id in next_reactions:
            result = predict_compound_pairs(
                reactions[reaction_id], formulas, weights, element_weight)
            if result is None:
                continue
            transfer, balance = result
            # Group transferred formulas by (left compound, right compound).
            rpairs = {}
            for ((c1, _), (c2, _)), form in iteritems(transfer):
                rpairs.setdefault((c1, c2), []).append(form)
            prediction[reaction_id] = rpairs, balance
        # Tally how often each pair was actually predicted this round.
        pairs_predicted = Counter()
        for reaction_id, (rpairs, _) in iteritems(prediction):
            for c1, c2 in rpairs:
                spair = tuple(sorted([c1.name, c2.name]))
                pairs_predicted[spair] += 1
        next_reactions = set()
        for spair, total in sorted(iteritems(possible_pairs)):
            pred = pairs_predicted[spair]
            # The weight is set to the maximum a posteriori (MAP) estimate
            # of the primary pair probability distribution.
            posterior_alpha = prior_alpha + pred
            posterior_beta = prior_beta + total - pred
            pair_weight = ((posterior_alpha - 1) /
                           (posterior_alpha + posterior_beta - 2))
            # Re-queue every reaction touching a pair whose weight changed
            # by more than the convergence tolerance.
            if (spair not in weights or
                    abs(pair_weight - weights[spair]) > 1e-5):
                next_reactions.update(pair_reactions[spair])
            c1, c2 = spair
            weights[c1, c2] = pair_weight
            weights[c2, c1] = pair_weight
    return prediction, iteration
|
Predict reaction pairs using iterated method.
Returns a tuple containing a dictionary of predictions keyed by the
reaction IDs, and the final number of iterations. Each reaction prediction
entry contains a tuple with a dictionary of transfers and a dictionary of
unbalanced compounds. The dictionary of unbalanced compounds is empty only
if the reaction is balanced.
Args:
reactions: Dictionary or pair-iterable of (id, equation) pairs.
IDs must be any hashable reaction identifier (e.g. string) and
equation must be :class:`psamm.reaction.Reaction` objects.
formulas: Dictionary mapping compound IDs to
:class:`psamm.formula.Formula`. Formulas must be flattened.
prior: Tuple of (alpha, beta) parameters for the MAP inference.
If not provided, the default parameters will be used: (1, 43).
max_iterations: Maximum iterations to run before stopping. If the
stopping condition is reached before this number of iterations,
the procedure also stops. If None, the procedure only stops when
the stopping condition is reached.
element_weight: A function providing returning weight value for the
given :class:`psamm.formula.Atom` or
:class:`psamm.formula.Radical`. If not provided, the default weight
will be used (H=0, C=1, *=0.82)
|
def unpack(self, buff, offset=0):
    """Unpack a binary message into this object's attributes.

    Pass the correct length for list unpacking: a 16-bit length field is
    read at ``offset`` first, then the parent ``unpack`` is called with
    the buffer truncated to ``offset + length`` so list parsing stops at
    the declared message boundary.

    Args:
        buff (bytes): Binary data package to be unpacked.
        offset (int): Where to begin unpacking.
    """
    # First field of the message is its total length.
    unpack_length = UBInt16()
    unpack_length.unpack(buff, offset)
    # UBInt16 presumably supports integer arithmetic, making
    # offset + unpack_length a valid slice bound -- confirm in its class.
    super().unpack(buff[:offset+unpack_length], offset)
|
Unpack a binary message into this object's attributes.
Pass the correct length for list unpacking.
Args:
buff (bytes): Binary data package to be unpacked.
offset (int): Where to begin unpacking.
|
def load(self):
    """Read and parse the message file.

    On any failure, sets ``self.failed`` and either logs a warning
    (``fail_silently``) or raises ``MessageFailure``.
    """
    try:
        self._read()
        self._parse()
    except Exception as exc:
        self.failed = True
        params = {'path': self._path, 'exception': exc}
        if not self.fail_silently:
            raise exceptions.MessageFailure(
                _("Error processing message json file '%(path)s': "
                  "%(exception)s") % params)
        LOG.warning("Error processing message json file '%(path)s': "
                    "%(exception)s", params)
|
Read and parse the message file.
|
def start_login_server(self):
    """Start a server that will get a request from a user logging in.

    This uses the Implicit Grant Flow of OAuth2. The user is asked
    to login to twitch and grant PyTwitcher authorization.
    Once the user agrees, he is redirected to an url.
    This server will respond to that url and get the oauth token.

    The server serves in another thread. To shut him down, call
    :meth:`TwitchSession.shutdown_login_server`.

    Sets the :data:`TwitchSession.login_server` and
    :data:`TwitchSession.login_thread` variables.

    :returns: None (the created server is stored on ``login_server``)
    :raises: None
    """
    self.login_server = oauth.LoginServer(session=self)
    self.login_thread = threading.Thread(target=self.login_server.serve_forever)
    # Daemonize so a lingering server thread never blocks interpreter exit.
    self.login_thread.setDaemon(True)
    log.debug('Starting login server thread.')
    self.login_thread.start()
|
Start a server that will get a request from a user logging in.
This uses the Implicit Grant Flow of OAuth2. The user is asked
to login to twitch and grant PyTwitcher authorization.
Once the user agrees, he is redirected to an url.
This server will respond to that url and get the oauth token.
The server serves in another thread. To shut him down, call
:meth:`TwitchSession.shutdown_login_server`.
This sets the :data:`TwitchSession.login_server`,
:data:`TwitchSession.login_thread` variables.
:returns: The created server
:rtype: :class:`BaseHTTPServer.HTTPServer`
:raises: None
|
def find_element_by_name(self, name, update=False) -> Elements:
    '''Finds an element by name.

    Thin wrapper over :meth:`find_element` with the NAME locator strategy.

    Args:
        name: The name of the element to be found.
        update: If the interface has changed, this option should be True.

    Returns:
        The element if it was found.

    Raises:
        NoSuchElementException - If the element wasn't found.

    Usage:
        element = driver.find_element_by_name('foo')
    '''
    return self.find_element(by=By.NAME, value=name, update=update)
|
Finds an element by name.
Args:
name: The name of the element to be found.
update: If the interface has changed, this option should be True.
Returns:
The element if it was found.
Raises:
NoSuchElementException - If the element wasn't found.
Usage:
element = driver.find_element_by_name('foo')
|
def project_parensemble(self,par_file=None,nsing=None,
                        inplace=True,enforce_bounds='reset'):
    """ perform the null-space projection operations for null-space monte carlo

    Parameters
    ----------
    par_file: str
        an optional file of parameter values to use
    nsing: int
        number of singular values to in forming null subspace matrix
    inplace: bool
        overwrite the existing parameter ensemble with the
        projected values
    enforce_bounds: str
        how to enforce parameter bounds. can be None, 'reset', or 'drop'.
        Default is None

    Returns
    -------
    par_en : pyemu.ParameterEnsemble
        if inplace is False, otherwise None

    Note
    ----
    to use this method, the MonteCarlo instance must have been constructed
    with the ``jco`` argument.

    Example
    -------
    ``>>>import pyemu``

    ``>>>mc = pyemu.MonteCarlo(jco="pest.jcb")``

    ``>>>mc.draw(1000)``

    ``>>>mc.project_parensemble(par_file="final.par",nsing=100)``
    """
    # NOTE(review): enforce_bounds is accepted but never referenced in this
    # body -- confirm whether the projection should pass it through.
    assert self.jco is not None,"MonteCarlo.project_parensemble()" +\
                                "requires a jacobian attribute"
    if par_file is not None:
        assert os.path.exists(par_file),"monte_carlo.draw() error: par_file not found:" +\
            par_file
        # Re-base the ensemble's pst on the supplied parameter values.
        self.parensemble.pst.parrep(par_file)

    # project the ensemble
    self.log("projecting parameter ensemble")
    en = self.parensemble.project(self.get_null_proj(nsing),inplace=inplace,log=self.log)
    self.log("projecting parameter ensemble")
    return en
|
perform the null-space projection operations for null-space monte carlo
Parameters
----------
par_file: str
an optional file of parameter values to use
nsing: int
number of singular values to in forming null subspace matrix
inplace: bool
overwrite the existing parameter ensemble with the
projected values
enforce_bounds: str
how to enforce parameter bounds. can be None, 'reset', or 'drop'.
Default is None
Returns
-------
par_en : pyemu.ParameterEnsemble
if inplace is False, otherwise None
Note
----
to use this method, the MonteCarlo instance must have been constructed
with the ``jco`` argument.
Example
-------
``>>>import pyemu``
``>>>mc = pyemu.MonteCarlo(jco="pest.jcb")``
``>>>mc.draw(1000)``
``>>>mc.project_parensemble(par_file="final.par",nsing=100)``
|
def mrc_header_from_params(shape, dtype, kind, **kwargs):
    """Create a minimal MRC2014 header from the given parameters.
    Parameters
    ----------
    shape : 3-sequence of ints
        3D shape of the stored data. The values are used as
        ``'nx', 'ny', 'nz'`` header entries, in this order. Note that
        this is different from the actual data storage shape for
        non-trivial ``axis_order``.
    dtype : {'int8', 'int16', 'float32', 'uint16'}
        Data type specifier as understood by `numpy.dtype`. It is
        translated to a ``'mode'`` header entry. See `this page
        <http://www.ccpem.ac.uk/mrc_format/mrc2014.php>`_ for valid
        modes.
    kind : {'volume', 'projections'}
        Interpretation of the 3D data, either as single 3D volume or as
        a stack of 2D projections. The value is used for the ``'ispg'``
        header entry.
    extent : 3-sequence of floats, optional
        Size of the 3D volume in meters. The values are used for
        the ``'cella'`` header entry.
        Default: ``shape``, resulting in ``(1, 1, 1)`` unit cells
    axis_order : permutation of ``(0, 1, 2)`` optional
        Order of the data axes as they should appear in the stored file.
        The values are used for the ``'mapc', 'mapr', 'maps'`` header
        entries.
        Default: ``(0, 1, 2)``
    dmin, dmax : float, optional
        Minimum and maximum values of the data, used for header entries
        ``'dmin'`` and ``'dmax'``, resp.
        Default: 1.0, 0.0. These values indicate according to [Che+2015]
        that the values are considered as undetermined.
    dmean, rms : float, optional
        Mean and variance of the data, used for header entries ``'dmean'``
        and ``'rms'``, resp.
        Default: ``min(dmin, dmax) - 1, -1.0``. These values indicate
        according to [Che+2015] that the values are considered as
        undetermined.
    mrc_version : 2-tuple of int, optional
        Version identifier for the MRC file, used for the ``'nversion'``
        header entry.
        Default: ``(2014, 0)``
    text_labels : sequence of strings, optional
        Maximal 10 strings with 80 characters each, used for the
        ``'nlabl'`` and ``'label'`` header entries.
        Default: ``[]``
    Returns
    -------
    header : `OrderedDict`
        Header stored in an ordered dictionary, where each entry has the
        following form::
            'name': {'value': value_as_array,
                     'offset': offset_in_bytes
                     'description': description_string}
        All ``'value'``'s are `numpy.ndarray`'s with at least one
        dimension.
    References
    ----------
    [Che+2015] Cheng, A et al. *MRC2014: Extensions to the MRC format header
    for electron cryo-microscopy and tomography*. Journal of Structural
    Biology, 129 (2015), pp 146--150.
    """
    # Positional args
    shape = [int(n) for n in shape]
    kind, kind_in = str(kind).lower(), kind
    if kind not in ('volume', 'projections'):
        raise ValueError("`kind '{}' not understood".format(kind_in))
    # Keyword args
    extent = kwargs.pop('extent', shape)
    axis_order = kwargs.pop('axis_order', (0, 1, 2))
    if tuple(axis_order) not in permutations((0, 1, 2)):
        raise ValueError('`axis_order` must be a permutation of (0, 1, 2), '
                         'got {}'.format(axis_order))
    # dmin > dmax marks the statistics as undetermined per [Che+2015].
    dmin = kwargs.pop('dmin', 1.0)
    dmax = kwargs.pop('dmax', 0.0)
    dmean = kwargs.pop('dmean', min(dmin, dmax) - 1.0)
    rms = kwargs.pop('rms', -1.0)
    mrc_version = kwargs.pop('mrc_version', (2014, 0))
    if len(mrc_version) != 2:
        raise ValueError('`mrc_version` must be a sequence of length 2, got '
                         '{}'.format(mrc_version))
    # Text labels: fill each label up with whitespace to 80 characters.
    # Create the remaining labels as 80 * '\x00'
    text_labels_in = kwargs.pop('text_labels', [])
    nlabl = len(text_labels_in)
    if nlabl > 10:
        # Fixed typo in error message ("expexted" -> "expected").
        raise ValueError('expected maximum of 10 labels, got {} labels'
                         ''.format(nlabl))
    text_labels = [str(label).ljust(80) for label in text_labels_in]
    if any(len(label) > 80 for label in text_labels):
        raise ValueError('labels cannot have more than 80 characters each')
    # Convert to header-friendly form. Names are required to match
    # exactly the header field names, and all of them must exist,
    # so that `eval` below succeeds for all fields.
    nx, ny, nz = [np.array(n, dtype='int32').reshape([1]) for n in shape]
    mode = np.array(NPY_DTYPE_TO_MRC_MODE[np.dtype(dtype)],
                    dtype='int32').reshape([1])
    mx, my, mz = nx, ny, nz
    cella = np.array(extent).reshape([3]).astype('float32')
    # Header axis entries are 1-based.
    mapc, mapr, maps = [np.array(m, dtype='int32').reshape([1]) + 1
                        for m in axis_order]
    dmin, dmax, dmean, rms = [np.array(x, dtype='float32').reshape([1])
                              for x in (dmin, dmax, dmean, rms)]
    ispg = 1 if kind == 'volume' else 0
    ispg = np.array(ispg, dtype='int32', ndmin=1)
    nsymbt = np.array([0], dtype='int32')
    # `np.frombuffer` replaces the deprecated `np.fromstring`; `.copy()`
    # keeps the resulting arrays writable like before.
    exttype = np.frombuffer(b'    ', dtype='S1').copy()
    nversion = np.array(10 * mrc_version[0] + mrc_version[1],
                        dtype='int32').reshape([1])
    origin = np.zeros(3, dtype='int32')
    map = np.frombuffer(b'MAP ', dtype='S1').copy()
    # TODO: no idea how to properly choose the machine stamp
    # Bug fix: `np.fromiter` over a bytes object iterates *ints*, which
    # 'S1' then stringifies (b'D' became b'6'); frombuffer keeps the
    # literal bytes b'D', b'D', b' ', b' '.
    machst = np.frombuffer(b'DD  ', dtype='S1').copy()
    nlabl = np.array(nlabl, dtype='int32').reshape([1])
    label = np.zeros((10, 80), dtype='S1')  # ensure correct size
    for i, label_i in enumerate(text_labels):
        label[i] = np.frombuffer(label_i.encode('ascii'), dtype='S1')
    # Make the header
    # We use again the specification to set the values
    header_fields = header_fields_from_table(
        MRC_2014_SPEC_TABLE, MRC_SPEC_KEYS, MRC_DTYPE_TO_NPY_DTYPE)
    header = OrderedDict()
    for field in header_fields:
        header[field['name']] = {'offset': field['offset'],
                                 'value': eval(field['name'])}
    return header
|
Create a minimal MRC2014 header from the given parameters.
Parameters
----------
shape : 3-sequence of ints
3D shape of the stored data. The values are used as
``'nx', 'ny', 'nz'`` header entries, in this order. Note that
this is different from the actual data storage shape for
non-trivial ``axis_order``.
dtype : {'int8', 'int16', 'float32', 'uint16'}
Data type specifier as understood by `numpy.dtype`. It is
translated to a ``'mode'`` header entry. See `this page
<http://www.ccpem.ac.uk/mrc_format/mrc2014.php>`_ for valid
modes.
kind : {'volume', 'projections'}
Interpretation of the 3D data, either as single 3D volume or as
a stack of 2D projections. The value is used for the ``'ispg'``
header entry.
extent : 3-sequence of floats, optional
Size of the 3D volume in meters. The values are used for
the ``'cella'`` header entry.
Default: ``shape``, resulting in ``(1, 1, 1)`` unit cells
axis_order : permutation of ``(0, 1, 2)`` optional
Order of the data axes as they should appear in the stored file.
The values are used for the ``'mapc', 'mapr', 'maps'`` header
entries.
Default: ``(0, 1, 2)``
dmin, dmax : float, optional
Minimum and maximum values of the data, used for header entries
``'dmin'`` and ``'dmax'``, resp.
Default: 1.0, 0.0. These values indicate according to [Che+2015]
that the values are considered as undetermined.
dmean, rms : float, optional
Mean and variance of the data, used for header entries ``'dmean'``
and ``'rms'``, resp.
Default: ``min(dmin, dmax) - 1, -1.0``. These values indicate
according to [Che+2015] that the values are considered as
undetermined.
mrc_version : 2-tuple of int, optional
Version identifier for the MRC file, used for the ``'nversion'``
header entry.
Default: ``(2014, 0)``
text_labels : sequence of strings, optional
Maximal 10 strings with 80 characters each, used for the
``'nlabl'`` and ``'label'`` header entries.
Default: ``[]``
Returns
-------
header : `OrderedDict`
Header stored in an ordered dictionary, where each entry has the
following form::
'name': {'value': value_as_array,
'offset': offset_in_bytes
'description': description_string}
All ``'value'``'s are `numpy.ndarray`'s with at least one
dimension.
References
----------
[Che+2015] Cheng, A et al. *MRC2014: Extensions to the MRC format header
for electron cryo-microscopy and tomography*. Journal of Structural
Biology, 129 (2015), pp 146--150.
|
def text_iter(self, context):
    """
    Iterate over all elements in an iterparse context (here: <text>
    elements), yielding an ExportXMLDocumentGraph for each one.
    Processed elements are removed from the DOM / main memory for
    efficiency.
    When ``self.debug`` is ``True`` (set in ``__init__``), the raw
    <text> elements are yielded instead, so ``ExportXMLDocumentGraph``s
    can be constructed manually.
    """
    for _event, elem in context:
        if self.debug:
            yield elem
        else:
            yield ExportXMLDocumentGraph(elem, name=elem.attrib[add_ns('id')])
        # Free the processed element and any earlier siblings so that
        # memory stays bounded during a long parse.
        elem.clear()
        while elem.getprevious() is not None:
            del elem.getparent()[0]
    del context
|
Iterates over all the elements in an iterparse context
(here: <text> elements) and yields an ExportXMLDocumentGraph instance
for each of them. For efficiency, the elements are removed from the
DOM / main memory after processing them.
If ``self.debug`` is set to ``True`` (in the ``__init__`` method),
this method will yield <text> elements, which can be used to construct
``ExportXMLDocumentGraph``s manually.
|
def next_non_holiday_weekday(holidays, dt):
    """
    If a holiday falls on a Sunday, observe it on the next non-holiday weekday.
    Parameters
    ----------
    holidays : list[pd.tseries.holiday.Holiday]
        list of holidays
    dt : pd.Timestamp
        date of holiday.
    """
    # Only Sunday dates are shifted; any other weekday is observed as-is.
    if dt.weekday() != SUNDAY:
        return dt
    while is_holiday_or_weekend(holidays, dt):
        dt += timedelta(1)
    return dt
|
If a holiday falls on a Sunday, observe it on the next non-holiday weekday.
Parameters
----------
holidays : list[pd.tseries.holiday.Holiday]
list of holidays
dt : pd.Timestamp
date of holiday.
|
def _set_ospf_level12(self, v, load=False):
    """
    Setter method for ospf_level12, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/address_family/ipv6/af_ipv6_unicast/af_ipv6_attributes/af_common_attributes/redistribute/ospf/ospf_level12 (empty)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_ospf_level12 is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_ospf_level12() directly.

    :param v: value to assign; coerced to the YANG 'empty' type (boolean flag)
    :param load: unused here; conventionally True when loading from stored config
    """
    # NOTE: this method looks auto-generated (pyangbind style) — avoid
    # hand-editing the YANGDynClass call below; regenerate instead.
    if hasattr(v, "_utype"):
        # Unwrap a typed value back to its underlying native type first.
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="ospf-level12", rest_name="level-1-2", parent=self, choice=(u'ch-ospf-levels', u'ca-ospf-level12'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'IS-IS Level-1-2 routes', u'alt-name': u'level-1-2', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='empty', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a structured error payload describing the expected type.
        raise ValueError({
            'error-string': """ospf_level12 must be of a type compatible with empty""",
            'defined-type': "empty",
            'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="ospf-level12", rest_name="level-1-2", parent=self, choice=(u'ch-ospf-levels', u'ca-ospf-level12'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'IS-IS Level-1-2 routes', u'alt-name': u'level-1-2', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='empty', is_config=True)""",
        })
    self.__ospf_level12 = t
    if hasattr(self, '_set'):
        # Notify the parent tree that this node changed, if supported.
        self._set()
|
Setter method for ospf_level12, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/address_family/ipv6/af_ipv6_unicast/af_ipv6_attributes/af_common_attributes/redistribute/ospf/ospf_level12 (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_ospf_level12 is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ospf_level12() directly.
|
def grep(source, regex, stop_on_first=False):
    """Grep the constant pool of all classes in source."""
    pattern = re.compile(regex)
    loader = ClassLoader(source, max_cache=-1)
    for klass in loader.classes:
        # Lazily search the class's UTF8 constants; one hit is enough.
        hits = loader.search_constant_pool(
            path=klass, type_=UTF8, f=lambda c: pattern.match(c.value))
        if next(hits, None):
            print(klass)
            if stop_on_first:
                break
|
Grep the constant pool of all classes in source.
|
def create_pointing(self,event):
    """Plot the sky coverage of pointing at event.x,event.y on the canvas"""
    # NOTE(review): 'math' appears unused in this method — confirm before removing.
    import math
    # Convert the canvas click position to sky coordinates (RA/Dec).
    (ra,dec)=self.c2p((self.canvasx(event.x),
                       self.canvasy(event.y)))
    # Build the camera footprint for the currently selected camera.
    this_camera=camera(camera=self.camera.get())
    ccds=this_camera.getGeometry(ra,dec)
    items=[]
    # Draw one rectangle per CCD; corners come back as (x1, y1, x2, y2)
    # in sky coordinates and are mapped back to canvas coordinates.
    for ccd in ccds:
        (x1,y1)=self.p2c((ccd[0],ccd[1]))
        (x2,y2)=self.p2c((ccd[2],ccd[3]))
        item=self.create_rectangle(x1,y1,x2,y2)
        items.append(item)
    label={}
    # NOTE(review): 'w' looks like a module-level UI object holding the
    # pointing-label entry widget — confirm against the surrounding module.
    label['text']=w.plabel.get()
    label['id']=self.label(this_camera.ra,this_camera.dec,label['text'])
    # Record the pointing and make it the current selection.
    self.pointings.append({
        "label": label,
        "items": items,
        "camera": this_camera} )
    self.current_pointing(len(self.pointings)-1)
|
Plot the sky coverage of pointing at event.x,event.y on the canvas
|
def _can_process_pre_prepare(self, pre_prepare: PrePrepare, sender: str) -> Optional[int]:
    """
    Decide whether this replica is eligible to process a PRE-PREPARE.
    :param pre_prepare: a PRE-PREPARE msg to process
    :param sender: the name of the node that sent the PRE-PREPARE msg
    :return: a PP_CHECK_* rejection code if the message must not be
             processed, or None if it is acceptable
    """
    # TODO: Check whether it is rejecting PRE-PREPARE from previous view
    # PRE-PREPARE should not be sent from non primary
    if not self.isMsgFromPrimary(pre_prepare, sender):
        return PP_CHECK_NOT_FROM_PRIMARY
    # Already has a PRE-PREPARE with same 3 phase key
    if (pre_prepare.viewNo, pre_prepare.ppSeqNo) in self.prePrepares:
        return PP_CHECK_DUPLICATE
    # Clock-skew / staleness check on the message timestamp.
    if not self.is_pre_prepare_time_acceptable(pre_prepare, sender):
        return PP_CHECK_WRONG_TIME
    if compare_3PC_keys((pre_prepare.viewNo, pre_prepare.ppSeqNo),
                        self.__last_pp_3pc) > 0:
        return PP_CHECK_OLD  # ignore old pre-prepare
    # All referenced requests must already be finalised locally.
    if self.nonFinalisedReqs(pre_prepare.reqIdr):
        return PP_CHECK_REQUEST_NOT_FINALIZED
    # Sequence numbers must arrive in order with no gaps.
    if not self.__is_next_pre_prepare(pre_prepare.viewNo,
                                      pre_prepare.ppSeqNo):
        return PP_CHECK_NOT_NEXT
    # Pool state root (if present) must match our own pool ledger state.
    if f.POOL_STATE_ROOT_HASH.nm in pre_prepare and \
            pre_prepare.poolStateRootHash != self.stateRootHash(POOL_LEDGER_ID):
        return PP_CHECK_INCORRECT_POOL_STATE_ROOT
    # BLS multi-sig:
    status = self._bls_bft_replica.validate_pre_prepare(pre_prepare,
                                                        sender)
    if status is not None:
        return status
    return None
|
Decide whether this replica is eligible to process a PRE-PREPARE.
:param pre_prepare: a PRE-PREPARE msg to process
:param sender: the name of the node that sent the PRE-PREPARE msg
|
def cal_pth(self, v, temp):
    """
    calculate thermal pressure
    :param v: unit-cell volume in A^3
    :param temp: temperature in K
    :return: thermal pressure in GPa
    """
    # Unpack the thermal-model parameters and delegate to the
    # constant-q thermal pressure routine.
    therm_params = self._set_params(self.params_therm)
    return constq_pth(v, temp, *therm_params, self.n, self.z,
                      t_ref=self.t_ref, three_r=self.three_r)
|
calculate thermal pressure
:param v: unit-cell volume in A^3
:param temp: temperature in K
:return: thermal pressure in GPa
|
def _is_valid_token(self, auth_token):
    '''
    Check if this is a valid salt-api token or valid Salt token
    salt-api tokens are regular session tokens that tie back to a real Salt
    token. Salt tokens are tokens generated by Salt's eauth system.
    :return bool: True if valid, False if not valid.
    '''
    # Tokens are hex strings; int(..., 16) rejects None and any
    # non-hex value in one shot.
    try:
        int(auth_token, 16)
    except (TypeError, ValueError):
        return False
    # A salt-api token lives in our session table and maps to a real
    # Salt token; anything not in the table is treated as a Salt token.
    session_data, _ = cherrypy.session.cache.get(auth_token, ({}, None))
    salt_token = session_data.get('token', auth_token)
    # The eauth system does not currently support perms for the event
    # stream, so existence of the token is all we check here.
    return bool(salt_token and self.resolver.get_token(salt_token))
|
Check if this is a valid salt-api token or valid Salt token
salt-api tokens are regular session tokens that tie back to a real Salt
token. Salt tokens are tokens generated by Salt's eauth system.
:return bool: True if valid, False if not valid.
|
def save_state(internal_request, state):
    """
    Saves all necessary information needed by the UserIdHasher
    :type internal_request: satosa.internal_data.InternalRequest
    :param internal_request: The request
    :param state: The current state
    """
    # The hasher only needs the requested hash type later on.
    state[UserIdHasher.STATE_KEY] = {
        "hash_type": internal_request.user_id_hash_type,
    }
|
Saves all necessary information needed by the UserIdHasher
:type internal_request: satosa.internal_data.InternalRequest
:param internal_request: The request
:param state: The current state
|
def read_leader_status(self):
    """Read the high availability status and current leader instance of Vault.
    Supported methods:
        GET: /sys/leader. Produces: 200 application/json
    :return: The JSON response of the request.
    :rtype: dict
    """
    # Single GET against the sys/leader endpoint; the adapter handles
    # auth and transport details.
    response = self._adapter.get(url='/v1/sys/leader')
    return response.json()
|
Read the high availability status and current leader instance of Vault.
Supported methods:
GET: /sys/leader. Produces: 200 application/json
:return: The JSON response of the request.
:rtype: dict
|
def OnCopyResult(self, event):
    """Clipboard copy results event handler"""
    grid_selection = self.main_window.grid.selection
    result = self.main_window.actions.copy_result(grid_selection)
    # Bitmaps go to the clipboard as images; everything else as its
    # text representation.
    datatype = "bitmap" if type(result) is wx._gdi.Bitmap else "text"
    self.main_window.clipboard.set_clipboard(result, datatype=datatype)
    event.Skip()
|
Clipboard copy results event handler
|
def get_filters(self):
    """Return the collection of momentjs template filters."""
    # Map template-facing filter names onto the bound formatter methods.
    return {
        'moment_format': self.format,
        'moment_calendar': self.calendar,
        'moment_fromnow': self.from_now,
    }
|
Returns a collection of momentjs filters
|
def set_printoptions(**kwargs):
    """Set printing options.
    These options determine the way JPEG 2000 boxes are displayed.
    Parameters
    ----------
    short : bool, optional
        When True, only the box ID, offset, and length are displayed. Useful
        for displaying only the basic structure or skeleton of a JPEG 2000
        file.
    xml : bool, optional
        When False, printing of the XML contents of any XML boxes or UUID XMP
        boxes is suppressed.
    codestream : bool, optional
        When False, the codestream segments are not printed. Otherwise the
        segments are printed depending on how set_parseoptions has been used.
    See also
    --------
    get_printoptions
    Examples
    --------
    To put back the default options, you can use:
    >>> import glymur
    >>> glymur.set_printoptions(short=False, xml=True, codestream=True)
    """
    # Deprecated entry point: forward everything to set_option.
    warnings.warn('Use set_option instead of set_printoptions.',
                  DeprecationWarning)
    valid_keys = ('short', 'xml', 'codestream')
    for key, value in kwargs.items():
        if key not in valid_keys:
            raise KeyError('"{0}" not a valid keyword parameter.'.format(key))
        set_option('print.' + key, value)
|
Set printing options.
These options determine the way JPEG 2000 boxes are displayed.
Parameters
----------
short : bool, optional
When True, only the box ID, offset, and length are displayed. Useful
for displaying only the basic structure or skeleton of a JPEG 2000
file.
xml : bool, optional
When False, printing of the XML contents of any XML boxes or UUID XMP
boxes is suppressed.
codestream : bool, optional
When False, the codestream segments are not printed. Otherwise the
segments are printed depending on how set_parseoptions has been used.
See also
--------
get_printoptions
Examples
--------
To put back the default options, you can use:
>>> import glymur
>>> glymur.set_printoptions(short=False, xml=True, codestream=True)
|
def checkout(self, ref, branch=None):
    """Check out ``ref`` in this repository, optionally onto ``branch``."""
    # Delegate to the module-level git helper, bound to this repo's path.
    return git_checkout(self.repo_dir, ref, branch=branch)
|
Do a git checkout of `ref`.
|
def set(self, section, key, value, comment=None):
    """
    Set config value with data type transformation (to str)
    :param str section: Section to set config for
    :param str key: Key to set config for
    :param value: Value for key. It can be any primitive type.
    :param str comment: Comment for the key
    """
    self._read_sources()
    # Translate dotted aliases to their canonical (section, key) names.
    if (section, key) in self._dot_keys:
        section, key = self._dot_keys[(section, key)]
    elif section in self._dot_keys:
        section = self._dot_keys[section]
    # ConfigParser only stores strings; coerce anything else.
    text = value if isinstance(value, str) else str(value)
    self._parser.set(section, key, text)
    self._add_dot_key(section, key)
    if comment:
        self._set_comment(section, comment, key)
|
Set config value with data type transformation (to str)
:param str section: Section to set config for
:param str key: Key to set config for
:param value: Value for key. It can be any primitive type.
:param str comment: Comment for the key
|
def trial(log_dir=None,
          upload_dir=None,
          sync_period=None,
          trial_prefix="",
          param_map=None,
          init_logging=True):
    """
    Generates a trial within a with context.

    :param log_dir: directory to write trial logs to
    :param upload_dir: remote directory to sync logs to
    :param sync_period: how often to sync to ``upload_dir``
    :param trial_prefix: prefix for the trial's name
    :param param_map: hyperparameters for this trial
    :param init_logging: whether the Trial should initialize logging
    :raises ValueError: if a trial is already active in this context
    """
    global _trial  # pylint: disable=global-statement
    if _trial:
        # TODO: would be nice to stack crawl at creation time to report
        # where that initial trial was created, and that creation line
        # info is helpful to keep around anyway.
        raise ValueError("A trial already exists in the current context")
    local_trial = Trial(
        log_dir=log_dir,
        upload_dir=upload_dir,
        sync_period=sync_period,
        trial_prefix=trial_prefix,
        param_map=param_map,
        # Bug fix: forward the caller's flag instead of hardcoding True,
        # which silently ignored init_logging=False.
        init_logging=init_logging)
    try:
        _trial = local_trial
        _trial.start()
        yield local_trial
    finally:
        # Always clear the context and close the trial, even on error.
        _trial = None
        local_trial.close()
|
Generates a trial within a with context.
|
def _init_socket(self):
    '''Initialise the socket used for communicating with a q service.'''
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((self.host, self.port))
        sock.settimeout(self.timeout)
        self._connection = sock
        self._connection_file = sock.makefile('b')
    except:
        # Leave the instance in a clean "not connected" state, then
        # propagate the original error to the caller.
        self._connection = None
        self._connection_file = None
        raise
|
Initialises the socket used for communicating with a q service.
|
def create(cls, bucket, key, value):
    """Create a new tag for bucket."""
    # Use a nested transaction so the insert can be rolled back
    # independently of the caller's session.
    with db.session.begin_nested():
        tag = cls(
            key=key,
            value=value,
            bucket_id=as_bucket_id(bucket),
        )
        db.session.add(tag)
    return tag
|
Create a new tag for bucket.
|
def cnst_AT(self, Y):
    r"""Compute :math:`A^T \mathbf{y}`. In this case
    :math:`A^T \mathbf{y} = (I \;\; \Gamma_0^T \;\; \Gamma_1^T \;\;
    \ldots) \mathbf{y}`.
    """
    # Apply the two adjoint operators to their respective blocks of Y
    # and combine: identity block plus the summed Gamma^T blocks.
    part0 = self.cnst_A0T(self.block_sep0(Y))
    part1 = np.sum(self.cnst_A1T(self.block_sep1(Y)), axis=-1)
    return part0 + part1
|
r"""Compute :math:`A^T \mathbf{y}`. In this case
:math:`A^T \mathbf{y} = (I \;\; \Gamma_0^T \;\; \Gamma_1^T \;\;
\ldots) \mathbf{y}`.
|
def me(self):
    """Get the details of the person accessing the API.
    Raises:
        ApiError: If the Webex Teams cloud returns an error.
    """
    # Fetch /me and wrap the JSON payload in a Person object.
    response_json = self._session.get(API_ENDPOINT + '/me')
    return self._object_factory(OBJECT_TYPE, response_json)
|
Get the details of the person accessing the API.
Raises:
ApiError: If the Webex Teams cloud returns an error.
|
def read(self, entity=None, attrs=None, ignore=None, params=None):
    """Provide a default value for ``entity``.
    By default, ``nailgun.entity_mixins.EntityReadMixin.read`` provides a
    default value for ``entity`` like so::
        entity = type(self)()
    However, :class:`RepositorySet` requires that a ``product`` be
    provided, so this technique will not work. Do this instead::
        entity = type(self)(product=self.product.id)
    """
    # ``read()`` must not mutate ``self``, but super() alters whatever
    # entity it receives — so hand it a throwaway instance instead.
    if entity is None:
        entity = type(self)(
            self._server_config,
            product=self.product,  # pylint:disable=no-member
        )
    return super(RepositorySet, self).read(
        entity, attrs, set() if ignore is None else ignore, params)
|
Provide a default value for ``entity``.
By default, ``nailgun.entity_mixins.EntityReadMixin.read`` provides a
default value for ``entity`` like so::
entity = type(self)()
However, :class:`RepositorySet` requires that a ``product`` be
provided, so this technique will not work. Do this instead::
entity = type(self)(product=self.product.id)
|
def caption_mentions(self) -> List[str]:
    """List of all lowercased profiles that are mentioned in the Post's caption, without preceding @."""
    caption = self.caption
    if not caption:
        return []
    # Regex from jStassen, adjusted to use Python's Unicode-aware \w:
    # http://blog.jstassen.com/2016/03/code-regex-for-instagram-username-and-hashtags/
    pattern = re.compile(r"(?:@)(\w(?:(?:\w|(?:\.(?!\.))){0,28}(?:\w))?)")
    return pattern.findall(caption.lower())
|
List of all lowercased profiles that are mentioned in the Post's caption, without preceding @.
|
def height_to_pressure_std(height):
    r"""Convert height data to pressures using the U.S. standard atmosphere.
    The implementation inverts the formula outlined in [Hobbs1977]_ pg.60-61.
    Parameters
    ----------
    height : `pint.Quantity`
        Atmospheric height
    Returns
    -------
    `pint.Quantity`
        The corresponding pressure value(s)
    Notes
    -----
    .. math:: p = p_0 e^{\frac{g}{R \Gamma} \text{ln}(1-\frac{Z \Gamma}{T_0})}
    """
    # U.S. standard atmosphere reference values.
    t0 = 288. * units.kelvin          # surface temperature
    gamma = 6.5 * units('K/km')       # lapse rate
    p0 = 1013.25 * units.mbar         # surface pressure
    exponent = mpconsts.g / (mpconsts.Rd * gamma)
    return p0 * (1 - (gamma / t0) * height) ** exponent
|
r"""Convert height data to pressures using the U.S. standard atmosphere.
The implementation inverts the formula outlined in [Hobbs1977]_ pg.60-61.
Parameters
----------
height : `pint.Quantity`
Atmospheric height
Returns
-------
`pint.Quantity`
The corresponding pressure value(s)
Notes
-----
.. math:: p = p_0 e^{\frac{g}{R \Gamma} \text{ln}(1-\frac{Z \Gamma}{T_0})}
|
def operation(self, url, idp_entity_id, op, **opargs):
    """
    This is the method that should be used by someone that wants
    to authenticate using SAML ECP
    :param url: The page that access is sought for
    :param idp_entity_id: The entity ID of the IdP that should be
        used for authentication
    :param op: Which HTTP operation (GET/POST/PUT/DELETE)
    :param opargs: Arguments to the HTTP call
    :return: The page
    """
    sp_url = self._sp
    # ********************************************
    # Phase 1 - First conversation with the SP
    # ********************************************
    # headers needed to indicate to the SP that I'm ECP enabled
    opargs["headers"] = self.add_paos_headers(opargs["headers"])
    response = self.send(sp_url, op, **opargs)
    # Lazy %-style args so the message is only built when DEBUG is on;
    # also drops a leftover debug print() of the response body.
    logger.debug("[Op] SP response: %s", response)
    if response.status_code != 200:
        raise SAMLError(
            "Request to SP failed: %s" % response.text)
    # The response might be a AuthnRequest instance in a SOAP envelope
    # body. If so it's the start of the ECP conversation
    # Two SOAP header blocks; paos:Request and ecp:Request
    # may also contain a ecp:RelayState SOAP header block
    # If channel-binding was part of the PAOS header any number of
    # <cb:ChannelBindings> header blocks may also be present
    # if 'holder-of-key' option then one or more <ecp:SubjectConfirmation>
    # header blocks may also be present
    try:
        respdict = self.parse_soap_message(response.text)
        self.ecp_conversation(respdict, idp_entity_id)
        # should by now be authenticated so this should go smoothly
        response = self.send(url, op, **opargs)
    except (soap.XmlParseError, AssertionError, KeyError):
        raise
    if response.status_code >= 400:
        raise SAMLError("Error performing operation: %s" % (
            response.text,))
    return response
|
This is the method that should be used by someone that wants
to authenticate using SAML ECP
:param url: The page that access is sought for
:param idp_entity_id: The entity ID of the IdP that should be
used for authentication
:param op: Which HTTP operation (GET/POST/PUT/DELETE)
:param opargs: Arguments to the HTTP call
:return: The page
|
def main(self, config_filename):
    """
    The "main" of the wrapper generator. Returns 0 on success, 1 if one or more errors occurred.
    :param str config_filename: The name of the configuration file.
    :rtype: int
    """
    self._read_configuration_file(config_filename)
    # Guard clause: nothing to do when no wrapper class is configured.
    if not self._wrapper_class_name:
        self._io.log_verbose('Wrapper not enabled')
        return 0
    self._io.title('Wrapper')
    self.__generate_wrapper_class()
    return 0
|
The "main" of the wrapper generator. Returns 0 on success, 1 if one or more errors occurred.
:param str config_filename: The name of the configuration file.
:rtype: int
|
def set_image(self, image):
    """
    Update the current comparison (real) image
    """
    # Accept raw numpy arrays by wrapping them in the project's Image type.
    if isinstance(image, np.ndarray):
        image = util.Image(image)
    # A NullImage carries no real data: the model itself then stands in
    # for the data (model_as_data mode).
    if isinstance(image, util.NullImage):
        self.model_as_data = True
    else:
        self.model_as_data = False
    self.image = image
    self._data = self.image.get_padded_image(self.pad)
    # set up various slicers and Tiles associated with the image and pad
    self.oshape = util.Tile(self._data.shape)
    self.ishape = self.oshape.pad(-self.pad)
    self.inner = self.ishape.slicer
    # Propagate the new geometry to every component.
    for c in self.comps:
        c.set_shape(self.oshape, self.ishape)
    # Scratch buffers matching the padded data shape; populated by
    # calculate_model() below.
    self._model = np.zeros(self._data.shape, dtype=np.float64)
    self._residuals = np.zeros(self._data.shape, dtype=np.float64)
    self.calculate_model()
|
Update the current comparison (real) image
|
def rotatePoints(points, rotationDegrees, pivotx=0, pivoty=0):
    """
    Rotates each x and y tuple in `points` by `rotationDegrees`. The points
    are rotated around the origin by default, but can be rotated around
    another pivot point by specifying `pivotx` and `pivoty`.
    The points are rotated counterclockwise.
    Returns a generator that produces an x and y tuple for each point in
    `points`.
    >>> list(rotatePoints([(10, 0), (7, 7)], 45))
    [(7, 7), (0, 9)]
    """
    theta = math.radians(rotationDegrees % 360)
    # Hoist the trig out of the loop; the angle is constant.
    cos_t = math.cos(theta)
    sin_t = math.sin(theta)
    for x, y in points:
        _checkForIntOrFloat(x)
        _checkForIntOrFloat(y)
        # Translate to the pivot, rotate, translate back.
        dx = x - pivotx
        dy = y - pivoty
        rx = dx * cos_t - dy * sin_t
        ry = dx * sin_t + dy * cos_t
        yield int(rx + pivotx), int(ry + pivoty)
|
Rotates each x and y tuple in `points` by `rotationDegrees`. The points
are rotated around the origin by default, but can be rotated around another
pivot point by specifying `pivotx` and `pivoty`.
The points are rotated counterclockwise.
Returns a generator that produces an x and y tuple for each point in `points`.
>>> list(rotatePoints([(10, 0), (7, 7)], 45))
[(7, 7), (0, 9)]
|
def ping(self, message=_NOTSET, *, encoding=_NOTSET):
    """Ping the server.
    Accept optional echo message.
    """
    # Only forward the message argument when the caller supplied one.
    args = () if message is _NOTSET else (message,)
    return self.execute('PING', *args, encoding=encoding)
|
Ping the server.
Accept optional echo message.
|
def from_name(api_url, name, dry_run=False):
    """
    doesn't require a token config param
    as all of our data is currently public
    """
    # Join base URL and data-set name, trimming any trailing slash.
    url = '/'.join([api_url, name]).rstrip('/')
    return DataSet(url, token=None, dry_run=dry_run)
|
doesn't require a token config param
as all of our data is currently public
|
def trigger_event(self, source, event, args):
    """
    Trigger an event on the Entity.
    :param source: The source of the event
    :param event: The event being triggered
    :param args: A list of arguments to pass to the callback
    :return: result of ``source.game.trigger`` on the collected actions
    """
    actions = []
    for action in event.actions:
        if not callable(action):
            actions.append(action)
            continue
        ac = action(self, *args)
        if not ac:
            # Handle falsy returns
            continue
        if hasattr(ac, "__iter__"):
            # Bug fix: extend with the already-computed result instead
            # of invoking the callback a second time, which duplicated
            # its side effects.
            actions += ac
        else:
            actions.append(ac)
    ret = source.game.trigger(self, actions, args)
    if event.once:
        # One-shot events unregister themselves after firing.
        self._events.remove(event)
    return ret
|
Trigger an event on the Entity
* \a source: The source of the event
* \a event: The event being triggered
* \a args: A list of arguments to pass to the callback
|
def set_legend(self):
    """Create a legend for this product
    """
    # Build the base legend, then title it when one was produced.
    legend = super(Coherence, self).set_legend()
    if legend is None:
        return None
    legend.set_title('Coherence with:')
    return legend
|
Create a legend for this product
|
def det_residual(model,
                 guess,
                 start,
                 final,
                 shocks,
                 diff=True,
                 jactype='sparse'):
    '''
    Computes the residuals, and optionally the derivatives, of the
    stacked-time system.

    :param model: an fga model
    :param guess: the guess for the simulated values. An `(n_s.n_x) x N` array,
        where n_s is the number of states,
        n_x the number of controls, and `N` the length of the simulation.
    :param start: initial boundary condition (initial value of the states)
    :param final: final boundary condition (last value of the controls)
    :param shocks: values for the exogenous shocks
    :param diff: if True, the derivatives are computed
    :param jactype: 'sparse' returns the jacobian as a scipy CSC matrix,
        any other value leaves it dense
    :return: if `diff` is False, an `(n_s.n_x) x N` array with the residuals
        of the system; otherwise a list with two elements:

        - a flattened array with the residuals of the system
        - the jacobian of the stacked system, reshaped to
          `(N.(n_s+n_x)) x (N.(n_s+n_x))`
    '''
    # TODO: compute a sparse derivative and ensure the solvers can deal with it
    n_s = len(model.symbols['states'])      # number of state variables
    n_x = len(model.symbols['controls'])    # number of control variables
    # n_e = len(model.symbols['shocks'])
    N = guess.shape[0]                      # number of simulated periods
    p = model.calibration['parameters']
    f = model.functions['arbitrage']        # arbitrage equations
    g = model.functions['transition']       # state-transition function
    # Pair each period t with its successor t+1 so the transition and
    # arbitrage functions can be evaluated over the whole path at once.
    vec = guess[:-1, :]
    vec_f = guess[1:, :]
    s = vec[:, :n_s]        # states at t
    x = vec[:, n_s:]        # controls at t
    S = vec_f[:, :n_s]      # states at t+1
    X = vec_f[:, n_s:]      # controls at t+1
    m = shocks[:-1, :]      # shocks at t
    M = shocks[1:, :]       # shocks at t+1
    if diff:
        SS, SS_m, SS_s, SS_x, SS_M = g(m, s, x, M, p, diff=True)
        R, R_m, R_s, R_x, R_M, R_S, R_X = f(m, s, x, M, S, X, p, diff=True)
    else:
        SS = g(m, s, x, M, p)
        R = f(m, s, x, M, S, X, p)
    # Transition residuals: predicted next-period states minus guessed ones.
    res_s = SS - S
    # Arbitrage residuals.
    res_x = R
    res = np.zeros((N, n_s + n_x))
    res[1:, :n_s] = res_s
    res[:-1, n_s:] = res_x
    # Boundary conditions: pin the initial states to `start` ...
    res[0, :n_s] = -(guess[0, :n_s] - start)
    # ... and require the last two control vectors to coincide.
    res[-1, n_s:] = -(guess[-1, n_s:] - guess[-2, n_s:])
    if not diff:
        return res
    else:
        sparse_jac = False
        if not sparse_jac:
            # we compute the derivative matrix
            res_s_s = SS_s
            res_s_x = SS_x
            # next block is probably very inefficient
            jac = np.zeros((N, n_s + n_x, N, n_s + n_x))
            for i in range(N - 1):
                # Arbitrage rows depend on (s, x) at t and (S, X) at t+1;
                # transition rows depend on (s, x) at t and S at t+1.
                jac[i, n_s:, i, :n_s] = R_s[i, :, :]
                jac[i, n_s:, i, n_s:] = R_x[i, :, :]
                jac[i, n_s:, i + 1, :n_s] = R_S[i, :, :]
                jac[i, n_s:, i + 1, n_s:] = R_X[i, :, :]
                jac[i + 1, :n_s, i, :n_s] = SS_s[i, :, :]
                jac[i + 1, :n_s, i, n_s:] = SS_x[i, :, :]
                jac[i + 1, :n_s, i + 1, :n_s] = -np.eye(n_s)
                # jac[i,n_s:,i,:n_s] = R_s[i,:,:]
                # jac[i,n_s:,i,n_s:] = R_x[i,:,:]
                # jac[i+1,n_s:,i,:n_s] = R_S[i,:,:]
                # jac[i+1,n_s:,i,n_s:] = R_X[i,:,:]
                # jac[i,:n_s,i+1,:n_s] = SS_s[i,:,:]
                # jac[i,:n_s,i+1,n_s:] = SS_x[i,:,:]
                # jac[i+1,:n_s,i+1,:n_s] = -np.eye(n_s)
            # Derivatives of the two boundary-condition rows.
            jac[0, :n_s, 0, :n_s] = -np.eye(n_s)
            jac[-1, n_s:, -1, n_s:] = -np.eye(n_x)
            jac[-1, n_s:, -2, n_s:] = +np.eye(n_x)
            # Flatten the 4-d jacobian into a 2-d matrix over the stacked
            # unknowns, matching the raveled residual vector.
            nn = jac.shape[0] * jac.shape[1]
            res = res.ravel()
            jac = jac.reshape((nn, nn))
        if jactype == 'sparse':
            from scipy.sparse import csc_matrix, csr_matrix
            jac = csc_matrix(jac)
            # scipy bug ? I don't get the same with csr
        return [res, jac]
|
Computes the residuals, the derivatives of the stacked-time system.
:param model: an fga model
:param guess: the guess for the simulated values. An `(n_s.n_x) x N` array,
where n_s is the number of states,
n_x the number of controls, and `N` the length of the simulation.
:param start: initial boundary condition (initial value of the states)
:param final: final boundary condition (last value of the controls)
:param shocks: values for the exogenous shocks
:param diff: if True, the derivatives are computed
:return: a list with two elements:
- an `(n_s.n_x) x N` array with the residuals of the system
- a `(n_s.n_x) x N x (n_s.n_x) x N` array representing the jacobian of
the system
|
def main(argv=None):
    """Make a confidence report and save it to disk."""
    try:
        # Expect exactly [script_name, filepath].
        _script, filepath = argv
    except ValueError:
        raise ValueError(argv)
    print(filepath)
    make_confidence_report_bundled(
        filepath=filepath,
        test_start=FLAGS.test_start,
        test_end=FLAGS.test_end,
        which_set=FLAGS.which_set,
        recipe=FLAGS.recipe,
        report_path=FLAGS.report_path,
        batch_size=FLAGS.batch_size,
    )
|
Make a confidence report and save it to disk.
|
def compile_protofile(proto_file_path):
    """Compile proto file to descriptor set.

    Args:
        proto_file_path: Path to proto file to compile.

    Returns:
        Path to file containing compiled descriptor set.

    Raises:
        SystemExit if the compilation fails.
    """
    # The original used tempfile.mkstemp()[1], which leaked the open OS-level
    # file descriptor returned as the first tuple element.  NamedTemporaryFile
    # closes its handle on exiting the `with` block; delete=False keeps the
    # file on disk so protoc can write to it and the caller can read it.
    with tempfile.NamedTemporaryFile(delete=False) as tmp:
        out_file = tmp.name
    try:
        subprocess.check_output(['protoc', '--include_source_info',
                                 '--descriptor_set_out', out_file,
                                 proto_file_path])
    except subprocess.CalledProcessError as e:
        sys.exit('protoc returned status {}'.format(e.returncode))
    return out_file
|
Compile proto file to descriptor set.
Args:
proto_file_path: Path to proto file to compile.
Returns:
Path to file containing compiled descriptor set.
Raises:
SystemExit if the compilation fails.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.